iOS利用opencv庫拼接圖片的另一種方法

文章主要參考Opencv Sift和Surf特征實現圖像無縫拼接生成全景圖像,我做了一小點點的修改,同時在iOS上能正常使用。

問題說明

Xcode9中,如果直接將圖片等文件拖拽進項目中,可能會識別不到。這時候,我們通過Add Files to xxx的方式來進行添加。

項目目錄文件結構

屏幕快照 2017-10-23 下午5.29.10.png

主要代碼

一、合成代碼
#include "opencv2.framework/Headers/opencv.hpp"
#include "opencv2.framework/Headers/legacy/legacy.hpp"
#include "opencv2.framework/Headers/nonfree/nonfree.hpp"
#include <algorithm>
#include <iostream>
#include <vector>

using namespace std;
using namespace cv;

// Maps a point from the source image to its location in the target image
// by applying a 3x3 perspective (homography) transform.
Point2f getTransformPoint(const Point2f originalPoint,const Mat &transformMaxtri){
    // Lift the 2-D point into homogeneous coordinates (x, y, 1).
    Mat homogeneousPoint = (Mat_<double>(3,1) << originalPoint.x, originalPoint.y, 1.0);
    Mat mappedPoint = transformMaxtri * homogeneousPoint;
    // Divide by w to project back from homogeneous to Cartesian coordinates.
    double w = mappedPoint.at<double>(2,0);
    float mappedX = mappedPoint.at<double>(0,0) / w;
    float mappedY = mappedPoint.at<double>(1,0) / w;
    return Point2f(mappedX, mappedY);
}

// Stitches test01.jpg and test02.jpg into a single panorama:
// SIFT keypoints are matched between the two images, a homography warps
// the first image onto the second's plane, and the overlapping seam is
// blended with a distance-weighted average to avoid a visible join.
// Returns an empty UIImage if either input is missing or unreadable.
- (UIImage *)composeImage{

    NSString *path01 = [[NSBundle mainBundle] pathForResource:@"test01" ofType:@"jpg"];
    NSString *path02 = [[NSBundle mainBundle] pathForResource:@"test02" ofType:@"jpg"];
    // FIX: was `&&` — with only one nil path the original fell through and
    // handed imread a NULL C string via [nil UTF8String]. Both images are
    // required, so bail out as soon as either is missing.
    if (path01 == nil || path02 == nil) {
        return [UIImage new];
    }

    Mat img01 = imread([path01 UTF8String]);
    Mat img02 = imread([path02 UTF8String]);

    // FIX: was `&&` — stitching needs BOTH images to decode successfully.
    if (!img01.data || !img02.data) {
        return [UIImage new];
    }

    // Convert to grayscale for feature detection.
    // FIX: imread loads pixels in BGR order, so the conversion code must be
    // CV_BGR2GRAY (CV_RGB2GRAY applies the luma weights to swapped channels).
    Mat img_h_01 ,img_h_02;
    cvtColor(img01, img_h_01, CV_BGR2GRAY);
    cvtColor(img02, img_h_02, CV_BGR2GRAY);

    // Detect SIFT keypoints (detector threshold 800).
    SiftFeatureDetector siftDetector(800);
    vector<KeyPoint> keyPoint1,KeyPoint2;
    siftDetector.detect(img_h_01, keyPoint1);
    siftDetector.detect(img_h_02, KeyPoint2);

    // Compute descriptors so the keypoints can be matched below.
    SiftDescriptorExtractor siftDescriptor;
    Mat img_description_01,img_description_02;
    siftDescriptor.compute(img_h_01, keyPoint1, img_description_01);
    siftDescriptor.compute(img_h_02, KeyPoint2, img_description_02);

    // Match descriptors and sort by distance so the best pairs come first.
    FlannBasedMatcher matcher;
    vector<DMatch> matchePoints;
    matcher.match(img_description_01,img_description_02,matchePoints,Mat());
    sort(matchePoints.begin(), matchePoints.end());

    // findHomography needs at least 4 point correspondences.
    if (matchePoints.size() < 4) {
        return [UIImage new];
    }

    // Keep the strongest matches — at most 10, fewer if that's all we have.
    // FIX: the original indexed matchePoints[0..9] unconditionally and would
    // read out of bounds whenever fewer than 10 matches were found.
    size_t topN = std::min<size_t>(10, matchePoints.size());
    vector<Point2f> imagePoints1,imagePoints2;
    for (size_t i = 0; i < topN; i++) {
        imagePoints1.push_back(keyPoint1[matchePoints[i].queryIdx].pt);
        imagePoints2.push_back(KeyPoint2[matchePoints[i].trainIdx].pt);
    }

    // 3x3 projective mapping from img01 onto img02's plane; adjustMat then
    // translates the result right by img01.cols so the warped image lands
    // inside the wide panorama canvas instead of at negative coordinates.
    Mat homo = findHomography(imagePoints1, imagePoints2, CV_RANSAC);
    Mat adjustMat = (Mat_<double>(3,3)<<1.0,0,img01.cols,0,1.0,0,0,0,1.0);
    Mat adjustHomo = adjustMat * homo;

    // Locate the strongest correspondence in the warped image and in img02;
    // these anchor where the two images are joined.
    Point2f originalLinkPoint,targetLintPoint,basedImagePoint;
    originalLinkPoint = keyPoint1[matchePoints[0].queryIdx].pt;
    targetLintPoint = getTransformPoint(originalLinkPoint, adjustHomo);
    basedImagePoint = KeyPoint2[matchePoints[0].trainIdx].pt;

    // Warp img01 into a canvas wide enough to hold both images.
    Mat imageTransform1;
    warpPerspective(img01, imageTransform1, adjustHomo, cv::Size(img02.cols+img01.cols+110,img02.rows));

    // Blend the overlap region to the left of the anchor so the seam fades
    // smoothly instead of changing abruptly.
    Mat image01OverLap,image02OverLap;
    image01OverLap = imageTransform1(cv::Rect(cv::Point(targetLintPoint.x - basedImagePoint.x,0),cv::Point(targetLintPoint.x,img02.rows)));
    image02OverLap = img02(cv::Rect(0,0,image01OverLap.cols,image01OverLap.rows));

    // Snapshot the warped pixels, then write a distance-weighted blend back:
    // weight ramps 0 -> 1 left to right, shifting from img01's pixels to img02's.
    Mat image01ROICOPY = image01OverLap.clone();
    for (int i = 0; i < image01OverLap.rows; i++) {
        for (int j = 0; j < image01OverLap.cols; j++) {
            double weight = (double)j/image01OverLap.cols;
            for (int c = 0; c < 3; c++) {
                image01OverLap.at<Vec3b>(i,j)[c] =
                    (1 - weight)*image01ROICOPY.at<Vec3b>(i,j)[c]
                    + weight*image02OverLap.at<Vec3b>(i,j)[c];
            }
        }
    }

    // Copy the non-overlapping remainder of img02 to the right of the seam.
    Mat ROIMat = img02(cv::Rect(cv::Point(image01OverLap.cols,0),cv::Point(img02.cols,img02.rows)));
    ROIMat.copyTo(Mat(imageTransform1,cv::Rect(targetLintPoint.x,0,ROIMat.cols,img02.rows)));
    return [self imageWithCVMat:imageTransform1];
}

二、CVMat 轉 UIImage
// Converts a cv::Mat into a UIImage by wrapping the pixel buffer in a CGImage.
// Single-channel 8-bit mats are rendered as grayscale; anything else uses an
// RGB color space with no alpha.
// NOTE(review): mats produced by imread are in BGR channel order, but this
// creates the CGImage with a DeviceRGB color space, so red and blue will
// appear swapped for such inputs — confirm callers convert to RGB first if
// exact colors matter.
- (UIImage *)imageWithCVMat:(const cv::Mat&)cvMat
{
    // Copy the mat's pixels into an NSData; the data provider below keeps
    // the buffer alive for the lifetime of the CGImage.
    NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize() * cvMat.total()];
    CGColorSpaceRef colorSpace;
    // elemSize() == 1 means one 8-bit channel per pixel, i.e. grayscale.
    if (cvMat.elemSize() == 1) {
        colorSpace = CGColorSpaceCreateDeviceGray();
    } else {
        colorSpace = CGColorSpaceCreateDeviceRGB();
    }
    CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
    // Creating CGImage from cv::Mat
    CGImageRef imageRef = CGImageCreate(cvMat.cols,                                 //width
                                        cvMat.rows,                                 //height
                                        8,                                          //bits per component
                                        8 * cvMat.elemSize(),                       //bits per pixel
                                        cvMat.step[0],                              //bytesPerRow
                                        colorSpace,                                 //colorspace
                                        kCGImageAlphaNone|kCGBitmapByteOrderDefault,// bitmap info
                                        provider,                                   //CGDataProviderRef
                                        NULL,                                       //decode
                                        false,                                      //should interpolate
                                        kCGRenderingIntentDefault                   //intent
                                        );

    UIImage *cvImage = [[UIImage alloc]initWithCGImage:imageRef];
    // Balance the CF create calls above; UIImage retains what it needs.
    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpace);
    return cvImage;
}
三、顯示合成的圖片
// Builds the stitched panorama, displays it full-screen in an aspect-fit
// image view, and logs how long composition took in milliseconds.
- (void)viewDidLoad {
    [super viewDidLoad];

    double startMillis = [[NSDate date] timeIntervalSince1970] * 1000;
    NSLog(@"start time= %f ", (startMillis));

    UIImageView *panoramaView = [[UIImageView alloc] initWithFrame:self.view.bounds];
    panoramaView.contentMode = UIViewContentModeScaleAspectFit;
    // Note: composition runs synchronously on the main thread here.
    panoramaView.image = [self composeImage];
    [self.view addSubview:panoramaView];

    double endMillis = [[NSDate date] timeIntervalSince1970] * 1000;
    NSLog(@"end time= %f ", (endMillis));
    NSLog(@"use time =%f millisecond ", (endMillis - startMillis));
}

不足的地方,還請各位多多指教,謝謝了。

最后編輯于
?著作權歸作者所有,轉載或內容合作請聯系作者
平臺聲明:文章內容(如有圖片或視頻亦包括在內)由作者上傳并發布,文章內容僅代表作者本人觀點,簡書系信息發布平臺,僅提供信息存儲服務。

推薦閱讀更多精彩內容