【iOS】CMSampleBuffer轉UIImage,UIImage轉CVPixelBuffer

CMSampleBuffer轉UIImage

    // CMSampleBuffer -> UIImage
    // Wraps the pixel data of a CMSampleBuffer's image buffer in a bitmap
    // context and snapshots it into a UIImage.
    // - Parameter sampleBuffer: must carry a 32-bit little-endian, alpha-first
    //   (BGRA-in-memory) CVPixelBuffer. NOTE(review): other pixel formats will
    //   not match the bitmap-context configuration below — confirm the capture
    //   session's output settings.
    // - Returns: a UIImage backed by a copy of the pixels (safe to use after
    //   the buffer is unlocked).
    func sampleBufferToImage(sampleBuffer: CMSampleBuffer) -> UIImage {
        // Grab the CVPixelBuffer that backs this sample (crashes if the
        // sample carries no image buffer — a programmer error here).
        let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)! as CVPixelBuffer

        // Lock for read-only access. `defer` guarantees the matching unlock
        // runs on every exit path (the original only unlocked after a
        // successful makeImage()).
        CVPixelBufferLockBaseAddress(imageBuffer, .readOnly)
        defer { CVPixelBufferUnlockBaseAddress(imageBuffer, .readOnly) }

        // Base address of the pixel data. (The original comment on this line
        // incorrectly said "bytes per row".)
        let baseAddress = CVPixelBufferGetBaseAddress(imageBuffer)
        // Row stride in bytes — may include padding beyond width * 4.
        let bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer)
        // Pixel dimensions of the buffer.
        let width = CVPixelBufferGetWidth(imageBuffer)
        let height = CVPixelBufferGetHeight(imageBuffer)

        // Device-dependent RGB color space.
        let colorSpace = CGColorSpaceCreateDeviceRGB()

        // Bitmap context over the buffer's memory: 32-bit little-endian,
        // alpha first (BGRA byte order) — the common camera output layout.
        let context = CGContext(data: baseAddress,
                                width: width,
                                height: height,
                                bitsPerComponent: 8,
                                bytesPerRow: bytesPerRow,
                                space: colorSpace,
                                bitmapInfo: CGBitmapInfo.byteOrder32Little.rawValue
                                    | CGImageAlphaInfo.premultipliedFirst.rawValue)
        // Snapshot the context into a CGImage; this copies the pixels, so the
        // resulting UIImage remains valid once the buffer is unlocked.
        let quartzImage: CGImage = context!.makeImage()!
        return UIImage(cgImage: quartzImage)
    }

UIImage轉CVPixelBuffer

    // UIImage -> CVPixelBuffer
    // Renders a UIImage into a newly allocated kCVPixelFormatType_32ARGB
    // CVPixelBuffer.
    // - Parameter image: source image. Buffer dimensions come from
    //   `image.size` in points — the scale factor is ignored, so a @2x image
    //   is rendered at half its pixel resolution. NOTE(review): confirm this
    //   is acceptable for the consumers of the buffer.
    // - Returns: the filled pixel buffer, or nil if buffer allocation or
    //   context creation fails.
    func imageToCVPixelBuffer(image:UIImage) -> CVPixelBuffer? {
        let width = Int(image.size.width)
        let height = Int(image.size.height)
        // Request a buffer compatible with CGImage / CGBitmapContext use.
        let attrs = [kCVPixelBufferCGImageCompatibilityKey: kCFBooleanTrue,
                     kCVPixelBufferCGBitmapContextCompatibilityKey: kCFBooleanTrue] as CFDictionary
        var pixelBuffer : CVPixelBuffer?
        let status = CVPixelBufferCreate(kCFAllocatorDefault, width, height,
                                         kCVPixelFormatType_32ARGB, attrs, &pixelBuffer)
        // Bind the buffer once so the force-unwraps below are unnecessary.
        guard status == kCVReturnSuccess, let buffer = pixelBuffer else {
            return nil
        }

        CVPixelBufferLockBaseAddress(buffer, CVPixelBufferLockFlags(rawValue: 0))
        // `defer` guarantees the unlock runs even on the early-return path
        // below (the original would have crashed on `context!` while the
        // buffer was still locked).
        defer { CVPixelBufferUnlockBaseAddress(buffer, CVPixelBufferLockFlags(rawValue: 0)) }

        let pixelData = CVPixelBufferGetBaseAddress(buffer)
        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
        // Alpha-first with default (big-endian) byte order, i.e. ARGB in
        // memory — this matches the declared kCVPixelFormatType_32ARGB.
        // The original OR'd in byteOrder32Little, which writes BGRA bytes
        // into a buffer labelled ARGB and swaps the color channels for any
        // consumer that trusts the pixel-format type.
        guard let context = CGContext(data: pixelData,
                                      width: width,
                                      height: height,
                                      bitsPerComponent: 8,
                                      bytesPerRow: CVPixelBufferGetBytesPerRow(buffer),
                                      space: rgbColorSpace,
                                      bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue) else {
            return nil
        }

        // Flip the Core Graphics coordinate system so UIKit drawing
        // (origin top-left) comes out upright in the buffer.
        context.translateBy(x: 0, y: image.size.height)
        context.scaleBy(x: 1.0, y: -1.0)

        // Route UIKit drawing into our bitmap context.
        UIGraphicsPushContext(context)
        image.draw(in: CGRect(x: 0, y: 0, width: image.size.width, height: image.size.height))
        UIGraphicsPopContext()
        return buffer
    }
©著作權歸作者所有,轉載或內容合作請聯系作者
平臺聲明:文章內容(如有圖片或視頻亦包括在內)由作者上傳并發布,文章內容僅代表作者本人觀點,簡書系信息發布平臺,僅提供信息存儲服務。

推薦閱讀更多精彩內容

  • 公司項目原因,接觸了一下視頻流H264的編解碼知識,之前項目使用的是FFMpeg多媒體庫,利用CPU做視頻的編碼和...
    Ethan_Struggle閱讀 34,725評論 29 124
  • 最近在做一些人臉美妝的工作,用到了CVPixelBufferRef的相關知識,踩到一些坑,做一個小小的總結。 CV...
    little_ma閱讀 7,671評論 3 13
  • 公司的項目里有拉取H.264視頻流進行解碼播放的功能,本來是采用FFMpeg多媒體庫,用CPU做視頻的編碼和解碼,...
    江小凡閱讀 13,244評論 11 36
  • 上一篇我們侃侃而談了下Android下的App音視頻開發雜談,我們從入手到深入再到實際項目的遇到的問題以及解決方案...
    耗子_wo閱讀 1,847評論 0 3
  • iOS系統中H264硬解及顯示說明蘋果在iOS 8.0系統之前,沒有開放系統的硬件編碼解碼功能,不過Mac OS系...
    rogerwu1228閱讀 1,896評論 0 4