AVFoundation 教你如何處理混音,拼接,消音,快進倒放等功能! 如果你對于AVfoundation如何處理視頻數據,請先參考這篇文章
==> GitHub直通車(終于抽空把demo整理了一份發到了GitHub.demo如有出錯可以聯系我!)
通過上篇文章我們得到處理好的音視頻軌,我們如何輸出,把編輯好的數據寫到文件中去呢?一般使用的是 AVAssetWriter 和 AVAssetReader 結合.我們處理音視軌之后會獲得三個實例,分別為 AVMutableComposition
AVMutableVideoComposition
AVMutableAudioMix
拿到了這三個實例我們就要去創建AVAssetReader實例對象.
/// Creates an AVAssetReader configured to read the edited composition.
///
/// @param composition      The composed (edited) asset to read from.
/// @param videoComposition Per-frame video compositing instructions applied to
///                         the video output.
/// @param audioMix         Per-track audio volume instructions; may be nil.
/// @return A reader with a video-composition output added, plus an audio-mix
///         output when the composition has audio tracks; nil on failure.
- (AVAssetReader *)createAssetReader:(AVComposition *)composition
                    videoComposition:(AVVideoComposition *)videoComposition
                            audioMix:(AVAudioMix *)audioMix {
    NSError *error = nil;
    AVAssetReader *assetReader = [AVAssetReader assetReaderWithAsset:composition error:&error];
    if (!assetReader) {
        // Surface the failure instead of silently returning a nil reader.
        NSLog(@"Failed to create AVAssetReader: %@", error);
        return nil;
    }
    // Read the entire composition. CMTimeMake(duration.value, duration.timescale)
    // is just duration itself, so use it directly.
    assetReader.timeRange = CMTimeRangeMake(kCMTimeZero, composition.duration);

    // Decode video frames as biplanar 4:2:0 full-range YCbCr.
    NSDictionary *outputSettings = @{(id)kCVPixelBufferPixelFormatTypeKey: @(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)};
    AVAssetReaderVideoCompositionOutput *readerVideoOutput =
        [AVAssetReaderVideoCompositionOutput assetReaderVideoCompositionOutputWithVideoTracks:[composition tracksWithMediaType:AVMediaTypeVideo]
                                                                                videoSettings:outputSettings];
#if ! TARGET_IPHONE_SIMULATOR
    // BUG FIX: the original sent -isKindOfClass: to the AVVideoComposition
    // class object, which is never a kind of AVMutableVideoComposition, so the
    // render scale was never applied. Test the instance instead.
    if ([videoComposition isKindOfClass:[AVMutableVideoComposition class]]) {
        [(AVMutableVideoComposition *)videoComposition setRenderScale:1.0];
    }
#endif
    readerVideoOutput.videoComposition = videoComposition;
    // Avoid an extra copy of each sample buffer; we release buffers promptly.
    readerVideoOutput.alwaysCopiesSampleData = NO;
    [assetReader addOutput:readerVideoOutput];

    // Only attach an audio output when the composition actually has audio.
    NSArray *audioTracks = [composition tracksWithMediaType:AVMediaTypeAudio];
    BOOL shouldRecordAudioTrack = ([audioTracks count] > 0);
    AVAssetReaderAudioMixOutput *readerAudioOutput = nil;
    if (shouldRecordAudioTrack) {
        readerAudioOutput = [AVAssetReaderAudioMixOutput assetReaderAudioMixOutputWithAudioTracks:audioTracks audioSettings:nil];
        readerAudioOutput.audioMix = audioMix;
        readerAudioOutput.alwaysCopiesSampleData = NO;
        [assetReader addOutput:readerAudioOutput];
    }
    return assetReader;
}
AVAssetReaderTrackOutput
我們創建 AVAssetReader 的方法里創建了兩個輸出,分別是 AVAssetReaderVideoCompositionOutput
和 AVAssetReaderAudioMixOutput
兩個實例.注意它們都是 AVAssetReaderOutput 的子類(而不是 AVAssetReaderTrackOutput 的子類,AVAssetReaderTrackOutput 是 AVAssetReaderOutput 的另一個子類,用于讀取單條軌道).它們分別包含了視頻的輸出和音頻的輸出.
我們也可以從 AVassetReader 中獲取到這個變量
// Retrieve the audio/video outputs back from the reader.
// BUG FIX: AVAssetReaderVideoCompositionOutput and AVAssetReaderAudioMixOutput
// are subclasses of AVAssetReaderOutput, NOT of AVAssetReaderTrackOutput, so
// the original loop/declaration types were incompatible. Iterate with the
// common base class and downcast after checking mediaType.
AVAssetReaderVideoCompositionOutput *readerVideoTrackOutput = nil;
AVAssetReaderAudioMixOutput *readerAudioOutput = nil;
for (AVAssetReaderOutput *output in assetReader.outputs) {
    if ([output.mediaType isEqualToString:AVMediaTypeVideo]) {
        readerVideoTrackOutput = (AVAssetReaderVideoCompositionOutput *)output;
    } else if ([output.mediaType isEqualToString:AVMediaTypeAudio]) {
        readerAudioOutput = (AVAssetReaderAudioMixOutput *)output;
    }
}
AVAssetWriter
AVAssetWriter 該類的作用是把 AVAssetReader 的輸出(AVAssetReaderOutput)通過回調寫成文件.
創建 AVassetWriter
// Initialize the AVAssetWriter.
// Remove any stale file at the destination first: AVAssetWriter fails to
// start if something already exists at the output URL.
[[NSFileManager defaultManager] removeItemAtURL:outPutUrl error:nil];
AVAssetWriter *assetWriter = [AVAssetWriter assetWriterWithURL:outPutUrl
                                                      fileType:AVFileTypeQuickTimeMovie
                                                         error:nil];

// Audio writer input. The settings dictionary may be nil, in which case
// samples are appended unprocessed (no re-encoding).
NSDictionary *audioInputSetting = [self configAudioInput];
AVAssetWriterInput *audioTrackInput =
    [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeAudio
                                       outputSettings:audioInputSetting];

// Video writer input. Likewise, nil settings mean pass-through.
NSDictionary *videoInputSetting = [self configVideoInput];
AVAssetWriterInput *videoTrackInput =
    [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo
                                       outputSettings:videoInputSetting];

if ([assetWriter canAddInput:audioTrackInput]) {
    [assetWriter addInput:audioTrackInput];
}
if ([assetWriter canAddInput:videoTrackInput]) {
    [assetWriter addInput:videoTrackInput];
}
這里對應的音視頻 InputSetting 是對你寫出的音視頻格式的設置.如果你不設置,就會把數據原樣全部寫出來,是無損無壓縮的,大小大概會是一般 MP3 格式的 1000 倍左右.
編碼設置
音頻編碼
/**
編碼音頻
@return 返回編碼字典
*/
/**
 Builds the audio compression settings for the writer input.

 @return A settings dictionary describing AAC, 44.1 kHz, 2-channel stereo.
 */
- (NSDictionary *)configAudioInput {
    // Standard stereo layout. BUG FIX: because mChannelLayoutTag is a concrete
    // tag (not kAudioChannelLayoutTag_UseChannelBitmap), the mChannelBitmap
    // field is ignored and should be 0 — the original kAudioChannelBit_Left
    // value contradicted the stereo tag.
    AudioChannelLayout channelLayout = {
        .mChannelLayoutTag = kAudioChannelLayoutTag_Stereo,
        .mChannelBitmap = 0,
        .mNumberChannelDescriptions = 0
    };
    // Only the fixed-size header is needed; there are no variable-length
    // channel descriptions when a layout tag is used.
    NSData *channelLayoutData = [NSData dataWithBytes:&channelLayout
                                               length:offsetof(AudioChannelLayout, mChannelDescriptions)];
    NSDictionary *audioInputSetting = @{
        AVFormatIDKey: @(kAudioFormatMPEG4AAC),   // AAC encoding
        AVSampleRateKey: @(44100),                // 44.1 kHz
        AVNumberOfChannelsKey: @(2),              // stereo
        AVChannelLayoutKey: channelLayoutData
    };
    return audioInputSetting;
}
視頻編碼
/**
編碼視頻
@return 返回編碼字典
*/
/**
 Builds the video compression settings for the writer input.

 @return A settings dictionary describing H.264 at 374x666 pixels.
 */
- (NSDictionary *)configVideoInput {
    return @{
        AVVideoCodecKey: AVVideoCodecH264,  // hardware-accelerated H.264
        AVVideoWidthKey: @(374),
        AVVideoHeightKey: @(666)
    };
}
SampleBufferRef
到這里我們AVassetWriter和AVassetReader的設置都已經設置成功,就開始讀和寫了.
[assetReader startReading];
[assetWriter startWriting];

// One serial queue per media type so audio and video are pulled from the
// reader and appended to the writer concurrently; the group joins both.
dispatch_queue_t rwAudioSerializationQueue = dispatch_queue_create("Audio Queue", DISPATCH_QUEUE_SERIAL);
dispatch_queue_t rwVideoSerializationQueue = dispatch_queue_create("Video Queue", DISPATCH_QUEUE_SERIAL);
dispatch_group_t dispatchGroup = dispatch_group_create();

// The session start time is configurable; kCMTimeZero writes from the start.
[assetWriter startSessionAtSourceTime:kCMTimeZero];

dispatch_group_enter(dispatchGroup);
__block BOOL isAudioFirst = YES;
[assetWriterAudioInput requestMediaDataWhenReadyOnQueue:rwAudioSerializationQueue usingBlock:^{
    while ([assetWriterAudioInput isReadyForMoreMediaData]) {
        CMSampleBufferRef nextSampleBuffer = [assetReaderAudioOutput copyNextSampleBuffer];
        if (nextSampleBuffer == NULL) {
            // NULL signals end of stream (or reader failure): finish this
            // input and leave the group. BUG FIX: checking for NULL before
            // the first-sample skip means an empty/failed track can no
            // longer bypass markAsFinished and hang the dispatch group.
            [assetWriterAudioInput markAsFinished];
            dispatch_group_leave(dispatchGroup);
            break;
        }
        if (isAudioFirst) {
            isAudioFirst = NO;
            // BUG FIX: copyNextSampleBuffer follows the Create Rule, so the
            // deliberately skipped first buffer must still be released; the
            // original `continue` leaked it.
            CFRelease(nextSampleBuffer);
            continue;
        }
        [assetWriterAudioInput appendSampleBuffer:nextSampleBuffer];
        CFRelease(nextSampleBuffer);
    }
}];

dispatch_group_enter(dispatchGroup);
__block BOOL isVideoFirst = YES;
[assetWriterVideoInput requestMediaDataWhenReadyOnQueue:rwVideoSerializationQueue usingBlock:^{
    while ([assetWriterVideoInput isReadyForMoreMediaData]) {
        CMSampleBufferRef nextSampleBuffer = [assetReaderVideoOutput copyNextSampleBuffer];
        if (nextSampleBuffer == NULL) {
            // End of stream or reader failure — same handling as audio.
            [assetWriterVideoInput markAsFinished];
            dispatch_group_leave(dispatchGroup);
            break;
        }
        if (isVideoFirst) {
            isVideoFirst = NO;
            // Skipped buffer must still be released (Create Rule).
            CFRelease(nextSampleBuffer);
            continue;
        }
        // Per-frame NSLog removed: logging on every frame is a measurable
        // slowdown in a tight transcode loop.
        [assetWriterVideoInput appendSampleBuffer:nextSampleBuffer];
        CFRelease(nextSampleBuffer);
    }
}];

// When both inputs are finished, close the file and notify the delegate.
dispatch_group_notify(dispatchGroup, dispatch_get_main_queue(), ^{
    [assetWriter finishWritingWithCompletionHandler:^{
        if (assetWriter.status == AVAssetWriterStatusCompleted) {
            NSLog(@"加載完畢");
        } else {
            NSLog(@"加載失敗");
        }
        if ([self.delegate respondsToSelector:@selector(synthesisResult)]) {
            [self.delegate synthesisResult];
        }
    }];
});
到這里我們的音視頻的混音功能和保存就完成了.對于拼接 和 消音,快進和倒放就之后會抽時間也寫出來.其實原理都在這了.demo 我后面再補!
原創文章轉載需獲授權并注明出處
請在后臺留言聯系轉載