ffmpeg_sample walkthrough: remuxing

/**
 * @file
 * libavformat/libavcodec demuxing and muxing API example.
 * Remux streams from one container format to another.
 * @example remuxing.c
 */

#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>

static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
{
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;

    printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
           tag,
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
           pkt->stream_index);
}
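A side note before the main function: avformat_alloc_output_context2(), used below, picks the muxer from the output file name's extension via av_guess_format(). A minimal sketch of that lookup; print_guessed_format is a hypothetical helper added here for illustration, not part of the original example:

/* Print which muxer libavformat would pick for a given output file name. */
static void print_guessed_format(const char *filename)
{
    // NULL short_name and mime_type: guess purely from the file extension.
    const AVOutputFormat *fmt = av_guess_format(NULL, filename, NULL);
    if (fmt)
        printf("%s -> muxer %s (%s)\n", filename, fmt->name, fmt->long_name);
    else
        printf("%s -> no muxer matches\n", filename);
}

For example, "out.mkv" maps to the matroska muxer and "out.ts" to mpegts.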
// Rewrap the compressed streams into a new container format.
// The output container (muxer) is chosen from the output file's extension.

int remuxing_main(int argc, char **argv)
{
    AVOutputFormat *ofmt = NULL;
    AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
    AVPacket pkt;
    const char *in_filename, *out_filename;
    int ret, i;
    int stream_index = 0;
    int *stream_mapping = NULL;
    int stream_mapping_size = 0;

    if (argc < 3) {
        printf("usage: %s input output\n"
               "API example program to remux a media file with libavformat and libavcodec.\n"
               "The output format is guessed according to the file extension.\n"
               "\n", argv[0]);
        return 1;
    }

    // Input and output file names from the command line.
    in_filename  = argv[1];
    out_filename = argv[2];

    // Open the input file; libavformat reads its header and the demuxer
    // state lives in the input format context from here on.
    if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
        fprintf(stderr, "Could not open input file '%s'", in_filename);
        goto end;
    }
    // Probe the file to fill in stream information (codecs, time bases, durations, ...).
    if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
        fprintf(stderr, "Failed to retrieve input stream information");
        goto end;
    }
    // Dump the input file's format information (streams, duration, bit rate).
    av_dump_format(ifmt_ctx, 0, in_filename, 0);
    // Allocate the output format context; the muxer is guessed from the
    // extension of out_filename.
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx) {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }

    // Total number of streams in the input file.
    stream_mapping_size = ifmt_ctx->nb_streams;
    // Allocate the input-to-output stream index map, one entry per input stream.
    stream_mapping = av_mallocz_array(stream_mapping_size, sizeof(*stream_mapping));
    if (!stream_mapping) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    ofmt = ofmt_ctx->oformat;

    // Walk all input streams and copy their codec parameters into new output streams.
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        AVStream *out_stream;
        AVStream *in_stream = ifmt_ctx->streams[i];
        // Codec parameters of the input stream.
        AVCodecParameters *in_codecpar = in_stream->codecpar;
        // Only audio, video and subtitle streams are remuxed; mark everything else as dropped.
        if (in_codecpar->codec_type != AVMEDIA_TYPE_AUDIO &&
            in_codecpar->codec_type != AVMEDIA_TYPE_VIDEO &&
            in_codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE) {
            stream_mapping[i] = -1;
            continue;
        }
        // Map this input stream index to the next output stream index.
        stream_mapping[i] = stream_index++;
        // Create a new stream in the output format context; the stream is
        // owned by (bound to) that context.
        out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream) {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        // Copy the codec parameters from the input stream to the output stream.
        ret = avcodec_parameters_copy(out_stream->codecpar, in_codecpar);
        if (ret < 0) {
            fprintf(stderr, "Failed to copy codec parameters\n");
            goto end;
        }
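        // Note (added): the codec_tag copied above belongs to the input
        // container and may be invalid in the output container; resetting it
        // to 0 lets the muxer choose an appropriate tag on its own.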
        out_stream->codecpar->codec_tag = 0;
    }
    // Dump the output format information (the trailing 1 marks the context as output).
    av_dump_format(ofmt_ctx, 0, out_filename, 1);

    // Muxers flagged AVFMT_NOFILE do their own I/O; for everything else,
    // open an AVIOContext through which the output file will be written.
    if (!(ofmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open output file '%s'", out_filename);
            goto end;
        }
    }

    // Write the container header (the streams' header information) to the output file.
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file\n");
        goto end;
    }
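    // Note (added): avformat_write_header() may change an output stream's
    // time_base to whatever the muxer requires (e.g. 1/90000 for MPEG-TS),
    // which is why the loop below rescales every packet's timestamps into
    // out_stream->time_base only after the header has been written.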

    while (1) {
        AVStream *in_stream, *out_stream;
        // Read the next packet from the input context.
        ret = av_read_frame(ifmt_ctx, &pkt);
        if (ret < 0)
            break;

        // The input stream this packet belongs to.
        in_stream = ifmt_ctx->streams[pkt.stream_index];
        // Drop packets from streams that are not being remuxed.
        if (pkt.stream_index >= stream_mapping_size ||
            stream_mapping[pkt.stream_index] < 0) {
            av_packet_unref(&pkt);
            continue;
        }
        // Remap the packet to its output stream index.
        pkt.stream_index = stream_mapping[pkt.stream_index];
        out_stream = ofmt_ctx->streams[pkt.stream_index];
        log_packet(ifmt_ctx, &pkt, "in");

        /* copy packet */
        // Rescale timestamps from the input stream's time base to the output
        // stream's: av_rescale_q(a, bq, cq) computes a * bq / cq.
        // pts is the presentation timestamp (when the frame is shown);
        // dts is the decoding timestamp (the order frames are written and decoded).
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        log_packet(ofmt_ctx, &pkt, "out");
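        // Worked example (added): going from a 1/1000 input time base to a
        // 1/90000 output time base, pts = 40 becomes 40 * (1/1000) / (1/90000)
        // = 3600, i.e. the same 40 ms instant expressed in 90 kHz ticks.
        // AV_ROUND_PASS_MINMAX passes INT64_MIN/INT64_MAX through unchanged,
        // so an AV_NOPTS_VALUE timestamp stays AV_NOPTS_VALUE.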

        // Hand the packet to the muxer, which interleaves packets from the
        // different streams into correct dts order before writing them out.
        ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error muxing packet\n");
            break;
        }
        av_packet_unref(&pkt);
    }
    // Write the container trailer and flush any buffered packets.
    av_write_trailer(ofmt_ctx);
end:
    // Release resources.
    avformat_close_input(&ifmt_ctx);

    /* close output */
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);

    av_freep(&stream_mapping);

    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        return 1;
    }

    return 0;

}
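The entry point is named remuxing_main rather than main (presumably so several samples can be linked into one demo binary), so building this file on its own needs a small driver. A minimal sketch, assuming the FFmpeg development packages and pkg-config are available:

/* main.c: hypothetical driver for the sample above. */
int remuxing_main(int argc, char **argv);

int main(int argc, char **argv)
{
    return remuxing_main(argc, argv);
}

Compile and run with something like gcc remuxing.c main.c -o remux $(pkg-config --cflags --libs libavformat libavcodec libavutil), then ./remux input.mp4 output.mkv; the paired in/out lines printed by log_packet show each packet's timestamps before and after rescaling.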
