The FFmpeg function read_frame_internal()
Published: 2019-06-28



FFmpeg version: 3.2 release. The function lives in libavformat/utils.c.

The main functionality of read_frame_internal() can be divided into three parts.

static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
    int ret = 0, i, got_packet = 0;
    AVDictionary *metadata = NULL;

    av_init_packet(pkt);

    while (!got_packet && !s->internal->parse_queue) {
        AVStream *st;
        AVPacket cur_pkt;

        /* read next packet */
        ret = ff_read_packet(s, &cur_pkt);
        if (ret < 0) {
            if (ret == AVERROR(EAGAIN))
                return ret;
            /* flush the parsers */
            for (i = 0; i < s->nb_streams; i++) {
                st = s->streams[i];
                if (st->parser && st->need_parsing)
                    parse_packet(s, NULL, st->index);
            }
            /* all remaining packets are now in parse_queue =>
             * really terminate parsing */
            break;
        }
        ret = 0;
        st  = s->streams[cur_pkt.stream_index];

        /* update context if required */
        if (st->internal->need_context_update) {
            if (avcodec_is_open(st->internal->avctx)) {
                av_log(s, AV_LOG_DEBUG, "Demuxer context update while decoder is open, closing and trying to re-open\n");
                avcodec_close(st->internal->avctx);
                st->info->found_decoder = 0;
            }

            ret = avcodec_parameters_to_context(st->internal->avctx, st->codecpar);
            if (ret < 0)
                return ret;

#if FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
            /* update deprecated public codec context */
            ret = avcodec_parameters_to_context(st->codec, st->codecpar);
            if (ret < 0)
                return ret;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

            st->internal->need_context_update = 0;
        }

        if (cur_pkt.pts != AV_NOPTS_VALUE &&
            cur_pkt.dts != AV_NOPTS_VALUE &&
            cur_pkt.pts < cur_pkt.dts) {
            av_log(s, AV_LOG_WARNING,
                   "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
                   cur_pkt.stream_index,
                   av_ts2str(cur_pkt.pts),
                   av_ts2str(cur_pkt.dts),
                   cur_pkt.size);
        }
        if (s->debug & FF_FDEBUG_TS)
            av_log(s, AV_LOG_DEBUG,
                   "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%"PRId64", flags=%d\n",
                   cur_pkt.stream_index,
                   av_ts2str(cur_pkt.pts),
                   av_ts2str(cur_pkt.dts),
                   cur_pkt.size, cur_pkt.duration, cur_pkt.flags);

        if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
            st->parser = av_parser_init(st->codecpar->codec_id);
            if (!st->parser) {
                av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
                       "%s, packets or times may be invalid.\n",
                       avcodec_get_name(st->codecpar->codec_id));
                /* no parser available: just output the raw packets */
                st->need_parsing = AVSTREAM_PARSE_NONE;
            } else if (st->need_parsing == AVSTREAM_PARSE_HEADERS)
                st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
            else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
                st->parser->flags |= PARSER_FLAG_ONCE;
            else if (st->need_parsing == AVSTREAM_PARSE_FULL_RAW)
                st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
        }

        if (!st->need_parsing || !st->parser) {
            /* no parsing needed: we just output the packet as is */
            *pkt = cur_pkt;
            compute_pkt_fields(s, st, NULL, pkt, AV_NOPTS_VALUE, AV_NOPTS_VALUE);
            if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
                (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
                ff_reduce_index(s, st->index);
                av_add_index_entry(st, pkt->pos, pkt->dts,
                                   0, 0, AVINDEX_KEYFRAME);
            }
            got_packet = 1;
        } else if (st->discard < AVDISCARD_ALL) {
            if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
                return ret;
            st->codecpar->sample_rate = st->internal->avctx->sample_rate;
            st->codecpar->bit_rate = st->internal->avctx->bit_rate;
            st->codecpar->channels = st->internal->avctx->channels;
            st->codecpar->channel_layout = st->internal->avctx->channel_layout;
            st->codecpar->codec_id = st->internal->avctx->codec_id;
        } else {
            /* free packet */
            av_packet_unref(&cur_pkt);
        }
        if (pkt->flags & AV_PKT_FLAG_KEY)
            st->skip_to_keyframe = 0;
        if (st->skip_to_keyframe) {
            av_packet_unref(&cur_pkt);
            if (got_packet) {
                *pkt = cur_pkt;
            }
            got_packet = 0;
        }
    }

    if (!got_packet && s->internal->parse_queue)
        ret = read_from_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end, pkt);

    if (ret >= 0) {
        AVStream *st = s->streams[pkt->stream_index];
        int discard_padding = 0;
        if (st->first_discard_sample && pkt->pts != AV_NOPTS_VALUE) {
            int64_t pts = pkt->pts - (is_relative(pkt->pts) ? RELATIVE_TS_BASE : 0);
            int64_t sample = ts_to_samples(st, pts);
            int duration = ts_to_samples(st, pkt->duration);
            int64_t end_sample = sample + duration;
            if (duration > 0 && end_sample >= st->first_discard_sample &&
                sample < st->last_discard_sample)
                discard_padding = FFMIN(end_sample - st->first_discard_sample, duration);
        }
        if (st->start_skip_samples && (pkt->pts == 0 || pkt->pts == RELATIVE_TS_BASE))
            st->skip_samples = st->start_skip_samples;
        if (st->skip_samples || discard_padding) {
            uint8_t *p = av_packet_new_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, 10);
            if (p) {
                AV_WL32(p, st->skip_samples);
                AV_WL32(p + 4, discard_padding);
                av_log(s, AV_LOG_DEBUG, "demuxer injecting skip %d / discard %d\n", st->skip_samples, discard_padding);
            }
            st->skip_samples = 0;
        }

        if (st->inject_global_side_data) {
            for (i = 0; i < st->nb_side_data; i++) {
                AVPacketSideData *src_sd = &st->side_data[i];
                uint8_t *dst_data;

                if (av_packet_get_side_data(pkt, src_sd->type, NULL))
                    continue;

                dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
                if (!dst_data) {
                    av_log(s, AV_LOG_WARNING, "Could not inject global side data\n");
                    continue;
                }

                memcpy(dst_data, src_sd->data, src_sd->size);
            }
            st->inject_global_side_data = 0;
        }

        if (!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
            av_packet_merge_side_data(pkt);
    }

    av_opt_get_dict_val(s, "metadata", AV_OPT_SEARCH_CHILDREN, &metadata);
    if (metadata) {
        s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED;
        av_dict_copy(&s->metadata, metadata, 0);
        av_dict_free(&metadata);
        av_opt_set_dict_val(s, "metadata", NULL, AV_OPT_SEARCH_CHILDREN);
    }

#if FF_API_LAVF_AVCTX
    update_stream_avctx(s);
#endif

    if (s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG,
               "read_frame_internal stream=%d, pts=%s, dts=%s, "
               "size=%d, duration=%"PRId64", flags=%d\n",
               pkt->stream_index,
               av_ts2str(pkt->pts),
               av_ts2str(pkt->dts),
               pkt->size, pkt->duration, pkt->flags);

    return ret;
}
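The last part of the function injects AV_PKT_DATA_SKIP_SAMPLES side data (skip_samples and discard_padding) into the packet. As a rough illustration of how a downstream consumer might read those values back, here is a minimal sketch; the helper name report_skip_samples is an assumption, not FFmpeg API:

#include <libavcodec/avcodec.h>
#include <libavutil/intreadwrite.h>
#include <libavutil/log.h>

/* Illustrative sketch only: read the skip/discard values that
 * read_frame_internal() may have attached to a demuxed packet. */
static void report_skip_samples(AVPacket *pkt)
{
    int size = 0;
    uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, &size);

    if (sd && size >= 8) {
        uint32_t skip    = AV_RL32(sd);     /* samples to skip at the start */
        uint32_t discard = AV_RL32(sd + 4); /* samples to discard at the end */
        av_log(NULL, AV_LOG_INFO, "skip=%u discard=%u\n", skip, discard);
    }
}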

 

The function ff_read_packet(AVFormatContext *s, AVPacket *pkt)

Reads audio or video data from the I/O context s->pb, stores it in pkt->data, and at the same time determines the audio/video codec of the stream.

int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, i, err;
    AVStream *st;

    for (;;) {
        AVPacketList *pktl = s->internal->raw_packet_buffer;

        if (pktl) {
            *pkt = pktl->pkt;
            st   = s->streams[pkt->stream_index];
            if (s->internal->raw_packet_buffer_remaining_size <= 0)
                if ((err = probe_codec(s, st, NULL)) < 0)
                    return err;
            if (st->request_probe <= 0) {
                s->internal->raw_packet_buffer                 = pktl->next;
                s->internal->raw_packet_buffer_remaining_size += pkt->size;
                av_free(pktl);
                return 0;
            }
        }

        pkt->data = NULL;
        pkt->size = 0;
        av_init_packet(pkt);
        ret = s->iformat->read_packet(s, pkt);
        if (ret < 0) {
            /* Some demuxers return FFERROR_REDO when they consume
               data and discard it (ignored streams, junk, extradata).
               We must re-call the demuxer to get the real packet. */
            if (ret == FFERROR_REDO)
                continue;
            if (!pktl || ret == AVERROR(EAGAIN))
                return ret;
            for (i = 0; i < s->nb_streams; i++) {
                st = s->streams[i];
                if (st->probe_packets || st->request_probe > 0)
                    if ((err = probe_codec(s, st, NULL)) < 0)
                        return err;
                av_assert0(st->request_probe <= 0);
            }
            continue;
        }

        if (!pkt->buf) {
            AVPacket tmp = { 0 };
            ret = av_packet_ref(&tmp, pkt);
            if (ret < 0)
                return ret;
            *pkt = tmp;
        }

        if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
            (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
            av_log(s, AV_LOG_WARNING,
                   "Dropped corrupted packet (stream = %d)\n",
                   pkt->stream_index);
            av_packet_unref(pkt);
            continue;
        }

        if (pkt->stream_index >= (unsigned)s->nb_streams) {
            av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);
            continue;
        }

        st = s->streams[pkt->stream_index];

        if (update_wrap_reference(s, st, pkt->stream_index, pkt) && st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET) {
            // correct first time stamps to negative values
            if (!is_relative(st->first_dts))
                st->first_dts = wrap_timestamp(st, st->first_dts);
            if (!is_relative(st->start_time))
                st->start_time = wrap_timestamp(st, st->start_time);
            if (!is_relative(st->cur_dts))
                st->cur_dts = wrap_timestamp(st, st->cur_dts);
        }

        pkt->dts = wrap_timestamp(st, pkt->dts);
        pkt->pts = wrap_timestamp(st, pkt->pts);

        force_codec_ids(s, st);

        /* TODO: audio: time filter; video: frame reordering (pts != dts) */
        if (s->use_wallclock_as_timestamps)
            pkt->dts = pkt->pts = av_rescale_q(av_gettime(), AV_TIME_BASE_Q, st->time_base);

        if (!pktl && st->request_probe <= 0)
            return ret;

        err = add_to_pktbuf(&s->internal->raw_packet_buffer, pkt,
                            &s->internal->raw_packet_buffer_end, 0);
        if (err)
            return err;
        s->internal->raw_packet_buffer_remaining_size -= pkt->size;

        if ((err = probe_codec(s, st, pkt)) < 0)
            return err;
    }
}
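Note that ff_read_packet() honours a couple of caller-controlled settings seen above, such as AVFMT_FLAG_DISCARD_CORRUPT and use_wallclock_as_timestamps. A minimal sketch of enabling them before demuxing; the function name open_with_options and the simplified error handling are assumptions for illustration:

#include <libavformat/avformat.h>
#include <libavutil/dict.h>

static int open_with_options(AVFormatContext **fmt, const char *filename)
{
    AVDictionary *opts = NULL;
    int ret;

    *fmt = avformat_alloc_context();
    if (!*fmt)
        return AVERROR(ENOMEM);

    /* drop packets flagged AV_PKT_FLAG_CORRUPT inside ff_read_packet() */
    (*fmt)->flags |= AVFMT_FLAG_DISCARD_CORRUPT;

    /* stamp pts/dts with the wall clock instead of container timestamps */
    av_dict_set(&opts, "use_wallclock_as_timestamps", "1", 0);

    ret = avformat_open_input(fmt, filename, NULL, &opts);
    av_dict_free(&opts);
    return ret;
}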

 

 

The function parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)

If the packet data needs parsing, this function parses the data in pkt and appends the resulting packets to s->internal->parse_queue.

How is the parser selected? By calling av_parser_init(int codec_id), which picks a parser according to codec_id. Taking H.264 as an example, codec_id is AV_CODEC_ID_H264, so the parser is ff_h264_parser.

 

#define REGISTER_PARSER(X, x)                                           \
    {                                                                   \
        extern AVCodecParser ff_##x##_parser;                           \
        if (CONFIG_##X##_PARSER)                                        \
            av_register_codec_parser(&ff_##x##_parser);                 \
    }

void avcodec_register_all(void)
{
    ... ...

    /* parsers */
    REGISTER_PARSER(AAC,                aac);
    REGISTER_PARSER(AAC_LATM,           aac_latm);
    REGISTER_PARSER(AC3,                ac3);
    REGISTER_PARSER(ADX,                adx);
    REGISTER_PARSER(BMP,                bmp);
    REGISTER_PARSER(CAVSVIDEO,          cavsvideo);
    REGISTER_PARSER(COOK,               cook);
    REGISTER_PARSER(DCA,                dca);
    REGISTER_PARSER(DIRAC,              dirac);
    REGISTER_PARSER(DNXHD,              dnxhd);
    REGISTER_PARSER(DPX,                dpx);
    REGISTER_PARSER(DVAUDIO,            dvaudio);
    REGISTER_PARSER(DVBSUB,             dvbsub);
    REGISTER_PARSER(DVDSUB,             dvdsub);
    REGISTER_PARSER(DVD_NAV,            dvd_nav);
    REGISTER_PARSER(FLAC,               flac);
    REGISTER_PARSER(G729,               g729);
    REGISTER_PARSER(GSM,                gsm);
    REGISTER_PARSER(H261,               h261);
    REGISTER_PARSER(H263,               h263);
    REGISTER_PARSER(H264,               h264);
    REGISTER_PARSER(HEVC,               hevc);
    REGISTER_PARSER(MJPEG,              mjpeg);
    REGISTER_PARSER(MLP,                mlp);
    REGISTER_PARSER(MPEG4VIDEO,         mpeg4video);
    REGISTER_PARSER(MPEGAUDIO,          mpegaudio);
    REGISTER_PARSER(MPEGVIDEO,          mpegvideo);
    REGISTER_PARSER(OPUS,               opus);
    REGISTER_PARSER(PNG,                png);
    REGISTER_PARSER(PNM,                pnm);
    REGISTER_PARSER(RV30,               rv30);
    REGISTER_PARSER(RV40,               rv40);
    REGISTER_PARSER(TAK,                tak);
    REGISTER_PARSER(VC1,                vc1);
    REGISTER_PARSER(VORBIS,             vorbis);
    REGISTER_PARSER(VP3,                vp3);
    REGISTER_PARSER(VP8,                vp8);
    REGISTER_PARSER(VP9,                vp9);
}
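To see how the parser picked by av_parser_init() is actually driven, here is a minimal sketch of splitting a raw H.264 elementary stream with av_parser_parse2(). The function name split_h264 and the buf/size input are illustrative assumptions; with FFmpeg 3.2, avcodec_register_all() must have been called beforehand:

#include <libavcodec/avcodec.h>

static int split_h264(const uint8_t *buf, int size)
{
    AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    AVCodecContext *avctx = codec ? avcodec_alloc_context3(codec) : NULL;
    AVCodecParserContext *parser = av_parser_init(AV_CODEC_ID_H264);

    if (!codec || !avctx || !parser) {
        av_parser_close(parser);
        avcodec_free_context(&avctx);
        return AVERROR(EINVAL);
    }

    while (size > 0) {
        uint8_t *out = NULL;
        int out_size = 0;
        /* consumes part of the input; when a complete access unit has been
         * assembled it is returned in out/out_size */
        int len = av_parser_parse2(parser, avctx, &out, &out_size,
                                   buf, size,
                                   AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
        if (len < 0)
            break;
        buf  += len;
        size -= len;
        if (out_size > 0) {
            /* one complete H.264 frame is available here */
        }
    }

    av_parser_close(parser);
    avcodec_free_context(&avctx);
    return 0;
}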

How read_frame_internal(AVFormatContext *s, AVPacket *pkt) obtains the packet data

When s->streams[index]->need_parsing is AVSTREAM_PARSE_NONE or s->streams[index]->parser is NULL, the packet content is obtained directly from ff_read_packet(AVFormatContext *s, AVPacket *pkt).

Otherwise, parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index) stores the data pointed to by pkt in s->internal->parse_queue, and the packet is then retrieved by calling

read_from_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end, pkt);

 

static int read_from_packet_buffer(AVPacketList **pkt_buffer,
                                   AVPacketList **pkt_buffer_end,
                                   AVPacket      *pkt)
{
    AVPacketList *pktl;
    av_assert0(*pkt_buffer);
    pktl        = *pkt_buffer;
    *pkt        = pktl->pkt;
    *pkt_buffer = pktl->next;
    if (!pktl->next)
        *pkt_buffer_end = NULL;
    av_freep(&pktl);
    return 0;
}
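For completeness, the opposite operation (appending to the queue, done by add_to_pktbuf() in utils.c) looks roughly like the sketch below. append_to_packet_buffer is an illustrative name, not the real FFmpeg helper; the point is that the queue is a singly linked AVPacketList FIFO with a tail pointer, so read_from_packet_buffer() can pop from the head in O(1):

#include <libavformat/avformat.h>
#include <libavutil/mem.h>

/* Illustrative sketch: append a packet to a linked-list FIFO such as
 * s->internal->parse_queue, keeping the tail pointer up to date. */
static int append_to_packet_buffer(AVPacketList **pkt_buffer,
                                   AVPacketList **pkt_buffer_end,
                                   AVPacket *pkt)
{
    AVPacketList *pktl = av_mallocz(sizeof(*pktl));
    int ret;

    if (!pktl)
        return AVERROR(ENOMEM);

    /* take a reference so the queue owns the packet data */
    ret = av_packet_ref(&pktl->pkt, pkt);
    if (ret < 0) {
        av_free(pktl);
        return ret;
    }

    if (*pkt_buffer_end)
        (*pkt_buffer_end)->next = pktl;   /* append after current tail */
    else
        *pkt_buffer = pktl;               /* empty queue: new head */
    *pkt_buffer_end = pktl;
    return 0;
}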

 

Summary:

The main role of read_frame_internal() is:

to read audio or video data from the I/O context s->pb, store it in pkt->data, and at the same time determine the audio/video codec of the stream.
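In practice an application never calls read_frame_internal() directly; it is reached through the public av_read_frame() API. A minimal caller-side sketch, with error handling abbreviated and the function name demo_demux as an assumption:

#include <libavformat/avformat.h>

static int demo_demux(const char *filename)
{
    AVFormatContext *fmt = NULL;
    AVPacket pkt;
    int ret;

    av_register_all();                       /* required in FFmpeg 3.2 */
    if ((ret = avformat_open_input(&fmt, filename, NULL, NULL)) < 0)
        return ret;
    if ((ret = avformat_find_stream_info(fmt, NULL)) < 0)
        goto end;

    /* each av_read_frame() call ends up in read_frame_internal() */
    while (av_read_frame(fmt, &pkt) >= 0) {
        /* pkt.data / pkt.size hold one demuxed (and, if needed, parsed) packet */
        av_packet_unref(&pkt);
    }
    ret = 0;
end:
    avformat_close_input(&fmt);
    return ret;
}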

 

Reposted from: https://my.oschina.net/u/2326611/blog/809099
