ffmpeg 编码(视频)
Posted 长风破浪会有时,直挂云帆济沧海
tags:
篇首语:本文由小常识网(cha138.com)小编为大家整理,主要介绍了ffmpeg 编码(视屏)相关的知识,希望对你有一定的参考价值。
分析ffmpeg_3.3.2 muxing
1:分析主函数,代码如下:
/*
 * Entry point of the muxing example: generates a synthetic audio and a
 * synthetic video stream, encodes both, and muxes them into the output
 * file named on the command line.  The container format is guessed from
 * the file extension, falling back to MPEG.  Returns 0 on success,
 * 1 on any error.
 */
int main(int argc, char **argv)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVCodec *audio_codec, *video_codec;
    int ret;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;
    AVDictionary *opt = NULL;
    int i;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc < 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using ‘%%d‘ in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];
    /* Forward any "-flags value" / "-fflags value" pairs after the
     * filename into an options dictionary (leading '-' stripped). */
    for (i = 2; i+1 < argc; i+=2) {
        if (!strcmp(argv[i], "-flags") || !strcmp(argv[i], "-fflags"))
            av_dict_set(&opt, argv[i]+1, argv[i+1], 0);
    }

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;

    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_stream(&video_st, oc, &video_codec, fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, video_codec, &video_st, opt);

    if (have_audio)
        open_audio(oc, audio_codec, &audio_st, opt);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed (formats with AVFMT_NOFILE
     * manage their own I/O). */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open ‘%s‘: %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, &opt);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

    /* Interleave by timestamp: always encode the stream whose next
     * frame has the earlier pts, so audio and video stay in sync.
     * write_*_frame() returns 1 when its stream has no more data. */
    while (encode_video || encode_audio) {
        /* select the stream to encode */
        if (encode_video &&
            (!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,
                                            audio_st.next_pts, audio_st.enc->time_base) <= 0)) {
            encode_video = !write_video_frame(oc, &video_st);
        } else {
            encode_audio = !write_audio_frame(oc, &audio_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_closep(&oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
2:首先,定义了一个自定义结构体OutputStream,分别来代表一个音频,视频流。
// a wrapper around a single output AVStream: groups the muxer stream
// with its encoder context and the scratch state needed to generate
// and convert frames for it
typedef struct OutputStream {
    AVStream *st;          // output stream owned by the AVFormatContext
    AVCodecContext *enc;   // encoder context for this stream

    /* pts of the next frame that will be generated */
    int64_t next_pts;
    int samples_count;     // running total of audio samples written

    AVFrame *frame;        // reusable frame in the encoder's native format
    AVFrame *tmp_frame;    // intermediate frame when format conversion is needed

    float t, tincr, tincr2; // synthetic audio tone generator state

    struct SwsContext *sws_ctx; // video pixel-format/scale converter
    struct SwrContext *swr_ctx; // audio resampler
} OutputStream;
st是创建的视频,音频流,enc为编码器上下文等等。
3:主函数中首先使用av_register_all()注册所有编码器和格式.
创建一个输出媒体上下文:avformat_alloc_output_context2(&oc,NULL,NULL,filename);
创建一个视频轨道和一个音频轨道:add_stream(&video_st, oc, &video_codec, fmt->video_codec);
4:分析add_stream函数:
/*
 * Create one output stream (audio or video) in the muxer: look up the
 * encoder for codec_id, create the AVStream, allocate the encoder
 * context, and fill in default encoding parameters.  Exits the process
 * on any failure.  The encoder is not opened here; see open_video()/
 * open_audio().
 */
static void add_stream(OutputStream *ost, AVFormatContext *oc,
                       AVCodec **codec,
                       enum AVCodecID codec_id)
{
    AVCodecContext *c;
    int i;

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for ‘%s‘\n",
                avcodec_get_name(codec_id));
        exit(1);
    }

    ost->st = avformat_new_stream(oc, NULL);
    if (!ost->st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    ost->st->id = oc->nb_streams-1;
    c = avcodec_alloc_context3(*codec);
    if (!c) {
        fprintf(stderr, "Could not alloc an encoding context\n");
        exit(1);
    }
    ost->enc = c;

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
        /* Pick the encoder's first declared sample format, falling back
         * to planar float. */
        c->sample_fmt = (*codec)->sample_fmts ?
            (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
        c->bit_rate = 64000;
        /* Prefer 44100 Hz if the encoder supports it, otherwise take
         * its first supported rate. */
        c->sample_rate = 44100;
        if ((*codec)->supported_samplerates) {
            c->sample_rate = (*codec)->supported_samplerates[0];
            for (i = 0; (*codec)->supported_samplerates[i]; i++) {
                if ((*codec)->supported_samplerates[i] == 44100)
                    c->sample_rate = 44100;
            }
        }
        /* NOTE(review): this first call reads c->channel_layout before
         * it is assigned below (same as upstream muxing.c); the value
         * is recomputed after the layout is chosen. */
        c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
        /* Prefer stereo if supported, otherwise the encoder's first
         * declared layout. */
        c->channel_layout = AV_CH_LAYOUT_STEREO;
        if ((*codec)->channel_layouts) {
            c->channel_layout = (*codec)->channel_layouts[0];
            for (i = 0; (*codec)->channel_layouts[i]; i++) {
                if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
                    c->channel_layout = AV_CH_LAYOUT_STEREO;
            }
        }
        c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
        ost->st->time_base = (AVRational){ 1, c->sample_rate };
        break;

    case AVMEDIA_TYPE_VIDEO:
        c->codec_id = codec_id;

        c->bit_rate = 400000;
        /* Resolution must be a multiple of two. */
        c->width = 352;
        c->height = 288;
        /* timebase: This is the fundamental unit of time (in seconds) in terms
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identical to 1. */
        ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
        c->time_base = ost->st->time_base;

        c->gop_size = 12; /* emit one intra frame every twelve frames at most */
        c->pix_fmt = STREAM_PIX_FMT;
        if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* just for testing, we also add B-frames */
            c->max_b_frames = 2;
        }
        if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            /* Needed to avoid using macroblocks in which some coeffs overflow.
             * This does not happen with normal video, it just happens here as
             * the motion of the chroma plane does not match the luma plane. */
            c->mb_decision = 2;
        }
        break;

    default:
        break;
    }

    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
5:创建完音频、视频轨道后,打开视频、音频编码器,准备编码数据 open_video(oc, video_codec, &video_st, opt);
1 static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg) 2 { 3 int ret; 4 AVCodecContext *c = ost->enc; 5 AVDictionary *opt = NULL; 6 7 av_dict_copy(&opt, opt_arg, 0); 8 9 /* open the codec */ 10 ret = avcodec_open2(c, codec, &opt); 11 av_dict_free(&opt); 12 if (ret < 0) { 13 fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret)); 14 exit(1); 15 } 16 17 /* allocate and init a re-usable frame */ 18 ost->frame = alloc_picture(c->pix_fmt, c->width, c->height); 19 if (!ost->frame) { 20 fprintf(stderr, "Could not allocate video frame\n"); 21 exit(1); 22 } 23 24 /* If the output format is not YUV420P, then a temporary YUV420P 25 * picture is needed too. It is then converted to the required 26 * output format. */ 27 ost->tmp_frame = NULL; 28 if (c->pix_fmt != AV_PIX_FMT_YUV420P) { 29 ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height); 30 if (!ost->tmp_frame) { 31 fprintf(stderr, "Could not allocate temporary picture\n"); 32 exit(1); 33 } 34 } 35 36 /* copy the stream parameters to the muxer */ 37 ret = avcodec_parameters_from_context(ost->st->codecpar, c); 38 if (ret < 0) { 39 fprintf(stderr, "Could not copy the stream parameters\n"); 40 exit(1); 41 } 42 }
注意:static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height),创建一个指定格式,宽和高的帧。
tmp_frame:如果格式不是YUV420P,则该帧用于转换格式。否则直接使用frame则可。
1 static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height) 2 { 3 AVFrame *picture; 4 int ret; 5 6 picture = av_frame_alloc(); 7 if (!picture) 8 return NULL; 9 10 picture->format = pix_fmt; 11 picture->width = width; 12 picture->height = height; 13 14 /* allocate the buffers for the frame data */ 15 ret = av_frame_get_buffer(picture, 32); 16 if (ret < 0) { 17 fprintf(stderr, "Could not allocate frame data.\n"); 18 exit(1); 19 } 20 21 return picture; 22 }
6:打开输出文件:
1 if (!(fmt->flags & AVFMT_NOFILE)) { 2 ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE); 3 if (ret < 0) { 4 fprintf(stderr, "Could not open ‘%s‘: %s\n", filename, 5 av_err2str(ret)); 6 return 1; 7 } 8 }
7:写入文件头
1 ret = avformat_write_header(oc, &opt); 2 if (ret < 0) { 3 fprintf(stderr, "Error occurred when opening output file: %s\n", 4 av_err2str(ret)); 5 return 1; 6 }
8:循环编码写入数据
1 while (encode_video || encode_audio) { 2 /* select the stream to encode */ 3 if (encode_video && 4 (!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base, 5 audio_st.next_pts, audio_st.enc->time_base) <= 0)) { 6 encode_video = !write_video_frame(oc, &video_st); 7 } else { 8 encode_audio = !write_audio_frame(oc, &audio_st); 9 } 10 }
分析static int write_video_frame(AVFormatContext *oc, OutputStream *ost),编码一帧视频,并且送入muxer(复合器);当没有更多帧需要编码(编码结束)时返回1,否则返回0
/*
 * Encode one video frame and send it to the muxer.
 * Returns 1 when encoding of this stream is finished (no more frames
 * to generate and the encoder produced no packet), 0 otherwise.
 * Exits the process on encode or write errors.
 */
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
{
    int ret;
    AVCodecContext *c;
    AVFrame *frame;
    int got_packet = 0;
    AVPacket pkt = { 0 };
    c = ost->enc;
    /* NULL once STREAM_DURATION is reached; passing NULL flushes the
     * encoder's delayed frames. */
    frame = get_video_frame(ost);
    av_init_packet(&pkt);
    /* encode the image */
    ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
        exit(1);
    }
    if (got_packet) {
        ret = write_frame(oc, &c->time_base, ost->st, &pkt);
    } else {
        ret = 0;
    }
    if (ret < 0) {
        fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
        exit(1);
    }
    /* Still busy while there is input left or the encoder is draining. */
    return (frame || got_packet) ? 0 : 1;
}
创建一帧数据:frame = get_video_frame(ost);
/*
 * Produce the next synthetic video frame for this stream, converting it
 * to the encoder's pixel format if necessary.  Returns NULL once
 * STREAM_DURATION worth of frames has been generated (signalling the
 * caller to flush the encoder).  Exits the process on failure.
 */
static AVFrame *get_video_frame(OutputStream *ost)
{
    AVCodecContext *c = ost->enc;

    /* check if we want to generate more frames */
    if (av_compare_ts(ost->next_pts, c->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
        return NULL;

    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally; make sure we do not overwrite it here */
    if (av_frame_make_writable(ost->frame) < 0)
        exit(1);

    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        /* as we only generate a YUV420P picture, we must convert it
         * to the codec pixel format if needed */
        if (!ost->sws_ctx) {
            /* lazily create the converter on first use and cache it */
            ost->sws_ctx = sws_getContext(c->width, c->height,
                                          AV_PIX_FMT_YUV420P,
                                          c->width, c->height,
                                          c->pix_fmt,
                                          SCALE_FLAGS, NULL, NULL, NULL);
            if (!ost->sws_ctx) {
                fprintf(stderr,
                        "Could not initialize the conversion context\n");
                exit(1);
            }
        }
        fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
        sws_scale(ost->sws_ctx,
                  (const uint8_t * const *)ost->tmp_frame->data, ost->tmp_frame->linesize,
                  0, c->height, ost->frame->data, ost->frame->linesize);
    } else {
        fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
    }

    /* timestamps advance by one per frame, in c->time_base units */
    ost->frame->pts = ost->next_pts++;

    return ost->frame;
}
使用static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)填充一帧数据。
1 static void fill_yuv_image(AVFrame *pict, int frame_index, 2 int width, int height) 3 { 4 int x, y, i; 5 6 i = frame_index; 7 8 /* Y */ 9 for (y = 0; y < height; y++) 10 for (x = 0; x < width; x++) 11 pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3; 12 13 /* Cb and Cr */ 14 for (y = 0; y < height / 2; y++) { 15 for (x = 0; x < width / 2; x++) { 16 pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2; 17 pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5; 18 } 19 } 20 }
当编码成功后,使用自定义函数 write_frame(oc, &c->time_base, ost->st, &pkt),将一帧数据包写入上下文。
1 static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt) 2 { 3 /* rescale output packet timestamp values from codec to stream timebase */ 4 av_packet_rescale_ts(pkt, *time_base, st->time_base); 5 pkt->stream_index = st->index; 6 7 /* Write the compressed frame to the media file. */ 8 log_packet(fmt_ctx, pkt); 9 return av_interleaved_write_frame(fmt_ctx, pkt); 10 }
9:写尾部,尾部必须在关闭编码器上下文之前写入:
1 av_write_trailer(oc);
10:最后关闭每个编码器,关闭输出文件,并释放上下文
1 /* Close each codec. */ 2 if (have_video) 3 close_stream(oc, &video_st); 4 if (have_audio) 5 close_stream(oc, &audio_st); 6 7 if (!(fmt->flags & AVFMT_NOFILE)) 8 /* Close the output file. */ 9 avio_closep(&oc->pb); 10 11 /* free the stream */ 12 avformat_free_context(oc);
11:close_stream的实现如下:
1 static void close_stream(AVFormatContext *oc, OutputStream *ost) 2 { 3 avcodec_free_context(&ost->enc); 4 av_frame_free(&ost->frame); 5 av_frame_free(&ost->tmp_frame); 6 sws_freeContext(ost->sws_ctx); 7 swr_free(&ost->swr_ctx); 8 }
以上是关于ffmpeg 编码(视频)的主要内容,如果未能解决你的问题,请参考以下文章
php代码片段: sendFile/videoStream/sendEmail/phpexcel/ffmpeg/zip
[java]通过Java,在ubuntu使用mencoder进行webm格式视屏剪辑合并