/**
 * FFmpeg makes it very convenient to encode audio and video and write the
 * result to a file. I recently went through muxing.c, one of the examples
 * that ships with FFmpeg, and modified the source slightly so that it
 * generates an MP4 file, with the audio encoded as AAC and the video as
 * H.264. The code is simple, so I will not comment on it further; the full
 * listing follows.
 */
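
/* Headers assumed by this listing; the original post does not show its
 * #include block, so these are reconstructed for the FFmpeg 0.8-era API used
 * throughout (avcodec_open, avcodec_encode_audio/video, CODEC_ID_*,
 * PIX_FMT_*). A typical build line (the file name is hypothetical):
 *   gcc muxing_mp4.c -o muxing_mp4 -lavformat -lavcodec -lswscale -lavutil -lm
 */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include "libavutil/mathematics.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"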

/* 5 seconds stream duration */
#define STREAM_DURATION   5.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT    PIX_FMT_YUV420P /* default pix_fmt */

static int sws_flags = SWS_BICUBIC;

/**************************************************************/
/* audio output */

static float t, tincr, tincr2;     /* sine generator state */
static int16_t *samples;           /* raw PCM buffer fed to the encoder */
static uint8_t *audio_outbuf;      /* encoded audio output buffer */
static int audio_outbuf_size;
static int audio_input_frame_size; /* samples per channel per encoded frame */

/*
 * add an audio output stream
 */
static AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id)
{
    AVCodecContext *c;
    AVStream *st;

    st = avformat_new_stream(oc, NULL);
    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }
    st->id = 1;

    c = st->codec;
    c->codec_id = codec_id;
    c->codec_type = AVMEDIA_TYPE_AUDIO;

    /* put sample parameters */
    c->sample_fmt = AV_SAMPLE_FMT_S16;
    c->bit_rate = 64000;
    c->sample_rate = 44100;
    c->channels = 2;

    // some formats want stream headers to be separate
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}
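
/* Note: with FFmpeg builds of this era the built-in AAC encoder is often
 * marked experimental; depending on the build you may need
 * c->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL; here, or an FFmpeg
 * compiled with an external AAC library, for avcodec_open() to succeed. */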

static void open_audio(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    AVCodec *codec;

    c = st->codec;

    /* find the audio encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    /* open it */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* init signal generator */
    t = 0;
    tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    audio_outbuf_size = 10000;
    audio_outbuf = (uint8_t *)av_malloc(audio_outbuf_size);

    /* ugly hack for PCM codecs (will be removed ASAP with new PCM
       support to compute the input frame size in samples */
    if (c->frame_size <= 1) {
        audio_input_frame_size = audio_outbuf_size / c->channels;
        switch(st->codec->codec_id) {
        case CODEC_ID_PCM_S16LE:
        case CODEC_ID_PCM_S16BE:
        case CODEC_ID_PCM_U16LE:
        case CODEC_ID_PCM_U16BE:
            audio_input_frame_size >>= 1;
            break;
        default:
            break;
        }
    } else {
        audio_input_frame_size = c->frame_size;
    }
    samples = (int16_t *)av_malloc(audio_input_frame_size * 2 * c->channels);
}
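
/* Note: audio_input_frame_size is counted in samples per channel, so the
 * buffer above is frame_size * 2 bytes (s16) * channels. For AAC,
 * c->frame_size is typically 1024, i.e. 1024 * 2 * 2 = 4096 bytes here. */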

/* prepare a 16 bit dummy audio frame of 'frame_size' samples and
   'nb_channels' channels */
static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
{
    int j, i, v;
    int16_t *q;

    q = samples;
    for (j = 0; j < frame_size; j++) {
        v = (int)(sin(t) * 10000);
        for(i = 0; i < nb_channels; i++)
            *q++ = v;
        t += tincr;
        tincr += tincr2;
    }
}
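
/* Note: every channel receives the same sample value, so the result is a
 * mono sweep duplicated across both stereo channels, starting at 110 Hz and
 * rising by 110 Hz per second (tincr grows by tincr2 on every sample). */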

static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    AVPacket pkt;
    av_init_packet(&pkt);

    c = st->codec;

    get_audio_frame(samples, audio_input_frame_size, c->channels);

    pkt.size = avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);

    if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE)
        pkt.pts = av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
    pkt.flags |= AV_PKT_FLAG_KEY;
    pkt.stream_index = st->index;
    pkt.data = audio_outbuf;

    /* write the compressed frame in the media file */
    if (av_interleaved_write_frame(oc, &pkt) != 0) {
        fprintf(stderr, "Error while writing audio frame\n");
        exit(1);
    }
}
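
/* Note: avcodec_encode_audio() returns the number of bytes written to
 * audio_outbuf, which becomes pkt.size; a return of 0 would mean the encoder
 * buffered the input, a case this example does not handle. */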

static void close_audio(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);

    av_free(samples);
    av_free(audio_outbuf);
}

/**************************************************************/
/* video output */

static AVFrame *picture, *tmp_picture;
static uint8_t *video_outbuf;
static int frame_count, video_outbuf_size;

/* add a video output stream */
static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id)
{
    AVCodecContext *c;
    AVStream *st;
    AVCodec *codec;

    st = avformat_new_stream(oc, NULL);
    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    c = st->codec;

    /* find the video encoder */
    codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }
    avcodec_get_context_defaults3(c, codec);

    c->codec_id = codec_id;

    /* put sample parameters */
    c->bit_rate = /*400000*/3000000;
    /* resolution must be a multiple of two */
    c->width = /*352*/640;
    c->height = /*288*/480;
    /* time base: this is the fundamental unit of time (in seconds) in terms
       of which frame timestamps are represented. for fixed-fps content,
       timebase should be 1/framerate and timestamp increments should be
       identically 1. */
    c->time_base.den = STREAM_FRAME_RATE;
    c->time_base.num = 1;
    c->gop_size = 12; /* emit one intra frame every twelve frames at most */
    c->pix_fmt = STREAM_PIX_FMT;
    if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
        /* just for testing, we also add B frames */
        c->max_b_frames = 2;
    }
    if (c->codec_id == CODEC_ID_MPEG1VIDEO) {
        /* Needed to avoid using macroblocks in which some coeffs overflow.
           This does not happen with normal video, it just happens here as
           the motion of the chroma plane does not match the luma plane. */
        c->mb_decision = 2;
    }
    // some formats want stream headers to be separate
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}
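
/* Note: when codec_id is CODEC_ID_H264, avcodec_find_encoder() resolves to
 * libx264 (FFmpeg of this era has no native H.264 encoder), so the FFmpeg
 * build must be configured with --enable-libx264. If desired, an x264 preset
 * can be applied with av_opt_set(c->priv_data, "preset", "medium", 0). */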

static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture;
    uint8_t *picture_buf;
    int size;

    picture = avcodec_alloc_frame();
    if (!picture)
        return NULL;
    size = avpicture_get_size(pix_fmt, width, height);
    picture_buf = (uint8_t *)av_malloc(size);
    if (!picture_buf) {
        av_free(picture);
        return NULL;
    }
    avpicture_fill((AVPicture *)picture, picture_buf,
                   pix_fmt, width, height);
    return picture;
}
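
/* Note: for PIX_FMT_YUV420P, avpicture_get_size() yields
 * width * height * 3 / 2 bytes (a full-resolution luma plane plus two
 * quarter-resolution chroma planes); at 640x480 that is 460800 bytes. */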

static void open_video(AVFormatContext *oc, AVStream *st)
{
    AVCodec *codec;
    AVCodecContext *c;

    c = st->codec;

    /* find the video encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    /* open the codec */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    video_outbuf = NULL;
    if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
        /* allocate output buffer */
        /* XXX: API change will be done */
        /* buffers passed into lav* can be allocated any way you prefer,
           as long as they're aligned enough for the architecture, and
           they're freed appropriately (such as using av_free for buffers
           allocated with av_malloc) */
        video_outbuf_size = 200000;
        video_outbuf = (uint8_t *)av_malloc(video_outbuf_size);
    }

    /* allocate the encoded raw picture */
    picture = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!picture) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }

    /* if the output format is not YUV420P, then a temporary YUV420P
       picture is needed too. It is then converted to the required
       output format */
    tmp_picture = NULL;
    if (c->pix_fmt != PIX_FMT_YUV420P) {
        tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
        if (!tmp_picture) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }
}
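
/* Note: video_outbuf_size = 200000 is a fixed guess inherited from the
 * muxing.c example; it is simply assumed to be large enough to hold any
 * single encoded frame at this resolution and bit rate. */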

/* prepare a dummy image */
static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
{
    int x, y, i;

    i = frame_index;

    /* Y */
    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
        }
    }

    /* Cb and Cr */
    for (y = 0; y < height/2; y++) {
        for (x = 0; x < width/2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}
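
/* Note: the Y plane is a diagonal gradient shifted by 3 per frame, and the
 * chroma planes drift at different rates, so the encoded video shows a
 * smoothly scrolling colour pattern; the byte values wrap modulo 256. */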

static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int out_size, ret;
    AVCodecContext *c;
    static struct SwsContext *img_convert_ctx;

    c = st->codec;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* no more frame to compress. The codec has a latency of a few
           frames if using B frames, so we get the last frames by
           passing the same picture again */
    } else {
        if (c->pix_fmt != PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
               to the codec pixel format if needed */
            if (img_convert_ctx == NULL) {
                img_convert_ctx = sws_getContext(c->width, c->height,
                                                 PIX_FMT_YUV420P,
                                                 c->width, c->height,
                                                 c->pix_fmt,
                                                 sws_flags, NULL, NULL, NULL);
                if (img_convert_ctx == NULL) {
                    fprintf(stderr, "Cannot initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
            sws_scale(img_convert_ctx, tmp_picture->data, tmp_picture->linesize,
                      0, c->height, picture->data, picture->linesize);
        } else {
            fill_yuv_image(picture, frame_count, c->width, c->height);
        }
    }

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* raw video case. The API will change slightly in the near
           future for that. */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.stream_index = st->index;
        pkt.data = (uint8_t *)picture;
        pkt.size = sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        /* encode the image */
        out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
        /* if zero size, it means the image was buffered */
        if (out_size > 0) {
            AVPacket pkt;
            av_init_packet(&pkt);

            if (c->coded_frame->pts != AV_NOPTS_VALUE)
                pkt.pts = av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
            if (c->coded_frame->key_frame)
                pkt.flags |= AV_PKT_FLAG_KEY;
            pkt.stream_index = st->index;
            pkt.data = video_outbuf;
            pkt.size = out_size;

            // printf("pts %d \n", c->coded_frame->pts);

            /* write the compressed frame in the media file */
            ret = av_interleaved_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame\n");
        exit(1);
    }
    frame_count++;
}
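
/* Note: av_rescale_q() converts the timestamp from the codec time base
 * (1/25 here) to the stream time base chosen by the muxer, so the same code
 * works whether the container prefers, say, 1/25 or 1/90000 ticks. */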

static void close_video(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);
    av_free(picture->data[0]);
    av_free(picture);
    if (tmp_picture) {
        av_free(tmp_picture->data[0]);
        av_free(tmp_picture);
    }
    av_free(video_outbuf);
}

/**************************************************************/
/* media file output */

int main(int argc, char **argv)
{
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    double audio_pts, video_pts;
    int i;

    /* initialize libavcodec, and register all codecs and formats */
    av_register_all();

#if 0
    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];
#endif

//#define RTMP_STREAM
#ifdef RTMP_STREAM
    filename = "rtmp://192.168.0.239/live/livestream";
#else
    filename = "1.mp4";
#endif
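
    /* Note: leave RTMP_STREAM undefined to write 1.mp4 into the working
     * directory; defining it assumes an RTMP server is listening at the
     * address above, in which case the FLV fallback below applies. */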

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc)
    {
        printf("Could not deduce output format from file extension: using FLV.\n");
        avformat_alloc_output_context2(&oc, NULL, /*"mpeg"*/"flv", filename);
    }

    if (!oc)
    {
        return 1;
    }

    /* force H.264 video and AAC audio */
    oc->oformat->video_codec = CODEC_ID_H264;
    oc->oformat->audio_codec = CODEC_ID_AAC;

    fmt = oc->oformat;

    /* add the audio and video streams using the default format codecs
       and initialize the codecs */
    video_st = NULL;
    audio_st = NULL;
    if (fmt->video_codec != CODEC_ID_NONE) {
        video_st = add_video_stream(oc, fmt->video_codec);
    }
    if (fmt->audio_codec != CODEC_ID_NONE) {
        audio_st = add_audio_stream(oc, fmt->audio_codec);
    }
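
    /* Note: assigning to oc->oformat->video_codec/audio_codec patches the
     * muxer's static AVOutputFormat description in place, i.e. it affects
     * every later use of that muxer in this process. It works here, but a
     * cleaner approach is to pass the desired codec IDs to the
     * add_*_stream() helpers directly. */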

    av_dump_format(oc, 0, filename, 1);

    /* now that all the parameters are set, we can open the audio and
       video codecs and allocate the necessary encode buffers */
    if (video_st)
        open_video(oc, video_st);
    if (audio_st)
        open_audio(oc, audio_st);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            return 1;
        }
    }

    /* write the stream header, if any */
    avformat_write_header(oc, NULL);

    picture->pts = 0;
    for (;;)
    {
        /* compute current audio and video time */
        if (audio_st)
            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
        else
            audio_pts = 0.0;

        if (video_st)
            video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
        else
            video_pts = 0.0;

        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
            (!video_st || video_pts >= STREAM_DURATION))
            break;

        /* write interleaved audio and video frames */
        if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, video_st);
            picture->pts++;
        }
    }
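
    /* Note: the loop always feeds whichever stream has the smaller current
     * timestamp, which keeps audio and video packets roughly interleaved in
     * the output file and stops both streams at STREAM_DURATION. */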

    /* write the trailer, if any. the trailer must be written
     * before you close the CodecContexts open when you wrote the
     * header; otherwise write_trailer may try to use memory that
     * was freed on av_codec_close() */
    av_write_trailer(oc);

    /* close each codec */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    /* free the streams */
    for (i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    if (!(fmt->flags & AVFMT_NOFILE)) {
        /* close the output file */
        avio_close(oc->pb);
    }

    /* free the stream */
    av_free(oc);

    return 0;
}