

Setting up the FFmpeg SDK




Published: 2012-08-01 13:43:12

There are plenty of articles online about compiling the FFmpeg sources, but if you are not studying FFmpeg internals, the goal is usually just to use FFmpeg directly. This post sets up a prebuilt SDK instead:

Environment: Windows 7 + VC6 + FFmpeg-full-SDK-3.2.

1: Download FFmpeg-full-SDK-3.2.rar (a fairly recent build at the time of writing); it can be fetched from http://www.ffmpeg.com.cn/index.php/%E9%A6%96%E9%A1%B5.

2: After unpacking you get three folders: bin, lib, and include. Put the unpacked FFmpeg-full-SDK-3.2 folder on whichever drive you prefer.

3: In VC6, configure the paths via Tools -> Options -> Directories... (alternatively, you can drop bin, lib, and include straight into your project directory), for example:
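Assuming the SDK was unpacked to D:\ (the drive letter is only an assumption for illustration), the Directories entries would be:

Include files: D:\FFmpeg-full-SDK-3.2\include
Library files: D:\FFmpeg-full-SDK-3.2\lib
Executable files: D:\FFmpeg-full-SDK-3.2\bin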

4: Add the corresponding .h includes and .lib references, as follows:

extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
}
#pragma comment(lib,"avcodec.lib")
#pragma comment(lib,"avdevice.lib")
#pragma comment(lib,"avfilter.lib")
#pragma comment(lib,"avformat.lib")
#pragma comment(lib,"avutil.lib")
#pragma comment(lib,"swscale.lib")

With that, you can call FFmpeg from VC. Note: some definitions in FFmpeg and CxImage conflict with each other; when setting up VC, keeping only FFmpeg visible in any one translation unit is enough to avoid the problem. A sketch of one way to do this follows.
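A minimal sketch of that isolation (the file name ffmpeg_wrap and the function ffmpeg_init are my own illustration, not part of the SDK): confine the FFmpeg headers to a single .cpp file and expose only plain declarations to the code that uses CxImage.

// ffmpeg_wrap.h -- contains no FFmpeg headers, so it is safe to include
// alongside CxImage
#pragma once
void ffmpeg_init(void);

// ffmpeg_wrap.cpp -- the only translation unit that sees FFmpeg headers
#include "ffmpeg_wrap.h"
extern "C"
{
#include "libavformat/avformat.h"
}
#pragma comment(lib,"avformat.lib")

void ffmpeg_init(void)
{
av_register_all(); // register all codecs and formats once at startup
}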

FFmpeg ships with an example, output_example.c; our next task is to build and debug it in the VC environment configured above. There are other examples in the FFmpeg source folder as well; search for the keyword "example".

There are also many other C files worth borrowing from, as well as the well-known "ffmpeg tutorial" (an FFmpeg and SDL tutorial circulating on the web), which contains a number of examples for reference.

To make the example easy to call from VC:

1: Rename the main function to anything you like, e.g. maintest (the name used in the listing below); leave the parameters unchanged for now.

2: Compiling under VC6 reports a number of errors, but they are all about implicit conversions, so adding explicit casts fixes them, for example:
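The typical casts needed (these exact lines appear in the adapted listing below; unlike C, C++ does not implicitly convert the void* returned by av_malloc, nor int to an enum):

audio_outbuf = (uint8_t*)av_malloc(audio_outbuf_size);
samples = (int16_t*)av_malloc(audio_input_frame_size * 2 * c->channels);
c->codec_id = (CodecID)codec_id;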

3: Copy the DLLs from the bin folder next to your executable, or set up the project/PATH accordingly.

4: For convenience, I define a single file, myffmpeg.h, bundling the header includes, the .lib pragmas, and the corrected contents of output_example.c. Once FFmpeg is configured, just add myffmpeg.h to the project.

myffmpeg.h is as follows:

#pragma once

extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
}
#pragma comment(lib,"avcodec.lib")
#pragma comment(lib,"avdevice.lib")
#pragma comment(lib,"avfilter.lib")
#pragma comment(lib,"avformat.lib")
#pragma comment(lib,"avutil.lib")
#pragma comment(lib,"swscale.lib")


/*
* Libavformat API example: Output a media file in any supported
* libavformat format. The default codecs are used.
*
* Copyright (c) 2003 Fabrice Bellard
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

#include "libavformat/avformat.h"
#include "libswscale/swscale.h"

#undef exit /* the original example undefines exit() because FFmpeg's internal headers redefine it when built inside the source tree; harmless here */

/* 5 seconds stream duration */
#define STREAM_DURATION 5.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */

static int sws_flags = SWS_BICUBIC; /* bicubic filtering for sws_scale when pixel-format conversion is needed */

/**************************************************************/
/* audio output */

float t, tincr, tincr2;
int16_t *samples;
uint8_t *audio_outbuf;
int audio_outbuf_size;
int audio_input_frame_size;

/*
* add an audio output stream
*/
static AVStream *add_audio_stream(AVFormatContext *oc, int codec_id)
{
AVCodecContext *c;
AVStream *st;

st = av_new_stream(oc, 1);
if (!st) {
fprintf(stderr, "Could not alloc stream\n");
exit(1);
}

c = st->codec;
c->codec_id = (CodecID)codec_id;
c->codec_type = CODEC_TYPE_AUDIO;

/* put sample parameters */
c->bit_rate = 64000;
c->sample_rate = 44100;
c->channels = 2;
return st;
}
// essentially initialization: find and open the encoder, then allocate buffers
static void open_audio(AVFormatContext *oc, AVStream *st)
{
AVCodecContext *c;
AVCodec *codec;

c = st->codec;

/* find the audio encoder */
codec = avcodec_find_encoder(c->codec_id);
if (!codec) {
fprintf(stderr, "codec not found\n");
exit(1);
}

/* open it */
if (avcodec_open(c, codec) < 0) {
fprintf(stderr, "could not open codec\n");
exit(1);
}

/* init signal generator */
t = 0;
tincr = 2 * M_PI * 110.0 / c->sample_rate;
/* increment frequency by 110 Hz per second */
tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
/* the three lines above set up a sine sweep: t is the phase, tincr the per-sample phase step for a 110 Hz tone, and adding tincr2 every sample raises the frequency by 110 Hz per second */
audio_outbuf_size = 10000;
audio_outbuf = (uint8_t*)av_malloc(audio_outbuf_size);

/* ugly hack for PCM codecs (will be removed ASAP with new PCM
support) to compute the input frame size in samples */
if (c->frame_size <= 1) {
audio_input_frame_size = audio_outbuf_size / c->channels;
switch(st->codec->codec_id) {
case CODEC_ID_PCM_S16LE:
case CODEC_ID_PCM_S16BE:
case CODEC_ID_PCM_U16LE:
case CODEC_ID_PCM_U16BE:
audio_input_frame_size >>= 1;
break;
default:
break;
}
} else {
audio_input_frame_size = c->frame_size;
}
samples = (int16_t*)av_malloc(audio_input_frame_size * 2 * c->channels);
}

/* prepare a 16 bit dummy audio frame of 'frame_size' samples and
'nb_channels' channels */
static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
{
int j, i, v;
int16_t *q;

q = samples;
for(j=0;j<frame_size;j++) {
v = (int)(sin(t) * 10000);
for(i = 0; i < nb_channels; i++)
*q++ = v;
t += tincr;
tincr += tincr2;
}
}
// key routine: synthesize, encode, and write one audio frame
static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
AVCodecContext *c;
AVPacket pkt;
av_init_packet(&pkt);

c = st->codec;

get_audio_frame(samples, audio_input_frame_size, c->channels);

pkt.size= avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);

if (c->coded_frame->pts != AV_NOPTS_VALUE)
pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
pkt.flags |= PKT_FLAG_KEY;
pkt.stream_index= st->index;
pkt.data= audio_outbuf;

/* write the compressed frame in the media file */
if (av_write_frame(oc, &pkt) != 0) {
fprintf(stderr, "Error while writing audio frame\n");
exit(1);
}
}

static void close_audio(AVFormatContext *oc, AVStream *st)
{
avcodec_close(st->codec);

av_free(samples);
av_free(audio_outbuf);
}

/**************************************************************/
/* video output */

AVFrame *picture, *tmp_picture;
uint8_t *video_outbuf;
int frame_count, video_outbuf_size;

/* add a video output stream */
static AVStream *add_video_stream(AVFormatContext *oc, int codec_id)
{
AVCodecContext *c;
AVStream *st;

st = av_new_stream(oc, 0);
if (!st) {
fprintf(stderr, "Could not alloc stream\n");
exit(1);
}

c = st->codec;
c->codec_id = (CodecID)codec_id;
c->codec_type = CODEC_TYPE_VIDEO;

/* put sample parameters */
c->bit_rate = 400000;
/* resolution must be a multiple of two */
c->width = 352;
c->height = 288;
/* time base: this is the fundamental unit of time (in seconds) in terms
of which frame timestamps are represented. for fixed-fps content,
timebase should be 1/framerate and timestamp increments should be
identically 1. */
c->time_base.den = STREAM_FRAME_RATE;
c->time_base.num = 1;
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
c->pix_fmt = STREAM_PIX_FMT;
if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
/* just for testing, we also add B frames */
c->max_b_frames = 2;
}
if (c->codec_id == CODEC_ID_MPEG1VIDEO){
/* Needed to avoid using macroblocks in which some coeffs overflow.
This does not happen with normal video, it just happens here as
the motion of the chroma plane does not match the luma plane. */
c->mb_decision=2;
}
// some formats want stream headers to be separate
if(!strcmp(oc->oformat->name, "mp4") || !strcmp(oc->oformat->name, "mov") || !strcmp(oc->oformat->name, "3gp"))
c->flags |= CODEC_FLAG_GLOBAL_HEADER;

return st;
}

static AVFrame *alloc_picture(int pix_fmt, int width, int height)
{
AVFrame *picture;
uint8_t *picture_buf;
int size;

picture = avcodec_alloc_frame();
if (!picture)
return NULL;
size = avpicture_get_size(pix_fmt, width, height);
picture_buf = (uint8_t *)av_malloc(size);
if (!picture_buf) {
av_free(picture);
return NULL;
}
avpicture_fill((AVPicture *)picture, picture_buf,
pix_fmt, width, height);
return picture;
}

static void open_video(AVFormatContext *oc, AVStream *st)
{
AVCodec *codec;
AVCodecContext *c;

c = st->codec;

/* find the video encoder */
codec = avcodec_find_encoder(c->codec_id);
if (!codec) {
fprintf(stderr, "codec not found\n");
exit(1);
}

/* open the codec */
if (avcodec_open(c, codec) < 0) {
fprintf(stderr, "could not open codec\n");
exit(1);
}

video_outbuf = NULL;
if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
/* allocate output buffer */
/* XXX: API change will be done */
/* buffers passed into lav* can be allocated any way you prefer,
as long as they're aligned enough for the architecture, and
they're freed appropriately (such as using av_free for buffers
allocated with av_malloc) */
video_outbuf_size = 200000;
video_outbuf = (uint8_t*)av_malloc(video_outbuf_size);
}

/* allocate the encoded raw picture */
picture = alloc_picture(c->pix_fmt, c->width, c->height);
if (!picture) {
fprintf(stderr, "Could not allocate picture\n");
exit(1);
}

/* if the output format is not YUV420P, then a temporary YUV420P
picture is needed too. It is then converted to the required
output format */
tmp_picture = NULL;
if (c->pix_fmt != PIX_FMT_YUV420P) {
tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
if (!tmp_picture) {
fprintf(stderr, "Could not allocate temporary picture\n");
exit(1);
}
}
}

/* prepare a dummy image */
static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
{
int x, y, i;

i = frame_index;

/* Y */
for(y=0;y<height;y++) {
for(x=0;x<width;x++) {
pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
}
}

/* Cb and Cr */
for(y=0;y<height/2;y++) {
for(x=0;x<width/2;x++) {
pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
}
}
}

static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
int out_size, ret;
AVCodecContext *c;
static struct SwsContext *img_convert_ctx;

c = st->codec;

if (frame_count >= STREAM_NB_FRAMES) {
/* no more frame to compress. The codec has a latency of a few
frames if using B frames, so we get the last frames by
passing the same picture again */
} else {
if (c->pix_fmt != PIX_FMT_YUV420P) {
/* as we only generate a YUV420P picture, we must convert it
to the codec pixel format if needed */
if (img_convert_ctx == NULL) {
img_convert_ctx = sws_getContext(c->width, c->height,
PIX_FMT_YUV420P,
c->width, c->height,
c->pix_fmt,
sws_flags, NULL, NULL, NULL);
if (img_convert_ctx == NULL) {
fprintf(stderr, "Cannot initialize the conversion context\n");
exit(1);
}
}
fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
sws_scale(img_convert_ctx, tmp_picture->data, tmp_picture->linesize,
0, c->height, picture->data, picture->linesize);
} else {
fill_yuv_image(picture, frame_count, c->width, c->height);
}
}


if (oc->oformat->flags & AVFMT_RAWPICTURE) {
/* raw video case. The API will change slightly in the near
future for that */
AVPacket pkt;
av_init_packet(&pkt);

pkt.flags |= PKT_FLAG_KEY;
pkt.stream_index= st->index;
pkt.data= (uint8_t *)picture;
pkt.size= sizeof(AVPicture);

ret = av_write_frame(oc, &pkt);
} else {
/* encode the image */
out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
/* if zero size, it means the image was buffered */
if (out_size > 0) {
AVPacket pkt;
av_init_packet(&pkt);

if (c->coded_frame->pts != AV_NOPTS_VALUE)
pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
if(c->coded_frame->key_frame)
pkt.flags |= PKT_FLAG_KEY;
pkt.stream_index= st->index;
pkt.data= video_outbuf;
pkt.size= out_size;

/* write the compressed frame in the media file */
ret = av_write_frame(oc, &pkt);
} else {
ret = 0;
}
}
if (ret != 0) {
fprintf(stderr, "Error while writing video frame\n");
exit(1);
}
frame_count++;
}

static void close_video(AVFormatContext *oc, AVStream *st)
{
avcodec_close(st->codec);
av_free(picture->data[0]);
av_free(picture);
if (tmp_picture) {
av_free(tmp_picture->data[0]);
av_free(tmp_picture);
}
av_free(video_outbuf);
}

/**************************************************************/
/* media file output */

int maintest(int argc, char **argv)//int main(int argc, char **argv)
{
const char *filename;
AVOutputFormat *fmt;
AVFormatContext *oc;
AVStream *audio_st, *video_st;
double audio_pts, video_pts;
int i;

/* initialize libavcodec, and register all codecs and formats */
av_register_all();
// the short block below validates the command line
if (argc != 2) {
printf("usage: %s output_file\n"
"API example program to output a media file with libavformat.\n"
"The output format is automatically guessed according to the file extension.\n"
"Raw images can also be output by using '%%d' in the filename\n"
"\n", argv[0]);
exit(1);
}

filename = argv[1];

/* auto detect the output format from the name. default is
mpeg. */
fmt = guess_format(NULL, filename, NULL);
if (!fmt) {
printf("Could not deduce output format from file extension: using MPEG.\n");
fmt = guess_format("mpeg", NULL, NULL);
}
if (!fmt) {
fprintf(stderr, "Could not find suitable output format\n");
exit(1);
}

/* allocate the output media context */
oc = av_alloc_format_context();
if (!oc) {
fprintf(stderr, "Memory error\n");
exit(1);
}
oc->oformat = fmt;
snprintf(oc->filename, sizeof(oc->filename), "%s", filename); // note: copies the target file name into the context; VC6 lacks C99 snprintf, so _snprintf may be needed

/* add the audio and video streams using the default format codecs
and initialize the codecs */
video_st = NULL;
audio_st = NULL;
if (fmt->video_codec != CODEC_ID_NONE) {
video_st = add_video_stream(oc, fmt->video_codec);
}
if (fmt->audio_codec != CODEC_ID_NONE) {
audio_st = add_audio_stream(oc, fmt->audio_codec);
}

/* set the output parameters (must be done even if no
parameters). */
if (av_set_parameters(oc, NULL) < 0) {
fprintf(stderr, "Invalid output format parameters\n");
exit(1);
}

dump_format(oc, 0, filename, 1);
// everything from av_register_all() down to here (format guessing, context allocation, stream setup) is the crucial part
/* now that all the parameters are set, we can open the audio and
video codecs and allocate the necessary encode buffers */
if (video_st)
open_video(oc, video_st);
if (audio_st)
open_audio(oc, audio_st);

/* open the output file, if needed */
if (!(fmt->flags & AVFMT_NOFILE)) {
if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0) {
fprintf(stderr, "Could not open '%s'\n", filename);
exit(1);
}
}

/* write the stream header, if any */
av_write_header(oc);

for(;;) {
/* compute current audio and video time */
if (audio_st)
audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
else
audio_pts = 0.0;

if (video_st)
video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
else
video_pts = 0.0;

if ((!audio_st || audio_pts >= STREAM_DURATION) &&
(!video_st || video_pts >= STREAM_DURATION))
break;

/* write interleaved audio and video frames */
if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
write_audio_frame(oc, audio_st);
} else {
write_video_frame(oc, video_st);
}
}

/* close each codec */
if (video_st)
close_video(oc, video_st);
if (audio_st)
close_audio(oc, audio_st);

/* write the trailer, if any */
av_write_trailer(oc);

/* free the streams */
for(i = 0; i < oc->nb_streams; i++) {
av_freep(&oc->streams[i]->codec);
av_freep(&oc->streams[i]);
}

if (!(fmt->flags & AVFMT_NOFILE)) {
/* close the output file */
url_fclose(oc->pb);
}

/* free the output context */
av_free(oc);

return 0;
}
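With myffmpeg.h added to the project, calling the renamed entry point might look like the following minimal sketch (the wrapper main and the file name output.mpg are illustrative assumptions, not part of the SDK):

#include "myffmpeg.h"

int main(void)
{
// maintest expects argv[1] to name the output file; the container
// format is guessed from the extension, e.g. .mpg selects MPEG
char *args[] = { (char*)"demo", (char*)"output.mpg" };
return maintest(2, args);
}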

Source: CSDN

 
