Posted by a forum user, 2022-04-22 10:06
2 answers in total
Helpful netizen, answered 2023-10-31 18:00
#include <stdio.h>
#include <windows.h>
#include "string.h"
#include "stdlib.h"
// #include "avcodec.h"
// #include "avformat.h"
// #include "avutil.h"
// #include "flvdec.h"
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavutil/avutil.h"
#include "libswscale/swscale.h"
#pragma comment(lib,"avcodec.lib")
#pragma comment(lib,"avformat.lib")
#pragma comment(lib,"avutil.lib")
#pragma comment(lib,"swscale.lib")
bool GetNextFrame(AVFormatContext *pFormatCtx, AVCodecContext *pCodecCtx, int videoStream, AVFrame *pFrame)
{
static AVPacket packet;
static int bytesRemaining=0;
static uint8_t *rawData;
static bool fFirstTime=true;
int bytesDecoded;
int frameFinished;
int ii = 0;
AVPacket packettest;
// First time we're called, set packet.data to NULL to indicate it
// doesn't have to be freed
if(fFirstTime)
{
fFirstTime=false;
packet.data=NULL;
}
// Decode packets until we have decoded a complete frame
while(true)
{
// Work on the current packet until we have decoded all of it
while(bytesRemaining > 0)
{
// Decode the next chunk of data
bytesDecoded=avcodec_decode_video2(pCodecCtx, pFrame,
&frameFinished, &packet);
// Was there an error?
if(bytesDecoded < 0)
{
fprintf(stderr, "Error while decoding frame\n");
return false;
}
bytesRemaining-=bytesDecoded;
rawData+=bytesDecoded;
// Did we finish the current frame? Then we can return
if(frameFinished)
return true;
}
// Read the next packet, skipping all packets that aren't for this
// stream
do
{
++ii;
// Free old packet
if(packet.data!=NULL)
av_free_packet(&packet);
// Read new packet
if(/*av_read_packet*/av_read_frame(pFormatCtx, &packet)<0)
goto loop_exit;
memcpy(&packettest, &packet, sizeof(AVPacket));
printf("%d\n", packet.stream_index);
} while(packet.stream_index!=videoStream);
bytesRemaining=packet.size;
rawData=packet.data;
}
loop_exit:
// Decode the rest of the last frame
bytesDecoded=avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
&packet);
// Free last packet
if(packet.data!=NULL)
av_free_packet(&packet);
return frameFinished!=0;
}
void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame)
{
FILE *pFile;
char szFilename[32];
int y;
/////////////////////
BITMAPFILEHEADER m_fileHeader;
BITMAPINFOHEADER m_infoHeader;
unsigned int uiTmp, uiTmp2;
unsigned char *ucTmp = NULL;
unsigned char ucRGB;
int i;
uiTmp = (width*3+3)/4*4*height;
uiTmp2 = width*height*3;
// File type identifier: "BM" (0x4D42) marks a bitmap
m_fileHeader.bfType = 0x4D42;
// Total file size in bytes
m_fileHeader.bfSize = sizeof(m_fileHeader) + sizeof(m_infoHeader) + uiTmp;
// Reserved, must be 0
m_fileHeader.bfReserved1 = 0;
// Reserved, must be 0
m_fileHeader.bfReserved2 = 0;
// Offset from the start of the file to the bitmap data, in bytes
m_fileHeader.bfOffBits = sizeof(m_fileHeader) + sizeof(m_infoHeader);
// Size of the info header in bytes; 0x28 (40) for BITMAPINFOHEADER
m_infoHeader.biSize = 0x28;
// Bitmap width in pixels
m_infoHeader.biWidth = width;
// Bitmap height in pixels; positive means a bottom-up bitmap, negative means top-down
m_infoHeader.biHeight = height;
// Number of planes (must be 1)
m_infoHeader.biPlanes = 1;
// Bits per pixel
m_infoHeader.biBitCount = 24;
// Compression type; 0 (BI_RGB) means uncompressed
m_infoHeader.biCompression = 0;
// Size of the bitmap data in bytes (rows padded to a multiple of 4)
m_infoHeader.biSizeImage = uiTmp;
// Horizontal resolution in pixels per meter
m_infoHeader.biXPelsPerMeter = 0;
// Vertical resolution in pixels per meter
m_infoHeader.biYPelsPerMeter = 0;
// Number of palette colors used (0 = default)
m_infoHeader.biClrUsed = 0;
// Number of important colors (0 = all)
m_infoHeader.biClrImportant = 0;
/////////////////////
// Open file
sprintf(szFilename, "frame%d.bmp", iFrame);
pFile=fopen(szFilename, "wb");
if(pFile==NULL)
return;
// Write the BMP file header and info header
fwrite(&m_fileHeader, sizeof(m_fileHeader), 1, pFile);
fwrite(&m_infoHeader, sizeof(m_infoHeader), 1, pFile);
for(y=height-1; y>=0; y--) {
if(ucTmp != NULL) {
delete []ucTmp;
ucTmp = NULL;
}
ucTmp = new unsigned char[width*3];
memcpy(ucTmp, pFrame->data[0]+y*pFrame->linesize[0], width*3);
for(i = 0; i < width; i++) {
ucRGB = ucTmp[3*i];
ucTmp[3*i] = ucTmp[3*i+2];
ucTmp[3*i+2] = ucRGB;
}
ucRGB = 0;
fwrite(ucTmp, 1, width*3, pFile);
fwrite(&ucRGB, 1, (uiTmp-uiTmp2)/height , pFile);
}
// Close file
fclose(pFile);
}
int main(/*int argc, char *argv[]*/)
{
AVFormatContext *pFormatCtx;
unsigned int i;
int videoStream;
AVCodecContext *pCodecCtx;
AVCodec *pCodec;
AVFrame *pFrame;
AVFrame *pFrameRGB;
int numBytes;
uint8_t *buffer;
AVInputFormat *fmt;
char filename[50];
//printf("please input a filename:\n");
// scanf("%s",filename);
strcpy(filename,"chicken.avi");
// Register all formats and codecs
av_register_all();
//avformat_network_init();
//pFormatCtx=av_alloc_format_context();
// fmt = av_find_input_format("flv");
// pFormatCtx->iformat = fmt;
// Open video file
if(av_open_input_file(&pFormatCtx,filename, /*fmt*/NULL, 0, NULL)!=0)
return -1; // Couldn't open file
// Retrieve stream information
if(av_find_stream_info(pFormatCtx)<0)
return -1; // Couldn't find stream information
// Dump information about file onto standard error
dump_format(pFormatCtx, 0, filename, false);
// Find the first video stream
videoStream=-1;
for(i=0; i<pFormatCtx->nb_streams; i++)
if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO)
{
videoStream=i;
break;
}
if(videoStream==-1)
return -1; // Didn't find a video stream
// Get a pointer to the codec context for the video stream
pCodecCtx=pFormatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec==NULL)
return -1; // Codec not found
// Inform the codec that we can handle truncated bitstreams -- i.e.,
// bitstreams where frame boundaries can fall in the middle of packets
/*
if(pCodec->capabilities & CODEC_CAP_TRUNCATED)
pCodecCtx->flags|=CODEC_FLAG_TRUNCATED;*/
// Open codec
if(avcodec_open(pCodecCtx, pCodec)<0)
return -1; // Could not open codec
// Hack to correct wrong frame rates that seem to be generated by some
// codecs
/*
if(pCodecCtx->frame_rate>1000 && pCodecCtx->frame_rate_base==1)
pCodecCtx->frame_rate_base=1000;*/
// Allocate video frame
pFrame=avcodec_alloc_frame();
// Allocate an AVFrame structure
pFrameRGB=avcodec_alloc_frame();
if(pFrameRGB==NULL)
return -1;
// Determine required buffer size and allocate buffer
numBytes=avpicture_get_size(PIX_FMT_RGB24/*PIX_FMT_YUV420P*/, pCodecCtx->width,
pCodecCtx->height);
buffer=(uint8_t*)av_malloc(numBytes*sizeof(uint8_t));
// Assign appropriate parts of buffer to image planes in pFrameRGB
avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
pCodecCtx->width, pCodecCtx->height);
// Read frames and save the first 512 of them to disk
i=0;
while(GetNextFrame(pFormatCtx, pCodecCtx, videoStream, pFrame))
{
// Set up the software-scaler context once; the dimensions and pixel
// formats never change, so there is no need to recreate it every frame
static struct SwsContext *img_convert_ctx = NULL;
if(img_convert_ctx == NULL)
img_convert_ctx = sws_getContext(pCodecCtx->width,
pCodecCtx->height,
pCodecCtx->pix_fmt,
pCodecCtx->width,
pCodecCtx->height,
PIX_FMT_RGB24,
SWS_BICUBIC, NULL, NULL, NULL);
// Convert the image from its native format to RGB
sws_scale(img_convert_ctx,
(const uint8_t* const*)pFrame->data,
pFrame->linesize,
0,
pCodecCtx->height,
pFrameRGB->data,
pFrameRGB->linesize);
// Save the frame to disk
if(++i<=512)
SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
}
printf("\n%d",i);
// Free the RGB image
av_free(buffer);
av_free(pFrameRGB);
// Free the YUV frame
av_free(pFrame);
// Close the codec
avcodec_close(pCodecCtx);
// Close the video file
av_close_input_file(pFormatCtx);
return 0;
}
This is code I found online and modified a little. It can read a video file and turn the frames in the video into corresponding BMP images. The decoded frames are already in YUV format, so if you want YUV you only need to write them out to a file.
The program also needs the matching DLLs, LIB files and header files, but there is no way for me to send those to you here. Follow-up question: Friend, does this code also include the "writing to the corresponding file" part you mentioned?
Follow-up answer: It extracts the frames from the video in order and writes them out as BMP images, which you can open directly with an image viewer. If you want to write them out as YUV instead, a small change around the GetNextFrame function is enough; see the sketch below.
This is the main code of a project written in VC 6.0, and it can extract the frame images.
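As a rough illustration of the YUV route mentioned above, here is a minimal sketch of a helper that appends one decoded frame as raw planar YUV 4:2:0. It assumes the decoder really outputs PIX_FMT_YUV420P and sticks to the same old FFmpeg API as the code above; the name DumpFrameYUV and the output file name are made up for illustration, not part of the original project.

// Minimal sketch (assumption: pCodecCtx->pix_fmt is PIX_FMT_YUV420P).
// Call it right after GetNextFrame() succeeds, before any sws_scale conversion.
void DumpFrameYUV(AVFrame *pFrame, int width, int height, FILE *pFile)
{
    int y;
    // Y plane: full resolution; copy row by row because linesize may include padding
    for(y = 0; y < height; y++)
        fwrite(pFrame->data[0] + y*pFrame->linesize[0], 1, width, pFile);
    // U plane: half resolution in both directions for 4:2:0
    for(y = 0; y < height/2; y++)
        fwrite(pFrame->data[1] + y*pFrame->linesize[1], 1, width/2, pFile);
    // V plane: half resolution in both directions for 4:2:0
    for(y = 0; y < height/2; y++)
        fwrite(pFrame->data[2] + y*pFrame->linesize[2], 1, width/2, pFile);
}

Open a single FILE* with fopen("out.yuv", "wb") before the decode loop and call DumpFrameYUV(pFrame, pCodecCtx->width, pCodecCtx->height, fp) for every frame; the result is a raw YUV stream that a raw-YUV player can display once you tell it the width, height and frame rate.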