Preparation
FFmpeg version 4.4
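The sample below remuxes a raw H.264 elementary stream into an MP4 container. Packets are copied as-is, with no decoding or re-encoding; only their timestamps are synthesized and rescaled for the new container.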
extern "C"
{
#include "libavformat/avformat.h"
//引入时间
#include "libavutil/time.h"
}
// Link the FFmpeg import libraries
#pragma comment(lib,"avformat.lib")
// utility library: error strings, rationals, timing, etc.
#pragma comment(lib,"avutil.lib")
// encoder/decoder library
#pragma comment(lib,"avcodec.lib")
int avError(int errNum)
{
char buf[1024] = { 0 };
av_strerror(errNum, buf, sizeof(buf));
std::cout << "failed info:" << buf << std::endl;
return -1;
}
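// Remux a raw H.264 file into MP4: copy packets without re-encoding,
// generating and rescaling timestamps along the way.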
int TestH264ToMP4()
{
int videoIndex = -1;
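// avformat_network_init() is only required for network protocols such as
// RTMP; calling it for local-file input/output is harmless.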
avformat_network_init();
const char* pszFile = "D:/hls/mytest.h264"; // raw H.264 input
const char* pszOutFile = "F:/hls/home/test.mp4"; // local MP4 output file
AVFormatContext* pInputAVFormatContext = NULL;
AVOutputFormat* pAVOutputFormat = NULL;
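// 1. Open the input file and probe its streams and codec parameters.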
int nRet = avformat_open_input(&pInputAVFormatContext, pszFile, 0, NULL);
if (nRet < 0)
{
return avError(nRet);
}
nRet = avformat_find_stream_info(pInputAVFormatContext, 0);
if (nRet < 0) // avformat_find_stream_info() returns >= 0 on success
{
return avError(nRet);
}
av_dump_format(pInputAVFormatContext, 0, pszFile, 0);
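// 2. Allocate an output context for the MP4 muxer.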
AVFormatContext* pOutputAVFormatContext = NULL;
nRet = avformat_alloc_output_context2(&pOutputAVFormatContext, NULL, "mp4", pszOutFile);
if (nRet < 0)
{
return avError(nRet);
}
pAVOutputFormat = pOutputAVFormatContext->oformat;
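// Mirror each input stream into the output and copy the codec parameters
// verbatim (stream copy, no transcoding). codec_tag is cleared so the MP4
// muxer can choose a tag that fits the container.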
for (int i = 0; i < pInputAVFormatContext->nb_streams; i++)
{
AVStream* pInputAVStream = pInputAVFormatContext->streams[i];
AVStream* pOutputAVStream = avformat_new_stream(pOutputAVFormatContext, NULL);
if (!pOutputAVStream)
{
return avError(AVERROR(ENOMEM));
}
nRet = avcodec_parameters_copy(pOutputAVStream->codecpar, pInputAVStream->codecpar);
if (nRet < 0)
{
return avError(nRet);
}
pOutputAVStream->codecpar->codec_tag = 0;
}
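// Remember the video stream index; the raw H.264 input carries no
// timestamps, so they are synthesized for this stream below.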
for (int i = 0; i < pInputAVFormatContext->nb_streams; i++)
{
if (pInputAVFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
{
videoIndex = i;
break;
}
}
av_dump_format(pOutputAVFormatContext, 0, pszOutFile, 1);
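// 3. Open the output file for writing and emit the container header.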
nRet = avio_open(&pOutputAVFormatContext->pb, pszOutFile, AVIO_FLAG_WRITE);
if (nRet < 0)
{
return avError(nRet);
}
nRet = avformat_write_header(pOutputAVFormatContext, NULL);
if (nRet < 0)
{
return avError(nRet);
}
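// 4. Main remux loop: read one packet at a time, fix up its timestamps,
// and write it to the muxer.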
AVPacket pkt;
std::int64_t llFrameIndex = 0; // video packets seen so far, used to synthesize timestamps
while (true)
{
AVStream* pInputStream = NULL;
AVStream* pOutputStream = NULL;
nRet = av_read_frame(pInputAVFormatContext, &pkt);
if (nRet < 0)
{
break;
}
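// A raw H.264 stream has no pts/dts, so derive them from the nominal
// frame rate: each frame lasts AV_TIME_BASE/fps microseconds, converted
// into the stream's time base.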
if (pkt.pts == AV_NOPTS_VALUE)
{
AVRational time_base1 = pInputAVFormatContext->streams[videoIndex]->time_base;
std::int64_t llCalcDuration = (double)AV_TIME_BASE / av_q2d(pInputAVFormatContext->streams[videoIndex]->r_frame_rate);
pkt.pts = (double)(llFrameIndex * llCalcDuration) / (double(av_q2d(time_base1)*AV_TIME_BASE));
pkt.dts = pkt.pts;
pkt.duration = (double)llCalcDuration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
}
// When pushing to a live server (e.g. RTMP) you would rescale pkt.dts into
// AV_TIME_BASE units here, compare it against the wall-clock time elapsed
// since av_gettime() at the start, and pace the loop with av_usleep().
// Writing to a local MP4 file needs no such pacing, so no delay is applied.
pInputStream = pInputAVFormatContext->streams[pkt.stream_index];
pOutputStream = pOutputAVFormatContext->streams[pkt.stream_index];
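// Rescale pts/dts/duration from the input stream's time base to the
// output stream's time base.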
pkt.pts = av_rescale_q_rnd(pkt.pts, pInputStream->time_base, pOutputStream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.dts = av_rescale_q_rnd(pkt.dts, pInputStream->time_base, pOutputStream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.duration = av_rescale_q(pkt.duration, pInputStream->time_base, pOutputStream->time_base); // no (int) cast: duration is int64_t
// byte position within the stream; -1 means the position is unknown
pkt.pos = -1;
if (pkt.stream_index == videoIndex)
{
llFrameIndex++;
}
nRet = av_interleaved_write_frame(pOutputAVFormatContext, &pkt);
if (nRet < 0) {
printf("发送数据包出错\n");
break;
}
av_packet_unref(&pkt);
}
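// 5. Flush any buffered packets and write the MP4 trailer (this finalizes
// the moov box; without it the file will not play).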
av_write_trailer(pOutputAVFormatContext);
if (!(pAVOutputFormat->flags & AVFMT_NOFILE)) // AVFMT_NOFILE formats manage their own I/O; only close pb if we opened it
{
avio_close(pOutputAVFormatContext->pb);
}
avformat_free_context(pOutputAVFormatContext);
avformat_close_input(&pInputAVFormatContext);
return 0;
}
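For completeness, a minimal driver might look like the sketch below. main() is not part of the original listing, and the hard-coded input/output paths above are assumed to exist.
int main()
{
// Hypothetical entry point: run the remux and report the outcome.
if (TestH264ToMP4() != 0)
{
std::cout << "remux failed" << std::endl;
return -1;
}
std::cout << "remux finished" << std::endl;
return 0;
}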