#include <stdio.h>
#include <string.h>
#include <time.h>
#include "ffrtmp.h"
#if 0
#define AV_Write_Frame av_write_frame
#else
#define AV_Write_Frame av_interleaved_write_frame // AVFormatContext.max_interleave_delta = 10s (default)
#endif
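/*
 * Rough usage sketch (illustrative only; inferred from the functions in this file,
 * the URL and file name below are placeholders):
 *
 *   ff_rtmp_ctx ctx;
 *   ff_init_rtmp(&ctx, "rtmp://example.com/live/stream");
 *   ff_add_streams_from_file(&ctx, "input.mp4");  // or ff_add_264_stream()/ff_add_aac_stream()/ff_add_pcma_stream()
 *   ff_start_rtmp(&ctx);                          // opens the URL and writes the FLV header
 *   int got = 0;
 *   while (ff_write_rtmp_frame(&ctx, &got) >= 0)  // push packets, paced to real time
 *       ;
 *   ff_stop_rtmp(&ctx);                           // flush the muxer and write the trailer
 *   ff_exit_rtmp(&ctx);                           // close inputs/output and reset the context
 */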
int ff_init_rtmp(ff_rtmp_ctx *myctx,const char *out)
{
if(myctx==NULL||out==NULL)
return -1;
memset(myctx,0,sizeof(ff_rtmp_ctx));
myctx->video_index = myctx->audio_index = -1;
strncpy(myctx->outurl,out,sizeof(myctx->outurl)-1);
// init ffmpeg libs
av_register_all();
avformat_network_init();
// init output context
avformat_alloc_output_context2(&myctx->ofmt_ctx, NULL, "flv", myctx->outurl); //RTMP
if(myctx->ofmt_ctx==NULL)
{
printf("avformat_alloc_output_context2 for %s fail\n",myctx->outurl);
return -4;
}
myctx->ofmt_ctx->max_interleave_delta = 3*AV_TIME_BASE;// 3s; this field is in AV_TIME_BASE (microsecond) units, default is 10s
return 0;
}
int ff_exit_rtmp(ff_rtmp_ctx *myctx)
{
printf("ff_exit_rtmp...1\n");
if(myctx->bsf_ctx) {
av_bsf_free(&myctx->bsf_ctx);
}
int i = 0;
for(i=0;i<2;i++)
{
if(myctx->ifmt_ctx[i])
{
avformat_close_input(&myctx->ifmt_ctx[i]);
}
}
/* close output */
if (myctx->ofmt_ctx )
{
if(!(myctx->ofmt_ctx->oformat->flags & AVFMT_NOFILE))
avio_close(myctx->ofmt_ctx->pb);
avformat_free_context(myctx->ofmt_ctx);
}
//reset ff_rtmp_ctx
memset(myctx,0,sizeof(ff_rtmp_ctx));
myctx->video_index = myctx->audio_index = -1;
printf("ff_exit_rtmp...4\n");
return 0;
}
int ff_add_streams_from_file(ff_rtmp_ctx *myctx, const char *infile)
{//supported suffixes: .mov, .mkv, .mp4, .h264, .ts, .flv, .aac
if(myctx == NULL || infile==NULL)
return -1;
int ret = 0;
if(myctx->infile_num >= 2 || (myctx->video_index>=0 && myctx->audio_index>=0) )
{
printf("infile/instreams num %d >= 2!\n",myctx->infile_num);
return -1;
}
int pos = myctx->infile_num++;
if ((ret = avformat_open_input(&myctx->ifmt_ctx[pos], infile, 0, 0)) < 0) {
printf( "Could not open input file.\n");
return -2;
}
if ((ret = avformat_find_stream_info(myctx->ifmt_ctx[pos], 0)) < 0) {
printf( "Failed to retrieve input stream information\n");
return -3;
}
size_t i = 0;
AVFormatContext *ifmt_ctx = myctx->ifmt_ctx[pos];
for(i=0; i<ifmt_ctx->nb_streams; i++)
{
AVStream *in_stream = ifmt_ctx->streams[i];
if(in_stream->codecpar->codec_type==AVMEDIA_TYPE_VIDEO)
{
if(myctx->video_index >= 0)
{
printf("has video stream added!\n");
return -4;
}
myctx->video_index=i;
AVCodecParameters *video_params = in_stream->codecpar;
printf("Video CodecParameters: type = %d,id = %d,tag = %d,extradata = %p,extradata_size = %d,format = %d,width = %d,height = %d,profile = %d,level = %d,bit_rate = %jd,video_delay = %d\n",
video_params->codec_type,video_params->codec_id,video_params->codec_tag,video_params->extradata,video_params->extradata_size,
video_params->format, video_params->width,video_params->height,
video_params->profile,video_params->level,video_params->bit_rate,video_params->video_delay);
printf("\tSample Aspect Ratio = {%d,%d},bits_per_coded_sample = %d,bits_per_raw_sample = %d,field_order = %d\n",
video_params->sample_aspect_ratio.num,video_params->sample_aspect_ratio.den,
video_params->bits_per_coded_sample,video_params->bits_per_raw_sample,video_params->field_order);
printf("\tColorSpace range = %d,primaries = %d,trc = %d,space = %d,chroma_location = %d\n",
video_params->color_range,video_params->color_primaries,video_params->color_trc,
video_params->color_space,video_params->chroma_location);
int j = 0;
printf("\tExtraData(Hex):");
for(j=0;j<video_params->extradata_size;j++)
{
printf("%02x ",video_params->extradata[j]);
}
printf("\n");
}
else if(in_stream->codecpar->codec_type==AVMEDIA_TYPE_AUDIO)
{
if(myctx->audio_index >= 0)
{
printf("has audio stream added!\n");
return -4;
}
myctx->audio_index = i;
AVCodecParameters *audio_params = in_stream->codecpar;
printf("Audio CodecParameters: chnlayout = %jd,channels = %d,sample_rate = %d,profile = %d,level = %d,bits_per_raw_sample = %d\n",
audio_params->channel_layout, audio_params->channels,audio_params->sample_rate,
audio_params->profile,audio_params->level,audio_params->bits_per_raw_sample);
if(audio_params->codec_id == AV_CODEC_ID_AAC)
{
// init Bitstream Filter: aac_adtstoasc
int ret = 0;
const AVBitStreamFilter *filter = av_bsf_get_by_name("aac_adtstoasc");
if(!filter)
{
av_log(NULL,AV_LOG_ERROR,"Unkonw bitstream filter");
return -4;
}
ret = av_bsf_alloc(filter, &myctx->bsf_ctx);
if(ret < 0)
{
av_log(NULL,AV_LOG_ERROR,"alloc bitstream filter failed!\n");
return -4;
}
// the filter needs the input codec parameters and must be initialized before packets are sent
avcodec_parameters_copy(myctx->bsf_ctx->par_in, in_stream->codecpar);
myctx->bsf_ctx->time_base_in = in_stream->time_base;
ret = av_bsf_init(myctx->bsf_ctx);
if(ret < 0)
{
av_log(NULL,AV_LOG_ERROR,"init bitstream filter failed!\n");
return -4;
}
}
}
// FIXME: force a {1,1000} time base (the FLV muxer uses millisecond timestamps)
in_stream->time_base.num = 1;
in_stream->time_base.den = 1000;
/*
* Create output AVStream according to input AVStream
*/
AVStream *out_stream = avformat_new_stream(myctx->ofmt_ctx, avcodec_find_encoder(in_stream->codecpar->codec_id));
if (!out_stream) {
printf( "Failed allocating output stream\n");
ret = AVERROR_UNKNOWN;
return ret;
}
//Copy the settings of AVCodecContext
ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
if (ret < 0) {
printf( "Failed to copy context from input to output stream codec context\n");
return ret;
}
if(in_stream->codecpar->codec_type==AVMEDIA_TYPE_VIDEO)
{//Manually carry over r_frame_rate, because it is not part of AVCodecParameters
AVRational frate = av_stream_get_r_frame_rate(in_stream);
av_stream_set_r_frame_rate(out_stream,frate);
#if 0
AVCodecParameters *video_params = out_stream->codecpar;
printf("VO-CodecParameters: type = %d,id = %d,tag = %d,extradata = %p,extradata_size = %d,format = %d,width = %d,height = %d,profile = %d,level = %d,bit_rate = %ld,video_delay = %d\n",
video_params->codec_type,video_params->codec_id,video_params->codec_tag,video_params->extradata,video_params->extradata_size,
video_params->format, video_params->width,video_params->height,
video_params->profile,video_params->level,video_params->bit_rate,video_params->video_delay);
printf("\tSample Aspect Ratio = {%d,%d},bits_per_coded_sample = %d,bits_per_raw_sample = %d,field_order = %d\n",
video_params->sample_aspect_ratio.num,video_params->sample_aspect_ratio.den,
video_params->bits_per_coded_sample,video_params->bits_per_raw_sample,video_params->field_order);
printf("\tColorSpace range = %d,primaries = %d,trc = %d,space = %d,chroma_location = %d\n",
video_params->color_range,video_params->color_primaries,video_params->color_trc,
video_params->color_space,video_params->chroma_location);
#endif
}
out_stream->codecpar->codec_tag = 0;
//if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
// out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
av_dump_format(myctx->ifmt_ctx[pos], 0, infile, 0);
return 0;
}
int ff_start_rtmp(ff_rtmp_ctx *myctx)
{
int ret = 0;
if(myctx->video_index == myctx->audio_index)
{//video and audio come from separate single-stream input files (both in-file index 0); give the audio its own output stream index
myctx->audio_index = 1 - myctx->video_index;
}
//Dump Format------------------
av_dump_format(myctx->ofmt_ctx, 0, myctx->outurl, 1);
//Open output URL
AVOutputFormat *ofmt = myctx->ofmt_ctx->oformat;
if (!(ofmt->flags & AVFMT_NOFILE)) {
AVDictionary *options = NULL;
av_dict_set(&options,"rw_timeout","3000000",0);// 3s -> 3000000us
//ret = avio_open(&myctx->ofmt_ctx->pb, myctx->outurl, AVIO_FLAG_WRITE);
ret = avio_open2(&myctx->ofmt_ctx->pb, myctx->outurl, AVIO_FLAG_WRITE,NULL,&options);
AVDictionaryEntry *e = NULL;
while((e=av_dict_get(options,"",e,AV_DICT_IGNORE_SUFFIX)))
{
printf("Option %s was not recognized by avio_open2!\n",e->key);
}
}
av_dict_free(&options);
if (ret < 0) {
printf( "Could not open output URL '%s' \n", myctx->outurl);
return -1;
}
}
//Write file header
ret = avformat_write_header(myctx->ofmt_ctx, NULL);
if (ret < 0) {
printf( "Error occurred when opening output URL\n");
return -2;
}
myctx->start_time=av_gettime();
printf("~~~ffrtmp [%ld] ctx start time = %jd\n",time(NULL),myctx->start_time);
return 0;
}
int ff_stop_rtmp(ff_rtmp_ctx *myctx)
{
//flush data buffered within the muxer or flush the interleaving queues.
AV_Write_Frame(myctx->ofmt_ctx, NULL);
printf("Total send %d video frames,%d audio frames to output URL...End!\n",
myctx->video_frames_num,myctx->audio_frames_num);
//Write file trailer
av_write_trailer(myctx->ofmt_ctx);
return 0;
}
int ff_add_264_stream(ff_rtmp_ctx *myctx,int w,int h,int fps,int brate, uint8_t *extra,int extrasize,void *h264ctx,ffrtmp_read_func h264cb)
{
if(myctx==NULL || h264ctx==NULL || h264cb==NULL)
return -1;
/*
 * H.264 NAL unit type (low 5 bits of the NAL header byte): 5 - IDR slice, 6 - SEI, 7 - SPS, 8 - PPS, 9 - access unit delimiter, 10 - end of sequence
 */
myctx->ctx_h264 = h264ctx;
myctx->h264_func = h264cb;
AVStream *out_stream = avformat_new_stream(myctx->ofmt_ctx, avcodec_find_encoder(AV_CODEC_ID_H264));
if (!out_stream) {
printf( "Failed allocating output stream for H264\n");
return -1;
}
myctx->video_index = myctx->ofmt_ctx->nb_streams-1;
AVCodecParameters *codecpar = out_stream->codecpar;
codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
codecpar->codec_id = AV_CODEC_ID_H264;
codecpar->codec_tag = 0;
codecpar->format = AV_PIX_FMT_YUV420P;
codecpar->width = w;
codecpar->height = h;
codecpar->bit_rate = 0;
codecpar->profile = 66;//Baseline
codecpar->level = 31;
codecpar->bits_per_raw_sample = 8;
codecpar->sample_aspect_ratio.num = 0;//unknown
codecpar->sample_aspect_ratio.den = 1;
codecpar->field_order = AV_FIELD_PROGRESSIVE;
codecpar->chroma_location = AVCHROMA_LOC_LEFT;//H264 4:2:0
codecpar->video_delay = 0;
codecpar->extradata_size = extrasize;
if(extrasize > 0)
{
codecpar->extradata = (uint8_t*)av_malloc(extrasize + AV_INPUT_BUFFER_PADDING_SIZE);
memset(codecpar->extradata,0,extrasize + AV_INPUT_BUFFER_PADDING_SIZE);
memcpy(codecpar->extradata,extra,extrasize);
}
AVRational framerate = {fps,1};
av_stream_set_r_frame_rate(out_stream,framerate);
out_stream->time_base.num = 1;
out_stream->time_base.den = 1000;
out_stream->codecpar->codec_tag = 0;
AVCodecParameters *video_params = codecpar;
printf("Video CodecParameters: type = %d,id = %d,tag = %d,extradata = %p,format = %d,width = %d,height = %d,profile = %d,level = %d,bit_rate = %jd,video_delay = %d\n",
video_params->codec_type,video_params->codec_id,video_params->codec_tag,video_params->extradata,
video_params->format, video_params->width,video_params->height,
video_params->profile,video_params->level,video_params->bit_rate,video_params->video_delay);
printf("\tSample Aspect Ratio = {%d,%d},bits_per_coded_sample = %d,bits_per_raw_sample = %d,field_order = %d\n",
video_params->sample_aspect_ratio.num,video_params->sample_aspect_ratio.den,
video_params->bits_per_coded_sample,video_params->bits_per_raw_sample,video_params->field_order);
printf("\tColorSpace range = %d,primaries = %d,trc = %d,space = %d,chroma_location = %d\n",
video_params->color_range,video_params->color_primaries,video_params->color_trc,
video_params->color_space,video_params->chroma_location);
return 0;
}
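/*
 * Sketch of a video read callback (hypothetical; ffrtmp_read_func is declared in
 * ffrtmp.h, and this signature is only inferred from how h264_func is invoked in
 * ff_write_rtmp_frame()):
 *
 *   int my_h264_read(void *ctx, char *buf, int bufsize, int64_t *pts, int *keyframe)
 *   {
 *       // copy one Annex-B H.264 access unit into buf, set *pts (in ms, matching the
 *       // 1/1000 stream time base) and *keyframe, and return the byte count;
 *       // return 0 when no frame is ready yet, or a negative value on error
 *       return 0;
 *   }
 */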
int ff_add_aac_stream(ff_rtmp_ctx *myctx,int chns,int samplerate,int bits, void *aactx,ffrtmp_read_func aacb)
{
if(myctx==NULL||aactx==NULL||aacb==NULL)
return -1;
myctx->ctx_aac = aactx;
myctx->aacs_func = aacb;
AVStream *out_stream = avformat_new_stream(myctx->ofmt_ctx, avcodec_find_encoder(AV_CODEC_ID_AAC));
if (!out_stream) {
printf( "Failed allocating output stream for AAC\n");
return -1;
}
myctx->audio_index = myctx->ofmt_ctx->nb_streams-1;
AVCodecParameters *audio_codecpar = out_stream->codecpar;
audio_codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
audio_codecpar->codec_id = AV_CODEC_ID_AAC;
audio_codecpar->codec_tag = 0;
audio_codecpar->channel_layout = (chns==2)?AV_CH_LAYOUT_STEREO:AV_CH_LAYOUT_MONO;// stereo (0x3) or mono (0x4)
audio_codecpar->channels = chns;
audio_codecpar->sample_rate = samplerate;
audio_codecpar->bits_per_coded_sample = bits;
audio_codecpar->bits_per_raw_sample = bits;
audio_codecpar->format = (bits==32)?AV_SAMPLE_FMT_FLTP:AV_SAMPLE_FMT_S16P;
out_stream->time_base.num = 1;
out_stream->time_base.den = 1000;
out_stream->codecpar->codec_tag = 0;
return 0;
}
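/*
 * Note: FFmpeg's FLV muxer maps AV_CODEC_ID_PCM_ALAW to FLV SoundFormat 7
 * (G.711 A-law); G.711 sources are normally 8 kHz mono.
 */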
int ff_add_pcma_stream(ff_rtmp_ctx *myctx,int chns,int samplerate,int bits,void *g711ctx,ffrtmp_read_func g711cb)
{
if(myctx==NULL||g711ctx==NULL||g711cb==NULL)
return -1;
myctx->ctx_g711 = g711ctx;
myctx->g711_func = g711cb;
AVStream *out_stream = avformat_new_stream(myctx->ofmt_ctx, avcodec_find_encoder(AV_CODEC_ID_PCM_ALAW));
if (!out_stream) {
printf( "Failed allocating output stream for PCM_ALAW\n");
return -1;
}
myctx->audio_index = myctx->ofmt_ctx->nb_streams-1;
AVCodecParameters *audio_codecpar = out_stream->codecpar;
audio_codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
audio_codecpar->codec_id = AV_CODEC_ID_PCM_ALAW;
audio_codecpar->codec_tag = 0;
audio_codecpar->channel_layout = (chns==2)?AV_CH_LAYOUT_STEREO:AV_CH_LAYOUT_MONO;// stereo (0x3) or mono (0x4)
audio_codecpar->channels = chns;
audio_codecpar->sample_rate = samplerate;
audio_codecpar->bits_per_coded_sample = bits;
audio_codecpar->bits_per_raw_sample = bits;
audio_codecpar->format = (bits==16)?AV_SAMPLE_FMT_S16:AV_SAMPLE_FMT_U8;
out_stream->time_base.num = 1;
out_stream->time_base.den = 1000;
out_stream->codecpar->codec_tag = 0;
return 0;
}
#if 0
int ff_write_rtmp_frame(ff_rtmp_ctx *myctx)
{
int ret = 0;
AVPacket pkt, apkt;
AVStream *in_stream = NULL, *out_stream = NULL;
int audioindex = myctx->audio_index;
int videoindex = myctx->video_index;
//Get an AVPacket
if(myctx->ctx_g711 && myctx->audio_frames_num < myctx->video_frames_num)
{
char pcmaBuf[640]={0};
int pcmaSize = 0;
int64_t pts = AV_NOPTS_VALUE;
pcmaSize = myctx->g711_func(myctx->ctx_g711,pcmaBuf,sizeof(pcmaBuf),&pts,NULL);
if(pcmaSize < 0)
{
printf("read g711 frame fail,ret = %d\n",pcmaSize);
return -1;
}
else if(pcmaSize == 0)
{
//printf("read g711 frame empty!\n");
return 0;
}
uint8_t *pdata = (uint8_t*)av_malloc(pcmaSize);
memcpy(pdata,pcmaBuf,pcmaSize);
ret = av_packet_from_data(&pkt,pdata,pcmaSize);
if(ret < 0)
{
printf("av_packet_from_data fail,ret = %d\n",ret);
av_free(pdata);
return -1;
}
pkt.pts = pts;
pkt.dts = pts;
pkt.stream_index = audioindex;// audio index
}
else if(myctx->ifmt_ctx[1]!=NULL && myctx->audio_frames_num < myctx->video_frames_num)
{
AVFormatContext *afmt_ctx = myctx->ifmt_ctx[1];
ret = av_read_frame(afmt_ctx,&pkt);
//printf("read audio ret = %d,pkt size = %d,pts = %ld\n",ret,pkt.size,pkt.pts);
if(ret < 0)
{
printf("read audio ret = %d,it's end!\n",ret);
return -2;
}
in_stream = afmt_ctx->streams[0];
pkt.pts = AV_NOPTS_VALUE;//reset it!
pkt.stream_index = audioindex; // Audio Stream
}
else if(myctx->ifmt_ctx[0])
{//video, or video+audio muxer
AVFormatContext *ifmt_ctx = myctx->ifmt_ctx[0];
ret = av_read_frame(ifmt_ctx, &pkt);
if (ret < 0)
{
printf("read video frame fail,ret = %d\n",ret);
return -3;
}
in_stream = ifmt_ctx->streams[pkt.stream_index];
}
else if(myctx->ctx_h264 && myctx->h264_func)
{
char frame[512*1024]={0};
int keyflag = 0;
int64_t pts = AV_NOPTS_VALUE;
int size = myctx->h264_func(myctx->ctx_h264,frame,sizeof(frame),&pts,&keyflag);
if(size < 0)
{
printf("read video frame fail,ret = %d\n",size);
return -1;
}
else if(size == 0)
{
//printf("read video frame empty!\n");
return 0;
}
uint8_t *pdata = (uint8_t*)av_malloc(size);
memcpy(pdata,frame,size);
ret = av_packet_from_data(&pkt,pdata,size);
if(ret < 0)
{
printf("av_packet_from_data fail,ret = %d\n",ret);
av_free(pdata);
return -1;
}
if(keyflag) pkt.flags = AV_PKT_FLAG_KEY;
pkt.pts = pts;
pkt.dts = pts;
pkt.stream_index = videoindex ;
}
else
{
return -4;
}
int wr_size = pkt.size;
AVFormatContext *ifmt_ctx = myctx->ifmt_ctx[0];
AVFormatContext *ofmt_ctx = myctx->ofmt_ctx;
out_stream = ofmt_ctx->streams[pkt.stream_index];
AVRational video_time_base=ofmt_ctx->streams[videoindex]->time_base;
AVRational video_frate = ofmt_ctx->streams[videoindex]->r_frame_rate;//{25,1};
//printf("video frate index = %d,frate = %d/%d\n",videoindex,video_frate.num,video_frate.den);
if(ifmt_ctx) { video_time_base=ifmt_ctx->streams[videoindex]->time_base; }
//FIX:No PTS (Example: Raw H.264)
//Simple Write PTS
if(pkt.pts==AV_NOPTS_VALUE)
{ //FIXME: Write PTS reference video
//Duration between 2 frames (us)
int64_t calc_duration=(double)AV_TIME_BASE/av_q2d(video_frate);
pkt.pts=(double)(myctx->video_frames_num*calc_duration)/(double)(av_q2d(video_time_base)*AV_TIME_BASE);
//Parameters,unit : stream->time_base
pkt.dts=pkt.pts;
pkt.duration=(double)calc_duration/(double)(av_q2d(video_time_base)*AV_TIME_BASE);
}
//Important:Delay
if(pkt.stream_index==videoindex)
{
AVRational time_base_q={1,AV_TIME_BASE};
int64_t pts_time = av_rescale_q(pkt.dts, video_time_base, time_base_q);
int64_t now_time = av_gettime() - myctx->start_time;
int64_t calc_duration=(double)AV_TIME_BASE/av_q2d(video_frate);
if(myctx->video_frames_num==0) {
printf("video frate index = %d,frate = %d/%d\n",videoindex,video_frate.num,video_frate.den);
printf("video timebase = %d/%d\n",video_time_base.num,video_time_base.den);
printf("video duration = %jd,pkt.dts = %jd\n",calc_duration,pkt.dts);
printf("-----video frame num %d ,pts = %jd,now = %jd\n", myctx->video_frames_num,pts_time,now_time);
}
if (pts_time > now_time)
{
int64_t sleep_us = pts_time - now_time;
if(sleep_us >= 50*1000) {
printf("video frate index = %d,frate = %d/%d\n",videoindex,video_frate.num,video_frate.den);
printf("video timebase = %d/%d\n",video_time_base.num,video_time_base.den);
printf("video duration = %jd,pkt.dts = %jd\n",calc_duration,pkt->dts);
printf("(> 50ms, skip sleep)video frame num %d sleep us = %jd,pts = %jd,now = %jd\n",
myctx->video_frames_num,sleep_us,pts_time,now_time);
} else {
av_usleep(sleep_us);
}
}
}
/* copy packet */
//Convert PTS/DTS, note: in_stream maybe null when we fill AV_Packet ourself!
pkt.pts = av_rescale_q_rnd(pkt.pts, video_time_base/*in_stream->time_base*/, out_stream->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
pkt.dts = av_rescale_q_rnd(pkt.dts, video_time_base/*in_stream->time_base*/, out_stream->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
pkt.duration = av_rescale_q(pkt.duration, video_time_base/*in_stream->time_base*/, out_stream->time_base);
pkt.pos = -1;
if(pkt.stream_index==videoindex)
{
myctx->video_frames_num ++;
printf("[video:%d]dts = %jd,duration = %jd,size = %d,data= %p\n", myctx->video_frames_num,pkt.dts,pkt.duration,pkt.size,pkt.data);
//printf("[video:%d]index = %d,pts = %ld,dts = %ld,duration = %ld,size = %d,data= %p,flags = 0x%x,pos = %ld,side_data = %p\n",
// myctx->video_frames_num,pkt.stream_index,pkt.pts,pkt.dts,pkt.duration,pkt.size,pkt.data,pkt.flags,pkt.pos,pkt.side_data);
ret = AV_Write_Frame(ofmt_ctx, &pkt);
//printf("av_[interleaved]_write_frame ret = %d\n",ret);
if (ret < 0) {
printf( "Error muxing packet\n");
av_packet_unref(&pkt);
return -5;
}
}
else
{
myctx->audio_frames_num ++;
if(myctx->ctx_g711)
{//G711 Audio Stream
//printf("[audio: %d] pts = %ld,duration = %ld,size = %d\n",
// myctx->audio_frames_num,pkt.pts,pkt.duration,pkt.size);
ret = AV_Write_Frame(ofmt_ctx, &pkt);
if (ret < 0) {
printf( "Error muxing packet\n");
av_packet_unref(&pkt);
return -5;
}
}
else if(myctx->bsf_ctx && in_stream->codecpar->codec_id==AV_CODEC_ID_AAC)
{// AAC Audio Stream
ret = av_bsf_send_packet(myctx->bsf_ctx,&pkt);
if(ret < 0)
{
av_log(NULL,AV_LOG_ERROR,"send packet to filter failed!");
av_packet_unref(&pkt);
return -5;
}
int cnt = 0;
while((ret = av_bsf_receive_packet(myctx->bsf_ctx,&apkt))==0)
{
//printf("[%d]audio packet pts = %ld,duration = %ld,size = %d\n",cnt++,apkt.pts,apkt.duration,apkt.size);
ret = AV_Write_Frame(ofmt_ctx, &apkt);
if (ret < 0) {
printf( "Error muxing packet\n");
av_packet_unref(&apkt);
break;
}
av_packet_unref(&apkt);
}
}
}
av_packet_unref(&pkt);
return wr_size;
}
#endif
int ff_write_rtmp_frame(ff_rtmp_ctx *myctx, int *got_size)
{
int ret = 0;
//AVPacket pkt, apkt;
AVStream *in_stream = NULL, *out_stream = NULL;
int audioindex = myctx->audio_index;
int videoindex = myctx->video_index;
AVPacket *pkt = av_packet_alloc();
AVPacket *apkt = av_packet_alloc();
int wr_size = 0;
AVFormatContext *ifmt_ctx = NULL;
AVFormatContext *ofmt_ctx = NULL;
AVRational video_time_base;
AVRational video_frate;
//Get an AVPacket
if(myctx->ctx_g711 && myctx->audio_frames_num < myctx->video_frames_num)
{
char pcmaBuf[640]={0};
int pcmaSize = 0;
int64_t pts = AV_NOPTS_VALUE;
uint8_t *pdata = NULL;
pcmaSize = myctx->g711_func(myctx->ctx_g711,pcmaBuf,sizeof(pcmaBuf),&pts,NULL);
if(pcmaSize < 0)
{
printf("read g711 frame fail,ret = %d\n",pcmaSize);
ret = -1;
goto WRITE_END;
}
else if(pcmaSize == 0)
{
//printf("read g711 frame empty!\n");
ret = 0;
goto WRITE_END;
}
pdata = (uint8_t*)av_malloc(pcmaSize);
memcpy(pdata,pcmaBuf,pcmaSize);
ret = av_packet_from_data(pkt,pdata,pcmaSize);
if(ret < 0)
{
printf("av_packet_from_data fail,ret = %d\n",ret);
av_free(pdata);
ret = -1;
goto WRITE_END;
}
pkt->pts = pts;
pkt->dts = pts;
pkt->stream_index = audioindex;// audio index
}
else if(myctx->ifmt_ctx[1]!=NULL && myctx->audio_frames_num < myctx->video_frames_num)
{
AVFormatContext *afmt_ctx = myctx->ifmt_ctx[1];
ret = av_read_frame(afmt_ctx,pkt);
//printf("read audio ret = %d,pkt size = %d,pts = %ld\n",ret,pkt->size,pkt->pts);
if(ret < 0)
{
printf("read audio ret = %d,it's end!\n",ret);
ret = -2;
goto WRITE_END;
}
in_stream = afmt_ctx->streams[0];
pkt->pts = AV_NOPTS_VALUE;//reset it!
pkt->stream_index = audioindex; // Audio Stream
}
else if(myctx->ifmt_ctx[0])
{//video, or video+audio muxer
AVFormatContext *ifmt_ctx = myctx->ifmt_ctx[0];
ret = av_read_frame(ifmt_ctx, pkt);
if (ret < 0)
{
printf("read video frame fail,ret = %d\n",ret);
ret = -3;
goto WRITE_END;
}
in_stream = ifmt_ctx->streams[pkt->stream_index];
}
else if(myctx->ctx_h264 && myctx->h264_func)
{
char frame[512*1024]={0};
int keyflag = 0;
int64_t pts = AV_NOPTS_VALUE;
int size = myctx->h264_func(myctx->ctx_h264,frame,sizeof(frame),&pts,&keyflag);
uint8_t *pdata = NULL;
if(size < 0)
{
printf("read video frame fail,ret = %d\n",size);
ret = -1;
goto WRITE_END;
}
else if(size == 0)
{
//printf("read video frame empty!\n");
ret = 0;
goto WRITE_END;
}
pdata = (uint8_t*)av_malloc(size);
memcpy(pdata,frame,size);
ret = av_packet_from_data(pkt,pdata,size);
if(ret < 0)
{
printf("av_packet_from_data fail,ret = %d\n",ret);
av_free(pdata);
ret = -1;
goto WRITE_END;
}
if(keyflag) pkt->flags = AV_PKT_FLAG_KEY;
pkt->pts = pts;
pkt->dts = pts;
pkt->stream_index = videoindex ;
}
else
{
ret = -4;
goto WRITE_END;
}
wr_size = pkt->size;
ifmt_ctx = myctx->ifmt_ctx[0];
ofmt_ctx = myctx->ofmt_ctx;
out_stream = ofmt_ctx->streams[pkt->stream_index];
video_time_base=ofmt_ctx->streams[videoindex]->time_base;
video_frate = ofmt_ctx->streams[videoindex]->r_frame_rate;//{25,1};
//printf("video frate index = %d,frate = %d/%d\n",videoindex,video_frate.num,video_frate.den);
if(ifmt_ctx) { video_time_base=ifmt_ctx->streams[videoindex]->time_base; }
//Fix missing PTS (e.g. raw H.264 carries no container timestamps)
//Synthesize PTS/DTS from the video frame counter and the nominal frame rate
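/*
 * Derivation of the synthesized timestamps below:
 *   calc_duration = AV_TIME_BASE / fps                                  (frame duration in microseconds)
 *   pts = frame_num * calc_duration / (av_q2d(time_base) * AV_TIME_BASE)
 * i.e. the frame's wall-clock offset converted into the video stream's time base
 * (with a 1/1000 time base this is simply frame_num * 1000 / fps milliseconds).
 */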
if(pkt->pts==AV_NOPTS_VALUE)
{ //FIXME: Write PTS reference video
//Duration between 2 frames (us)
int64_t calc_duration=(double)AV_TIME_BASE/av_q2d(video_frate);
pkt->pts=(double)(myctx->video_frames_num*calc_duration)/(double)(av_q2d(video_time_base)*AV_TIME_BASE);
//Parameters,unit : stream->time_base
pkt->dts=pkt->pts;
pkt->duration=(double)calc_duration/(double)(av_q2d(video_time_base)*AV_TIME_BASE);
}
//Pacing: compare the packet timestamp (rescaled to microseconds) with the wall clock elapsed since ff_start_rtmp(), and sleep the difference so frames are not pushed faster than real time
if(pkt->stream_index==videoindex)
{
AVRational time_base_q={1,AV_TIME_BASE};
int64_t pts_time = av_rescale_q(pkt->dts, video_time_base, time_base_q);
int64_t now_time = av_gettime() - myctx->start_time;
int64_t calc_duration=(double)AV_TIME_BASE/av_q2d(video_frate);
if(myctx->video_frames_num==0) {
printf("video frate index = %d,frate = %d/%d\n",videoindex,video_frate.num,video_frate.den);
printf("video timebase = %d/%d\n",video_time_base.num,video_time_base.den);
printf("video duration = %jd,pkt.dts = %jd\n",calc_duration,pkt->dts);
printf("-----video frame num %d ,pts = %jd,now = %jd\n", myctx->video_frames_num,pts_time,now_time);
}
if (pts_time > now_time)
{
int64_t sleep_us = pts_time - now_time;
if(sleep_us >= 50*1000) {
printf("video frate index = %d,frate = %d/%d\n",videoindex,video_frate.num,video_frate.den);
printf("video timebase = %d/%d\n",video_time_base.num,video_time_base.den);
printf("video duration = %jd,pkt.dts = %jd\n",calc_duration,pkt->dts);
printf("(> 50ms, skip sleep)video frame num %d sleep us = %jd,pts = %jd,now = %jd\n",
myctx->video_frames_num,sleep_us,pts_time,now_time);
} else {
av_usleep(sleep_us);
}
}
}
/* rescale the packet */
//Convert PTS/DTS to the output stream's time base. Note: in_stream may be NULL when we filled the AVPacket ourselves, so video_time_base is used instead of in_stream->time_base.
pkt->pts = av_rescale_q_rnd(pkt->pts, video_time_base/*in_stream->time_base*/, out_stream->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
pkt->dts = av_rescale_q_rnd(pkt->dts, video_time_base/*in_stream->time_base*/, out_stream->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
pkt->duration = av_rescale_q(pkt->duration, video_time_base/*in_stream->time_base*/, out_stream->time_base);
pkt->pos = -1;
if(pkt->stream_index==videoindex)
{
myctx->video_frames_num ++;
//printf("[video:%d]dts = %jd,duration = %jd,size = %d,data= %p\n", myctx->video_frames_num,pkt->dts,pkt->duration,pkt->size,pkt->data);
//printf("[video:%d]index = %d,pts = %ld,dts = %ld,duration = %ld,size = %d,data= %p,flags = 0x%x,pos = %ld,side_data = %p\n",
// myctx->video_frames_num,pkt->stream_index,pkt->pts,pkt->dts,pkt->duration,pkt->size,pkt->data,pkt->flags,pkt->pos,pkt->side_data);
/*
 * Common write errors seen here:
 *  -32  (EPIPE): broken pipe, usually a socket error; typically the streaming server closed the connection, so the send fails
 *  -104 (ECONNRESET): connection reset by peer, e.g. the server shut down or exited
 *  -110 (ETIMEDOUT): connection timed out
 */
ret = AV_Write_Frame(ofmt_ctx, pkt);
//printf("av_[interleaved]_write_frame ret = %d\n",ret);
if (ret < 0) {
char errstr[128];
av_strerror(ret,errstr,sizeof(errstr));
printf( "Error muxing write videox packet,ret = %d,str = %s\n",ret,errstr);
if(ret > -5) ret = -5; // 保留原始错误值用于处理
goto WRITE_END;
}
}
else
{
myctx->audio_frames_num ++;
if(myctx->ctx_g711)
{//G711 Audio Stream
//printf("[audio: %d] pts = %ld,duration = %ld,size = %d\n",
// myctx->audio_frames_num,pkt->pts,pkt->duration,pkt->size);
ret = AV_Write_Frame(ofmt_ctx, pkt);
if (ret < 0) {
printf( "Error muxing write G711 packet,ret = %d\n",ret);
if(ret > -5) ret = -5; // clamp small codes to -5; keep more negative FFmpeg error codes so the caller can inspect them
goto WRITE_END;
}
}
else if(myctx->bsf_ctx && in_stream->codecpar->codec_id==AV_CODEC_ID_AAC)
{// AAC Audio Stream
ret = av_bsf_send_packet(myctx->bsf_ctx,pkt);
if(ret < 0)
{
av_log(NULL,AV_LOG_ERROR,"send packet to filter failed!");
ret = -5;
goto WRITE_END;
}
//int cnt = 0;
while((ret = av_bsf_receive_packet(myctx->bsf_ctx,apkt))==0)
{
//printf("[%d]audio packet pts = %ld,duration = %ld,size = %d\n",cnt++,apkt->pts,apkt->duration,apkt->size);
ret = AV_Write_Frame(ofmt_ctx, apkt);
if (ret < 0) {
printf( "Error muxing write aac packet,ret = %d\n",ret);
if(ret > -5) ret = -5;
goto WRITE_END;
}
av_packet_unref(apkt);
}
if(ret==AVERROR(EAGAIN) || ret==AVERROR_EOF)
{// the bitstream filter was fully drained; this is not an error
ret = 0;
}
}
}
WRITE_END:
av_packet_free(&pkt);
av_packet_free(&apkt);
if(got_size!=NULL)
{// report the number of bytes actually consumed
*got_size = wr_size;
}
return ret;
}