Merge remote-tracking branch 'qatar/master'
* qatar/master:
  avconv: make -frames work for all types of streams, not just video.
  bfi: K&R cosmetics
  bgmc: K&R cleanup
  rawdec: Set start_time to 0 for raw audio files.
  Detect 'yuv2' as rawvideo also in avi.
  rawdec: propagate pict_type information to the output frame
  rawdec: Support more QT 1bpp rawvideo files.
  avconv: free bitstream filters
  threads: limit the number of automatic threads to MAX_AUTO_THREADS
  avplay: K&R cleanup
  fate: use rgb24 as output format for dfa tests
  threads: set thread_count to 1 when thread support is disabled
  threads: introduce CODEC_CAP_AUTO_THREADS and add it to libx264

Conflicts:
	ffplay.c
	libavcodec/avcodec.h
	libavcodec/pthread.c
	libavcodec/version.h
	tests/ref/fate/dfa1
	tests/ref/fate/dfa10
	tests/ref/fate/dfa11
	tests/ref/fate/dfa2
	tests/ref/fate/dfa3
	tests/ref/fate/dfa4
	tests/ref/fate/dfa5
	tests/ref/fate/dfa6
	tests/ref/fate/dfa7
	tests/ref/fate/dfa8
	tests/ref/fate/dfa9

Merged-by: Michael Niedermayer <michaelni@gmx.at>
 ffmpeg.c | 30 ++++++++++++++++++++----------
 1 file changed, 20 insertions(+), 10 deletions(-)

--- a/ffmpeg.c
+++ b/ffmpeg.c
@@ -812,6 +812,15 @@ void av_noreturn exit_program(int ret)
         avformat_free_context(s);
         av_dict_free(&output_files[i].opts);
     }
+    for (i = 0; i < nb_output_streams; i++) {
+        AVBitStreamFilterContext *bsfc = output_streams[i].bitstream_filters;
+        while (bsfc) {
+            AVBitStreamFilterContext *next = bsfc->next;
+            av_bitstream_filter_close(bsfc);
+            bsfc = next;
+        }
+        output_streams[i].bitstream_filters = NULL;
+    }
     for (i = 0; i < nb_input_files; i++) {
         avformat_close_input(&input_files[i].ctx);
     }
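The hunk above adds cleanup of each output stream's bitstream filter chain on exit. Below is a minimal standalone sketch of the same walk-and-close pattern, assuming the AVBitStreamFilterContext linked-list API ffmpeg.c used at the time; the helper name is illustrative only, not part of the patch.

    #include <libavcodec/avcodec.h>

    /* Illustrative helper: close every filter in a chain and clear the head
     * pointer, exactly as the loop added to exit_program() does for
     * output_streams[i].bitstream_filters. */
    static void free_bsf_chain(AVBitStreamFilterContext **head)
    {
        AVBitStreamFilterContext *bsfc = *head;
        while (bsfc) {
            AVBitStreamFilterContext *next = bsfc->next; /* save before closing */
            av_bitstream_filter_close(bsfc);             /* frees this filter context */
            bsfc = next;
        }
        *head = NULL; /* chain is gone; avoid a dangling pointer */
    }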
@@ -958,8 +967,10 @@ static double get_sync_ipts(const OutputStream *ost)
     return (double)(ist->pts - of->start_time) / AV_TIME_BASE;
 }
 
-static void write_frame(AVFormatContext *s, AVPacket *pkt, AVCodecContext *avctx, AVBitStreamFilterContext *bsfc)
+static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
 {
+    AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
+    AVCodecContext *avctx = ost->st->codec;
     int ret;
 
     while (bsfc) {
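With the new signature, write_frame() derives the codec context and the filter chain from the OutputStream itself, so every call site in the hunks below shrinks to the same shape. A schematic comparison, using only names that appear in the diff:

    /* before this merge: the caller passed the pieces explicitly */
    write_frame(s, &pkt, ost->st->codec, ost->bitstream_filters);

    /* after this merge: write_frame() looks both up from ost, and also
     * increments ost->frame_number (see the following hunk) */
    write_frame(s, &pkt, ost);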
@@ -989,6 +1000,7 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, AVCodecContext *avctx
         print_error("av_interleaved_write_frame()", ret);
         exit_program(1);
     }
+    ost->frame_number++;
 }
 
 static void generate_silence(uint8_t* buf, enum AVSampleFormat sample_fmt, size_t size)
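Centralizing the ost->frame_number increment inside write_frame() is what lets -frames apply to every stream type, per the first bullet of the merge message. A hedged sketch of how a per-stream cap could key off this counter; the actual limit handling lives elsewhere in ffmpeg.c/avconv, and the max_frames field is assumed from the OutputStream of this era:

    /* Hypothetical check, placed wherever a packet is about to be emitted:
     * once the muxed-packet counter reaches the requested -frames value,
     * stop writing packets for this output stream. */
    if (ost->frame_number >= ost->max_frames)
        return;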
@@ -1188,7 +1200,7 @@ need_realloc:
             if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
                 pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
             pkt.flags |= AV_PKT_FLAG_KEY;
-            write_frame(s, &pkt, enc, ost->bitstream_filters);
+            write_frame(s, &pkt, ost);
 
             ost->sync_opts += enc->frame_size;
         }
@@ -1223,7 +1235,7 @@ need_realloc:
         if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
             pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
         pkt.flags |= AV_PKT_FLAG_KEY;
-        write_frame(s, &pkt, enc, ost->bitstream_filters);
+        write_frame(s, &pkt, ost);
     }
 }
 
@@ -1325,7 +1337,7 @@ static void do_subtitle_out(AVFormatContext *s,
             else
                 pkt.pts += 90 * sub->end_display_time;
         }
-        write_frame(s, &pkt, ost->st->codec, ost->bitstream_filters);
+        write_frame(s, &pkt, ost);
     }
 }
 
@@ -1471,7 +1483,7 @@ static void do_video_out(AVFormatContext *s,
             pkt.pts = av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
             pkt.flags |= AV_PKT_FLAG_KEY;
 
-            write_frame(s, &pkt, ost->st->codec, ost->bitstream_filters);
+            write_frame(s, &pkt, ost);
         } else {
             AVFrame big_picture;
 
@@ -1519,7 +1531,7 @@ static void do_video_out(AVFormatContext *s,
 
             if (enc->coded_frame->key_frame)
                 pkt.flags |= AV_PKT_FLAG_KEY;
-            write_frame(s, &pkt, ost->st->codec, ost->bitstream_filters);
+            write_frame(s, &pkt, ost);
             *frame_size = ret;
             video_size += ret;
             // fprintf(stderr,"\nFrame: %3d size: %5d type: %d",
@@ -1531,7 +1543,6 @@ static void do_video_out(AVFormatContext *s,
             }
         }
         ost->sync_opts++;
-        ost->frame_number++;
     }
 }
 
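The line dropped here (and the matching one in do_streamcopy further down) removes the caller-side increment that would now double count, since write_frame() owns the counter. Schematically, with names from the diff:

    /* old flow: the call did not touch the counter, so the caller bumped it */
    write_frame(s, &pkt, ost->st->codec, ost->bitstream_filters);
    ost->frame_number++;

    /* new flow: the single increment happens inside write_frame() */
    write_frame(s, &pkt, ost);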
@@ -1792,7 +1803,7 @@ static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
             pkt.size = ret;
             if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
                 pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
-            write_frame(os, &pkt, ost->st->codec, ost->bitstream_filters);
+            write_frame(os, &pkt, ost);
         }
     }
 }
@@ -1876,9 +1887,8 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
         opkt.flags |= AV_PKT_FLAG_KEY;
     }
 
-    write_frame(of->ctx, &opkt, ost->st->codec, ost->bitstream_filters);
+    write_frame(of->ctx, &opkt, ost);
     ost->st->codec->frame_number++;
-    ost->frame_number++;
     av_free_packet(&opkt);
 }
 