Merge branch 'master' into oldabi

* master: (109 commits)
  libx264: fix open gop default. Please use -x264opts to force open gop This fixes Ticket268
  avfilter picture pool: double free hotfix
  mpegaudio_parser: be less picky on the start position
  ppc32: Fix movrel
  Replace usages of av_get_bits_per_sample_fmt() with av_get_bytes_per_sample().
  x86: cabac: fix register constraints for 32-bit mode
  cabac: move x86 asm to libavcodec/x86/cabac.h
  x86: h264: cast pointers to intptr_t rather than int
  x86: h264: remove hardcoded edi in decode_significance_8x8_x86()
  x86: h264: remove hardcoded esi in decode_significance[_8x8]_x86()
  x86: h264: remove hardcoded edx in decode_significance[_8x8]_x86()
  x86: h264: remove hardcoded eax in decode_significance[_8x8]_x86()
  x86: cabac: change 'a' constraint to 'r' in get_cabac_inline()
  x86: cabac: remove hardcoded esi in get_cabac_inline()
  x86: cabac: remove hardcoded edx in get_cabac_inline()
  x86: cabac: remove unused macro parameter
  x86: cabac: remove hardcoded ebx in inline asm
  x86: cabac: remove hardcoded struct offsets from inline asm
  cabac: remove inline asm under #if 0
  cabac: remove BRANCHLESS_CABAC_DECODER switch
  ...

Conflicts:
        cmdutils.c
        ffserver.c
        libavfilter/avfilter.h
        libavformat/avformat.h
        libavformat/utils.c
        libavformat/version.h
        libavutil/avutil.h

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit 779d7610c7
Changelog | 11
@@ -2,7 +2,7 @@ Entries are sorted chronologically from oldest to youngest within each release,
releases are sorted from youngest to oldest.


version <next>:
version 0.7:

- many many things we forgot because we rather write code than changelogs
- libmpcodecs video filter support (3 times as many filters than before)
@@ -14,17 +14,24 @@ version <next>:
- floating-point sample format support to the ac3, eac3, dca, aac, and vorbis decoders.
- H264/MPEG frame-level multi-threading
- All av_metadata_* functions renamed to av_dict_* and moved to libavutil
- 4:4:4 H.264 decoding support
- 10-bit H.264 optimizations for x86
- lut, lutrgb, and lutyuv filters added
- buffersink libavfilter sink added
- Bump libswscale for recently reported ABI break


version 0.7_beta2:

- VP8 frame-multithreading
- NEON optimizations for VP8
- Lots of deprecated API cruft removed
- fft and imdct optimizations for AVX (Sandy Bridge) processors
- showinfo filter added
- DPX image encoder
- SMPTE 302M AES3 audio decoder
- Apple Core Audio Format muxer
- 9bit and 10bit H.264 decoding
- 9bit and 10bit per sample support in the H.264 decoder
- 9bit and 10bit FFV1 encoding / decoding
- split filter added
- select filter added

cmdutils.c | 118
@ -38,7 +38,8 @@
|
||||
#include "libavutil/parseutils.h"
|
||||
#include "libavutil/pixdesc.h"
|
||||
#include "libavutil/eval.h"
|
||||
#include "libavcodec/opt.h"
|
||||
#include "libavutil/dict.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include "cmdutils.h"
|
||||
#include "version.h"
|
||||
#if CONFIG_NETWORK
|
||||
@ -54,6 +55,7 @@ static int opt_name_count;
|
||||
AVCodecContext *avcodec_opts[AVMEDIA_TYPE_NB];
|
||||
AVFormatContext *avformat_opts;
|
||||
struct SwsContext *sws_opts;
|
||||
AVDictionary *format_opts, *video_opts, *audio_opts, *sub_opts;
|
||||
|
||||
static const int this_year = 2011;
|
||||
|
||||
@ -86,6 +88,10 @@ void uninit_opts(void)
|
||||
av_freep(&opt_names);
|
||||
av_freep(&opt_values);
|
||||
opt_name_count = 0;
|
||||
av_dict_free(&format_opts);
|
||||
av_dict_free(&video_opts);
|
||||
av_dict_free(&audio_opts);
|
||||
av_dict_free(&sub_opts);
|
||||
}
|
||||
|
||||
void log_callback_help(void* ptr, int level, const char* fmt, va_list vl)
|
||||
@ -290,6 +296,43 @@ unknown_opt:
|
||||
}
|
||||
}
|
||||
|
||||
#define FLAGS (o->type == FF_OPT_TYPE_FLAGS) ? AV_DICT_APPEND : 0
|
||||
#define SET_PREFIXED_OPTS(ch, flag, output) \
|
||||
if (opt[0] == ch && avcodec_opts[0] && (o = av_opt_find(avcodec_opts[0], opt+1, NULL, flag, 0)))\
|
||||
av_dict_set(&output, opt+1, arg, FLAGS);
|
||||
static int opt_default2(const char *opt, const char *arg)
|
||||
{
|
||||
const AVOption *o;
|
||||
if ((o = av_opt_find(avcodec_opts[0], opt, NULL, 0, AV_OPT_SEARCH_CHILDREN))) {
|
||||
if (o->flags & AV_OPT_FLAG_VIDEO_PARAM)
|
||||
av_dict_set(&video_opts, opt, arg, FLAGS);
|
||||
if (o->flags & AV_OPT_FLAG_AUDIO_PARAM)
|
||||
av_dict_set(&audio_opts, opt, arg, FLAGS);
|
||||
if (o->flags & AV_OPT_FLAG_SUBTITLE_PARAM)
|
||||
av_dict_set(&sub_opts, opt, arg, FLAGS);
|
||||
} else if ((o = av_opt_find(avformat_opts, opt, NULL, 0, AV_OPT_SEARCH_CHILDREN)))
|
||||
av_dict_set(&format_opts, opt, arg, FLAGS);
|
||||
else if ((o = av_opt_find(sws_opts, opt, NULL, 0, AV_OPT_SEARCH_CHILDREN))) {
|
||||
// XXX we only support sws_flags, not arbitrary sws options
|
||||
int ret = av_set_string3(sws_opts, opt, arg, 1, NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error setting option %s.\n", opt);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
if (!o) {
|
||||
SET_PREFIXED_OPTS('v', AV_OPT_FLAG_VIDEO_PARAM, video_opts)
|
||||
SET_PREFIXED_OPTS('a', AV_OPT_FLAG_AUDIO_PARAM, audio_opts)
|
||||
SET_PREFIXED_OPTS('s', AV_OPT_FLAG_SUBTITLE_PARAM, sub_opts)
|
||||
}
|
||||
|
||||
if (o)
|
||||
return 0;
|
||||
fprintf(stderr, "Unrecognized option '%s'\n", opt);
|
||||
return AVERROR_OPTION_NOT_FOUND;
|
||||
}
|
||||
|
||||
int opt_default(const char *opt, const char *arg){
|
||||
int type;
|
||||
int ret= 0;
|
||||
@ -322,7 +365,7 @@ int opt_default(const char *opt, const char *arg){
|
||||
goto out;
|
||||
|
||||
for(type=0; *avcodec_opts && type<AVMEDIA_TYPE_NB && ret>= 0; type++){
|
||||
const AVOption *o2 = av_find_opt(avcodec_opts[0], opt, NULL, opt_types[type], opt_types[type]);
|
||||
const AVOption *o2 = av_opt_find(avcodec_opts[0], opt, NULL, opt_types[type], 0);
|
||||
if(o2)
|
||||
ret = av_set_string3(avcodec_opts[type], opt, arg, 1, &o);
|
||||
}
|
||||
@ -350,6 +393,9 @@ int opt_default(const char *opt, const char *arg){
|
||||
}
|
||||
|
||||
out:
|
||||
if ((ret = opt_default2(opt, arg)) < 0)
|
||||
return ret;
|
||||
|
||||
// av_log(NULL, AV_LOG_ERROR, "%s:%s: %f 0x%0X\n", opt, arg, av_get_double(avcodec_opts, opt, NULL), (int)av_get_int(avcodec_opts, opt, NULL));
|
||||
|
||||
opt_values= av_realloc(opt_values, sizeof(void*)*(opt_name_count+1));
|
||||
@ -880,71 +926,3 @@ FILE *get_preset_file(char *filename, size_t filename_size,
|
||||
|
||||
return f;
|
||||
}
|
||||
|
||||
#if CONFIG_AVFILTER
|
||||
|
||||
static int ffsink_init(AVFilterContext *ctx, const char *args, void *opaque)
|
||||
{
|
||||
FFSinkContext *priv = ctx->priv;
|
||||
|
||||
if (!opaque)
|
||||
return AVERROR(EINVAL);
|
||||
*priv = *(FFSinkContext *)opaque;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void null_end_frame(AVFilterLink *inlink) { }
|
||||
|
||||
static int ffsink_query_formats(AVFilterContext *ctx)
|
||||
{
|
||||
FFSinkContext *priv = ctx->priv;
|
||||
enum PixelFormat pix_fmts[] = { priv->pix_fmt, PIX_FMT_NONE };
|
||||
|
||||
avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
return 0;
|
||||
}
|
||||
|
||||
AVFilter ffsink = {
|
||||
.name = "ffsink",
|
||||
.priv_size = sizeof(FFSinkContext),
|
||||
.init = ffsink_init,
|
||||
|
||||
.query_formats = ffsink_query_formats,
|
||||
|
||||
.inputs = (AVFilterPad[]) {{ .name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.end_frame = null_end_frame,
|
||||
.min_perms = AV_PERM_READ, },
|
||||
{ .name = NULL }},
|
||||
.outputs = (AVFilterPad[]) {{ .name = NULL }},
|
||||
};
|
||||
|
||||
int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
|
||||
AVFilterBufferRef **picref_ptr, AVRational *tb)
|
||||
{
|
||||
int ret;
|
||||
AVFilterBufferRef *picref;
|
||||
*picref_ptr = NULL;
|
||||
|
||||
if ((ret = avfilter_request_frame(ctx->inputs[0])) < 0)
|
||||
return ret;
|
||||
if (!(picref = ctx->inputs[0]->cur_buf))
|
||||
return AVERROR(ENOENT);
|
||||
*picref_ptr = picref;
|
||||
ctx->inputs[0]->cur_buf = NULL;
|
||||
*tb = ctx->inputs[0]->time_base;
|
||||
|
||||
memcpy(frame->data, picref->data, sizeof(frame->data));
|
||||
memcpy(frame->linesize, picref->linesize, sizeof(frame->linesize));
|
||||
frame->pkt_pos = picref->pos;
|
||||
frame->interlaced_frame = picref->video->interlaced;
|
||||
frame->top_field_first = picref->video->top_field_first;
|
||||
frame->key_frame = picref->video->key_frame;
|
||||
frame->pict_type = picref->video->pict_type;
|
||||
frame->sample_aspect_ratio = picref->video->sample_aspect_ratio;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_AVFILTER */
|
||||
|
cmdutils.h | 16
@ -47,6 +47,7 @@ extern const char **opt_names;
|
||||
extern AVCodecContext *avcodec_opts[AVMEDIA_TYPE_NB];
|
||||
extern AVFormatContext *avformat_opts;
|
||||
extern struct SwsContext *sws_opts;
|
||||
extern AVDictionary *format_opts, *video_opts, *audio_opts, *sub_opts;
|
||||
|
||||
/**
|
||||
* Initialize the cmdutils option system, in particular
|
||||
@ -259,19 +260,4 @@ int read_file(const char *filename, char **bufptr, size_t *size);
|
||||
FILE *get_preset_file(char *filename, size_t filename_size,
|
||||
const char *preset_name, int is_path, const char *codec_name);
|
||||
|
||||
typedef struct {
|
||||
enum PixelFormat pix_fmt;
|
||||
} FFSinkContext;
|
||||
|
||||
extern AVFilter ffsink;
|
||||
|
||||
/**
|
||||
* Extract a frame from sink.
|
||||
*
|
||||
* @return a negative error in case of failure, 1 if one frame has
|
||||
* been extracted successfully.
|
||||
*/
|
||||
int get_filtered_video_frame(AVFilterContext *sink, AVFrame *frame,
|
||||
AVFilterBufferRef **picref, AVRational *pts_tb);
|
||||
|
||||
#endif /* FFMPEG_CMDUTILS_H */
|
||||
|
configure | 3
@ -1500,6 +1500,7 @@ frei0r_src_filter_deps="frei0r dlopen strtok_r"
|
||||
hqdn3d_filter_deps="gpl"
|
||||
movie_filter_deps="avcodec avformat"
|
||||
mp_filter_deps="gpl avcodec"
|
||||
negate_filter_deps="lut_filter"
|
||||
ocv_filter_deps="libopencv"
|
||||
scale_filter_deps="swscale"
|
||||
yadif_filter_deps="gpl"
|
||||
@ -2099,6 +2100,7 @@ elif $cc -v 2>&1 | grep -q 'PathScale\|Path64'; then
|
||||
AS_DEPFLAGS='-MMD -MF $(@:.o=.d) -MT $@'
|
||||
speed_cflags='-O2'
|
||||
size_cflags='-Os'
|
||||
filter_cflags='filter_out -Wdisabled-optimization'
|
||||
elif $cc -v 2>&1 | grep -q Open64; then
|
||||
cc_type=open64
|
||||
cc_version=__OPEN64__
|
||||
@ -2107,6 +2109,7 @@ elif $cc -v 2>&1 | grep -q Open64; then
|
||||
AS_DEPFLAGS='-MMD -MF $(@:.o=.d) -MT $@'
|
||||
speed_cflags='-O2'
|
||||
size_cflags='-Os'
|
||||
filter_cflags='filter_out -Wdisabled-optimization|-Wtype-limits|-fno-signed-zeros'
|
||||
fi
|
||||
|
||||
test -n "$cc_type" && enable $cc_type ||
|
||||
doc/APIchanges
@@ -7,12 +7,32 @@ libavdevice: 2011-04-18
libavfilter:   2011-04-18
libavformat:   2011-04-18
libpostproc:   2011-04-18
libswscale:    2011-04-18
libswscale:    2011-06-20
libavutil:     2011-04-18


API changes, most recent first:

2011-06-19 - xxxxxxx - lavfi 2.23.0 - avfilter.h
  Add layout negotiation fields and helper functions.

  In particular, add in_chlayouts and out_chlayouts to AVFilterLink,
  and the functions:
    avfilter_set_common_sample_formats()
    avfilter_set_common_channel_layouts()
    avfilter_all_channel_layouts()

2011-06-19 - xxxxxxx - lavfi 2.22.0 - AVFilterFormats
  Change type of AVFilterFormats.formats from int * to int64_t *,
  and update formats handling API accordingly.

  avfilter_make_format_list() still takes an int32_t array and converts
  it to int64_t. A new function, avfilter_make_format64_list(), that
  takes int64_t arrays has been added.

2011-06-19 - xxxxxxx - lavfi 2.21.0 - vsink_buffer.h
  Add video sink buffer and vsink_buffer.h public header.

2011-06-12 - xxxxxxx - lavfi 2.18.0 - avcodec.h
  Add avfilter_get_video_buffer_ref_from_frame() function in
  libavfilter/avcodec.h.
@@ -23,6 +43,16 @@ API changes, most recent first:
2011-06-12 - xxxxxxx - lavfi 2.16.0 - avfilter_graph_parse()
  Change avfilter_graph_parse() signature.

2011-06-xx - xxxxxxx - lavf 53.2.0 - avformat.h
  Add avformat_open_input and avformat_write_header().
  Deprecate av_open_input_stream, av_open_input_file,
  AVFormatParameters and av_write_header.

2011-06-xx - xxxxxxx - lavu 51.7.0 - opt.h
  Add av_opt_set_dict() and av_opt_find().
  Deprecate av_find_opt().
  Add AV_DICT_APPEND flag.

2011-06-xx - xxxxxxx - lavu 51.6.0 - opt.h
  Add av_opt_flag_is_set().

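The lavf 53.2.0 entry above is the API change most of the tree-wide hunks in this merge follow (see ffprobe.c and ffserver.c below). A minimal migration sketch in C; the input path, the NULL demuxer argument and the single dictionary option are placeholder assumptions for illustration, not part of the commit:

#include "libavformat/avformat.h"

/* Sketch only: "input.ts" is a placeholder and error handling is minimal. */
static int open_demuxer(AVFormatContext **ctx)
{
    AVDictionary *opts = NULL;
    int ret;

    /* Old, now deprecated API:
     *   AVFormatParameters ap = { .prealloced_context = 1 };
     *   ret = av_open_input_file(ctx, "input.ts", NULL, 0, &ap);
     */

    /* New API: per-demuxer options travel in an AVDictionary. */
    av_dict_set(&opts, "mpeg2ts_compute_pcr", "1", 0); /* as ffserver.c now does */
    *ctx = NULL;                          /* let libavformat allocate the context */
    ret  = avformat_open_input(ctx, "input.ts", NULL, &opts);
    av_dict_free(&opts);                  /* entries that were not consumed remain here */
    return ret;
}

Options the demuxer did not recognize stay in the dictionary after the call, which is how ffprobe.c below reports "Option %s not found".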
doc/RELEASE_NOTES | 52 (new file)
@@ -0,0 +1,52 @@
Release Notes
=============

* 0.7 "Love and Peace" June, 2011


General notes
-------------

This release enables frame-based multithreaded decoding for a number of codecs,
including VP8, H.263 and H.264. Additionally, there has been a major cleanup of
both internal and external APIs. For this reason, the major versions of all
libraries have been bumped. On the one hand, this means that 0.7 can be installed
side-by-side with previous releases, on the other hand, in order to benefit
from the new features, applications need to be recompiled.

Other important changes are additions of decoders including, but not limited to,
AMR-WB, single stream LATM/LOAS, G.722 ADPCM, a native VP8 decoder
and HE-AACv2. Additionally, many new de/muxers such as WebM in Matroska, Apple
HTTP Live Streaming, SAP, IEC 61937 (S/PDIF) have been added.

See the Changelog file for a list of significant changes.

Please note that our policy on bug reports has not changed. We still only accept
bug reports against HEAD of the FFmpeg trunk repository. If you are experiencing
issues with any formally released version of FFmpeg, please try a current version
of the development code to check if the issue still exists. If it does, make your
report against the development code following the usual bug reporting guidelines.


API changes
-----------

Please see the file doc/APIchanges for programmer-centric information. Note that a
lot of long-time deprecated APIs have been removed. Also, a number of additional
APIs have been deprecated and are scheduled for removal in the next release.


Other notable changes
---------------------

- many ARM NEON optimizations
- libswscale cleanup started, optimizations should become easier in the future
- nonfree libfaad support for AAC decoding removed
- 4:4:4 H.264 decoding
- 9/10bit H.264 decoding
- Win64 Assembler support
- native MMSH/MMST support
- Windows TV demuxing
- native AMR-WB decoding
- native GSM-MS decoding
- SMPTE 302M decoding
- AVS encoding

@ -169,7 +169,6 @@ Set frame rate (Hz value, fraction or abbreviation), (default = 25).
|
||||
Set frame size. The format is @samp{wxh} (ffserver default = 160x128).
|
||||
There is no default for input streams,
|
||||
for output streams it is set by default to the size of the source stream.
|
||||
If the input file has video streams with different resolutions, the behaviour is undefined.
|
||||
The following abbreviations are recognized:
|
||||
@table @samp
|
||||
@item sqcif
|
||||
@ -557,10 +556,8 @@ The timestamps must be specified in ascending order.
|
||||
@item -aframes @var{number}
|
||||
Set the number of audio frames to record.
|
||||
@item -ar @var{freq}
|
||||
Set the audio sampling frequency. For input streams it is set by
|
||||
default to 44100 Hz, for output streams it is set by default to the
|
||||
frequency of the input stream. If the input file has audio streams
|
||||
with different frequencies, the behaviour is undefined.
|
||||
Set the audio sampling frequency. there is no default for input streams,
|
||||
for output streams it is set by default to the frequency of the input stream.
|
||||
@item -ab @var{bitrate}
|
||||
Set the audio bitrate in bit/s (default = 64k).
|
||||
@item -aq @var{q}
|
||||
@ -568,8 +565,7 @@ Set the audio quality (codec-specific, VBR).
|
||||
@item -ac @var{channels}
|
||||
Set the number of audio channels. For input streams it is set by
|
||||
default to 1, for output streams it is set by default to the same
|
||||
number of audio channels in input. If the input file has audio streams
|
||||
with different channel count, the behaviour is undefined.
|
||||
number of audio channels in input.
|
||||
@item -an
|
||||
Disable audio recording.
|
||||
@item -acodec @var{codec}
|
||||
|
doc/filters.texi | 132
@ -701,6 +701,118 @@ a float number which specifies chroma temporal strength, defaults to
|
||||
@var{luma_tmp}*@var{chroma_spatial}/@var{luma_spatial}
|
||||
@end table
|
||||
|
||||
@section lut, lutrgb, lutyuv
|
||||
|
||||
Compute a look-up table for binding each pixel component input value
|
||||
to an output value, and apply it to input video.
|
||||
|
||||
@var{lutyuv} applies a lookup table to a YUV input video, @var{lutrgb}
|
||||
to an RGB input video.
|
||||
|
||||
These filters accept in input a ":"-separated list of options, which
|
||||
specify the expressions used for computing the lookup table for the
|
||||
corresponding pixel component values.
|
||||
|
||||
The @var{lut} filter requires either YUV or RGB pixel formats in
|
||||
input, and accepts the options:
|
||||
@table @option
|
||||
@var{c0} (first pixel component)
|
||||
@var{c1} (second pixel component)
|
||||
@var{c2} (third pixel component)
|
||||
@var{c3} (fourth pixel component, corresponds to the alpha component)
|
||||
@end table
|
||||
|
||||
The exact component associated to each option depends on the format in
|
||||
input.
|
||||
|
||||
The @var{lutrgb} filter requires RGB pixel formats in input, and
|
||||
accepts the options:
|
||||
@table @option
|
||||
@var{r} (red component)
|
||||
@var{g} (green component)
|
||||
@var{b} (blue component)
|
||||
@var{a} (alpha component)
|
||||
@end table
|
||||
|
||||
The @var{lutyuv} filter requires YUV pixel formats in input, and
|
||||
accepts the options:
|
||||
@table @option
|
||||
@var{y} (Y/luminance component)
|
||||
@var{u} (U/Cb component)
|
||||
@var{v} (V/Cr component)
|
||||
@var{a} (alpha component)
|
||||
@end table
|
||||
|
||||
The expressions can contain the following constants and functions:
|
||||
|
||||
@table @option
|
||||
@item E, PI, PHI
|
||||
the corresponding mathematical approximated values for e
(Euler's number), pi (Greek pi), phi (golden ratio)
|
||||
|
||||
@item w, h
|
||||
the input width and height
|
||||
|
||||
@item val
|
||||
input value for the pixel component
|
||||
|
||||
@item clipval
|
||||
the input value clipped in the @var{minval}-@var{maxval} range
|
||||
|
||||
@item maxval
|
||||
maximum value for the pixel component
|
||||
|
||||
@item minval
|
||||
minimum value for the pixel component
|
||||
|
||||
@item negval
|
||||
the negated value for the pixel component value clipped in the
|
||||
@var{minval}-@var{maxval} range; it corresponds to the expression
|
||||
"maxval-clipval+minval"
|
||||
|
||||
@item clip(val)
|
||||
the computed value in @var{val} clipped in the
|
||||
@var{minval}-@var{maxval} range
|
||||
|
||||
@item gammaval(gamma)
|
||||
the computed gamma correction value of the pixel component value
|
||||
clipped in the @var{minval}-@var{maxval} range, corresponds to the
|
||||
expression
|
||||
"pow((clipval-minval)/(maxval-minval)\,@var{gamma})*(maxval-minval)+minval"
|
||||
|
||||
@end table
|
||||
|
||||
All expressions default to "val".
|
||||
|
||||
Some examples follow:
|
||||
@example
|
||||
# negate input video
|
||||
lutrgb="r=maxval+minval-val:g=maxval+minval-val:b=maxval+minval-val"
|
||||
lutyuv="y=maxval+minval-val:u=maxval+minval-val:v=maxval+minval-val"
|
||||
|
||||
# the above is the same as
|
||||
lutrgb="r=negval:g=negval:b=negval"
|
||||
lutyuv="y=negval:u=negval:v=negval"
|
||||
|
||||
# negate luminance
|
||||
lutyuv=negval
|
||||
|
||||
# remove chroma components, turns the video into a graytone image
|
||||
lutyuv="u=128:v=128"
|
||||
|
||||
# apply a luma burning effect
|
||||
lutyuv="y=2*val"
|
||||
|
||||
# remove green and blue components
|
||||
lutrgb="g=0:b=0"
|
||||
|
||||
# set a constant alpha channel value on input
|
||||
format=rgba,lutrgb=a="maxval-minval/2"
|
||||
|
||||
# correct luminance gamma by a 0.5 factor
|
||||
lutyuv=y=gammaval(0.5)
|
||||
@end example
|
||||
|
||||
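For orientation, these lut expressions are normally handed to the filters through ffmpeg's -vf option; a minimal invocation (file names are placeholders, not part of the documented examples) might look like:

ffmpeg -i input.avi -vf "lutyuv=y=gammaval(0.5)" output.avi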
@section mp
|
||||
|
||||
Apply an MPlayer filter to the input video.
|
||||
@ -799,6 +911,13 @@ mp=hue=100:-10
|
||||
|
||||
See also mplayer(1), @url{http://www.mplayerhq.hu/}.
|
||||
|
||||
@section negate
|
||||
|
||||
Negate input video.
|
||||
|
||||
This filter accepts an integer in input, if non-zero it negates the
|
||||
alpha component (if available). The default value in input is 0.
|
||||
|
||||
@section noformat
|
||||
|
||||
Force libavfilter not to use any of the specified pixel formats for the
|
||||
@ -1858,6 +1977,19 @@ frei0r_src=200x200:10:partik0l=1234 [overlay]; [in][overlay] overlay
|
||||
|
||||
Below is a description of the currently available video sinks.
|
||||
|
||||
@section buffersink
|
||||
|
||||
Buffer video frames, and make them available to the end of the filter
|
||||
graph.
|
||||
|
||||
This sink is mainly intended for programmatic use, in particular
|
||||
through the interface defined in @file{libavfilter/vsink_buffer.h}.
|
||||
|
||||
It does not require a string parameter in input, but you need to
|
||||
specify a pointer to a list of supported pixel formats terminated by
|
||||
-1 in the opaque parameter provided to @code{avfilter_init_filter}
|
||||
when initializing this sink.
|
||||
|
||||
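For context, this is roughly how ffmpeg.c and ffplay.c in this same commit attach the sink; a minimal C sketch, assuming a filter graph and a source filter have already been created (error handling kept to the return codes):

#include "libavfilter/avfilter.h"
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/vsink_buffer.h"

/* Sketch only: "graph" and "src" are assumed valid. */
static int attach_buffersink(AVFilterGraph *graph, AVFilterContext *src,
                             AVFilterContext **sink)
{
    /* The accepted pixel formats are passed through the opaque parameter,
       terminated by -1 (PIX_FMT_NONE), as described above. */
    enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
    int ret;

    ret = avfilter_graph_create_filter(sink, avfilter_get_by_name("buffersink"),
                                       "out", NULL, pix_fmts, graph);
    if (ret < 0)
        return ret;
    return avfilter_link(src, 0, *sink, 0);
}

/* Decoded frames are then pulled from the sink with
   av_vsink_buffer_get_video_buffer_ref(sink, &picref, 0);  */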
@section nullsink
|
||||
|
||||
Null video sink, do absolutely nothing with the input video. It is
|
||||
|
ffmpeg.c | 91
@ -51,6 +51,7 @@
|
||||
# include "libavfilter/avcodec.h"
|
||||
# include "libavfilter/avfilter.h"
|
||||
# include "libavfilter/avfiltergraph.h"
|
||||
# include "libavfilter/vsink_buffer.h"
|
||||
# include "libavfilter/vsrc_buffer.h"
|
||||
#endif
|
||||
|
||||
@ -124,9 +125,7 @@ static int nb_input_codecs = 0;
|
||||
static int nb_input_files_ts_scale[MAX_FILES] = {0};
|
||||
|
||||
static AVFormatContext *output_files[MAX_FILES];
|
||||
static AVCodec **output_codecs = NULL;
|
||||
static int nb_output_files = 0;
|
||||
static int nb_output_codecs = 0;
|
||||
|
||||
static AVStreamMap *stream_maps = NULL;
|
||||
static int nb_stream_maps;
|
||||
@ -278,6 +277,8 @@ typedef struct AVOutputStream {
|
||||
struct AVInputStream *sync_ist; /* input stream to sync against */
|
||||
int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ //FIXME look at frame_number
|
||||
AVBitStreamFilterContext *bitstream_filters;
|
||||
AVCodec *enc;
|
||||
|
||||
/* video only */
|
||||
int video_resample;
|
||||
AVFrame resample_frame; /* temporary frame for image resampling */
|
||||
@ -365,7 +366,7 @@ static int configure_video_filters(AVInputStream *ist, AVOutputStream *ost)
|
||||
/** filter graph containing all filters including input & output */
|
||||
AVCodecContext *codec = ost->st->codec;
|
||||
AVCodecContext *icodec = ist->st->codec;
|
||||
FFSinkContext ffsink_ctx = { .pix_fmt = codec->pix_fmt };
|
||||
enum PixelFormat pix_fmts[] = { codec->pix_fmt, PIX_FMT_NONE };
|
||||
AVRational sample_aspect_ratio;
|
||||
char args[255];
|
||||
int ret;
|
||||
@ -385,8 +386,8 @@ static int configure_video_filters(AVInputStream *ist, AVOutputStream *ost)
|
||||
"src", args, NULL, ost->graph);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
ret = avfilter_graph_create_filter(&ost->output_video_filter, &ffsink,
|
||||
"out", NULL, &ffsink_ctx, ost->graph);
|
||||
ret = avfilter_graph_create_filter(&ost->output_video_filter, avfilter_get_by_name("buffersink"),
|
||||
"out", NULL, pix_fmts, ost->graph);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
last_filter = ost->input_video_filter;
|
||||
@ -556,7 +557,6 @@ static int ffmpeg_exit(int ret)
|
||||
|
||||
av_free(streamid_map);
|
||||
av_free(input_codecs);
|
||||
av_free(output_codecs);
|
||||
av_free(stream_maps);
|
||||
av_free(meta_data_maps);
|
||||
|
||||
@ -620,6 +620,8 @@ static void choose_sample_fmt(AVStream *st, AVCodec *codec)
|
||||
break;
|
||||
}
|
||||
if (*p == -1) {
|
||||
if((codec->capabilities & CODEC_CAP_LOSSLESS) && av_get_sample_fmt_name(st->codec->sample_fmt) > av_get_sample_fmt_name(codec->sample_fmts[0]))
|
||||
av_log(NULL, AV_LOG_ERROR, "Convertion will not be lossless'\n");
|
||||
av_log(NULL, AV_LOG_WARNING,
|
||||
"Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n",
|
||||
av_get_sample_fmt_name(st->codec->sample_fmt),
|
||||
@ -809,8 +811,8 @@ static void do_audio_out(AVFormatContext *s,
|
||||
int size_out, frame_bytes, ret, resample_changed;
|
||||
AVCodecContext *enc= ost->st->codec;
|
||||
AVCodecContext *dec= ist->st->codec;
|
||||
int osize= av_get_bits_per_sample_fmt(enc->sample_fmt)/8;
|
||||
int isize= av_get_bits_per_sample_fmt(dec->sample_fmt)/8;
|
||||
int osize = av_get_bytes_per_sample(enc->sample_fmt);
|
||||
int isize = av_get_bytes_per_sample(dec->sample_fmt);
|
||||
const int coded_bps = av_get_bits_per_sample(enc->codec->id);
|
||||
|
||||
need_realloc:
|
||||
@ -1530,7 +1532,7 @@ static int output_packet(AVInputStream *ist, int ist_index,
|
||||
#endif
|
||||
|
||||
AVPacket avpkt;
|
||||
int bps = av_get_bits_per_sample_fmt(ist->st->codec->sample_fmt)>>3;
|
||||
int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
|
||||
|
||||
if(ist->next_pts == AV_NOPTS_VALUE)
|
||||
ist->next_pts= ist->pts;
|
||||
@ -1708,12 +1710,15 @@ static int output_packet(AVInputStream *ist, int ist_index,
|
||||
frame_available = ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO ||
|
||||
!ost->output_video_filter || avfilter_poll_frame(ost->output_video_filter->inputs[0]);
|
||||
while (frame_available) {
|
||||
AVRational ist_pts_tb;
|
||||
if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ost->output_video_filter)
|
||||
if (get_filtered_video_frame(ost->output_video_filter, &picture, &ost->picref, &ist_pts_tb) < 0)
|
||||
if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ost->output_video_filter) {
|
||||
AVRational ist_pts_tb = ost->output_video_filter->inputs[0]->time_base;
|
||||
if (av_vsink_buffer_get_video_buffer_ref(ost->output_video_filter, &ost->picref, 0) < 0)
|
||||
goto cont;
|
||||
if (ost->picref)
|
||||
ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
|
||||
if (ost->picref) {
|
||||
avfilter_fill_frame_from_video_buffer_ref(&picture, ost->picref);
|
||||
ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
os = output_files[ost->file_index];
|
||||
|
||||
@ -2263,6 +2268,8 @@ static int transcode(AVFormatContext **output_files,
|
||||
abort();
|
||||
}
|
||||
} else {
|
||||
if (!ost->enc)
|
||||
ost->enc = avcodec_find_encoder(ost->st->codec->codec_id);
|
||||
switch(codec->codec_type) {
|
||||
case AVMEDIA_TYPE_AUDIO:
|
||||
ost->fifo= av_fifo_alloc(1024);
|
||||
@ -2274,7 +2281,7 @@ static int transcode(AVFormatContext **output_files,
|
||||
if (icodec->lowres)
|
||||
codec->sample_rate >>= icodec->lowres;
|
||||
}
|
||||
choose_sample_rate(ost->st, codec->codec);
|
||||
choose_sample_rate(ost->st, ost->enc);
|
||||
codec->time_base = (AVRational){1, codec->sample_rate};
|
||||
if (!codec->channels)
|
||||
codec->channels = icodec->channels;
|
||||
@ -2289,6 +2296,10 @@ static int transcode(AVFormatContext **output_files,
|
||||
ost->resample_channels = icodec->channels;
|
||||
break;
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
if (codec->pix_fmt == PIX_FMT_NONE)
|
||||
codec->pix_fmt = icodec->pix_fmt;
|
||||
choose_pixel_fmt(ost->st, ost->enc);
|
||||
|
||||
if (ost->st->codec->pix_fmt == PIX_FMT_NONE) {
|
||||
fprintf(stderr, "Video pixel format is unknown, stream cannot be encoded\n");
|
||||
ffmpeg_exit(1);
|
||||
@ -2299,6 +2310,10 @@ static int transcode(AVFormatContext **output_files,
|
||||
if (ost->video_resample) {
|
||||
codec->bits_per_raw_sample= frame_bits_per_raw_sample;
|
||||
}
|
||||
if (!codec->width || !codec->height) {
|
||||
codec->width = icodec->width;
|
||||
codec->height = icodec->height;
|
||||
}
|
||||
ost->resample_height = icodec->height;
|
||||
ost->resample_width = icodec->width;
|
||||
ost->resample_pix_fmt= icodec->pix_fmt;
|
||||
@ -2307,11 +2322,16 @@ static int transcode(AVFormatContext **output_files,
|
||||
|
||||
if (!ost->frame_rate.num)
|
||||
ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25,1};
|
||||
if (codec->codec && codec->codec->supported_framerates && !force_fps) {
|
||||
int idx = av_find_nearest_q_idx(ost->frame_rate, codec->codec->supported_framerates);
|
||||
ost->frame_rate = codec->codec->supported_framerates[idx];
|
||||
if (ost->enc && ost->enc->supported_framerates && !force_fps) {
|
||||
int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
|
||||
ost->frame_rate = ost->enc->supported_framerates[idx];
|
||||
}
|
||||
codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num};
|
||||
if( av_q2d(codec->time_base) < 0.001 && video_sync_method
|
||||
&& (video_sync_method==1 || (video_sync_method<0 && !(os->oformat->flags & AVFMT_VARIABLE_FPS)))){
|
||||
av_log(os, AV_LOG_WARNING, "Frame rate very high for a muxer not effciciently supporting it.\n"
|
||||
"Please consider specifiying a lower framerate, a different muxer or -vsync 2\n");
|
||||
}
|
||||
|
||||
#if CONFIG_AVFILTER
|
||||
if (configure_video_filters(ist, ost)) {
|
||||
@ -2375,10 +2395,8 @@ static int transcode(AVFormatContext **output_files,
|
||||
for(i=0;i<nb_ostreams;i++) {
|
||||
ost = ost_table[i];
|
||||
if (ost->encoding_needed) {
|
||||
AVCodec *codec = i < nb_output_codecs ? output_codecs[i] : NULL;
|
||||
AVCodec *codec = ost->enc;
|
||||
AVCodecContext *dec = input_streams[ost->source_index].st->codec;
|
||||
if (!codec)
|
||||
codec = avcodec_find_encoder(ost->st->codec->codec_id);
|
||||
if (!codec) {
|
||||
snprintf(error, sizeof(error), "Encoder (codec id %d) not found for output stream #%d.%d",
|
||||
ost->st->codec->codec_id, ost->file_index, ost->index);
|
||||
@ -3399,17 +3417,12 @@ static int opt_input_file(const char *opt, const char *filename)
|
||||
if(!input_codecs[nb_input_codecs-1])
|
||||
input_codecs[nb_input_codecs-1] = avcodec_find_decoder(dec->codec_id);
|
||||
set_context_opts(dec, avcodec_opts[AVMEDIA_TYPE_VIDEO], AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM, input_codecs[nb_input_codecs-1]);
|
||||
frame_height = dec->height;
|
||||
frame_width = dec->width;
|
||||
frame_pix_fmt = dec->pix_fmt;
|
||||
rfps = ic->streams[i]->r_frame_rate.num;
|
||||
rfps_base = ic->streams[i]->r_frame_rate.den;
|
||||
if (dec->lowres) {
|
||||
dec->flags |= CODEC_FLAG_EMU_EDGE;
|
||||
frame_height >>= dec->lowres;
|
||||
frame_width >>= dec->lowres;
|
||||
dec->height = frame_height;
|
||||
dec->width = frame_width;
|
||||
dec->height >>= dec->lowres;
|
||||
dec->width >>= dec->lowres;
|
||||
}
|
||||
if(me_threshold)
|
||||
dec->debug |= FF_DEBUG_MV;
|
||||
@ -3454,9 +3467,12 @@ static int opt_input_file(const char *opt, const char *filename)
|
||||
input_files[nb_input_files - 1].ctx = ic;
|
||||
input_files[nb_input_files - 1].ist_index = nb_input_streams - ic->nb_streams;
|
||||
|
||||
video_channel = 0;
|
||||
top_field_first = -1;
|
||||
video_channel = 0;
|
||||
frame_rate = (AVRational){0, 0};
|
||||
frame_pix_fmt = PIX_FMT_NONE;
|
||||
frame_height = 0;
|
||||
frame_width = 0;
|
||||
audio_sample_rate = 0;
|
||||
audio_channels = 0;
|
||||
|
||||
@ -3526,13 +3542,12 @@ static void new_video_stream(AVFormatContext *oc, int file_idx)
|
||||
}
|
||||
ost = new_output_stream(oc, file_idx);
|
||||
|
||||
output_codecs = grow_array(output_codecs, sizeof(*output_codecs), &nb_output_codecs, nb_output_codecs + 1);
|
||||
if(!video_stream_copy){
|
||||
if (video_codec_name) {
|
||||
codec_id = find_codec_or_die(video_codec_name, AVMEDIA_TYPE_VIDEO, 1,
|
||||
avcodec_opts[AVMEDIA_TYPE_VIDEO]->strict_std_compliance);
|
||||
codec = avcodec_find_encoder_by_name(video_codec_name);
|
||||
output_codecs[nb_output_codecs-1] = codec;
|
||||
ost->enc = codec;
|
||||
} else {
|
||||
codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, AVMEDIA_TYPE_VIDEO);
|
||||
codec = avcodec_find_encoder(codec_id);
|
||||
@ -3581,8 +3596,6 @@ static void new_video_stream(AVFormatContext *oc, int file_idx)
|
||||
video_enc->bits_per_raw_sample = frame_bits_per_raw_sample;
|
||||
st->sample_aspect_ratio = video_enc->sample_aspect_ratio;
|
||||
|
||||
choose_pixel_fmt(st, codec);
|
||||
|
||||
if (intra_only)
|
||||
video_enc->gop_size = 0;
|
||||
if (video_qscale || same_quality) {
|
||||
@ -3669,13 +3682,12 @@ static void new_audio_stream(AVFormatContext *oc, int file_idx)
|
||||
}
|
||||
ost = new_output_stream(oc, file_idx);
|
||||
|
||||
output_codecs = grow_array(output_codecs, sizeof(*output_codecs), &nb_output_codecs, nb_output_codecs + 1);
|
||||
if(!audio_stream_copy){
|
||||
if (audio_codec_name) {
|
||||
codec_id = find_codec_or_die(audio_codec_name, AVMEDIA_TYPE_AUDIO, 1,
|
||||
avcodec_opts[AVMEDIA_TYPE_AUDIO]->strict_std_compliance);
|
||||
codec = avcodec_find_encoder_by_name(audio_codec_name);
|
||||
output_codecs[nb_output_codecs-1] = codec;
|
||||
ost->enc = codec;
|
||||
} else {
|
||||
codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, AVMEDIA_TYPE_AUDIO);
|
||||
codec = avcodec_find_encoder(codec_id);
|
||||
@ -3741,7 +3753,6 @@ static void new_data_stream(AVFormatContext *oc, int file_idx)
|
||||
}
|
||||
new_output_stream(oc, file_idx);
|
||||
data_enc = st->codec;
|
||||
output_codecs = grow_array(output_codecs, sizeof(*output_codecs), &nb_output_codecs, nb_output_codecs + 1);
|
||||
if (!data_stream_copy) {
|
||||
fprintf(stderr, "Data stream encoding not supported yet (only streamcopy)\n");
|
||||
ffmpeg_exit(1);
|
||||
@ -3781,12 +3792,12 @@ static void new_subtitle_stream(AVFormatContext *oc, int file_idx)
|
||||
}
|
||||
ost = new_output_stream(oc, file_idx);
|
||||
subtitle_enc = st->codec;
|
||||
output_codecs = grow_array(output_codecs, sizeof(*output_codecs), &nb_output_codecs, nb_output_codecs + 1);
|
||||
if(!subtitle_stream_copy){
|
||||
if (subtitle_codec_name) {
|
||||
codec_id = find_codec_or_die(subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 1,
|
||||
avcodec_opts[AVMEDIA_TYPE_SUBTITLE]->strict_std_compliance);
|
||||
codec= output_codecs[nb_output_codecs-1] = avcodec_find_encoder_by_name(subtitle_codec_name);
|
||||
codec = avcodec_find_encoder_by_name(subtitle_codec_name);
|
||||
ost->enc = codec;
|
||||
} else {
|
||||
codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, AVMEDIA_TYPE_SUBTITLE);
|
||||
codec = avcodec_find_encoder(codec_id);
|
||||
@ -3990,6 +4001,8 @@ static int opt_output_file(const char *opt, const char *filename)
|
||||
set_context_opts(oc, avformat_opts, AV_OPT_FLAG_ENCODING_PARAM, NULL);
|
||||
|
||||
frame_rate = (AVRational){0, 0};
|
||||
frame_width = 0;
|
||||
frame_height = 0;
|
||||
audio_sample_rate = 0;
|
||||
audio_channels = 0;
|
||||
|
||||
@ -4226,6 +4239,7 @@ static int opt_target(const char *opt, const char *arg)
|
||||
|
||||
opt_frame_size("s", norm == PAL ? "480x576" : "480x480");
|
||||
opt_frame_rate("r", frame_rates[norm]);
|
||||
opt_frame_pix_fmt("pix_fmt", "yuv420p");
|
||||
opt_default("g", norm == PAL ? "15" : "18");
|
||||
|
||||
opt_default("b", "2040000");
|
||||
@ -4248,6 +4262,7 @@ static int opt_target(const char *opt, const char *arg)
|
||||
|
||||
opt_frame_size("vcodec", norm == PAL ? "720x576" : "720x480");
|
||||
opt_frame_rate("r", frame_rates[norm]);
|
||||
opt_frame_pix_fmt("pix_fmt", "yuv420p");
|
||||
opt_default("g", norm == PAL ? "15" : "18");
|
||||
|
||||
opt_default("b", "6000000");
|
||||
|
ffplay.c | 126
@ -41,6 +41,7 @@
|
||||
# include "libavfilter/avcodec.h"
|
||||
# include "libavfilter/avfilter.h"
|
||||
# include "libavfilter/avfiltergraph.h"
|
||||
# include "libavfilter/vsink_buffer.h"
|
||||
#endif
|
||||
|
||||
#include <SDL.h>
|
||||
@ -253,7 +254,7 @@ static int autoexit;
|
||||
static int exit_on_keydown;
|
||||
static int exit_on_mousedown;
|
||||
static int loop=1;
|
||||
static int framedrop=1;
|
||||
static int framedrop=-1;
|
||||
static enum ShowMode show_mode = SHOW_MODE_NONE;
|
||||
|
||||
static int rdftspeed=20;
|
||||
@ -856,6 +857,57 @@ static void video_audio_display(VideoState *s)
|
||||
}
|
||||
}
|
||||
|
||||
static void stream_close(VideoState *is)
|
||||
{
|
||||
VideoPicture *vp;
|
||||
int i;
|
||||
/* XXX: use a special url_shutdown call to abort parse cleanly */
|
||||
is->abort_request = 1;
|
||||
SDL_WaitThread(is->read_tid, NULL);
|
||||
SDL_WaitThread(is->refresh_tid, NULL);
|
||||
|
||||
/* free all pictures */
|
||||
for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
|
||||
vp = &is->pictq[i];
|
||||
#if CONFIG_AVFILTER
|
||||
if (vp->picref) {
|
||||
avfilter_unref_buffer(vp->picref);
|
||||
vp->picref = NULL;
|
||||
}
|
||||
#endif
|
||||
if (vp->bmp) {
|
||||
SDL_FreeYUVOverlay(vp->bmp);
|
||||
vp->bmp = NULL;
|
||||
}
|
||||
}
|
||||
SDL_DestroyMutex(is->pictq_mutex);
|
||||
SDL_DestroyCond(is->pictq_cond);
|
||||
SDL_DestroyMutex(is->subpq_mutex);
|
||||
SDL_DestroyCond(is->subpq_cond);
|
||||
#if !CONFIG_AVFILTER
|
||||
if (is->img_convert_ctx)
|
||||
sws_freeContext(is->img_convert_ctx);
|
||||
#endif
|
||||
av_free(is);
|
||||
}
|
||||
|
||||
static void do_exit(void)
|
||||
{
|
||||
if (cur_stream) {
|
||||
stream_close(cur_stream);
|
||||
cur_stream = NULL;
|
||||
}
|
||||
uninit_opts();
|
||||
#if CONFIG_AVFILTER
|
||||
avfilter_uninit();
|
||||
#endif
|
||||
if (show_status)
|
||||
printf("\n");
|
||||
SDL_Quit();
|
||||
av_log(NULL, AV_LOG_QUIET, "");
|
||||
exit(0);
|
||||
}
|
||||
|
||||
static int video_open(VideoState *is){
|
||||
int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
|
||||
int w,h;
|
||||
@ -894,7 +946,7 @@ static int video_open(VideoState *is){
|
||||
#endif
|
||||
if (!screen) {
|
||||
fprintf(stderr, "SDL: could not set video mode - exiting\n");
|
||||
return -1;
|
||||
do_exit();
|
||||
}
|
||||
if (!window_title)
|
||||
window_title = input_filename;
|
||||
@ -1087,7 +1139,7 @@ retry:
|
||||
}else{
|
||||
next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
|
||||
}
|
||||
if(framedrop && time > next_target){
|
||||
if((framedrop>0 || (framedrop && is->audio_st)) && time > next_target){
|
||||
is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
|
||||
if(is->pictq_size > 1 || time > next_target + 0.5){
|
||||
/* update queue size and signal for next picture */
|
||||
@ -1204,57 +1256,6 @@ retry:
|
||||
}
|
||||
}
|
||||
|
||||
static void stream_close(VideoState *is)
|
||||
{
|
||||
VideoPicture *vp;
|
||||
int i;
|
||||
/* XXX: use a special url_shutdown call to abort parse cleanly */
|
||||
is->abort_request = 1;
|
||||
SDL_WaitThread(is->read_tid, NULL);
|
||||
SDL_WaitThread(is->refresh_tid, NULL);
|
||||
|
||||
/* free all pictures */
|
||||
for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
|
||||
vp = &is->pictq[i];
|
||||
#if CONFIG_AVFILTER
|
||||
if (vp->picref) {
|
||||
avfilter_unref_buffer(vp->picref);
|
||||
vp->picref = NULL;
|
||||
}
|
||||
#endif
|
||||
if (vp->bmp) {
|
||||
SDL_FreeYUVOverlay(vp->bmp);
|
||||
vp->bmp = NULL;
|
||||
}
|
||||
}
|
||||
SDL_DestroyMutex(is->pictq_mutex);
|
||||
SDL_DestroyCond(is->pictq_cond);
|
||||
SDL_DestroyMutex(is->subpq_mutex);
|
||||
SDL_DestroyCond(is->subpq_cond);
|
||||
#if !CONFIG_AVFILTER
|
||||
if (is->img_convert_ctx)
|
||||
sws_freeContext(is->img_convert_ctx);
|
||||
#endif
|
||||
av_free(is);
|
||||
}
|
||||
|
||||
static void do_exit(void)
|
||||
{
|
||||
if (cur_stream) {
|
||||
stream_close(cur_stream);
|
||||
cur_stream = NULL;
|
||||
}
|
||||
uninit_opts();
|
||||
#if CONFIG_AVFILTER
|
||||
avfilter_uninit();
|
||||
#endif
|
||||
if (show_status)
|
||||
printf("\n");
|
||||
SDL_Quit();
|
||||
av_log(NULL, AV_LOG_QUIET, "");
|
||||
exit(0);
|
||||
}
|
||||
|
||||
/* allocate a picture (needs to do that in main thread to avoid
|
||||
potential locking problems */
|
||||
static void alloc_picture(void *opaque)
|
||||
@ -1643,7 +1644,7 @@ static int input_query_formats(AVFilterContext *ctx)
|
||||
priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
|
||||
};
|
||||
|
||||
avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1682,7 +1683,7 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
|
||||
{
|
||||
char sws_flags_str[128];
|
||||
int ret;
|
||||
FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
|
||||
enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
|
||||
AVFilterContext *filt_src = NULL, *filt_out = NULL;
|
||||
snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
|
||||
graph->scale_sws_opts = av_strdup(sws_flags_str);
|
||||
@ -1690,8 +1691,8 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
|
||||
if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
|
||||
NULL, is, graph)) < 0)
|
||||
goto the_end;
|
||||
if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
|
||||
NULL, &ffsink_ctx, graph)) < 0)
|
||||
if ((ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
|
||||
NULL, pix_fmts, graph)) < 0)
|
||||
goto the_end;
|
||||
|
||||
if(vfilters) {
|
||||
@ -1748,13 +1749,14 @@ static int video_thread(void *arg)
|
||||
AVPacket pkt;
|
||||
#else
|
||||
AVFilterBufferRef *picref;
|
||||
AVRational tb;
|
||||
AVRational tb = filt_out->inputs[0]->time_base;
|
||||
#endif
|
||||
while (is->paused && !is->videoq.abort_request)
|
||||
SDL_Delay(10);
|
||||
#if CONFIG_AVFILTER
|
||||
ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
|
||||
ret = av_vsink_buffer_get_video_buffer_ref(filt_out, &picref, 0);
|
||||
if (picref) {
|
||||
avfilter_fill_frame_from_video_buffer_ref(frame, picref);
|
||||
pts_int = picref->pts;
|
||||
pos = picref->pos;
|
||||
frame->opaque = picref;
|
||||
@ -1776,7 +1778,7 @@ static int video_thread(void *arg)
|
||||
|
||||
if (ret < 0) goto the_end;
|
||||
|
||||
if (!ret)
|
||||
if (!picref)
|
||||
continue;
|
||||
|
||||
pts = pts_int*av_q2d(is->video_st->time_base);
|
||||
@ -2008,7 +2010,7 @@ static int audio_decode_frame(VideoState *is, double *pts_ptr)
|
||||
if (is->reformat_ctx) {
|
||||
const void *ibuf[6]= {is->audio_buf1};
|
||||
void *obuf[6]= {is->audio_buf2};
|
||||
int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
|
||||
int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
|
||||
int ostride[6]= {2};
|
||||
int len= data_size/istride[0];
|
||||
if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
|
||||
|
ffprobe.c | 16
@ -262,18 +262,18 @@ static void show_format(AVFormatContext *fmt_ctx)
|
||||
static int open_input_file(AVFormatContext **fmt_ctx_ptr, const char *filename)
|
||||
{
|
||||
int err, i;
|
||||
AVFormatParameters fmt_params;
|
||||
AVFormatContext *fmt_ctx;
|
||||
AVFormatContext *fmt_ctx = NULL;
|
||||
AVDictionaryEntry *t;
|
||||
|
||||
memset(&fmt_params, 0, sizeof(fmt_params));
|
||||
fmt_params.prealloced_context = 1;
|
||||
fmt_ctx = avformat_alloc_context();
|
||||
set_context_opts(fmt_ctx, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
|
||||
|
||||
if ((err = av_open_input_file(&fmt_ctx, filename, iformat, 0, &fmt_params)) < 0) {
|
||||
if ((err = avformat_open_input(&fmt_ctx, filename, iformat, &format_opts)) < 0) {
|
||||
print_error(filename, err);
|
||||
return err;
|
||||
}
|
||||
if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
|
||||
return AVERROR_OPTION_NOT_FOUND;
|
||||
}
|
||||
|
||||
|
||||
/* fill the streams in the format context */
|
||||
if ((err = av_find_stream_info(fmt_ctx)) < 0) {
|
||||
|
ffserver.c | 36
@ -207,7 +207,7 @@ typedef struct FFStream {
|
||||
char filename[1024]; /* stream filename */
|
||||
struct FFStream *feed; /* feed we are using (can be null if
|
||||
coming from file) */
|
||||
AVFormatParameters *ap_in; /* input parameters */
|
||||
AVDictionary *in_opts; /* input parameters */
|
||||
AVInputFormat *ifmt; /* if non NULL, force input format */
|
||||
AVOutputFormat *fmt;
|
||||
IPAddressACL *acl;
|
||||
@ -2128,7 +2128,7 @@ static int open_input_stream(HTTPContext *c, const char *info)
|
||||
{
|
||||
char buf[128];
|
||||
char input_filename[1024];
|
||||
AVFormatContext *s;
|
||||
AVFormatContext *s = NULL;
|
||||
int buf_size, i, ret;
|
||||
int64_t stream_pos;
|
||||
|
||||
@ -2159,8 +2159,7 @@ static int open_input_stream(HTTPContext *c, const char *info)
|
||||
return -1;
|
||||
|
||||
/* open stream */
|
||||
if ((ret = av_open_input_file(&s, input_filename, c->stream->ifmt,
|
||||
buf_size, c->stream->ap_in)) < 0) {
|
||||
if ((ret = avformat_open_input(&s, input_filename, c->stream->ifmt, &c->stream->in_opts)) < 0) {
|
||||
http_log("could not open %s: %d\n", input_filename, ret);
|
||||
return -1;
|
||||
}
|
||||
@ -2270,8 +2269,7 @@ static int http_prepare_data(HTTPContext *c)
|
||||
c->fmt_ctx.preload = (int)(0.5*AV_TIME_BASE);
|
||||
c->fmt_ctx.max_delay = (int)(0.7*AV_TIME_BASE);
|
||||
|
||||
av_set_parameters(&c->fmt_ctx, NULL);
|
||||
if (av_write_header(&c->fmt_ctx) < 0) {
|
||||
if (avformat_write_header(&c->fmt_ctx, NULL) < 0) {
|
||||
http_log("Error writing output header\n");
|
||||
return -1;
|
||||
}
|
||||
@ -2711,11 +2709,14 @@ static int http_receive_data(HTTPContext *c)
|
||||
}
|
||||
} else {
|
||||
/* We have a header in our hands that contains useful data */
|
||||
AVFormatContext *s = NULL;
|
||||
AVFormatContext *s = avformat_alloc_context();
|
||||
AVIOContext *pb;
|
||||
AVInputFormat *fmt_in;
|
||||
int i;
|
||||
|
||||
if (!s)
|
||||
goto fail;
|
||||
|
||||
/* use feed output format name to find corresponding input format */
|
||||
fmt_in = av_find_input_format(feed->fmt->name);
|
||||
if (!fmt_in)
|
||||
@ -2725,7 +2726,8 @@ static int http_receive_data(HTTPContext *c)
|
||||
0, NULL, NULL, NULL, NULL);
|
||||
pb->seekable = 0;
|
||||
|
||||
if (av_open_input_stream(&s, pb, c->stream->feed_filename, fmt_in, NULL) < 0) {
|
||||
s->pb = pb;
|
||||
if (avformat_open_input(&s, c->stream->feed_filename, fmt_in, NULL) < 0) {
|
||||
av_free(pb);
|
||||
goto fail;
|
||||
}
|
||||
@ -3445,8 +3447,7 @@ static int rtp_new_av_stream(HTTPContext *c,
|
||||
/* XXX: close stream */
|
||||
goto fail;
|
||||
}
|
||||
av_set_parameters(ctx, NULL);
|
||||
if (av_write_header(ctx) < 0) {
|
||||
if (avformat_write_header(ctx, NULL) < 0) {
|
||||
fail:
|
||||
if (h)
|
||||
url_close(h);
|
||||
@ -3600,28 +3601,25 @@ static void extract_mpeg4_header(AVFormatContext *infile)
|
||||
static void build_file_streams(void)
|
||||
{
|
||||
FFStream *stream, *stream_next;
|
||||
AVFormatContext *infile;
|
||||
int i, ret;
|
||||
|
||||
/* gather all streams */
|
||||
for(stream = first_stream; stream != NULL; stream = stream_next) {
|
||||
AVFormatContext *infile = NULL;
|
||||
stream_next = stream->next;
|
||||
if (stream->stream_type == STREAM_TYPE_LIVE &&
|
||||
!stream->feed) {
|
||||
/* the stream comes from a file */
|
||||
/* try to open the file */
|
||||
/* open stream */
|
||||
stream->ap_in = av_mallocz(sizeof(AVFormatParameters));
|
||||
if (stream->fmt && !strcmp(stream->fmt->name, "rtp")) {
|
||||
/* specific case : if transport stream output to RTP,
|
||||
we use a raw transport stream reader */
|
||||
stream->ap_in->mpeg2ts_raw = 1;
|
||||
stream->ap_in->mpeg2ts_compute_pcr = 1;
|
||||
av_dict_set(&stream->in_opts, "mpeg2ts_compute_pcr", "1", 0);
|
||||
}
|
||||
|
||||
http_log("Opening file '%s'\n", stream->feed_filename);
|
||||
if ((ret = av_open_input_file(&infile, stream->feed_filename,
|
||||
stream->ifmt, 0, stream->ap_in)) < 0) {
|
||||
if ((ret = avformat_open_input(&infile, stream->feed_filename, stream->ifmt, &stream->in_opts)) < 0) {
|
||||
http_log("Could not open '%s': %d\n", stream->feed_filename, ret);
|
||||
/* remove stream (no need to spend more time on it) */
|
||||
fail:
|
||||
@ -3681,10 +3679,10 @@ static void build_feed_streams(void)
|
||||
|
||||
if (url_exist(feed->feed_filename)) {
|
||||
/* See if it matches */
|
||||
AVFormatContext *s;
|
||||
AVFormatContext *s = NULL;
|
||||
int matches = 0;
|
||||
|
||||
if (av_open_input_file(&s, feed->feed_filename, NULL, FFM_PACKET_SIZE, NULL) >= 0) {
|
||||
if (avformat_open_input(&s, feed->feed_filename, NULL, NULL) >= 0) {
|
||||
/* Now see if it matches */
|
||||
if (s->nb_streams == feed->nb_streams) {
|
||||
matches = 1;
|
||||
@ -3951,7 +3949,7 @@ static int ffserver_opt_default(const char *opt, const char *arg,
|
||||
AVCodecContext *avctx, int type)
|
||||
{
|
||||
int ret = 0;
|
||||
const AVOption *o = av_find_opt(avctx, opt, NULL, type, type);
|
||||
const AVOption *o = av_opt_find(avctx, opt, NULL, type, 0);
|
||||
if(o)
|
||||
ret = av_set_string3(avctx, opt, arg, 1, NULL);
|
||||
return ret;
|
||||
|
@ -2179,7 +2179,7 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
|
||||
}
|
||||
|
||||
data_size_tmp = samples * avctx->channels *
|
||||
(av_get_bits_per_sample_fmt(avctx->sample_fmt) / 8);
|
||||
av_get_bytes_per_sample(avctx->sample_fmt);
|
||||
if (*data_size < data_size_tmp) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Output buffer too small (%d) or trying to output too many samples (%d) for this frame.\n",
|
||||
|
@ -1422,7 +1422,7 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data, int *data_size,
|
||||
}
|
||||
}
|
||||
*data_size = s->num_blocks * 256 * avctx->channels *
|
||||
(av_get_bits_per_sample_fmt(avctx->sample_fmt) / 8);
|
||||
av_get_bytes_per_sample(avctx->sample_fmt);
|
||||
return FFMIN(buf_size, s->frame_size);
|
||||
}
|
||||
|
||||
|
@ -1450,7 +1450,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
|
||||
// check for size of decoded data
|
||||
size = ctx->cur_frame_length * avctx->channels *
|
||||
(av_get_bits_per_sample_fmt(avctx->sample_fmt) >> 3);
|
||||
av_get_bytes_per_sample(avctx->sample_fmt);
|
||||
|
||||
if (size > *data_size) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Decoded data exceeds buffer size.\n");
|
||||
@ -1714,7 +1714,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
ctx->crc_buffer = av_malloc(sizeof(*ctx->crc_buffer) *
|
||||
ctx->cur_frame_length *
|
||||
avctx->channels *
|
||||
(av_get_bits_per_sample_fmt(avctx->sample_fmt) >> 3));
|
||||
av_get_bytes_per_sample(avctx->sample_fmt));
|
||||
if (!ctx->crc_buffer) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Allocating buffer memory failed.\n");
|
||||
decode_end(avctx);
|
||||
|
@ -709,6 +709,10 @@ typedef struct RcOverride{
|
||||
* Codec supports slice-based (or partition-based) multithreading.
|
||||
*/
|
||||
#define CODEC_CAP_SLICE_THREADS 0x2000
|
||||
/**
|
||||
* Codec is lossless.
|
||||
*/
|
||||
#define CODEC_CAP_LOSSLESS 0x80000000
|
||||
|
||||
//The following defines may change, don't expect compatibility if you use them.
|
||||
#define MB_TYPE_INTRA4x4 0x0001
|
||||
|
@ -90,7 +90,8 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
return -1;
|
||||
}
|
||||
|
||||
s->version_b = avctx->codec_tag == MKTAG('B','I','K','b');
|
||||
if (avctx->extradata && avctx->extradata_size > 0)
|
||||
s->version_b = avctx->extradata[0];
|
||||
|
||||
if (avctx->codec->id == CODEC_ID_BINKAUDIO_RDFT) {
|
||||
// audio is already interleaved for the RDFT format variant
|
||||
|
@ -161,19 +161,11 @@ void ff_init_cabac_states(CABACContext *c){
|
||||
ff_h264_mps_state[2*i+1]= 2*mps_state[i]+1;
|
||||
|
||||
if( i ){
|
||||
#ifdef BRANCHLESS_CABAC_DECODER
|
||||
ff_h264_mlps_state[128-2*i-1]= 2*lps_state[i]+0;
|
||||
ff_h264_mlps_state[128-2*i-2]= 2*lps_state[i]+1;
|
||||
}else{
|
||||
ff_h264_mlps_state[128-2*i-1]= 1;
|
||||
ff_h264_mlps_state[128-2*i-2]= 0;
|
||||
#else
|
||||
ff_h264_lps_state[2*i+0]= 2*lps_state[i]+0;
|
||||
ff_h264_lps_state[2*i+1]= 2*lps_state[i]+1;
|
||||
}else{
|
||||
ff_h264_lps_state[2*i+0]= 1;
|
||||
ff_h264_lps_state[2*i+1]= 0;
|
||||
#endif
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -27,16 +27,15 @@
|
||||
#ifndef AVCODEC_CABAC_H
|
||||
#define AVCODEC_CABAC_H
|
||||
|
||||
#include <stddef.h>
|
||||
|
||||
#include "put_bits.h"
|
||||
|
||||
//#undef NDEBUG
|
||||
#include <assert.h>
|
||||
#include "libavutil/x86_cpu.h"
|
||||
|
||||
#define CABAC_BITS 16
|
||||
#define CABAC_MASK ((1<<CABAC_BITS)-1)
|
||||
#define BRANCHLESS_CABAC_DECODER 1
|
||||
//#define ARCH_X86_DISABLED 1
|
||||
|
||||
typedef struct CABACContext{
|
||||
int low;
|
||||
@ -57,6 +56,9 @@ extern uint8_t ff_h264_mps_state[2*64]; ///< transIdxMPS
|
||||
extern uint8_t ff_h264_lps_state[2*64]; ///< transIdxLPS
|
||||
extern const uint8_t ff_h264_norm_shift[512];
|
||||
|
||||
#if ARCH_X86
|
||||
# include "x86/cabac.h"
|
||||
#endif
|
||||
|
||||
void ff_init_cabac_encoder(CABACContext *c, uint8_t *buf, int buf_size);
|
||||
void ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size);
|
||||
@ -270,7 +272,24 @@ static void refill(CABACContext *c){
|
||||
c->bytestream+= CABAC_BITS/8;
|
||||
}
|
||||
|
||||
#if ! ( ARCH_X86 && HAVE_7REGS && HAVE_EBX_AVAILABLE && !defined(BROKEN_RELOCATIONS) )
|
||||
static inline void renorm_cabac_decoder(CABACContext *c){
|
||||
while(c->range < 0x100){
|
||||
c->range+= c->range;
|
||||
c->low+= c->low;
|
||||
if(!(c->low & CABAC_MASK))
|
||||
refill(c);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void renorm_cabac_decoder_once(CABACContext *c){
|
||||
int shift= (uint32_t)(c->range - 0x100)>>31;
|
||||
c->range<<= shift;
|
||||
c->low <<= shift;
|
||||
if(!(c->low & CABAC_MASK))
|
||||
refill(c);
|
||||
}
|
||||
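For readers following this hunk: renorm_cabac_decoder_once() above replaces the "range < 0x100" branch with a shift amount. A standalone sketch of the same trick (plain C, independent of CABACContext, names are illustrative only) showing why (uint32_t)(range - 0x100) >> 31 yields exactly 0 or 1:

#include <assert.h>
#include <stdint.h>

/* Branchless one-step renormalization: when range < 0x100 the subtraction
   wraps negative, the top bit becomes 1 after the unsigned shift, and both
   range and low are doubled; otherwise the shift is 0 and nothing changes. */
static inline void renorm_once(uint32_t *range, uint32_t *low)
{
    uint32_t shift = (uint32_t)(*range - 0x100) >> 31;
    *range <<= shift;
    *low   <<= shift;
}

int main(void)
{
    uint32_t r = 0x80, l = 0x1234;   /* below 0x100: needs renormalization */
    renorm_once(&r, &l);
    assert(r == 0x100 && l == 0x2468);

    r = 0x1FF; l = 0x1234;           /* already >= 0x100: left unchanged */
    renorm_once(&r, &l);
    assert(r == 0x1FF && l == 0x1234);
    return 0;
}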
|
||||
#ifndef get_cabac_inline
|
||||
static void refill2(CABACContext *c){
|
||||
int i, x;
|
||||
|
||||
@ -288,279 +307,13 @@ static void refill2(CABACContext *c){
|
||||
c->low += x<<i;
|
||||
c->bytestream+= CABAC_BITS/8;
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline void renorm_cabac_decoder(CABACContext *c){
|
||||
while(c->range < 0x100){
|
||||
c->range+= c->range;
|
||||
c->low+= c->low;
|
||||
if(!(c->low & CABAC_MASK))
|
||||
refill(c);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void renorm_cabac_decoder_once(CABACContext *c){
|
||||
#ifdef ARCH_X86_DISABLED
|
||||
int temp;
|
||||
#if 0
|
||||
//P3:683 athlon:475
|
||||
__asm__(
|
||||
"lea -0x100(%0), %2 \n\t"
|
||||
"shr $31, %2 \n\t" //FIXME 31->63 for x86-64
|
||||
"shl %%cl, %0 \n\t"
|
||||
"shl %%cl, %1 \n\t"
|
||||
: "+r"(c->range), "+r"(c->low), "+c"(temp)
|
||||
);
|
||||
#elif 0
|
||||
//P3:680 athlon:474
|
||||
__asm__(
|
||||
"cmp $0x100, %0 \n\t"
|
||||
"setb %%cl \n\t" //FIXME 31->63 for x86-64
|
||||
"shl %%cl, %0 \n\t"
|
||||
"shl %%cl, %1 \n\t"
|
||||
: "+r"(c->range), "+r"(c->low), "+c"(temp)
|
||||
);
|
||||
#elif 1
|
||||
int temp2;
|
||||
//P3:665 athlon:517
|
||||
__asm__(
|
||||
"lea -0x100(%0), %%eax \n\t"
|
||||
"cltd \n\t"
|
||||
"mov %0, %%eax \n\t"
|
||||
"and %%edx, %0 \n\t"
|
||||
"and %1, %%edx \n\t"
|
||||
"add %%eax, %0 \n\t"
|
||||
"add %%edx, %1 \n\t"
|
||||
: "+r"(c->range), "+r"(c->low), "+a"(temp), "+d"(temp2)
|
||||
);
|
||||
#elif 0
|
||||
int temp2;
|
||||
//P3:673 athlon:509
|
||||
__asm__(
|
||||
"cmp $0x100, %0 \n\t"
|
||||
"sbb %%edx, %%edx \n\t"
|
||||
"mov %0, %%eax \n\t"
|
||||
"and %%edx, %0 \n\t"
|
||||
"and %1, %%edx \n\t"
|
||||
"add %%eax, %0 \n\t"
|
||||
"add %%edx, %1 \n\t"
|
||||
: "+r"(c->range), "+r"(c->low), "+a"(temp), "+d"(temp2)
|
||||
);
|
||||
#else
|
||||
int temp2;
|
||||
//P3:677 athlon:511
|
||||
__asm__(
|
||||
"cmp $0x100, %0 \n\t"
|
||||
"lea (%0, %0), %%eax \n\t"
|
||||
"lea (%1, %1), %%edx \n\t"
|
||||
"cmovb %%eax, %0 \n\t"
|
||||
"cmovb %%edx, %1 \n\t"
|
||||
: "+r"(c->range), "+r"(c->low), "+a"(temp), "+d"(temp2)
|
||||
);
|
||||
#endif
|
||||
#else
|
||||
//P3:675 athlon:476
|
||||
int shift= (uint32_t)(c->range - 0x100)>>31;
|
||||
c->range<<= shift;
|
||||
c->low <<= shift;
|
||||
#endif
|
||||
if(!(c->low & CABAC_MASK))
|
||||
refill(c);
|
||||
}
|
||||
|
||||
static av_always_inline int get_cabac_inline(CABACContext *c, uint8_t * const state){
|
||||
//FIXME gcc generates duplicate load/stores for c->low and c->range
|
||||
#define LOW "0"
|
||||
#define RANGE "4"
|
||||
#if ARCH_X86_64
|
||||
#define BYTESTART "16"
|
||||
#define BYTE "24"
|
||||
#define BYTEEND "32"
|
||||
#else
|
||||
#define BYTESTART "12"
|
||||
#define BYTE "16"
|
||||
#define BYTEEND "20"
|
||||
#endif
|
||||
#if ARCH_X86 && HAVE_7REGS && HAVE_EBX_AVAILABLE && !defined(BROKEN_RELOCATIONS)
|
||||
int bit;
|
||||
|
||||
#ifndef BRANCHLESS_CABAC_DECODER
|
||||
__asm__ volatile(
|
||||
"movzbl (%1), %0 \n\t"
|
||||
"movl "RANGE "(%2), %%ebx \n\t"
|
||||
"movl "RANGE "(%2), %%edx \n\t"
|
||||
"andl $0xC0, %%ebx \n\t"
|
||||
"movzbl "MANGLE(ff_h264_lps_range)"(%0, %%ebx, 2), %%esi\n\t"
|
||||
"movl "LOW "(%2), %%ebx \n\t"
|
||||
//eax:state ebx:low, edx:range, esi:RangeLPS
|
||||
"subl %%esi, %%edx \n\t"
|
||||
"movl %%edx, %%ecx \n\t"
|
||||
"shll $17, %%ecx \n\t"
|
||||
"cmpl %%ecx, %%ebx \n\t"
|
||||
" ja 1f \n\t"
|
||||
|
||||
#if 1
|
||||
//athlon:4067 P3:4110
|
||||
"lea -0x100(%%edx), %%ecx \n\t"
|
||||
"shr $31, %%ecx \n\t"
|
||||
"shl %%cl, %%edx \n\t"
|
||||
"shl %%cl, %%ebx \n\t"
|
||||
#else
|
||||
//athlon:4057 P3:4130
|
||||
"cmp $0x100, %%edx \n\t" //FIXME avoidable
|
||||
"setb %%cl \n\t"
|
||||
"shl %%cl, %%edx \n\t"
|
||||
"shl %%cl, %%ebx \n\t"
|
||||
#endif
|
||||
"movzbl "MANGLE(ff_h264_mps_state)"(%0), %%ecx \n\t"
|
||||
"movb %%cl, (%1) \n\t"
|
||||
//eax:state ebx:low, edx:range, esi:RangeLPS
|
||||
"test %%bx, %%bx \n\t"
|
||||
" jnz 2f \n\t"
|
||||
"mov "BYTE "(%2), %%"REG_S" \n\t"
|
||||
"subl $0xFFFF, %%ebx \n\t"
|
||||
"movzwl (%%"REG_S"), %%ecx \n\t"
|
||||
"bswap %%ecx \n\t"
|
||||
"shrl $15, %%ecx \n\t"
|
||||
"add $2, %%"REG_S" \n\t"
|
||||
"addl %%ecx, %%ebx \n\t"
|
||||
"mov %%"REG_S", "BYTE "(%2) \n\t"
|
||||
"jmp 2f \n\t"
|
||||
"1: \n\t"
|
||||
//eax:state ebx:low, edx:range, esi:RangeLPS
|
||||
"subl %%ecx, %%ebx \n\t"
|
||||
"movl %%esi, %%edx \n\t"
|
||||
"movzbl " MANGLE(ff_h264_norm_shift) "(%%esi), %%ecx \n\t"
|
||||
"shll %%cl, %%ebx \n\t"
|
||||
"shll %%cl, %%edx \n\t"
|
||||
"movzbl "MANGLE(ff_h264_lps_state)"(%0), %%ecx \n\t"
|
||||
"movb %%cl, (%1) \n\t"
|
||||
"add $1, %0 \n\t"
|
||||
"test %%bx, %%bx \n\t"
|
||||
" jnz 2f \n\t"
|
||||
|
||||
"mov "BYTE "(%2), %%"REG_c" \n\t"
|
||||
"movzwl (%%"REG_c"), %%esi \n\t"
|
||||
"bswap %%esi \n\t"
|
||||
"shrl $15, %%esi \n\t"
|
||||
"subl $0xFFFF, %%esi \n\t"
|
||||
"add $2, %%"REG_c" \n\t"
|
||||
"mov %%"REG_c", "BYTE "(%2) \n\t"
|
||||
|
||||
"leal -1(%%ebx), %%ecx \n\t"
|
||||
"xorl %%ebx, %%ecx \n\t"
|
||||
"shrl $15, %%ecx \n\t"
|
||||
"movzbl " MANGLE(ff_h264_norm_shift) "(%%ecx), %%ecx \n\t"
|
||||
"neg %%ecx \n\t"
|
||||
"add $7, %%ecx \n\t"
|
||||
|
||||
"shll %%cl , %%esi \n\t"
|
||||
"addl %%esi, %%ebx \n\t"
|
||||
"2: \n\t"
|
||||
"movl %%edx, "RANGE "(%2) \n\t"
|
||||
"movl %%ebx, "LOW "(%2) \n\t"
|
||||
:"=&a"(bit) //FIXME this is fragile gcc either runs out of registers or miscompiles it (for example if "+a"(bit) or "+m"(*state) is used
|
||||
:"r"(state), "r"(c)
|
||||
: "%"REG_c, "%ebx", "%edx", "%"REG_S, "memory"
|
||||
);
|
||||
bit&=1;
|
||||
#else /* BRANCHLESS_CABAC_DECODER */
|
||||
|
||||
|
||||
#if HAVE_FAST_CMOV
|
||||
#define BRANCHLESS_GET_CABAC_UPDATE(ret, cabac, statep, low, lowword, range, tmp, tmpbyte)\
|
||||
"mov "tmp" , %%ecx \n\t"\
|
||||
"shl $17 , "tmp" \n\t"\
|
||||
"cmp "low" , "tmp" \n\t"\
|
||||
"cmova %%ecx , "range" \n\t"\
|
||||
"sbb %%ecx , %%ecx \n\t"\
|
||||
"and %%ecx , "tmp" \n\t"\
|
||||
"sub "tmp" , "low" \n\t"\
|
||||
"xor %%ecx , "ret" \n\t"
|
||||
#else /* HAVE_FAST_CMOV */
|
||||
#define BRANCHLESS_GET_CABAC_UPDATE(ret, cabac, statep, low, lowword, range, tmp, tmpbyte)\
|
||||
"mov "tmp" , %%ecx \n\t"\
|
||||
"shl $17 , "tmp" \n\t"\
|
||||
"sub "low" , "tmp" \n\t"\
|
||||
"sar $31 , "tmp" \n\t" /*lps_mask*/\
|
||||
"sub %%ecx , "range" \n\t" /*RangeLPS - range*/\
|
||||
"and "tmp" , "range" \n\t" /*(RangeLPS - range)&lps_mask*/\
|
||||
"add %%ecx , "range" \n\t" /*new range*/\
|
||||
"shl $17 , %%ecx \n\t"\
|
||||
"and "tmp" , %%ecx \n\t"\
|
||||
"sub %%ecx , "low" \n\t"\
|
||||
"xor "tmp" , "ret" \n\t"
|
||||
#endif /* HAVE_FAST_CMOV */
|
||||
|
||||
|
||||
#define BRANCHLESS_GET_CABAC(ret, cabac, statep, low, lowword, range, tmp, tmpbyte)\
|
||||
"movzbl "statep" , "ret" \n\t"\
|
||||
"mov "range" , "tmp" \n\t"\
|
||||
"and $0xC0 , "range" \n\t"\
|
||||
"movzbl "MANGLE(ff_h264_lps_range)"("ret", "range", 2), "range" \n\t"\
|
||||
"sub "range" , "tmp" \n\t"\
|
||||
BRANCHLESS_GET_CABAC_UPDATE(ret, cabac, statep, low, lowword, range, tmp, tmpbyte)\
|
||||
"movzbl " MANGLE(ff_h264_norm_shift) "("range"), %%ecx \n\t"\
|
||||
"shl %%cl , "range" \n\t"\
|
||||
"movzbl "MANGLE(ff_h264_mlps_state)"+128("ret"), "tmp" \n\t"\
|
||||
"mov "tmpbyte" , "statep" \n\t"\
|
||||
"shl %%cl , "low" \n\t"\
|
||||
"test "lowword" , "lowword" \n\t"\
|
||||
" jnz 1f \n\t"\
|
||||
"mov "BYTE"("cabac"), %%"REG_c" \n\t"\
|
||||
"movzwl (%%"REG_c") , "tmp" \n\t"\
|
||||
"bswap "tmp" \n\t"\
|
||||
"shr $15 , "tmp" \n\t"\
|
||||
"sub $0xFFFF , "tmp" \n\t"\
|
||||
"add $2 , %%"REG_c" \n\t"\
|
||||
"mov %%"REG_c" , "BYTE "("cabac") \n\t"\
|
||||
"lea -1("low") , %%ecx \n\t"\
|
||||
"xor "low" , %%ecx \n\t"\
|
||||
"shr $15 , %%ecx \n\t"\
|
||||
"movzbl " MANGLE(ff_h264_norm_shift) "(%%ecx), %%ecx \n\t"\
|
||||
"neg %%ecx \n\t"\
|
||||
"add $7 , %%ecx \n\t"\
|
||||
"shl %%cl , "tmp" \n\t"\
|
||||
"add "tmp" , "low" \n\t"\
|
||||
"1: \n\t"
|
||||
|
||||
__asm__ volatile(
|
||||
"movl "RANGE "(%2), %%esi \n\t"
|
||||
"movl "LOW "(%2), %%ebx \n\t"
|
||||
BRANCHLESS_GET_CABAC("%0", "%2", "(%1)", "%%ebx", "%%bx", "%%esi", "%%edx", "%%dl")
|
||||
"movl %%esi, "RANGE "(%2) \n\t"
|
||||
"movl %%ebx, "LOW "(%2) \n\t"
|
||||
|
||||
:"=&a"(bit)
|
||||
:"r"(state), "r"(c)
|
||||
: "%"REG_c, "%ebx", "%edx", "%esi", "memory"
|
||||
);
|
||||
bit&=1;
|
||||
#endif /* BRANCHLESS_CABAC_DECODER */
|
||||
#else /* ARCH_X86 && HAVE_7REGS && HAVE_EBX_AVAILABLE && !defined(BROKEN_RELOCATIONS) */
|
||||
int s = *state;
|
||||
int RangeLPS= ff_h264_lps_range[2*(c->range&0xC0) + s];
|
||||
int bit, lps_mask av_unused;
|
||||
int bit, lps_mask;
|
||||
|
||||
c->range -= RangeLPS;
|
||||
#ifndef BRANCHLESS_CABAC_DECODER
|
||||
if(c->low < (c->range<<(CABAC_BITS+1))){
|
||||
bit= s&1;
|
||||
*state= ff_h264_mps_state[s];
|
||||
renorm_cabac_decoder_once(c);
|
||||
}else{
|
||||
bit= ff_h264_norm_shift[RangeLPS];
|
||||
c->low -= (c->range<<(CABAC_BITS+1));
|
||||
*state= ff_h264_lps_state[s];
|
||||
c->range = RangeLPS<<bit;
|
||||
c->low <<= bit;
|
||||
bit= (s&1)^1;
|
||||
|
||||
if(!(c->low & CABAC_MASK)){
|
||||
refill2(c);
|
||||
}
|
||||
}
|
||||
#else /* BRANCHLESS_CABAC_DECODER */
|
||||
lps_mask= ((c->range<<(CABAC_BITS+1)) - c->low)>>31;
|
||||
|
||||
c->low -= (c->range<<(CABAC_BITS+1)) & lps_mask;
|
||||
@ -575,10 +328,9 @@ static av_always_inline int get_cabac_inline(CABACContext *c, uint8_t * const st
|
||||
c->low <<= lps_mask;
|
||||
if(!(c->low & CABAC_MASK))
|
||||
refill2(c);
|
||||
#endif /* BRANCHLESS_CABAC_DECODER */
|
||||
#endif /* ARCH_X86 && HAVE_7REGS && HAVE_EBX_AVAILABLE && !defined(BROKEN_RELOCATIONS) */
|
||||
return bit;
|
||||
}
|
||||
#endif
|
||||
|
||||
static int av_noinline av_unused get_cabac_noinline(CABACContext *c, uint8_t * const state){
|
||||
return get_cabac_inline(c,state);
|
||||
@ -589,36 +341,6 @@ static int av_unused get_cabac(CABACContext *c, uint8_t * const state){
|
||||
}
|
||||
|
||||
static int av_unused get_cabac_bypass(CABACContext *c){
|
||||
#if 0 //not faster
|
||||
int bit;
|
||||
__asm__ volatile(
|
||||
"movl "RANGE "(%1), %%ebx \n\t"
|
||||
"movl "LOW "(%1), %%eax \n\t"
|
||||
"shl $17, %%ebx \n\t"
|
||||
"add %%eax, %%eax \n\t"
|
||||
"sub %%ebx, %%eax \n\t"
|
||||
"cltd \n\t"
|
||||
"and %%edx, %%ebx \n\t"
|
||||
"add %%ebx, %%eax \n\t"
|
||||
"test %%ax, %%ax \n\t"
|
||||
" jnz 1f \n\t"
|
||||
"movl "BYTE "(%1), %%"REG_b" \n\t"
|
||||
"subl $0xFFFF, %%eax \n\t"
|
||||
"movzwl (%%"REG_b"), %%ecx \n\t"
|
||||
"bswap %%ecx \n\t"
|
||||
"shrl $15, %%ecx \n\t"
|
||||
"addl $2, %%"REG_b" \n\t"
|
||||
"addl %%ecx, %%eax \n\t"
|
||||
"movl %%"REG_b", "BYTE "(%1) \n\t"
|
||||
"1: \n\t"
|
||||
"movl %%eax, "LOW "(%1) \n\t"
|
||||
|
||||
:"=&d"(bit)
|
||||
:"r"(c)
|
||||
: "%eax", "%"REG_b, "%ecx", "memory"
|
||||
);
|
||||
return bit+1;
|
||||
#else
|
||||
int range;
|
||||
c->low += c->low;
|
||||
|
||||
@ -632,42 +354,11 @@ static int av_unused get_cabac_bypass(CABACContext *c){
|
||||
c->low -= range;
|
||||
return 1;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
#ifndef get_cabac_bypass_sign
|
||||
static av_always_inline int get_cabac_bypass_sign(CABACContext *c, int val){
|
||||
#if ARCH_X86 && HAVE_EBX_AVAILABLE
|
||||
__asm__ volatile(
|
||||
"movl "RANGE "(%1), %%ebx \n\t"
|
||||
"movl "LOW "(%1), %%eax \n\t"
|
||||
"shl $17, %%ebx \n\t"
|
||||
"add %%eax, %%eax \n\t"
|
||||
"sub %%ebx, %%eax \n\t"
|
||||
"cltd \n\t"
|
||||
"and %%edx, %%ebx \n\t"
|
||||
"add %%ebx, %%eax \n\t"
|
||||
"xor %%edx, %%ecx \n\t"
|
||||
"sub %%edx, %%ecx \n\t"
|
||||
"test %%ax, %%ax \n\t"
|
||||
" jnz 1f \n\t"
|
||||
"mov "BYTE "(%1), %%"REG_b" \n\t"
|
||||
"subl $0xFFFF, %%eax \n\t"
|
||||
"movzwl (%%"REG_b"), %%edx \n\t"
|
||||
"bswap %%edx \n\t"
|
||||
"shrl $15, %%edx \n\t"
|
||||
"add $2, %%"REG_b" \n\t"
|
||||
"addl %%edx, %%eax \n\t"
|
||||
"mov %%"REG_b", "BYTE "(%1) \n\t"
|
||||
"1: \n\t"
|
||||
"movl %%eax, "LOW "(%1) \n\t"
|
||||
|
||||
:"+c"(val)
|
||||
:"r"(c)
|
||||
: "%eax", "%"REG_b, "%edx", "memory"
|
||||
);
|
||||
return val;
|
||||
#else
|
||||
int range, mask;
|
||||
c->low += c->low;
|
||||
|
||||
@@ -680,8 +371,8 @@ static av_always_inline int get_cabac_bypass_sign(CABACContext *c, int val){
    range &= mask;
    c->low += range;
    return (val^mask)-mask;
#endif
}
#endif

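The portable fall-back above ends with the usual branch-free conditional negate: mask is either 0 or -1, and (val ^ mask) - mask yields val or -val respectively. A tiny self-contained check of that identity (illustration only, names invented):

    #include <assert.h>

    /* mask must be 0 (keep) or -1 (negate), as produced by the arithmetic
     * shift in get_cabac_bypass_sign(). */
    static int cond_negate(int val, int mask)
    {
        return (val ^ mask) - mask;
    }

    int main(void)
    {
        assert(cond_negate(5,  0) ==  5);
        assert(cond_negate(5, -1) == -5);
        return 0;
    }
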
/**
|
||||
*
|
||||
|
@ -1817,7 +1817,7 @@ static int dca_decode_frame(AVCodecContext * avctx,
|
||||
}
|
||||
|
||||
out_size = 256 / 8 * s->sample_blocks * channels *
|
||||
(av_get_bits_per_sample_fmt(avctx->sample_fmt) / 8);
|
||||
av_get_bytes_per_sample(avctx->sample_fmt);
|
||||
if (*data_size < out_size)
|
||||
return -1;
|
||||
*data_size = out_size;
|
||||
|
@@ -365,8 +365,7 @@ static inline uint32_t quantize(int32_t sample, int bits)
{
    av_assert0(sample < 1 << (bits - 1));
    av_assert0(sample >= -(1 << (bits - 1)));
    sample &= sample & ((1 << bits) - 1);
    return sample;
    return sample & ((1 << bits) - 1);
}

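The simplified return is equivalent to what the two replaced statements computed: it keeps only the low bits of the two's-complement sample. Worked example (illustration only): with bits = 4, quantize(-3, 4) masks 0xFFFFFFFD with 0xF and returns 0xD, the 4-bit two's-complement pattern for -3, while quantize(5, 4) returns 0x5 unchanged.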
static inline int find_scale_factor7(int64_t max_value, int bits)
|
||||
|
@ -542,6 +542,9 @@ skip_mean_and_median:
|
||||
ff_thread_await_progress((AVFrame *) s->last_picture_ptr,
|
||||
mb_y, 0);
|
||||
}
|
||||
if (!s->last_picture.motion_val[0] ||
|
||||
!s->last_picture.ref_index[0])
|
||||
goto skip_last_mv;
|
||||
prev_x = s->last_picture.motion_val[0][mot_index][0];
|
||||
prev_y = s->last_picture.motion_val[0][mot_index][1];
|
||||
prev_ref = s->last_picture.ref_index[0][4*mb_xy];
|
||||
@ -556,6 +559,7 @@ skip_mean_and_median:
|
||||
mv_predictor[pred_count][1]= prev_y;
|
||||
ref [pred_count] = prev_ref;
|
||||
pred_count++;
|
||||
skip_last_mv:
|
||||
|
||||
s->mv_dir = MV_DIR_FORWARD;
|
||||
s->mb_intra=0;
|
||||
|
@ -1398,7 +1398,7 @@ AVCodec ff_flac_encoder = {
|
||||
flac_encode_frame,
|
||||
flac_encode_close,
|
||||
NULL,
|
||||
.capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY,
|
||||
.capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY | CODEC_CAP_LOSSLESS,
|
||||
.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
|
||||
.long_name = NULL_IF_CONFIG_SMALL("FLAC (Free Lossless Audio Codec)"),
|
||||
.priv_class = &flac_encoder_class,
|
||||
|
@ -1014,7 +1014,7 @@ int ff_h264_decode_extradata(H264Context *h)
|
||||
{
|
||||
AVCodecContext *avctx = h->s.avctx;
|
||||
|
||||
if(*(char *)avctx->extradata == 1){
|
||||
if(avctx->extradata[0] == 1){
|
||||
int i, cnt, nalsize;
|
||||
unsigned char *p = avctx->extradata;
|
||||
|
||||
@ -1049,7 +1049,7 @@ int ff_h264_decode_extradata(H264Context *h)
|
||||
p += nalsize;
|
||||
}
|
||||
// Now store right nal length size, that will be used to parse all other nals
h->nal_length_size = ((*(((char*)(avctx->extradata))+4))&0x03)+1;
h->nal_length_size = (avctx->extradata[4] & 0x03) + 1;
|
||||
} else {
|
||||
h->is_avc = 0;
|
||||
if(decode_nal_units(h, avctx->extradata, avctx->extradata_size) < 0)
|
||||
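For illustration: the rewritten expression reads the NAL length-size field directly from byte 4 of the avcC extradata, so with the common extradata[4] value of 0xFF, (0xFF & 0x03) + 1 gives 4-byte NAL length prefixes (the actual value depends on the stream).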
@ -2984,7 +2984,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
h0->last_slice_type = slice_type;
|
||||
h->slice_num = ++h0->current_slice;
|
||||
if(h->slice_num >= MAX_SLICES){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Too many slices, increase MAX_SLICES and recompile\n");
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Too many slices (%d >= %d), increase MAX_SLICES and recompile\n", h->slice_num, MAX_SLICES);
|
||||
}
|
||||
|
||||
for(j=0; j<2; j++){
|
||||
@ -3690,6 +3690,8 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
switch (hx->nal_unit_type) {
|
||||
case NAL_SPS:
|
||||
case NAL_PPS:
|
||||
case NAL_IDR_SLICE:
|
||||
case NAL_SLICE:
|
||||
nals_needed = nal_index;
|
||||
}
|
||||
continue;
|
||||
@ -3794,8 +3796,8 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
init_get_bits(&s->gb, ptr, bit_length);
|
||||
ff_h264_decode_seq_parameter_set(h);
|
||||
|
||||
if(s->flags& CODEC_FLAG_LOW_DELAY ||
|
||||
(h->sps.bitstream_restriction_flag && !h->sps.num_reorder_frames))
|
||||
if (s->flags& CODEC_FLAG_LOW_DELAY ||
|
||||
(h->sps.bitstream_restriction_flag && !h->sps.num_reorder_frames))
|
||||
s->low_delay=1;
|
||||
|
||||
if(avctx->has_b_frames < 2)
|
||||
|
@ -377,7 +377,7 @@ static av_cold int X264_init(AVCodecContext *avctx)
|
||||
|
||||
x4->params.b_interlaced = avctx->flags & CODEC_FLAG_INTERLACED_DCT;
|
||||
|
||||
x4->params.b_open_gop = !(avctx->flags & CODEC_FLAG_CLOSED_GOP);
|
||||
// x4->params.b_open_gop = !(avctx->flags & CODEC_FLAG_CLOSED_GOP);
|
||||
|
||||
x4->params.i_slice_count = avctx->slices;
|
||||
|
||||
|
@ -993,8 +993,11 @@ int ff_mjpeg_decode_sos(MJpegDecodeContext *s,
|
||||
|
||||
predictor= get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
|
||||
ilv= get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
|
||||
prev_shift = get_bits(&s->gb, 4); /* Ah */
|
||||
point_transform= get_bits(&s->gb, 4); /* Al */
|
||||
if(s->avctx->codec_tag != AV_RL32("CJPG")){
|
||||
prev_shift = get_bits(&s->gb, 4); /* Ah */
|
||||
point_transform= get_bits(&s->gb, 4); /* Al */
|
||||
}else
|
||||
prev_shift= point_transform= 0;
|
||||
|
||||
for(i=0;i<nb_components;i++)
|
||||
s->last_dc[i] = 1024;
|
||||
@ -1014,8 +1017,8 @@ int ff_mjpeg_decode_sos(MJpegDecodeContext *s,
|
||||
}
|
||||
|
||||
if(s->avctx->debug & FF_DEBUG_PICT_INFO)
|
||||
av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d %s\n", s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
|
||||
predictor, point_transform, ilv, s->bits,
|
||||
av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s\n", s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
|
||||
predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
|
||||
s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""));
|
||||
|
||||
|
||||
|
@ -66,7 +66,8 @@ static int mpegaudio_parse(AVCodecParserContext *s1,
|
||||
|
||||
ret = ff_mpa_decode_header(avctx, state, &sr, &channels, &frame_size, &bit_rate);
|
||||
if (ret < 4) {
|
||||
s->header_count= -2;
|
||||
if(i > 4)
|
||||
s->header_count= -2;
|
||||
} else {
|
||||
if((state&SAME_HEADER_MASK) != (s->header&SAME_HEADER_MASK) && s->header)
|
||||
s->header_count= -3;
|
||||
|
@ -209,6 +209,7 @@ typedef struct MpegEncContext {
|
||||
|
||||
/* the following codec id fields are deprecated in favor of codec_id */
|
||||
int h263_plus; ///< h263 plus headers
|
||||
int h263_msmpeg4; ///< generate MSMPEG4 compatible stream (deprecated, use msmpeg4_version instead)
|
||||
int h263_flv; ///< use flv h263 header
|
||||
|
||||
enum CodecID codec_id; /* see CODEC_ID_xxx */
|
||||
|
@ -37,6 +37,25 @@ static const char* context_to_name(void* ptr) {
|
||||
return "NULL";
|
||||
}
|
||||
|
||||
static const AVOption *opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
|
||||
{
|
||||
AVCodecContext *s = obj;
|
||||
AVCodec *c = NULL;
|
||||
|
||||
if (s->priv_data) {
|
||||
if (s->codec->priv_class)
|
||||
return av_opt_find(s->priv_data, name, unit, opt_flags, search_flags);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
while ((c = av_codec_next(c))) {
|
||||
const AVOption *o;
|
||||
if (c->priv_class && (o = av_opt_find(&c->priv_class, name, unit, opt_flags, search_flags)))
|
||||
return o;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#define OFFSET(x) offsetof(AVCodecContext,x)
|
||||
#define DEFAULT 0 //should be NAN but it does not work as it is not a constant in glibc as required by ANSI/ISO C
|
||||
//these names are too long to be readable
|
||||
@ -458,7 +477,7 @@ static const AVOption options[]={
|
||||
#undef D
|
||||
#undef DEFAULT
|
||||
|
||||
static const AVClass av_codec_context_class = { "AVCodecContext", context_to_name, options, LIBAVUTIL_VERSION_INT, OFFSET(log_level_offset) };
|
||||
static const AVClass av_codec_context_class = { "AVCodecContext", context_to_name, options, LIBAVUTIL_VERSION_INT, OFFSET(log_level_offset), .opt_find = opt_find};
|
||||
|
||||
void avcodec_get_context_defaults2(AVCodecContext *s, enum AVMediaType codec_type){
|
||||
int flags=0;
|
||||
|
@ -67,7 +67,11 @@ X(\name):
|
||||
|
||||
.macro movrel rd, sym
|
||||
#if CONFIG_PIC
|
||||
lwz \rd, \sym@got(r2)
|
||||
bcl 20, 31, lab_pic_\@
|
||||
lab_pic_\@:
|
||||
mflr \rd
|
||||
addis \rd, \rd, (\sym - lab_pic_\@)@ha
|
||||
addi \rd, \rd, (\sym - lab_pic_\@)@l
|
||||
#else
|
||||
lis \rd, \sym@ha
|
||||
la \rd, \sym@l(\rd)
|
||||
|
@ -175,6 +175,7 @@ typedef struct {
|
||||
DECLARE_ALIGNED(32, float, synth_buf)[MPA_MAX_CHANNELS][512*2];
|
||||
int synth_buf_offset[MPA_MAX_CHANNELS];
|
||||
DECLARE_ALIGNED(32, float, sb_samples)[MPA_MAX_CHANNELS][128][SBLIMIT];
|
||||
DECLARE_ALIGNED(32, float, samples)[MPA_MAX_CHANNELS * MPA_FRAME_SIZE];
|
||||
|
||||
/// Mixed temporary data used in decoding
|
||||
float tone_level[MPA_MAX_CHANNELS][30][64];
|
||||
@ -1598,7 +1599,6 @@ static void qdm2_calculate_fft (QDM2Context *q, int channel, int sub_packet)
|
||||
*/
|
||||
static void qdm2_synthesis_filter (QDM2Context *q, int index)
|
||||
{
|
||||
float samples[MPA_MAX_CHANNELS * MPA_FRAME_SIZE];
|
||||
int i, k, ch, sb_used, sub_sampling, dither_state = 0;
|
||||
|
||||
/* copy sb_samples */
|
||||
@ -1610,7 +1610,7 @@ static void qdm2_synthesis_filter (QDM2Context *q, int index)
|
||||
q->sb_samples[ch][(8 * index) + i][k] = 0;
|
||||
|
||||
for (ch = 0; ch < q->nb_channels; ch++) {
|
||||
float *samples_ptr = samples + ch;
|
||||
float *samples_ptr = q->samples + ch;
|
||||
|
||||
for (i = 0; i < 8; i++) {
|
||||
ff_mpa_synth_filter_float(&q->mpadsp,
|
||||
@ -1627,7 +1627,7 @@ static void qdm2_synthesis_filter (QDM2Context *q, int index)
|
||||
|
||||
for (ch = 0; ch < q->channels; ch++)
|
||||
for (i = 0; i < q->frame_size; i++)
|
||||
q->output_buffer[q->channels * i + ch] += (1 << 23) * samples[q->nb_channels * sub_sampling * i + ch];
|
||||
q->output_buffer[q->channels * i + ch] += (1 << 23) * q->samples[q->nb_channels * sub_sampling * i + ch];
|
||||
}
|
||||
|
||||
|
||||
|
@ -239,8 +239,8 @@ ReSampleContext *av_audio_resample_init(int output_channels, int input_channels,
|
||||
|
||||
s->sample_fmt[0] = sample_fmt_in;
|
||||
s->sample_fmt[1] = sample_fmt_out;
|
||||
s->sample_size[0] = av_get_bits_per_sample_fmt(s->sample_fmt[0]) >> 3;
|
||||
s->sample_size[1] = av_get_bits_per_sample_fmt(s->sample_fmt[1]) >> 3;
|
||||
s->sample_size[0] = av_get_bytes_per_sample(s->sample_fmt[0]);
|
||||
s->sample_size[1] = av_get_bytes_per_sample(s->sample_fmt[1]);
|
||||
|
||||
if (s->sample_fmt[0] != AV_SAMPLE_FMT_S16) {
|
||||
if (!(s->convert_ctx[0] = av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
|
||||
|
@ -23,6 +23,7 @@
|
||||
|
||||
#include <math.h>
|
||||
#include <stdint.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "libavutil/mathematics.h"
|
||||
#include "avcodec.h"
|
||||
@ -461,7 +462,7 @@ static void decode_frame(SiprContext *ctx, SiprParameters *params,
|
||||
memcpy(ctx->postfilter_syn5k0, ctx->postfilter_syn5k0 + frame_size,
|
||||
LP_FILTER_ORDER*sizeof(float));
|
||||
}
|
||||
memcpy(ctx->excitation, excitation - PITCH_DELAY_MAX - L_INTERPOL,
|
||||
memmove(ctx->excitation, excitation - PITCH_DELAY_MAX - L_INTERPOL,
|
||||
(PITCH_DELAY_MAX + L_INTERPOL) * sizeof(float));
|
||||
|
||||
ff_acelp_apply_order_2_transfer_function(out_data, synth,
|
||||
@ -495,8 +496,6 @@ static av_cold int sipr_decoder_init(AVCodecContext * avctx)
|
||||
|
||||
avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
|
||||
|
||||
dsputil_init(&ctx->dsp, avctx);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -55,7 +55,6 @@ typedef enum {
|
||||
|
||||
typedef struct {
|
||||
AVCodecContext *avctx;
|
||||
DSPContext dsp;
|
||||
|
||||
SiprMode mode;
|
||||
|
||||
|
@ -1266,7 +1266,7 @@ int av_get_bits_per_sample(enum CodecID codec_id){
|
||||
|
||||
#if FF_API_OLD_SAMPLE_FMT
|
||||
int av_get_bits_per_sample_format(enum AVSampleFormat sample_fmt) {
|
||||
return av_get_bits_per_sample_fmt(sample_fmt);
|
||||
return av_get_bytes_per_sample(sample_fmt) << 3;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -450,7 +450,7 @@ static av_cold int vmdaudio_decode_init(AVCodecContext *avctx)
|
||||
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
else
|
||||
avctx->sample_fmt = AV_SAMPLE_FMT_U8;
|
||||
s->out_bps = av_get_bits_per_sample_fmt(avctx->sample_fmt) >> 3;
|
||||
s->out_bps = av_get_bytes_per_sample(avctx->sample_fmt);
|
||||
|
||||
av_log(avctx, AV_LOG_DEBUG, "%d channels, %d bits/sample, "
|
||||
"block align = %d, sample rate = %d\n",
|
||||
|
@ -1646,7 +1646,7 @@ static int vorbis_decode_frame(AVCodecContext *avccontext,
|
||||
vc->audio_channels);
|
||||
|
||||
*data_size = len * vc->audio_channels *
|
||||
(av_get_bits_per_sample_fmt(avccontext->sample_fmt) / 8);
|
||||
av_get_bytes_per_sample(avccontext->sample_fmt);
|
||||
|
||||
return buf_size ;
|
||||
}
|
||||
|
@ -135,11 +135,11 @@ int ff_thread_init(AVCodecContext *s){
|
||||
return 0;
|
||||
}
|
||||
|
||||
s->active_thread_type= FF_THREAD_SLICE;
|
||||
|
||||
if (s->thread_count <= 1)
|
||||
return 0;
|
||||
|
||||
s->active_thread_type= FF_THREAD_SLICE;
|
||||
|
||||
assert(!s->thread_opaque);
|
||||
c= av_mallocz(sizeof(ThreadContext)*s->thread_count);
|
||||
s->thread_opaque= c;
|
||||
|
@ -45,6 +45,7 @@ MMX-OBJS-$(HAVE_YASM) += x86/dsputil_yasm.o \
|
||||
x86/deinterlace.o \
|
||||
x86/fmtconvert.o \
|
||||
x86/h264_chromamc.o \
|
||||
x86/h264_chromamc_10bit.o \
|
||||
$(YASM-OBJS-yes)
|
||||
|
||||
MMX-OBJS-$(CONFIG_FFT) += x86/fft.o
|
||||
|
148
libavcodec/x86/cabac.h
Normal file
@@ -0,0 +1,148 @@
|
||||
/*
|
||||
* Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
|
||||
*
|
||||
* This file is part of Libav.
|
||||
*
|
||||
* Libav is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* Libav is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with Libav; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef AVCODEC_X86_CABAC_H
|
||||
#define AVCODEC_X86_CABAC_H
|
||||
|
||||
#include "libavcodec/cabac.h"
|
||||
#include "libavutil/attributes.h"
|
||||
#include "libavutil/x86_cpu.h"
|
||||
#include "config.h"
|
||||
|
||||
#if HAVE_FAST_CMOV
|
||||
#define BRANCHLESS_GET_CABAC_UPDATE(ret, cabac, statep, low, lowword, range, tmp)\
|
||||
"mov "tmp" , %%ecx \n\t"\
|
||||
"shl $17 , "tmp" \n\t"\
|
||||
"cmp "low" , "tmp" \n\t"\
|
||||
"cmova %%ecx , "range" \n\t"\
|
||||
"sbb %%ecx , %%ecx \n\t"\
|
||||
"and %%ecx , "tmp" \n\t"\
|
||||
"sub "tmp" , "low" \n\t"\
|
||||
"xor %%ecx , "ret" \n\t"
|
||||
#else /* HAVE_FAST_CMOV */
|
||||
#define BRANCHLESS_GET_CABAC_UPDATE(ret, cabac, statep, low, lowword, range, tmp)\
|
||||
"mov "tmp" , %%ecx \n\t"\
|
||||
"shl $17 , "tmp" \n\t"\
|
||||
"sub "low" , "tmp" \n\t"\
|
||||
"sar $31 , "tmp" \n\t" /*lps_mask*/\
|
||||
"sub %%ecx , "range" \n\t" /*RangeLPS - range*/\
|
||||
"and "tmp" , "range" \n\t" /*(RangeLPS - range)&lps_mask*/\
|
||||
"add %%ecx , "range" \n\t" /*new range*/\
|
||||
"shl $17 , %%ecx \n\t"\
|
||||
"and "tmp" , %%ecx \n\t"\
|
||||
"sub %%ecx , "low" \n\t"\
|
||||
"xor "tmp" , "ret" \n\t"
|
||||
#endif /* HAVE_FAST_CMOV */
|
||||
|
||||
#define BRANCHLESS_GET_CABAC(ret, cabac, statep, low, lowword, range, tmp, tmpbyte, byte) \
|
||||
"movzbl "statep" , "ret" \n\t"\
|
||||
"mov "range" , "tmp" \n\t"\
|
||||
"and $0xC0 , "range" \n\t"\
|
||||
"movzbl "MANGLE(ff_h264_lps_range)"("ret", "range", 2), "range" \n\t"\
|
||||
"sub "range" , "tmp" \n\t"\
|
||||
BRANCHLESS_GET_CABAC_UPDATE(ret, cabac, statep, low, lowword, \
|
||||
range, tmp) \
|
||||
"movzbl " MANGLE(ff_h264_norm_shift) "("range"), %%ecx \n\t"\
|
||||
"shl %%cl , "range" \n\t"\
|
||||
"movzbl "MANGLE(ff_h264_mlps_state)"+128("ret"), "tmp" \n\t"\
|
||||
"mov "tmpbyte" , "statep" \n\t"\
|
||||
"shl %%cl , "low" \n\t"\
|
||||
"test "lowword" , "lowword" \n\t"\
|
||||
" jnz 1f \n\t"\
|
||||
"mov "byte"("cabac"), %%"REG_c" \n\t"\
|
||||
"movzwl (%%"REG_c") , "tmp" \n\t"\
|
||||
"bswap "tmp" \n\t"\
|
||||
"shr $15 , "tmp" \n\t"\
|
||||
"sub $0xFFFF , "tmp" \n\t"\
|
||||
"add $2 , %%"REG_c" \n\t"\
|
||||
"mov %%"REG_c" , "byte "("cabac") \n\t"\
|
||||
"lea -1("low") , %%ecx \n\t"\
|
||||
"xor "low" , %%ecx \n\t"\
|
||||
"shr $15 , %%ecx \n\t"\
|
||||
"movzbl " MANGLE(ff_h264_norm_shift) "(%%ecx), %%ecx \n\t"\
|
||||
"neg %%ecx \n\t"\
|
||||
"add $7 , %%ecx \n\t"\
|
||||
"shl %%cl , "tmp" \n\t"\
|
||||
"add "tmp" , "low" \n\t"\
|
||||
"1: \n\t"
|
||||
|
||||
#if ARCH_X86 && HAVE_7REGS && !defined(BROKEN_RELOCATIONS)
|
||||
#define get_cabac_inline get_cabac_inline_x86
|
||||
static av_always_inline int get_cabac_inline_x86(CABACContext *c,
|
||||
uint8_t *const state)
|
||||
{
|
||||
int bit, low, range, tmp;
|
||||
|
||||
__asm__ volatile(
|
||||
"movl %a6(%5), %2 \n\t"
|
||||
"movl %a7(%5), %1 \n\t"
|
||||
BRANCHLESS_GET_CABAC("%0", "%5", "(%4)", "%1", "%w1", "%2",
|
||||
"%3", "%b3", "%a8")
|
||||
"movl %2, %a6(%5) \n\t"
|
||||
"movl %1, %a7(%5) \n\t"
|
||||
|
||||
:"=&r"(bit), "=&r"(low), "=&r"(range), "=&q"(tmp)
|
||||
:"r"(state), "r"(c),
|
||||
"i"(offsetof(CABACContext, range)), "i"(offsetof(CABACContext, low)),
|
||||
"i"(offsetof(CABACContext, bytestream))
|
||||
: "%"REG_c, "memory"
|
||||
);
|
||||
return bit & 1;
|
||||
}
|
||||
#endif /* ARCH_X86 && HAVE_7REGS && !defined(BROKEN_RELOCATIONS) */
|
||||
|
||||
#define get_cabac_bypass_sign get_cabac_bypass_sign_x86
|
||||
static av_always_inline int get_cabac_bypass_sign_x86(CABACContext *c, int val)
|
||||
{
|
||||
x86_reg tmp;
|
||||
__asm__ volatile(
|
||||
"movl %a3(%2), %k1 \n\t"
|
||||
"movl %a4(%2), %%eax \n\t"
|
||||
"shl $17, %k1 \n\t"
|
||||
"add %%eax, %%eax \n\t"
|
||||
"sub %k1, %%eax \n\t"
|
||||
"cltd \n\t"
|
||||
"and %%edx, %k1 \n\t"
|
||||
"add %k1, %%eax \n\t"
|
||||
"xor %%edx, %%ecx \n\t"
|
||||
"sub %%edx, %%ecx \n\t"
|
||||
"test %%ax, %%ax \n\t"
|
||||
" jnz 1f \n\t"
|
||||
"mov %a5(%2), %1 \n\t"
|
||||
"subl $0xFFFF, %%eax \n\t"
|
||||
"movzwl (%1), %%edx \n\t"
|
||||
"bswap %%edx \n\t"
|
||||
"shrl $15, %%edx \n\t"
|
||||
"add $2, %1 \n\t"
|
||||
"addl %%edx, %%eax \n\t"
|
||||
"mov %1, %a5(%2) \n\t"
|
||||
"1: \n\t"
|
||||
"movl %%eax, %a4(%2) \n\t"
|
||||
|
||||
:"+c"(val), "=&r"(tmp)
|
||||
:"r"(c),
|
||||
"i"(offsetof(CABACContext, range)), "i"(offsetof(CABACContext, low)),
|
||||
"i"(offsetof(CABACContext, bytestream))
|
||||
: "%eax", "%edx", "memory"
|
||||
);
|
||||
return val;
|
||||
}
|
||||
|
||||
#endif /* AVCODEC_X86_CABAC_H */
|
@ -1842,6 +1842,19 @@ void ff_avg_h264_chroma_mc8_ssse3_rnd (uint8_t *dst, uint8_t *src,
|
||||
void ff_avg_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
|
||||
int stride, int h, int x, int y);
|
||||
|
||||
#define CHROMA_MC(OP, NUM, DEPTH, OPT) \
|
||||
void ff_ ## OP ## _h264_chroma_mc ## NUM ## _ ## DEPTH ## _ ## OPT \
|
||||
(uint8_t *dst, uint8_t *src,\
|
||||
int stride, int h, int x, int y);
|
||||
|
||||
CHROMA_MC(put, 2, 10, mmxext)
|
||||
CHROMA_MC(avg, 2, 10, mmxext)
|
||||
CHROMA_MC(put, 4, 10, mmxext)
|
||||
CHROMA_MC(avg, 4, 10, mmxext)
|
||||
CHROMA_MC(put, 8, 10, sse2)
|
||||
CHROMA_MC(avg, 8, 10, sse2)
|
||||
CHROMA_MC(put, 8, 10, avx)
|
||||
CHROMA_MC(avg, 8, 10, avx)
|
||||
|
||||
/* CAVS specific */
|
||||
void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
|
||||
@ -2324,6 +2337,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
|
||||
{
|
||||
int mm_flags = av_get_cpu_flags();
|
||||
const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
|
||||
const int bit_depth = avctx->bits_per_raw_sample;
|
||||
|
||||
if (avctx->dsp_mask) {
|
||||
if (avctx->dsp_mask & AV_CPU_FLAG_FORCE)
|
||||
@ -2554,6 +2568,12 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
|
||||
c->avg_h264_chroma_pixels_tab[2]= ff_avg_h264_chroma_mc2_mmx2;
|
||||
c->put_h264_chroma_pixels_tab[2]= ff_put_h264_chroma_mc2_mmx2;
|
||||
}
|
||||
if (bit_depth == 10) {
|
||||
c->put_h264_chroma_pixels_tab[2]= ff_put_h264_chroma_mc2_10_mmxext;
|
||||
c->avg_h264_chroma_pixels_tab[2]= ff_avg_h264_chroma_mc2_10_mmxext;
|
||||
c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_10_mmxext;
|
||||
c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_10_mmxext;
|
||||
}
|
||||
|
||||
c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
|
||||
#endif
|
||||
@ -2658,6 +2678,12 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
|
||||
H264_QPEL_FUNCS(3, 2, sse2);
|
||||
H264_QPEL_FUNCS(3, 3, sse2);
|
||||
}
|
||||
#if HAVE_YASM
|
||||
if (bit_depth == 10) {
|
||||
c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_10_sse2;
|
||||
c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_10_sse2;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
#if HAVE_SSSE3
|
||||
if(mm_flags & AV_CPU_FLAG_SSSE3){
|
||||
@ -2755,6 +2781,14 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
|
||||
}
|
||||
#endif
|
||||
}
|
||||
#if HAVE_AVX && HAVE_YASM
|
||||
if (mm_flags & AV_CPU_FLAG_AVX) {
|
||||
if (bit_depth == 10) {
|
||||
c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_10_avx;
|
||||
c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_10_avx;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
if (CONFIG_ENCODERS)
|
||||
|
273
libavcodec/x86/h264_chromamc_10bit.asm
Normal file
@@ -0,0 +1,273 @@
|
||||
;*****************************************************************************
|
||||
;* MMX/SSE2/AVX-optimized 10-bit H.264 chroma MC code
|
||||
;*****************************************************************************
|
||||
;* Copyright (C) 2005-2011 x264 project
|
||||
;*
|
||||
;* Authors: Daniel Kang <daniel.d.kang@gmail.com>
|
||||
;*
|
||||
;* This file is part of Libav.
|
||||
;*
|
||||
;* Libav is free software; you can redistribute it and/or
|
||||
;* modify it under the terms of the GNU Lesser General Public
|
||||
;* License as published by the Free Software Foundation; either
|
||||
;* version 2.1 of the License, or (at your option) any later version.
|
||||
;*
|
||||
;* Libav is distributed in the hope that it will be useful,
|
||||
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
;* Lesser General Public License for more details.
|
||||
;*
|
||||
;* You should have received a copy of the GNU Lesser General Public
|
||||
;* License along with Libav; if not, write to the Free Software
|
||||
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
;******************************************************************************
|
||||
|
||||
%include "x86inc.asm"
|
||||
%include "x86util.asm"
|
||||
|
||||
SECTION_RODATA
|
||||
|
||||
cextern pw_4
|
||||
cextern pw_8
|
||||
cextern pw_32
|
||||
cextern pw_64
|
||||
|
||||
SECTION .text
|
||||
|
||||
|
||||
%macro MV0_PIXELS_MC8 0
|
||||
lea r4, [r2*3 ]
|
||||
lea r5, [r2*4 ]
|
||||
.next4rows
|
||||
movu m0, [r1 ]
|
||||
movu m1, [r1+r2 ]
|
||||
CHROMAMC_AVG m0, [r0 ]
|
||||
CHROMAMC_AVG m1, [r0+r2 ]
|
||||
mova [r0 ], m0
|
||||
mova [r0+r2 ], m1
|
||||
movu m0, [r1+r2*2]
|
||||
movu m1, [r1+r4 ]
|
||||
CHROMAMC_AVG m0, [r0+r2*2]
|
||||
CHROMAMC_AVG m1, [r0+r4 ]
|
||||
mova [r0+r2*2], m0
|
||||
mova [r0+r4 ], m1
|
||||
add r1, r5
|
||||
add r0, r5
|
||||
sub r3d, 4
|
||||
jne .next4rows
|
||||
%endmacro
|
||||
|
||||
;-----------------------------------------------------------------------------
|
||||
; void put/avg_h264_chroma_mc8(pixel *dst, pixel *src, int stride, int h, int mx, int my)
|
||||
;-----------------------------------------------------------------------------
|
||||
%macro CHROMA_MC8 2
|
||||
; put/avg_h264_chroma_mc8_*(uint8_t *dst /*align 8*/, uint8_t *src /*align 1*/,
|
||||
; int stride, int h, int mx, int my)
|
||||
cglobal %1_h264_chroma_mc8_10_%2, 6,7,8
|
||||
movsxdifnidn r2, r2d
|
||||
mov r6d, r5d
|
||||
or r6d, r4d
|
||||
jne .at_least_one_non_zero
|
||||
; mx == 0 AND my == 0 - no filter needed
|
||||
MV0_PIXELS_MC8
|
||||
REP_RET
|
||||
|
||||
.at_least_one_non_zero
|
||||
mov r6d, 2
|
||||
test r5d, r5d
|
||||
je .x_interpolation
|
||||
mov r6, r2 ; dxy = x ? 1 : stride
|
||||
test r4d, r4d
|
||||
jne .xy_interpolation
|
||||
.x_interpolation
|
||||
; mx == 0 XOR my == 0 - 1 dimensional filter only
|
||||
or r4d, r5d ; x + y
|
||||
movd m5, r4d
|
||||
mova m4, [pw_8]
|
||||
mova m6, [pw_4] ; mm6 = rnd >> 3
|
||||
SPLATW m5, m5 ; mm5 = B = x
|
||||
psubw m4, m5 ; mm4 = A = 8-x
|
||||
|
||||
.next1drow
|
||||
movu m0, [r1 ] ; mm0 = src[0..7]
|
||||
movu m2, [r1+r6] ; mm2 = src[1..8]
|
||||
|
||||
pmullw m0, m4 ; mm0 = A * src[0..7]
|
||||
pmullw m2, m5 ; mm2 = B * src[1..8]
|
||||
|
||||
paddw m0, m6
|
||||
paddw m0, m2
|
||||
psrlw m0, 3
|
||||
CHROMAMC_AVG m0, [r0]
|
||||
mova [r0], m0 ; dst[0..7] = (A * src[0..7] + B * src[1..8] + (rnd >> 3)) >> 3
|
||||
|
||||
add r0, r2
|
||||
add r1, r2
|
||||
dec r3d
|
||||
jne .next1drow
|
||||
REP_RET
|
||||
|
||||
.xy_interpolation ; general case, bilinear
|
||||
movd m4, r4m ; x
|
||||
movd m6, r5m ; y
|
||||
|
||||
SPLATW m4, m4 ; mm4 = x words
|
||||
SPLATW m6, m6 ; mm6 = y words
|
||||
psllw m5, m4, 3 ; mm5 = 8x
|
||||
pmullw m4, m6 ; mm4 = x * y
|
||||
psllw m6, 3 ; mm6 = 8y
|
||||
paddw m1, m5, m6 ; mm7 = 8x+8y
|
||||
mova m7, m4 ; DD = x * y
|
||||
psubw m5, m4 ; mm5 = B = 8x - xy
|
||||
psubw m6, m4 ; mm6 = C = 8y - xy
|
||||
paddw m4, [pw_64]
|
||||
psubw m4, m1 ; mm4 = A = xy - (8x+8y) + 64
|
||||
|
||||
movu m0, [r1 ] ; mm0 = src[0..7]
|
||||
movu m1, [r1+2] ; mm1 = src[1..8]
|
||||
.next2drow
|
||||
add r1, r2
|
||||
|
||||
pmullw m2, m0, m4
|
||||
pmullw m1, m5
|
||||
paddw m2, m1 ; mm2 = A * src[0..7] + B * src[1..8]
|
||||
|
||||
movu m0, [r1]
|
||||
movu m1, [r1+2]
|
||||
pmullw m3, m0, m6
paddw m2, m3 ; mm2 += C * src[0..7+stride]
pmullw m3, m1, m7
paddw m2, m3 ; mm2 += D * src[1..8+stride]
|
||||
|
||||
paddw m2, [pw_32]
|
||||
psrlw m2, 6
|
||||
CHROMAMC_AVG m2, [r0]
|
||||
mova [r0], m2 ; dst[0..7] = (mm2 + 32) >> 6
|
||||
|
||||
add r0, r2
|
||||
dec r3d
|
||||
jne .next2drow
|
||||
REP_RET
|
||||
%endmacro

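For reference, the A/B/C/D weights annotated in the macro above are the standard H.264 bilinear chroma interpolation with 1/8-pel offsets x and y. A plain-C sketch of the general (x && y) case for one 8-pixel-wide block follows; it is an illustration with assumed names and a uint16_t pixel type for the 10-bit case, not the libavcodec C template, and stride is counted in pixels:

    #include <stdint.h>

    static void put_chroma_mc8_ref(uint16_t *dst, const uint16_t *src,
                                   int stride, int h, int x, int y)
    {
        const int A = (8 - x) * (8 - y);
        const int B =      x  * (8 - y);
        const int C = (8 - x) *      y;
        const int D =      x  *      y;
        int i, j;

        for (j = 0; j < h; j++) {
            for (i = 0; i < 8; i++)
                dst[i] = (A * src[i]          + B * src[i + 1] +
                          C * src[i + stride] + D * src[i + stride + 1] + 32) >> 6;
            dst += stride;
            src += stride;
        }
    }
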
;-----------------------------------------------------------------------------
|
||||
; void put/avg_h264_chroma_mc4(pixel *dst, pixel *src, int stride, int h, int mx, int my)
|
||||
;-----------------------------------------------------------------------------
|
||||
;TODO: xmm mc4
|
||||
%macro MC4_OP 2
|
||||
movq %1, [r1 ]
|
||||
movq m1, [r1+2]
|
||||
add r1, r2
|
||||
pmullw %1, m4
|
||||
pmullw m1, m2
|
||||
paddw m1, %1
|
||||
mova %1, m1
|
||||
|
||||
pmullw %2, m5
|
||||
pmullw m1, m3
|
||||
paddw %2, [pw_32]
|
||||
paddw m1, %2
|
||||
psrlw m1, 6
|
||||
CHROMAMC_AVG m1, %2, [r0]
|
||||
movq [r0], m1
|
||||
add r0, r2
|
||||
%endmacro
|
||||
|
||||
%macro CHROMA_MC4 2
|
||||
cglobal %1_h264_chroma_mc4_10_%2, 6,6,7
|
||||
movsxdifnidn r2, r2d
|
||||
movd m2, r4m ; x
|
||||
movd m3, r5m ; y
|
||||
mova m4, [pw_8]
|
||||
mova m5, m4
|
||||
SPLATW m2, m2
|
||||
SPLATW m3, m3
|
||||
psubw m4, m2
|
||||
psubw m5, m3
|
||||
|
||||
movq m0, [r1 ]
|
||||
movq m6, [r1+2]
|
||||
add r1, r2
|
||||
pmullw m0, m4
|
||||
pmullw m6, m2
|
||||
paddw m6, m0
|
||||
|
||||
.next2rows
|
||||
MC4_OP m0, m6
|
||||
MC4_OP m6, m0
|
||||
sub r3d, 2
|
||||
jnz .next2rows
|
||||
REP_RET
|
||||
%endmacro
|
||||
|
||||
;-----------------------------------------------------------------------------
|
||||
; void put/avg_h264_chroma_mc2(pixel *dst, pixel *src, int stride, int h, int mx, int my)
|
||||
;-----------------------------------------------------------------------------
|
||||
%macro CHROMA_MC2 2
|
||||
cglobal %1_h264_chroma_mc2_10_%2, 6,7
|
||||
movsxdifnidn r2, r2d
|
||||
mov r6d, r4d
|
||||
shl r4d, 16
|
||||
sub r4d, r6d
|
||||
add r4d, 8
|
||||
imul r5d, r4d ; x*y<<16 | y*(8-x)
|
||||
shl r4d, 3
|
||||
sub r4d, r5d ; x*(8-y)<<16 | (8-x)*(8-y)
|
||||
|
||||
movd m5, r4d
|
||||
movd m6, r5d
|
||||
punpckldq m5, m5 ; mm5 = {A,B,A,B}
|
||||
punpckldq m6, m6 ; mm6 = {C,D,C,D}
|
||||
pxor m7, m7
|
||||
pshufw m2, [r1], 0x94 ; mm0 = src[0,1,1,2]
|
||||
|
||||
.nextrow
|
||||
add r1, r2
|
||||
movq m1, m2
|
||||
pmaddwd m1, m5 ; mm1 = A * src[0,1] + B * src[1,2]
|
||||
pshufw m0, [r1], 0x94 ; mm0 = src[0,1,1,2]
|
||||
movq m2, m0
|
||||
pmaddwd m0, m6
|
||||
paddw m1, [pw_32]
|
||||
paddw m1, m0 ; mm1 += C * src[0,1] + D * src[1,2]
|
||||
psrlw m1, 6
|
||||
packssdw m1, m7
|
||||
CHROMAMC_AVG m1, m3, [r0]
|
||||
movd [r0], m1
|
||||
add r0, r2
|
||||
dec r3d
|
||||
jnz .nextrow
|
||||
REP_RET
|
||||
%endmacro
|
||||
|
||||
%macro NOTHING 2-3
|
||||
%endmacro
|
||||
%macro AVG 2-3
|
||||
%if %0==3
|
||||
movq %2, %3
|
||||
%endif
|
||||
PAVG %1, %2
|
||||
%endmacro
|
||||
|
||||
%define CHROMAMC_AVG NOTHING
|
||||
INIT_XMM
|
||||
CHROMA_MC8 put, sse2
|
||||
%ifdef HAVE_AVX
|
||||
INIT_AVX
|
||||
CHROMA_MC8 put, avx
|
||||
%endif
|
||||
INIT_MMX
|
||||
CHROMA_MC4 put, mmxext
|
||||
CHROMA_MC2 put, mmxext
|
||||
|
||||
%define CHROMAMC_AVG AVG
|
||||
%define PAVG pavgw
|
||||
INIT_XMM
|
||||
CHROMA_MC8 avg, sse2
|
||||
%ifdef HAVE_AVX
|
||||
INIT_AVX
|
||||
CHROMA_MC8 avg, avx
|
||||
%endif
|
||||
INIT_MMX
|
||||
CHROMA_MC4 avg, mmxext
|
||||
CHROMA_MC2 avg, mmxext
|
@ -386,8 +386,10 @@ cglobal deblock_h_luma_8_%1, 5,7
|
||||
|
||||
INIT_XMM
|
||||
DEBLOCK_LUMA sse2
|
||||
%ifdef HAVE_AVX
|
||||
INIT_AVX
|
||||
DEBLOCK_LUMA avx
|
||||
%endif
|
||||
|
||||
%else
|
||||
|
||||
@ -506,8 +508,10 @@ INIT_MMX
|
||||
DEBLOCK_LUMA mmxext, v8, 8
|
||||
INIT_XMM
|
||||
DEBLOCK_LUMA sse2, v, 16
|
||||
%ifdef HAVE_AVX
|
||||
INIT_AVX
|
||||
DEBLOCK_LUMA avx, v, 16
|
||||
%endif
|
||||
|
||||
%endif ; ARCH
|
||||
|
||||
@ -778,8 +782,10 @@ cglobal deblock_h_luma_intra_8_%1, 2,4
|
||||
|
||||
INIT_XMM
|
||||
DEBLOCK_LUMA_INTRA sse2, v
|
||||
%ifdef HAVE_AVX
|
||||
INIT_AVX
|
||||
DEBLOCK_LUMA_INTRA avx , v
|
||||
%endif
|
||||
%ifndef ARCH_X86_64
|
||||
INIT_MMX
|
||||
DEBLOCK_LUMA_INTRA mmxext, v8
|
||||
|
@ -419,9 +419,11 @@ cglobal deblock_h_luma_10_%1, 5,7,15
|
||||
|
||||
INIT_XMM
|
||||
DEBLOCK_LUMA_64 sse2
|
||||
%ifdef HAVE_AVX
|
||||
INIT_AVX
|
||||
DEBLOCK_LUMA_64 avx
|
||||
%endif
|
||||
%endif
|
||||
|
||||
%macro SWAPMOVA 2
|
||||
%ifid %1
|
||||
@ -714,8 +716,10 @@ cglobal deblock_h_luma_intra_10_%1, 4,7,16
|
||||
|
||||
INIT_XMM
|
||||
DEBLOCK_LUMA_INTRA_64 sse2
|
||||
%ifdef HAVE_AVX
|
||||
INIT_AVX
|
||||
DEBLOCK_LUMA_INTRA_64 avx
|
||||
%endif
|
||||
|
||||
%endif
|
||||
|
||||
@ -799,10 +803,12 @@ DEBLOCK_LUMA_INTRA mmxext
|
||||
INIT_XMM
|
||||
DEBLOCK_LUMA sse2
|
||||
DEBLOCK_LUMA_INTRA sse2
|
||||
%ifdef HAVE_AVX
|
||||
INIT_AVX
|
||||
DEBLOCK_LUMA avx
|
||||
DEBLOCK_LUMA_INTRA avx
|
||||
%endif
|
||||
%endif
|
||||
|
||||
; in: %1=p0, %2=q0, %3=p1, %4=q1, %5=mask, %6=tmp, %7=tmp
|
||||
; out: %1=p0', %2=q0'
|
||||
@ -913,5 +919,7 @@ DEBLOCK_CHROMA mmxext
|
||||
%endif
|
||||
INIT_XMM
|
||||
DEBLOCK_CHROMA sse2
|
||||
%ifdef HAVE_AVX
|
||||
INIT_AVX
|
||||
DEBLOCK_CHROMA avx
|
||||
%endif
|
||||
|
@ -29,63 +29,72 @@
|
||||
#ifndef AVCODEC_X86_H264_I386_H
|
||||
#define AVCODEC_X86_H264_I386_H
|
||||
|
||||
#include <stddef.h>
|
||||
|
||||
#include "libavcodec/cabac.h"
|
||||
#include "cabac.h"
|
||||
|
||||
//FIXME use some macros to avoid duplicating get_cabac (cannot be done yet
|
||||
//as that would make optimization work hard)
|
||||
#if ARCH_X86 && HAVE_7REGS && HAVE_EBX_AVAILABLE && !defined(BROKEN_RELOCATIONS)
|
||||
#if ARCH_X86 && HAVE_7REGS && !defined(BROKEN_RELOCATIONS)
|
||||
static int decode_significance_x86(CABACContext *c, int max_coeff,
|
||||
uint8_t *significant_coeff_ctx_base,
|
||||
int *index, x86_reg last_off){
|
||||
void *end= significant_coeff_ctx_base + max_coeff - 1;
|
||||
int minusstart= -(int)significant_coeff_ctx_base;
|
||||
int minusindex= 4-(int)index;
|
||||
int coeff_count;
|
||||
int minusstart= -(intptr_t)significant_coeff_ctx_base;
|
||||
int minusindex= 4-(intptr_t)index;
|
||||
int bit;
|
||||
x86_reg coeff_count;
|
||||
int low;
|
||||
int range;
|
||||
__asm__ volatile(
|
||||
"movl "RANGE "(%3), %%esi \n\t"
|
||||
"movl "LOW "(%3), %%ebx \n\t"
|
||||
"movl %a11(%6), %5 \n\t"
|
||||
"movl %a12(%6), %3 \n\t"
|
||||
|
||||
"2: \n\t"
|
||||
|
||||
BRANCHLESS_GET_CABAC("%%edx", "%3", "(%1)", "%%ebx",
|
||||
"%%bx", "%%esi", "%%eax", "%%al")
|
||||
BRANCHLESS_GET_CABAC("%4", "%6", "(%1)", "%3",
|
||||
"%w3", "%5", "%k0", "%b0", "%a13")
|
||||
|
||||
"test $1, %%edx \n\t"
|
||||
"test $1, %4 \n\t"
|
||||
" jz 3f \n\t"
|
||||
"add %7, %1 \n\t"
|
||||
"add %10, %1 \n\t"
|
||||
|
||||
BRANCHLESS_GET_CABAC("%%edx", "%3", "(%1)", "%%ebx",
|
||||
"%%bx", "%%esi", "%%eax", "%%al")
|
||||
BRANCHLESS_GET_CABAC("%4", "%6", "(%1)", "%3",
|
||||
"%w3", "%5", "%k0", "%b0", "%a13")
|
||||
|
||||
"sub %7, %1 \n\t"
|
||||
"mov %2, %%"REG_a" \n\t"
|
||||
"movl %4, %%ecx \n\t"
|
||||
"sub %10, %1 \n\t"
|
||||
"mov %2, %0 \n\t"
|
||||
"movl %7, %%ecx \n\t"
|
||||
"add %1, %%"REG_c" \n\t"
|
||||
"movl %%ecx, (%%"REG_a") \n\t"
|
||||
"movl %%ecx, (%0) \n\t"
|
||||
|
||||
"test $1, %%edx \n\t"
|
||||
"test $1, %4 \n\t"
|
||||
" jnz 4f \n\t"
|
||||
|
||||
"add $4, %%"REG_a" \n\t"
|
||||
"mov %%"REG_a", %2 \n\t"
|
||||
"add $4, %0 \n\t"
|
||||
"mov %0, %2 \n\t"
|
||||
|
||||
"3: \n\t"
|
||||
"add $1, %1 \n\t"
|
||||
"cmp %5, %1 \n\t"
|
||||
"cmp %8, %1 \n\t"
|
||||
" jb 2b \n\t"
|
||||
"mov %2, %%"REG_a" \n\t"
|
||||
"movl %4, %%ecx \n\t"
|
||||
"mov %2, %0 \n\t"
|
||||
"movl %7, %%ecx \n\t"
|
||||
"add %1, %%"REG_c" \n\t"
|
||||
"movl %%ecx, (%%"REG_a") \n\t"
|
||||
"movl %%ecx, (%0) \n\t"
|
||||
"4: \n\t"
|
||||
"add %6, %%eax \n\t"
|
||||
"shr $2, %%eax \n\t"
|
||||
"add %9, %k0 \n\t"
|
||||
"shr $2, %k0 \n\t"
|
||||
|
||||
"movl %%esi, "RANGE "(%3) \n\t"
|
||||
"movl %%ebx, "LOW "(%3) \n\t"
|
||||
:"=&a"(coeff_count), "+r"(significant_coeff_ctx_base), "+m"(index)
|
||||
:"r"(c), "m"(minusstart), "m"(end), "m"(minusindex), "m"(last_off)
|
||||
: "%"REG_c, "%ebx", "%edx", "%esi", "memory"
|
||||
"movl %5, %a11(%6) \n\t"
|
||||
"movl %3, %a12(%6) \n\t"
|
||||
:"=&q"(coeff_count), "+r"(significant_coeff_ctx_base), "+m"(index),
|
||||
"=&r"(low), "=&r"(bit), "=&r"(range)
|
||||
:"r"(c), "m"(minusstart), "m"(end), "m"(minusindex), "m"(last_off),
|
||||
"i"(offsetof(CABACContext, range)), "i"(offsetof(CABACContext, low)),
|
||||
"i"(offsetof(CABACContext, bytestream))
|
||||
: "%"REG_c, "memory"
|
||||
);
|
||||
return coeff_count;
|
||||
}
|
||||
@ -93,64 +102,70 @@ static int decode_significance_x86(CABACContext *c, int max_coeff,
|
||||
static int decode_significance_8x8_x86(CABACContext *c,
|
||||
uint8_t *significant_coeff_ctx_base,
|
||||
int *index, x86_reg last_off, const uint8_t *sig_off){
|
||||
int minusindex= 4-(int)index;
|
||||
int coeff_count;
|
||||
int minusindex= 4-(intptr_t)index;
|
||||
int bit;
|
||||
x86_reg coeff_count;
|
||||
int low;
|
||||
int range;
|
||||
x86_reg last=0;
|
||||
x86_reg state;
|
||||
__asm__ volatile(
|
||||
"movl "RANGE "(%3), %%esi \n\t"
|
||||
"movl "LOW "(%3), %%ebx \n\t"
|
||||
"movl %a12(%7), %5 \n\t"
|
||||
"movl %a13(%7), %3 \n\t"
|
||||
|
||||
"mov %1, %%"REG_D" \n\t"
|
||||
"mov %1, %6 \n\t"
|
||||
"2: \n\t"
|
||||
|
||||
"mov %6, %%"REG_a" \n\t"
|
||||
"movzbl (%%"REG_a", %%"REG_D"), %%edi \n\t"
|
||||
"add %5, %%"REG_D" \n\t"
|
||||
"mov %10, %0 \n\t"
|
||||
"movzbl (%0, %6), %k6 \n\t"
|
||||
"add %9, %6 \n\t"
|
||||
|
||||
BRANCHLESS_GET_CABAC("%%edx", "%3", "(%%"REG_D")", "%%ebx",
|
||||
"%%bx", "%%esi", "%%eax", "%%al")
|
||||
BRANCHLESS_GET_CABAC("%4", "%7", "(%6)", "%3",
|
||||
"%w3", "%5", "%k0", "%b0", "%a14")
|
||||
|
||||
"mov %1, %%edi \n\t"
|
||||
"test $1, %%edx \n\t"
|
||||
"mov %1, %k6 \n\t"
|
||||
"test $1, %4 \n\t"
|
||||
" jz 3f \n\t"
|
||||
|
||||
"movzbl "MANGLE(last_coeff_flag_offset_8x8)"(%%edi), %%edi\n\t"
|
||||
"add %5, %%"REG_D" \n\t"
|
||||
"add %7, %%"REG_D" \n\t"
|
||||
"movzbl "MANGLE(last_coeff_flag_offset_8x8)"(%k6), %k6\n\t"
|
||||
"add %9, %6 \n\t"
|
||||
"add %11, %6 \n\t"
|
||||
|
||||
BRANCHLESS_GET_CABAC("%%edx", "%3", "(%%"REG_D")", "%%ebx",
|
||||
"%%bx", "%%esi", "%%eax", "%%al")
|
||||
BRANCHLESS_GET_CABAC("%4", "%7", "(%6)", "%3",
|
||||
"%w3", "%5", "%k0", "%b0", "%a14")
|
||||
|
||||
"mov %2, %%"REG_a" \n\t"
|
||||
"mov %1, %%edi \n\t"
|
||||
"movl %%edi, (%%"REG_a") \n\t"
|
||||
"mov %2, %0 \n\t"
|
||||
"mov %1, %k6 \n\t"
|
||||
"movl %k6, (%0) \n\t"
|
||||
|
||||
"test $1, %%edx \n\t"
|
||||
"test $1, %4 \n\t"
|
||||
" jnz 4f \n\t"
|
||||
|
||||
"add $4, %%"REG_a" \n\t"
|
||||
"mov %%"REG_a", %2 \n\t"
|
||||
"add $4, %0 \n\t"
|
||||
"mov %0, %2 \n\t"
|
||||
|
||||
"3: \n\t"
|
||||
"addl $1, %%edi \n\t"
|
||||
"mov %%edi, %1 \n\t"
|
||||
"cmpl $63, %%edi \n\t"
|
||||
"addl $1, %k6 \n\t"
|
||||
"mov %k6, %1 \n\t"
|
||||
"cmpl $63, %k6 \n\t"
|
||||
" jb 2b \n\t"
|
||||
"mov %2, %%"REG_a" \n\t"
|
||||
"movl %%edi, (%%"REG_a") \n\t"
|
||||
"mov %2, %0 \n\t"
|
||||
"movl %k6, (%0) \n\t"
|
||||
"4: \n\t"
|
||||
"addl %4, %%eax \n\t"
|
||||
"shr $2, %%eax \n\t"
|
||||
"addl %8, %k0 \n\t"
|
||||
"shr $2, %k0 \n\t"
|
||||
|
||||
"movl %%esi, "RANGE "(%3) \n\t"
|
||||
"movl %%ebx, "LOW "(%3) \n\t"
|
||||
:"=&a"(coeff_count),"+m"(last), "+m"(index)
|
||||
:"r"(c), "m"(minusindex), "m"(significant_coeff_ctx_base), "m"(sig_off), "m"(last_off)
|
||||
: "%"REG_c, "%ebx", "%edx", "%esi", "%"REG_D, "memory"
|
||||
"movl %5, %a12(%7) \n\t"
|
||||
"movl %3, %a13(%7) \n\t"
|
||||
:"=&q"(coeff_count),"+m"(last), "+m"(index), "=&r"(low), "=&r"(bit),
|
||||
"=&r"(range), "=&r"(state)
|
||||
:"r"(c), "m"(minusindex), "m"(significant_coeff_ctx_base), "m"(sig_off), "m"(last_off),
|
||||
"i"(offsetof(CABACContext, range)), "i"(offsetof(CABACContext, low)),
|
||||
"i"(offsetof(CABACContext, bytestream))
|
||||
: "%"REG_c, "memory"
|
||||
);
|
||||
return coeff_count;
|
||||
}
|
||||
#endif /* ARCH_X86 && HAVE_7REGS && HAVE_EBX_AVAILABLE */
|
||||
/* !defined(BROKEN_RELOCATIONS) */
|
||||
#endif /* ARCH_X86 && HAVE_7REGS && !defined(BROKEN_RELOCATIONS) */
|
||||
|
||||
#endif /* AVCODEC_X86_H264_I386_H */
|
||||
|
@ -418,7 +418,7 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth)
|
||||
c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_ssse3;
|
||||
c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_ssse3;
|
||||
}
|
||||
if (mm_flags&AV_CPU_FLAG_AVX) {
|
||||
if (HAVE_AVX && mm_flags&AV_CPU_FLAG_AVX) {
|
||||
#if HAVE_ALIGNED_STACK
|
||||
c->h264_v_loop_filter_luma = ff_deblock_v_luma_8_avx;
|
||||
c->h264_h_loop_filter_luma = ff_deblock_h_luma_8_avx;
|
||||
|
@ -6,7 +6,7 @@ FFLIBS-$(CONFIG_MOVIE_FILTER) += avformat avcodec
|
||||
FFLIBS-$(CONFIG_SCALE_FILTER) += swscale
|
||||
FFLIBS-$(CONFIG_MP_FILTER) += avcodec
|
||||
|
||||
HEADERS = avcodec.h avfilter.h avfiltergraph.h vsrc_buffer.h
|
||||
HEADERS = avcodec.h avfilter.h avfiltergraph.h vsink_buffer.h vsrc_buffer.h
|
||||
|
||||
OBJS = allfilters.o \
|
||||
avfilter.o \
|
||||
@ -38,7 +38,11 @@ OBJS-$(CONFIG_FREI0R_FILTER) += vf_frei0r.o
|
||||
OBJS-$(CONFIG_GRADFUN_FILTER) += vf_gradfun.o
|
||||
OBJS-$(CONFIG_HFLIP_FILTER) += vf_hflip.o
|
||||
OBJS-$(CONFIG_HQDN3D_FILTER) += vf_hqdn3d.o
|
||||
OBJS-$(CONFIG_LUT_FILTER) += vf_lut.o
|
||||
OBJS-$(CONFIG_LUTRGB_FILTER) += vf_lut.o
|
||||
OBJS-$(CONFIG_LUTYUV_FILTER) += vf_lut.o
|
||||
OBJS-$(CONFIG_MP_FILTER) += vf_mp.o
|
||||
OBJS-$(CONFIG_NEGATE_FILTER) += vf_lut.o
|
||||
OBJS-$(CONFIG_NOFORMAT_FILTER) += vf_format.o
|
||||
OBJS-$(CONFIG_NULL_FILTER) += vf_null.o
|
||||
OBJS-$(CONFIG_OCV_FILTER) += vf_libopencv.o
|
||||
@ -65,6 +69,7 @@ OBJS-$(CONFIG_FREI0R_SRC_FILTER) += vf_frei0r.o
|
||||
OBJS-$(CONFIG_MOVIE_FILTER) += vsrc_movie.o
|
||||
OBJS-$(CONFIG_NULLSRC_FILTER) += vsrc_nullsrc.o
|
||||
|
||||
OBJS-$(CONFIG_BUFFERSINK_FILTER) += vsink_buffer.o
|
||||
OBJS-$(CONFIG_NULLSINK_FILTER) += vsink_nullsink.o
|
||||
|
||||
|
||||
|
@ -54,7 +54,11 @@ void avfilter_register_all(void)
|
||||
REGISTER_FILTER (GRADFUN, gradfun, vf);
|
||||
REGISTER_FILTER (HFLIP, hflip, vf);
|
||||
REGISTER_FILTER (HQDN3D, hqdn3d, vf);
|
||||
REGISTER_FILTER (LUT, lut, vf);
|
||||
REGISTER_FILTER (LUTRGB, lutrgb, vf);
|
||||
REGISTER_FILTER (LUTYUV, lutyuv, vf);
|
||||
REGISTER_FILTER (MP, mp, vf);
|
||||
REGISTER_FILTER (NEGATE, negate, vf);
|
||||
REGISTER_FILTER (NOFORMAT, noformat, vf);
|
||||
REGISTER_FILTER (NULL, null, vf);
|
||||
REGISTER_FILTER (OCV, ocv, vf);
|
||||
@ -81,5 +85,6 @@ void avfilter_register_all(void)
|
||||
REGISTER_FILTER (MOVIE, movie, vsrc);
|
||||
REGISTER_FILTER (NULLSRC, nullsrc, vsrc);
|
||||
|
||||
REGISTER_FILTER (BUFFER, buffersink, vsink);
|
||||
REGISTER_FILTER (NULLSINK, nullsink, vsink);
|
||||
}
|
||||
|
@ -182,10 +182,11 @@ void avfilter_link_free(AVFilterLink **link)
|
||||
|
||||
av_freep(&picref->audio);
|
||||
av_freep(&picref->video);
|
||||
av_freep(&picref);
|
||||
av_freep(&(*link)->pool->pic[i]);
|
||||
}
|
||||
}
|
||||
av_freep(&(*link)->pool);
|
||||
(*link)->pool->count = 0;
|
||||
// av_freep(&(*link)->pool);
|
||||
}
|
||||
av_freep(link);
|
||||
}
|
||||
@ -217,6 +218,9 @@ int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
|
||||
if (link->out_formats)
|
||||
avfilter_formats_changeref(&link->out_formats,
|
||||
&filt->outputs[filt_dstpad_idx]->out_formats);
|
||||
if (link->out_chlayouts)
|
||||
avfilter_formats_changeref(&link->out_chlayouts,
|
||||
&filt->outputs[filt_dstpad_idx]->out_chlayouts);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -26,7 +26,7 @@
|
||||
#include "libavutil/samplefmt.h"
|
||||
|
||||
#define LIBAVFILTER_VERSION_MAJOR 1
|
||||
#define LIBAVFILTER_VERSION_MINOR 79
|
||||
#define LIBAVFILTER_VERSION_MINOR 80
|
||||
#define LIBAVFILTER_VERSION_MICRO 0
|
||||
|
||||
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
|
||||
@ -223,7 +223,7 @@ void avfilter_unref_buffer(AVFilterBufferRef *ref);
|
||||
*/
|
||||
typedef struct AVFilterFormats {
|
||||
unsigned format_count; ///< number of formats
|
||||
int *formats; ///< list of media formats
|
||||
int64_t *formats; ///< list of media formats
|
||||
|
||||
unsigned refcount; ///< number of references to this list
|
||||
struct AVFilterFormats ***refs; ///< references to this list
|
||||
@ -238,6 +238,7 @@ typedef struct AVFilterFormats {
|
||||
* @return the format list, with no existing references
|
||||
*/
|
||||
AVFilterFormats *avfilter_make_format_list(const int *fmts);
|
||||
AVFilterFormats *avfilter_make_format64_list(const int64_t *fmts);
|
||||
|
||||
/**
|
||||
* Add fmt to the list of media formats contained in *avff.
|
||||
@ -247,13 +248,18 @@ AVFilterFormats *avfilter_make_format_list(const int *fmts);
|
||||
* @return a non negative value in case of success, or a negative
|
||||
* value corresponding to an AVERROR code in case of error
|
||||
*/
|
||||
int avfilter_add_format(AVFilterFormats **avff, int fmt);
|
||||
int avfilter_add_format(AVFilterFormats **avff, int64_t fmt);
|
||||
|
||||
/**
|
||||
* Return a list of all formats supported by FFmpeg for the given media type.
|
||||
*/
|
||||
AVFilterFormats *avfilter_all_formats(enum AVMediaType type);
|
||||
|
||||
/**
|
||||
* Return a list of all channel layouts supported by FFmpeg.
|
||||
*/
|
||||
AVFilterFormats *avfilter_all_channel_layouts(void);
|
||||
|
||||
/**
|
||||
* Return a format list which contains the intersection of the formats of
|
||||
* a and b. Also, all the references of a, all the references of b, and
|
||||
@ -465,11 +471,13 @@ AVFilterBufferRef *avfilter_default_get_audio_buffer(AVFilterLink *link, int per
|
||||
int64_t channel_layout, int planar);
|
||||
|
||||
/**
|
||||
* A helper for query_formats() which sets all links to the same list of
|
||||
* formats. If there are no links hooked to this filter, the list of formats is
|
||||
* freed.
|
||||
* Helpers for query_formats() which set all links to the same list of
|
||||
* formats/layouts. If there are no links hooked to this filter, the list
|
||||
* of formats is freed.
|
||||
*/
|
||||
void avfilter_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats);
|
||||
void avfilter_set_common_pixel_formats(AVFilterContext *ctx, AVFilterFormats *formats);
|
||||
void avfilter_set_common_sample_formats(AVFilterContext *ctx, AVFilterFormats *formats);
|
||||
void avfilter_set_common_channel_layouts(AVFilterContext *ctx, AVFilterFormats *formats);
|
||||
|
||||
/** Default handler for query_formats() */
|
||||
int avfilter_default_query_formats(AVFilterContext *ctx);
|
||||
@ -520,9 +528,9 @@ typedef struct AVFilter {
|
||||
void (*uninit)(AVFilterContext *ctx);
|
||||
|
||||
/**
|
||||
* Queries formats supported by the filter and its pads, and sets the
|
||||
* in_formats for links connected to its output pads, and out_formats
|
||||
* for links connected to its input pads.
|
||||
* Queries formats/layouts supported by the filter and its pads, and sets
|
||||
* the in_formats/in_chlayouts for links connected to its output pads,
|
||||
* and out_formats/out_chlayouts for links connected to its input pads.
|
||||
*
|
||||
* @return zero on success, a negative value corresponding to an
|
||||
* AVERROR code otherwise
|
||||
@ -592,13 +600,18 @@ struct AVFilterLink {
|
||||
int format; ///< agreed upon media format
|
||||
|
||||
/**
|
||||
* Lists of formats supported by the input and output filters respectively.
|
||||
* These lists are used for negotiating the format to actually be used,
|
||||
* which will be loaded into the format member, above, when chosen.
|
||||
* Lists of formats and channel layouts supported by the input and output
|
||||
* filters respectively. These lists are used for negotiating the format
|
||||
* to actually be used, which will be loaded into the format and
|
||||
* channel_layout members, above, when chosen.
|
||||
*
|
||||
*/
|
||||
AVFilterFormats *in_formats;
|
||||
AVFilterFormats *out_formats;
|
||||
|
||||
AVFilterFormats *in_chlayouts;
|
||||
AVFilterFormats *out_chlayouts;
|
||||
|
||||
/**
|
||||
* The buffer reference currently being sent across the link by the source
|
||||
* filter. This is used internally by the filter system to allow
|
||||
|
@ -195,9 +195,16 @@ static void pick_format(AVFilterLink *link)
|
||||
|
||||
link->in_formats->format_count = 1;
|
||||
link->format = link->in_formats->formats[0];
|
||||
|
||||
avfilter_formats_unref(&link->in_formats);
|
||||
avfilter_formats_unref(&link->out_formats);
|
||||
|
||||
if (link->type == AVMEDIA_TYPE_AUDIO) {
|
||||
link->in_chlayouts->format_count = 1;
|
||||
link->channel_layout = link->in_chlayouts->formats[0];
|
||||
avfilter_formats_unref(&link->in_chlayouts);
|
||||
avfilter_formats_unref(&link->out_chlayouts);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static void pick_formats(AVFilterGraph *graph)
|
||||
|
@ -197,45 +197,54 @@ int avfilter_default_config_output_link(AVFilterLink *link)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* A helper for query_formats() which sets all links to the same list of
|
||||
* formats. If there are no links hooked to this filter, the list of formats is
|
||||
* freed.
|
||||
*
|
||||
* FIXME: this will need changed for filters with a mix of pad types
|
||||
* (video + audio, etc)
|
||||
*/
|
||||
void avfilter_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
|
||||
static void set_common_formats(AVFilterContext *ctx, AVFilterFormats *fmts,
|
||||
enum AVMediaType type, int offin, int offout)
|
||||
{
|
||||
int count = 0, i;
|
||||
int i;
|
||||
for (i = 0; i < ctx->input_count; i++)
|
||||
if (ctx->inputs[i] && ctx->inputs[i]->type == type)
|
||||
avfilter_formats_ref(fmts,
|
||||
(AVFilterFormats**)((void*)ctx->inputs[i]+offout));
|
||||
|
||||
for (i = 0; i < ctx->input_count; i++) {
|
||||
if (ctx->inputs[i]) {
|
||||
avfilter_formats_ref(formats, &ctx->inputs[i]->out_formats);
|
||||
count++;
|
||||
}
|
||||
}
|
||||
for (i = 0; i < ctx->output_count; i++) {
|
||||
if (ctx->outputs[i]) {
|
||||
avfilter_formats_ref(formats, &ctx->outputs[i]->in_formats);
|
||||
count++;
|
||||
}
|
||||
}
|
||||
for (i = 0; i < ctx->output_count; i++)
|
||||
if (ctx->outputs[i] && ctx->outputs[i]->type == type)
|
||||
avfilter_formats_ref(fmts,
|
||||
(AVFilterFormats**)((void*)ctx->outputs[i]+offin));
|
||||
|
||||
if (!count) {
|
||||
av_free(formats->formats);
|
||||
av_free(formats->refs);
|
||||
av_free(formats);
|
||||
if (!fmts->refcount) {
|
||||
av_free(fmts->formats);
|
||||
av_free(fmts->refs);
|
||||
av_free(fmts);
|
||||
}
|
||||
}
|
||||
|
||||
void avfilter_set_common_pixel_formats(AVFilterContext *ctx, AVFilterFormats *formats)
|
||||
{
|
||||
set_common_formats(ctx, formats, AVMEDIA_TYPE_VIDEO,
|
||||
offsetof(AVFilterLink, in_formats),
|
||||
offsetof(AVFilterLink, out_formats));
|
||||
}
|
||||
|
||||
void avfilter_set_common_sample_formats(AVFilterContext *ctx, AVFilterFormats *formats)
|
||||
{
|
||||
set_common_formats(ctx, formats, AVMEDIA_TYPE_AUDIO,
|
||||
offsetof(AVFilterLink, in_formats),
|
||||
offsetof(AVFilterLink, out_formats));
|
||||
}
|
||||
|
||||
void avfilter_set_common_channel_layouts(AVFilterContext *ctx, AVFilterFormats *formats)
|
||||
{
|
||||
set_common_formats(ctx, formats, AVMEDIA_TYPE_AUDIO,
|
||||
offsetof(AVFilterLink, in_chlayouts),
|
||||
offsetof(AVFilterLink, out_chlayouts));
|
||||
}
|
||||
|
||||
int avfilter_default_query_formats(AVFilterContext *ctx)
|
||||
{
|
||||
enum AVMediaType type = ctx->inputs && ctx->inputs [0] ? ctx->inputs [0]->type :
|
||||
ctx->outputs && ctx->outputs[0] ? ctx->outputs[0]->type :
|
||||
AVMEDIA_TYPE_VIDEO;
|
||||
avfilter_set_common_pixel_formats(ctx, avfilter_all_formats(AVMEDIA_TYPE_VIDEO));
|
||||
avfilter_set_common_sample_formats(ctx, avfilter_all_formats(AVMEDIA_TYPE_AUDIO));
|
||||
avfilter_set_common_channel_layouts(ctx, avfilter_all_channel_layouts());
|
||||
|
||||
avfilter_set_common_formats(ctx, avfilter_all_formats(type));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -20,6 +20,7 @@
|
||||
*/
|
||||
|
||||
#include "libavutil/pixdesc.h"
|
||||
#include "libavutil/audioconvert.h"
|
||||
#include "avfilter.h"
|
||||
|
||||
/**
|
||||
@ -72,34 +73,50 @@ AVFilterFormats *avfilter_merge_formats(AVFilterFormats *a, AVFilterFormats *b)
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define MAKE_FORMAT_LIST() \
|
||||
AVFilterFormats *formats; \
|
||||
int count = 0; \
|
||||
if (fmts) \
|
||||
for (count = 0; fmts[count] != -1; count++) \
|
||||
; \
|
||||
formats = av_mallocz(sizeof(AVFilterFormats)); \
|
||||
if (!formats) return NULL; \
|
||||
formats->format_count = count; \
|
||||
if (count) { \
|
||||
formats->formats = av_malloc(sizeof(*formats->formats)*count); \
|
||||
if (!formats->formats) { \
|
||||
av_free(formats); \
|
||||
return NULL; \
|
||||
} \
|
||||
}
|
||||
|
||||
AVFilterFormats *avfilter_make_format_list(const int *fmts)
|
||||
{
|
||||
AVFilterFormats *formats;
|
||||
int count = 0;
|
||||
|
||||
if (fmts)
|
||||
for (count = 0; fmts[count] != -1; count++)
|
||||
;
|
||||
|
||||
formats = av_mallocz(sizeof(AVFilterFormats));
|
||||
formats->format_count = count;
|
||||
if (count) {
|
||||
formats->formats = av_malloc(sizeof(*formats->formats) * count);
|
||||
memcpy(formats->formats, fmts, sizeof(*formats->formats) * count);
|
||||
}
|
||||
MAKE_FORMAT_LIST();
|
||||
while (count--)
|
||||
formats->formats[count] = fmts[count];
|
||||
|
||||
return formats;
|
||||
}
|
||||
|
||||
int avfilter_add_format(AVFilterFormats **avff, int fmt)
|
||||
AVFilterFormats *avfilter_make_format64_list(const int64_t *fmts)
|
||||
{
|
||||
int *fmts;
|
||||
MAKE_FORMAT_LIST();
|
||||
if (count)
|
||||
memcpy(formats->formats, fmts, sizeof(*formats->formats) * count);
|
||||
|
||||
return formats;
|
||||
}
|
||||
|
||||
int avfilter_add_format(AVFilterFormats **avff, int64_t fmt)
|
||||
{
|
||||
int64_t *fmts;
|
||||
|
||||
if (!(*avff) && !(*avff = av_mallocz(sizeof(AVFilterFormats))))
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
fmts = av_realloc((*avff)->formats,
|
||||
sizeof((*avff)->formats) * ((*avff)->format_count+1));
|
||||
sizeof(*(*avff)->formats) * ((*avff)->format_count+1));
|
||||
if (!fmts)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
@ -123,6 +140,27 @@ AVFilterFormats *avfilter_all_formats(enum AVMediaType type)
|
||||
return ret;
|
||||
}
|
||||
|
||||
AVFilterFormats *avfilter_all_channel_layouts(void)
|
||||
{
|
||||
static int64_t chlayouts[] = {
|
||||
AV_CH_LAYOUT_MONO,
|
||||
AV_CH_LAYOUT_STEREO,
|
||||
AV_CH_LAYOUT_4POINT0,
|
||||
AV_CH_LAYOUT_QUAD,
|
||||
AV_CH_LAYOUT_5POINT0,
|
||||
AV_CH_LAYOUT_5POINT0_BACK,
|
||||
AV_CH_LAYOUT_5POINT1,
|
||||
AV_CH_LAYOUT_5POINT1_BACK,
|
||||
AV_CH_LAYOUT_5POINT1|AV_CH_LAYOUT_STEREO_DOWNMIX,
|
||||
AV_CH_LAYOUT_7POINT1,
|
||||
AV_CH_LAYOUT_7POINT1_WIDE,
|
||||
AV_CH_LAYOUT_7POINT1|AV_CH_LAYOUT_STEREO_DOWNMIX,
|
||||
-1,
|
||||
};
|
||||
|
||||
return avfilter_make_format64_list(chlayouts);
|
||||
}
|
||||
|
||||
void avfilter_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
|
||||
{
|
||||
*ref = f;
|
||||
|
@ -387,7 +387,7 @@ int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (*open_inputs && !strcmp((*open_inputs)->name, "out") && curr_inputs) {
|
||||
if (open_inputs && *open_inputs && !strcmp((*open_inputs)->name, "out") && curr_inputs) {
|
||||
/* Last output can be omitted if it is "[out]" */
|
||||
const char *tmp = "[out]";
|
||||
if ((ret = parse_outputs(&tmp, &curr_inputs, open_inputs, open_outputs,
|
||||
|
@ -44,7 +44,7 @@ static int query_formats(AVFilterContext *ctx)
|
||||
PIX_FMT_NONE
|
||||
};
|
||||
|
||||
avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -104,7 +104,7 @@ static int query_formats(AVFilterContext *ctx)
|
||||
PIX_FMT_NONE
|
||||
};
|
||||
|
||||
avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -46,7 +46,7 @@ static int query_formats(AVFilterContext *ctx)
|
||||
PIX_FMT_NONE
|
||||
};
|
||||
|
||||
avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -70,7 +70,7 @@ static int query_formats(AVFilterContext *ctx)
|
||||
PIX_FMT_NONE
|
||||
};
|
||||
|
||||
avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -334,7 +334,7 @@ static int query_formats(AVFilterContext *ctx)
|
||||
PIX_FMT_NONE
|
||||
};
|
||||
|
||||
avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -78,7 +78,7 @@ static int query_formats(AVFilterContext *ctx)
|
||||
PIX_FMT_NONE
|
||||
};
|
||||
|
||||
avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -88,7 +88,7 @@ static AVFilterFormats *make_format_list(FormatContext *format, int flag)
|
||||
#if CONFIG_FORMAT_FILTER
|
||||
static int query_formats_format(AVFilterContext *ctx)
|
||||
{
|
||||
avfilter_set_common_formats(ctx, make_format_list(ctx->priv, 1));
|
||||
avfilter_set_common_pixel_formats(ctx, make_format_list(ctx->priv, 1));
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -118,7 +118,7 @@ AVFilter avfilter_vf_format = {
|
||||
#if CONFIG_NOFORMAT_FILTER
|
||||
static int query_formats_noformat(AVFilterContext *ctx)
|
||||
{
|
||||
avfilter_set_common_formats(ctx, make_format_list(ctx->priv, 0));
|
||||
avfilter_set_common_pixel_formats(ctx, make_format_list(ctx->priv, 0));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -332,7 +332,7 @@ static int query_formats(AVFilterContext *ctx)
|
||||
if (!formats)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
avfilter_set_common_formats(ctx, formats);
|
||||
avfilter_set_common_pixel_formats(ctx, formats);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -160,7 +160,7 @@ static int query_formats(AVFilterContext *ctx)
|
||||
PIX_FMT_NONE
|
||||
};
|
||||
|
||||
avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -62,7 +62,7 @@ static int query_formats(AVFilterContext *ctx)
|
||||
PIX_FMT_NONE
|
||||
};
|
||||
|
||||
avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -268,7 +268,7 @@ static int query_formats(AVFilterContext *ctx)
|
||||
PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV411P, PIX_FMT_NONE
|
||||
};
|
||||
|
||||
avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -61,7 +61,7 @@ static int query_formats(AVFilterContext *ctx)
|
||||
PIX_FMT_BGR24, PIX_FMT_BGRA, PIX_FMT_GRAY8, PIX_FMT_NONE
|
||||
};
|
||||
|
||||
avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
libavfilter/vf_lut.c (new file)
@ -0,0 +1,391 @@
/*
|
||||
* Copyright (c) 2011 Stefano Sabatini
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* Compute a look-up table for binding the input value to the output
|
||||
* value, and apply it to input video.
|
||||
*/
|
||||
|
||||
#include "libavutil/eval.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include "libavutil/pixdesc.h"
|
||||
#include "avfilter.h"
|
||||
|
||||
static const char *var_names[] = {
|
||||
"E",
|
||||
"PHI",
|
||||
"PI",
|
||||
"w", ///< width of the input video
|
||||
"h", ///< height of the input video
|
||||
"val", ///< input value for the pixel
|
||||
"maxval", ///< max value for the pixel
|
||||
"minval", ///< min value for the pixel
|
||||
"negval", ///< negated value
|
||||
"clipval",
|
||||
NULL
|
||||
};
|
||||
|
||||
enum var_name {
|
||||
VAR_E,
|
||||
VAR_PHI,
|
||||
VAR_PI,
|
||||
VAR_W,
|
||||
VAR_H,
|
||||
VAR_VAL,
|
||||
VAR_MAXVAL,
|
||||
VAR_MINVAL,
|
||||
VAR_NEGVAL,
|
||||
VAR_CLIPVAL,
|
||||
VAR_VARS_NB
|
||||
};
|
||||
|
||||
typedef struct {
|
||||
const AVClass *class;
|
||||
uint8_t lut[4][256]; ///< lookup table for each component
|
||||
char *comp_expr_str[4];
|
||||
AVExpr *comp_expr[4];
|
||||
int hsub, vsub;
|
||||
double var_values[VAR_VARS_NB];
|
||||
int is_rgb, is_yuv;
|
||||
int rgba_map[4];
|
||||
int step;
|
||||
int negate_alpha; /* only used by negate */
|
||||
} LutContext;
|
||||
|
||||
#define Y 0
|
||||
#define U 1
|
||||
#define V 2
|
||||
#define R 0
|
||||
#define G 1
|
||||
#define B 2
|
||||
#define A 3
|
||||
|
||||
#define OFFSET(x) offsetof(LutContext, x)
|
||||
|
||||
static const AVOption lut_options[] = {
|
||||
{"c0", "set component #0 expression", OFFSET(comp_expr_str[0]), FF_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX},
|
||||
{"c1", "set component #1 expression", OFFSET(comp_expr_str[1]), FF_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX},
|
||||
{"c2", "set component #2 expression", OFFSET(comp_expr_str[2]), FF_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX},
|
||||
{"c3", "set component #3 expression", OFFSET(comp_expr_str[3]), FF_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX},
|
||||
{"y", "set Y expression", OFFSET(comp_expr_str[Y]), FF_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX},
|
||||
{"u", "set U expression", OFFSET(comp_expr_str[U]), FF_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX},
|
||||
{"v", "set V expression", OFFSET(comp_expr_str[V]), FF_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX},
|
||||
{"r", "set R expression", OFFSET(comp_expr_str[R]), FF_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX},
|
||||
{"g", "set G expression", OFFSET(comp_expr_str[G]), FF_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX},
|
||||
{"b", "set B expression", OFFSET(comp_expr_str[B]), FF_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX},
|
||||
{"a", "set A expression", OFFSET(comp_expr_str[A]), FF_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX},
|
||||
{NULL},
|
||||
};
|
||||
|
||||
static const char *lut_get_name(void *ctx)
|
||||
{
|
||||
return "lut";
|
||||
}
|
||||
|
||||
static const AVClass lut_class = {
|
||||
"LutContext",
|
||||
lut_get_name,
|
||||
lut_options
|
||||
};
|
||||
|
||||
static int init(AVFilterContext *ctx, const char *args, void *opaque)
|
||||
{
|
||||
LutContext *lut = ctx->priv;
|
||||
int ret;
|
||||
|
||||
lut->class = &lut_class;
|
||||
av_opt_set_defaults2(lut, 0, 0);
|
||||
|
||||
lut->var_values[VAR_PHI] = M_PHI;
|
||||
lut->var_values[VAR_PI] = M_PI;
|
||||
lut->var_values[VAR_E ] = M_E;
|
||||
|
||||
lut->is_rgb = !strcmp(ctx->filter->name, "lutrgb");
|
||||
lut->is_yuv = !strcmp(ctx->filter->name, "lutyuv");
|
||||
if (args && (ret = av_set_options_string(lut, args, "=", ":")) < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_cold void uninit(AVFilterContext *ctx)
|
||||
{
|
||||
LutContext *lut = ctx->priv;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
av_expr_free(lut->comp_expr[i]);
|
||||
lut->comp_expr[i] = NULL;
|
||||
av_freep(&lut->comp_expr_str[i]);
|
||||
}
|
||||
}
|
||||
|
||||
#define YUV_FORMATS \
|
||||
PIX_FMT_YUV444P, PIX_FMT_YUV422P, PIX_FMT_YUV420P, \
|
||||
PIX_FMT_YUV411P, PIX_FMT_YUV410P, PIX_FMT_YUV440P, \
|
||||
PIX_FMT_YUVA420P, \
|
||||
PIX_FMT_YUVJ444P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ420P, \
|
||||
PIX_FMT_YUVJ440P
|
||||
|
||||
#define RGB_FORMATS \
|
||||
PIX_FMT_ARGB, PIX_FMT_RGBA, \
|
||||
PIX_FMT_ABGR, PIX_FMT_BGRA, \
|
||||
PIX_FMT_RGB24, PIX_FMT_BGR24
|
||||
|
||||
static enum PixelFormat yuv_pix_fmts[] = { YUV_FORMATS, PIX_FMT_NONE };
|
||||
static enum PixelFormat rgb_pix_fmts[] = { RGB_FORMATS, PIX_FMT_NONE };
|
||||
static enum PixelFormat all_pix_fmts[] = { RGB_FORMATS, YUV_FORMATS, PIX_FMT_NONE };
|
||||
|
||||
static int query_formats(AVFilterContext *ctx)
|
||||
{
|
||||
LutContext *lut = ctx->priv;
|
||||
|
||||
enum PixelFormat *pix_fmts = lut->is_rgb ? rgb_pix_fmts :
|
||||
lut->is_yuv ? yuv_pix_fmts : all_pix_fmts;
|
||||
|
||||
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int pix_fmt_is_in(enum PixelFormat pix_fmt, enum PixelFormat *pix_fmts)
|
||||
{
|
||||
enum PixelFormat *p;
|
||||
for (p = pix_fmts; *p != PIX_FMT_NONE; p++) {
|
||||
if (pix_fmt == *p)
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Clip value val in the minval - maxval range.
|
||||
*/
|
||||
static double clip(void *opaque, double val)
|
||||
{
|
||||
LutContext *lut = opaque;
|
||||
double minval = lut->var_values[VAR_MINVAL];
|
||||
double maxval = lut->var_values[VAR_MAXVAL];
|
||||
|
||||
return av_clip(val, minval, maxval);
|
||||
}
|
||||
|
||||
/**
|
||||
* Compute gamma correction for value val, assuming the minval-maxval
|
||||
* range, val is clipped to a value contained in the same interval.
|
||||
*/
|
||||
static double compute_gammaval(void *opaque, double gamma)
|
||||
{
|
||||
LutContext *lut = opaque;
|
||||
double val = lut->var_values[VAR_CLIPVAL];
|
||||
double minval = lut->var_values[VAR_MINVAL];
|
||||
double maxval = lut->var_values[VAR_MAXVAL];
|
||||
|
||||
return pow((val-minval)/(maxval-minval), gamma) * (maxval-minval)+minval;
|
||||
}
|
||||
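/* Illustrative numbers (not part of the original source): for a studio-range
 * luma plane with minval = 16, maxval = 235, clipval = 128 and gamma = 0.5,
 * pow((128-16)/(235-16), 0.5) * (235-16) + 16 = 0.715 * 219 + 16 ~ 173. */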
|
||||
static double (* const funcs1[])(void *, double) = {
|
||||
(void *)clip,
|
||||
(void *)compute_gammaval,
|
||||
NULL
|
||||
};
|
||||
|
||||
static const char * const funcs1_names[] = {
|
||||
"clip",
|
||||
"gammaval",
|
||||
NULL
|
||||
};
|
||||
|
||||
static int config_props(AVFilterLink *inlink)
|
||||
{
|
||||
AVFilterContext *ctx = inlink->dst;
|
||||
LutContext *lut = ctx->priv;
|
||||
const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[inlink->format];
|
||||
int min[4], max[4];
|
||||
int val, comp, ret;
|
||||
|
||||
lut->hsub = desc->log2_chroma_w;
|
||||
lut->vsub = desc->log2_chroma_h;
|
||||
|
||||
lut->var_values[VAR_W] = inlink->w;
|
||||
lut->var_values[VAR_H] = inlink->h;
|
||||
|
||||
switch (inlink->format) {
|
||||
case PIX_FMT_YUV410P:
|
||||
case PIX_FMT_YUV411P:
|
||||
case PIX_FMT_YUV420P:
|
||||
case PIX_FMT_YUV422P:
|
||||
case PIX_FMT_YUV440P:
|
||||
case PIX_FMT_YUV444P:
|
||||
case PIX_FMT_YUVA420P:
|
||||
min[Y] = min[U] = min[V] = 16;
|
||||
max[Y] = 235;
|
||||
max[U] = max[V] = 240;
|
||||
break;
|
||||
default:
|
||||
min[0] = min[1] = min[2] = min[3] = 0;
|
||||
max[0] = max[1] = max[2] = max[3] = 255;
|
||||
}
|
||||
|
||||
lut->is_yuv = lut->is_rgb = 0;
|
||||
if (pix_fmt_is_in(inlink->format, yuv_pix_fmts)) lut->is_yuv = 1;
|
||||
else if (pix_fmt_is_in(inlink->format, rgb_pix_fmts)) lut->is_rgb = 1;
|
||||
|
||||
if (lut->is_rgb) {
|
||||
switch (inlink->format) {
|
||||
case PIX_FMT_ARGB: lut->rgba_map[A] = 0; lut->rgba_map[R] = 1; lut->rgba_map[G] = 2; lut->rgba_map[B] = 3; break;
|
||||
case PIX_FMT_ABGR: lut->rgba_map[A] = 0; lut->rgba_map[B] = 1; lut->rgba_map[G] = 2; lut->rgba_map[R] = 3; break;
|
||||
case PIX_FMT_RGBA:
|
||||
case PIX_FMT_RGB24: lut->rgba_map[R] = 0; lut->rgba_map[G] = 1; lut->rgba_map[B] = 2; lut->rgba_map[A] = 3; break;
|
||||
case PIX_FMT_BGRA:
|
||||
case PIX_FMT_BGR24: lut->rgba_map[B] = 0; lut->rgba_map[G] = 1; lut->rgba_map[R] = 2; lut->rgba_map[A] = 3; break;
|
||||
}
|
||||
lut->step = av_get_bits_per_pixel(desc) >> 3;
|
||||
}
|
||||
|
||||
for (comp = 0; comp < desc->nb_components; comp++) {
|
||||
double res;
|
||||
|
||||
/* create the parsed expression */
|
||||
ret = av_expr_parse(&lut->comp_expr[comp], lut->comp_expr_str[comp],
|
||||
var_names, funcs1_names, funcs1, NULL, NULL, 0, ctx);
|
||||
if (ret < 0) {
|
||||
av_log(ctx, AV_LOG_ERROR,
|
||||
"Error when parsing the expression '%s' for the component %d.\n",
|
||||
lut->comp_expr_str[comp], comp);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
/* compute the lut */
|
||||
lut->var_values[VAR_MAXVAL] = max[comp];
|
||||
lut->var_values[VAR_MINVAL] = min[comp];
|
||||
|
||||
for (val = 0; val < 256; val++) {
|
||||
lut->var_values[VAR_VAL] = val;
|
||||
lut->var_values[VAR_CLIPVAL] = av_clip(val, min[comp], max[comp]);
|
||||
lut->var_values[VAR_NEGVAL] =
|
||||
av_clip(min[comp] + max[comp] - lut->var_values[VAR_VAL],
|
||||
min[comp], max[comp]);
|
||||
|
||||
res = av_expr_eval(lut->comp_expr[comp], lut->var_values, lut);
|
||||
if (isnan(res)) {
|
||||
av_log(ctx, AV_LOG_ERROR,
|
||||
"Error when evaluating the expression '%s' for the value %d for the component #%d.\n",
|
||||
lut->comp_expr_str[comp], val, comp);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
lut->lut[comp][val] = av_clip((int)res, min[comp], max[comp]);
|
||||
av_log(ctx, AV_LOG_DEBUG, "val[%d][%d] = %d\n", comp, val, lut->lut[comp][val]);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
|
||||
{
|
||||
AVFilterContext *ctx = inlink->dst;
|
||||
LutContext *lut = ctx->priv;
|
||||
AVFilterLink *outlink = ctx->outputs[0];
|
||||
AVFilterBufferRef *inpic = inlink ->cur_buf;
|
||||
AVFilterBufferRef *outpic = outlink->out_buf;
|
||||
uint8_t *inrow, *outrow;
|
||||
int i, j, k, plane;
|
||||
|
||||
if (lut->is_rgb) {
|
||||
/* packed */
|
||||
inrow = inpic ->data[0] + y * inpic ->linesize[0];
|
||||
outrow = outpic->data[0] + y * outpic->linesize[0];
|
||||
|
||||
for (i = 0; i < h; i ++) {
|
||||
for (j = 0; j < inlink->w; j++) {
|
||||
for (k = 0; k < lut->step; k++)
|
||||
outrow[k] = lut->lut[lut->rgba_map[k]][inrow[k]];
|
||||
outrow += lut->step;
|
||||
inrow += lut->step;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
/* planar */
|
||||
for (plane = 0; inpic->data[plane]; plane++) {
|
||||
int vsub = plane == 1 || plane == 2 ? lut->vsub : 0;
|
||||
int hsub = plane == 1 || plane == 2 ? lut->hsub : 0;
|
||||
|
||||
inrow = inpic ->data[plane] + (y>>vsub) * inpic ->linesize[plane];
|
||||
outrow = outpic->data[plane] + (y>>vsub) * outpic->linesize[plane];
|
||||
|
||||
for (i = 0; i < h>>vsub; i ++) {
|
||||
for (j = 0; j < inlink->w>>hsub; j++)
|
||||
outrow[j] = lut->lut[plane][inrow[j]];
|
||||
inrow += inpic ->linesize[plane];
|
||||
outrow += outpic->linesize[plane];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
avfilter_draw_slice(outlink, y, h, slice_dir);
|
||||
}
|
||||
|
||||
#define DEFINE_LUT_FILTER(name_, description_, init_) \
|
||||
AVFilter avfilter_vf_##name_ = { \
|
||||
.name = NULL_IF_CONFIG_SMALL(#name_), \
|
||||
.description = description_, \
|
||||
.priv_size = sizeof(LutContext), \
|
||||
\
|
||||
.init = init_, \
|
||||
.uninit = uninit, \
|
||||
.query_formats = query_formats, \
|
||||
\
|
||||
.inputs = (AVFilterPad[]) {{ .name = "default", \
|
||||
.type = AVMEDIA_TYPE_VIDEO, \
|
||||
.draw_slice = draw_slice, \
|
||||
.config_props = config_props, \
|
||||
.min_perms = AV_PERM_READ, }, \
|
||||
{ .name = NULL}}, \
|
||||
.outputs = (AVFilterPad[]) {{ .name = "default", \
|
||||
.type = AVMEDIA_TYPE_VIDEO, }, \
|
||||
{ .name = NULL}}, \
|
||||
}
|
||||
|
||||
DEFINE_LUT_FILTER(lut,    "Compute and apply a lookup table to the RGB/YUV input video.", init);
DEFINE_LUT_FILTER(lutyuv, "Compute and apply a lookup table to the YUV input video.", init);
DEFINE_LUT_FILTER(lutrgb, "Compute and apply a lookup table to the RGB input video.", init);

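/* Example invocation (assumed, for illustration only): given the option table
 * above, a filter-graph string such as "lutyuv=y=gammaval(0.5)" builds a table
 * that applies gamma 0.5 to the luma plane and leaves chroma unchanged, since
 * the default expression for the other components is "val". */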
#if CONFIG_NEGATE_FILTER
|
||||
|
||||
static int negate_init(AVFilterContext *ctx, const char *args, void *opaque)
|
||||
{
|
||||
LutContext *lut = ctx->priv;
|
||||
char lut_params[1024];
|
||||
|
||||
if (args)
|
||||
sscanf(args, "%d", &lut->negate_alpha);
|
||||
|
||||
av_log(ctx, AV_LOG_INFO, "negate_alpha:%d\n", lut->negate_alpha);
|
||||
|
||||
snprintf(lut_params, sizeof(lut_params), "c0=negval:c1=negval:c2=negval:a=%s",
|
||||
lut->negate_alpha ? "negval" : "val");
|
||||
|
||||
return init(ctx, lut_params, opaque);
|
||||
}
|
||||
|
||||
DEFINE_LUT_FILTER(negate, "Negate input video.", negate_init);
|
||||
|
||||
#endif
|
@ -796,7 +796,7 @@ static int query_formats(AVFilterContext *ctx)
|
||||
}
|
||||
|
||||
//We assume all allowed input formats are also allowed output formats
|
||||
avfilter_set_common_formats(ctx, avfmts);
|
||||
avfilter_set_common_pixel_formats(ctx, avfmts);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -21,7 +21,7 @@
|
||||
|
||||
/**
|
||||
* @file
|
||||
* video padding filter and color source
|
||||
* video padding filter
|
||||
*/
|
||||
|
||||
#include "avfilter.h"
|
||||
@ -83,7 +83,7 @@ static int query_formats(AVFilterContext *ctx)
|
||||
PIX_FMT_NONE
|
||||
};
|
||||
|
||||
avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -83,7 +83,7 @@ static int query_formats(AVFilterContext *ctx)
|
||||
PIX_FMT_NONE
|
||||
};
|
||||
|
||||
avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -155,7 +155,7 @@ static int query_formats(AVFilterContext *ctx)
|
||||
PIX_FMT_YUVJ444P, PIX_FMT_YUVJ440P, PIX_FMT_NONE
|
||||
};
|
||||
|
||||
avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -332,7 +332,7 @@ static int query_formats(AVFilterContext *ctx)
|
||||
PIX_FMT_NONE
|
||||
};
|
||||
|
||||
avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
libavfilter/vsink_buffer.c (new file)
@ -0,0 +1,111 @@
/*
|
||||
* Copyright (c) 2011 Stefano Sabatini
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* buffer video sink
|
||||
*/
|
||||
|
||||
#include "avfilter.h"
|
||||
#include "vsink_buffer.h"
|
||||
|
||||
typedef struct {
|
||||
AVFilterBufferRef *picref; ///< cached picref
|
||||
enum PixelFormat *pix_fmts; ///< accepted pixel formats, must be terminated with -1
|
||||
} BufferSinkContext;
|
||||
|
||||
static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
|
||||
{
|
||||
BufferSinkContext *buf = ctx->priv;
|
||||
|
||||
if (!opaque) {
|
||||
av_log(ctx, AV_LOG_ERROR, "No opaque field provided, which is required.\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
buf->pix_fmts = opaque;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_cold void uninit(AVFilterContext *ctx)
|
||||
{
|
||||
BufferSinkContext *buf = ctx->priv;
|
||||
|
||||
if (buf->picref)
|
||||
avfilter_unref_buffer(buf->picref);
|
||||
buf->picref = NULL;
|
||||
}
|
||||
|
||||
static void end_frame(AVFilterLink *inlink)
|
||||
{
|
||||
BufferSinkContext *buf = inlink->dst->priv;
|
||||
|
||||
if (buf->picref) /* drop the last cached frame */
|
||||
avfilter_unref_buffer(buf->picref);
|
||||
buf->picref = inlink->cur_buf;
|
||||
}
|
||||
|
||||
static int query_formats(AVFilterContext *ctx)
|
||||
{
|
||||
BufferSinkContext *buf = ctx->priv;
|
||||
|
||||
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(buf->pix_fmts));
|
||||
return 0;
|
||||
}
|
||||
|
||||
int av_vsink_buffer_get_video_buffer_ref(AVFilterContext *ctx,
|
||||
AVFilterBufferRef **picref, int flags)
|
||||
{
|
||||
BufferSinkContext *buf = ctx->priv;
|
||||
AVFilterLink *inlink = ctx->inputs[0];
|
||||
int ret;
|
||||
*picref = NULL;
|
||||
|
||||
/* no picref available, fetch it from the filterchain */
|
||||
if (!buf->picref) {
|
||||
if ((ret = avfilter_request_frame(inlink)) < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (!buf->picref)
|
||||
return AVERROR(EINVAL);
|
||||
|
||||
*picref = buf->picref;
|
||||
if (!(flags & AV_VSINK_BUF_FLAG_PEEK))
|
||||
buf->picref = NULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
AVFilter avfilter_vsink_buffersink = {
|
||||
.name = "buffersink",
|
||||
.priv_size = sizeof(BufferSinkContext),
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
|
||||
.query_formats = query_formats,
|
||||
|
||||
.inputs = (AVFilterPad[]) {{ .name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.end_frame = end_frame,
|
||||
.min_perms = AV_PERM_READ, },
|
||||
{ .name = NULL }},
|
||||
.outputs = (AVFilterPad[]) {{ .name = NULL }},
|
||||
};
|
libavfilter/vsink_buffer.h (new file)
@ -0,0 +1,47 @@
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef AVFILTER_VSINK_BUFFER_H
#define AVFILTER_VSINK_BUFFER_H

/**
 * @file
 * memory buffer sink API for video
 */

#include "avfilter.h"

/**
 * Tell av_vsink_buffer_get_video_buffer_ref() to read the picref, but not
 * remove it from the buffer. This is useful if you only need to read
 * the picref, without consuming it.
 */
#define AV_VSINK_BUF_FLAG_PEEK 1

/**
 * Get a video buffer reference from buffer_sink and put it in picref.
 *
 * @param buffer_sink pointer to a buffer sink context
 * @param flags a combination of AV_VSINK_BUF_FLAG_* flags
 * @return >= 0 in case of success, a negative AVERROR code in case of
 * failure
 */
int av_vsink_buffer_get_video_buffer_ref(AVFilterContext *buffer_sink,
                                         AVFilterBufferRef **picref, int flags);

#endif /* AVFILTER_VSINK_BUFFER_H */
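For orientation, a sketch of the consumer side of the new sink; buffersink_ctx stands for a buffersink filter instance the caller has already created and linked:

    AVFilterBufferRef *picref;
    while (av_vsink_buffer_get_video_buffer_ref(buffersink_ctx, &picref, 0) >= 0) {
        /* ... read picref->data / picref->linesize ... */
        avfilter_unref_buffer(picref);   /* caller owns the ref when not peeking */
    }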
@ -166,7 +166,7 @@ static int query_formats(AVFilterContext *ctx)
|
||||
BufferSourceContext *c = ctx->priv;
|
||||
enum PixelFormat pix_fmts[] = { c->pix_fmt, PIX_FMT_NONE };
|
||||
|
||||
avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -18,6 +18,11 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* color source
|
||||
*/
|
||||
|
||||
#include "avfilter.h"
|
||||
#include "libavutil/pixdesc.h"
|
||||
#include "libavutil/colorspace.h"
|
||||
@ -94,7 +99,7 @@ static int query_formats(AVFilterContext *ctx)
|
||||
PIX_FMT_NONE
|
||||
};
|
||||
|
||||
avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -124,6 +129,7 @@ static int color_config_props(AVFilterLink *inlink)
|
||||
is_packed_rgba ? "rgba" : "yuva");
|
||||
inlink->w = color->w;
|
||||
inlink->h = color->h;
|
||||
inlink->time_base = color->time_base;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -133,8 +139,8 @@ static int color_request_frame(AVFilterLink *link)
|
||||
ColorContext *color = link->src->priv;
|
||||
AVFilterBufferRef *picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, color->w, color->h);
|
||||
picref->video->sample_aspect_ratio = (AVRational) {1, 1};
|
||||
picref->pts = av_rescale_q(color->pts++, color->time_base, AV_TIME_BASE_Q);
|
||||
picref->pos = 0;
|
||||
picref->pts = color->pts++;
|
||||
picref->pos = -1;
|
||||
|
||||
avfilter_start_frame(link, avfilter_ref_buffer(picref, ~0));
|
||||
ff_draw_rectangle(picref->data, picref->linesize,
|
||||
|
@ -92,9 +92,9 @@ static int movie_init(AVFilterContext *ctx)
|
||||
iformat = movie->format_name ? av_find_input_format(movie->format_name) : NULL;
|
||||
|
||||
movie->format_ctx = NULL;
|
||||
if ((ret = av_open_input_file(&movie->format_ctx, movie->file_name, iformat, 0, NULL)) < 0) {
|
||||
if ((ret = avformat_open_input(&movie->format_ctx, movie->file_name, iformat, NULL)) < 0) {
|
||||
av_log(ctx, AV_LOG_ERROR,
|
||||
"Failed to av_open_input_file '%s'\n", movie->file_name);
|
||||
"Failed to avformat_open_input '%s'\n", movie->file_name);
|
||||
return ret;
|
||||
}
|
||||
if ((ret = av_find_stream_info(movie->format_ctx)) < 0)
|
||||
@ -203,7 +203,7 @@ static int query_formats(AVFilterContext *ctx)
|
||||
MovieContext *movie = ctx->priv;
|
||||
enum PixelFormat pix_fmts[] = { movie->codec_ctx->pix_fmt, PIX_FMT_NONE };
|
||||
|
||||
avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -475,6 +475,11 @@ static int applehttp_read_header(AVFormatContext *s, AVFormatParameters *ap)
|
||||
if (v->n_segments == 0)
|
||||
continue;
|
||||
|
||||
if (!(v->ctx = avformat_alloc_context())) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
v->index = i;
|
||||
v->needed = 1;
|
||||
v->parent = s;
|
||||
@ -493,8 +498,8 @@ static int applehttp_read_header(AVFormatContext *s, AVFormatParameters *ap)
|
||||
NULL, 0, 0);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
ret = av_open_input_stream(&v->ctx, &v->pb, v->segments[0]->url,
|
||||
in_fmt, NULL);
|
||||
v->ctx->pb = &v->pb;
|
||||
ret = avformat_open_input(&v->ctx, v->segments[0]->url, in_fmt, NULL);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
v->stream_offset = stream_offset;
|
||||
|
@ -261,8 +261,8 @@ typedef struct AVFormatParameters {
|
||||
attribute_deprecated unsigned int mpeg2ts_compute_pcr:1;
|
||||
attribute_deprecated unsigned int initial_pause:1; /**< Do not begin to play the stream
|
||||
immediately (RTSP only). */
|
||||
attribute_deprecated unsigned int prealloced_context:1;
|
||||
#endif
|
||||
unsigned int prealloced_context:1;
|
||||
#if FF_API_PARAMETERS_CODEC_ID
|
||||
attribute_deprecated enum CodecID video_codec_id;
|
||||
attribute_deprecated enum CodecID audio_codec_id;
|
||||
@ -814,10 +814,12 @@ typedef struct AVFormatContext {
#if FF_API_FLAG_RTP_HINT
#define AVFMT_FLAG_RTP_HINT       0x0040 ///< Deprecated, use the -movflags rtphint muxer specific AVOption instead
#endif
#define AVFMT_FLAG_MP4A_LATM      0x0080 ///< Enable RTP MP4A-LATM payload
#define AVFMT_FLAG_CUSTOM_IO      0x0080 ///< The caller has supplied a custom AVIOContext, don't avio_close() it.
#define AVFMT_FLAG_MP4A_LATM      0x8000 ///< Enable RTP MP4A-LATM payload
#define AVFMT_FLAG_SORT_DTS      0x10000 ///< try to interleave outputted packets by dts (using this flag can slow demuxing down)
#define AVFMT_FLAG_PRIV_OPT      0x20000 ///< Enable use of private options by delaying codec open (this could be made default once all code is converted)
#define AVFMT_FLAG_KEEP_SIDE_DATA 0x40000 ///< Don't merge side data but keep it separate.

    int loop_input;

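A sketch of the custom-IO path the new flag documents (my_avio_ctx and the name string are assumptions of the example): preallocate the context and set its pb before opening, and AVFMT_FLAG_CUSTOM_IO is set internally so the library will not avio_close() the caller's context.

    AVFormatContext *ic = avformat_alloc_context();
    ic->pb = my_avio_ctx;                          /* caller-supplied AVIOContext (assumed) */
    avformat_open_input(&ic, "dummy", NULL, NULL); /* filename is only informational here */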
/**
|
||||
@ -1134,11 +1136,13 @@ int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
|
||||
const char *filename, void *logctx,
|
||||
unsigned int offset, unsigned int max_probe_size);
|
||||
|
||||
#if FF_API_FORMAT_PARAMETERS
|
||||
/**
|
||||
* Allocate all the structures needed to read an input stream.
|
||||
* This does not open the needed codecs for decoding the stream[s].
|
||||
* @deprecated use avformat_open_input instead.
|
||||
*/
|
||||
int av_open_input_stream(AVFormatContext **ic_ptr,
|
||||
attribute_deprecated int av_open_input_stream(AVFormatContext **ic_ptr,
|
||||
AVIOContext *pb, const char *filename,
|
||||
AVInputFormat *fmt, AVFormatParameters *ap);
|
||||
|
||||
@ -1153,11 +1157,35 @@ int av_open_input_stream(AVFormatContext **ic_ptr,
|
||||
* @param ap Additional parameters needed when opening the file
|
||||
* (NULL if default).
|
||||
* @return 0 if OK, AVERROR_xxx otherwise
|
||||
*
|
||||
* @deprecated use avformat_open_input instead.
|
||||
*/
|
||||
int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
|
||||
attribute_deprecated int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
|
||||
AVInputFormat *fmt,
|
||||
int buf_size,
|
||||
AVFormatParameters *ap);
|
||||
#endif
|
||||
|
||||
/**
 * Open an input stream and read the header. The codecs are not opened.
 * The stream must be closed with av_close_input_file().
 *
 * @param ps Pointer to user-supplied AVFormatContext (allocated by avformat_alloc_context).
 *           May be a pointer to NULL, in which case an AVFormatContext is allocated by this
 *           function and written into ps.
 *           Note that a user-supplied AVFormatContext will be freed on failure.
 * @param filename Name of the stream to open.
 * @param fmt If non-NULL, this parameter forces a specific input format.
 *            Otherwise the format is autodetected.
 * @param options A dictionary filled with AVFormatContext and demuxer-private options.
 *                On return this parameter will be destroyed and replaced with a dict containing
 *                options that were not found. May be NULL.
 *
 * @return 0 on success, a negative AVERROR on failure.
 *
 * @note If you want to use custom IO, preallocate the format context and set its pb field.
 */
int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options);

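A minimal sketch of the dict-based open (error handling trimmed; the filename and the option used are placeholders):

    AVFormatContext *ic = NULL;
    AVDictionary *opts = NULL;
    av_dict_set(&opts, "sample_rate", "44100", 0);   /* demuxer/context option (illustrative) */
    if (avformat_open_input(&ic, "input.avi", NULL, &opts) < 0)
        return -1;                                   /* a user-supplied context is freed on failure */
    av_dict_free(&opts);                             /* leftover entries = options nobody recognized */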
#if FF_API_ALLOC_FORMAT_CONTEXT
|
||||
/**
|
||||
@ -1451,7 +1479,12 @@ int64_t av_gen_search(AVFormatContext *s, int stream_index,
|
||||
/**
|
||||
* media file output
|
||||
*/
|
||||
#if FF_API_FORMAT_PARAMETERS
|
||||
/**
|
||||
* @deprecated pass the options to avformat_write_header directly.
|
||||
*/
|
||||
attribute_deprecated int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Split a URL string into components.
|
||||
@ -1479,6 +1512,24 @@ void av_url_split(char *proto, int proto_size,
|
||||
char *path, int path_size,
|
||||
const char *url);
|
||||
|
||||
/**
 * Allocate the stream private data and write the stream header to
 * an output media file.
 *
 * @param s Media file handle, must be allocated with avformat_alloc_context().
 *          Its oformat field must be set to the desired output format;
 *          Its pb field must be set to an already opened AVIOContext.
 * @param options An AVDictionary filled with AVFormatContext and muxer-private options.
 *                On return this parameter will be destroyed and replaced with a dict containing
 *                options that were not found. May be NULL.
 *
 * @return 0 on success, negative AVERROR on failure.
 *
 * @see av_opt_find, av_dict_set, avio_open, av_oformat_next.
 */
int avformat_write_header(AVFormatContext *s, AVDictionary **options);

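A sketch of the corresponding muxer-side setup (the output name and format guess are illustrative, stream creation is elided, and return codes are unchecked for brevity):

    AVFormatContext *oc = avformat_alloc_context();
    oc->oformat = av_guess_format(NULL, "out.mkv", NULL);
    avio_open(&oc->pb, "out.mkv", AVIO_WRONLY);
    /* ... add streams and set codec parameters here ... */
    if (avformat_write_header(oc, NULL) < 0)
        return -1;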
#if FF_API_FORMAT_PARAMETERS
|
||||
/**
|
||||
* Allocate the stream private data and write the stream header to an
|
||||
* output media file.
|
||||
@ -1487,8 +1538,11 @@ void av_url_split(char *proto, int proto_size,
|
||||
*
|
||||
* @param s media file handle
|
||||
* @return 0 if OK, AVERROR_xxx on error
|
||||
*
|
||||
* @deprecated use avformat_write_header.
|
||||
*/
|
||||
int av_write_header(AVFormatContext *s);
|
||||
attribute_deprecated int av_write_header(AVFormatContext *s);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Write a packet to an output media file.
|
||||
|
@ -554,8 +554,7 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
|
||||
codec_type = AVMEDIA_TYPE_DATA;
|
||||
break;
|
||||
default:
|
||||
av_log(s, AV_LOG_ERROR, "unknown stream type %X\n", tag1);
|
||||
goto fail;
|
||||
av_log(s, AV_LOG_INFO, "unknown stream type %X\n", tag1);
|
||||
}
|
||||
if(ast->sample_size == 0)
|
||||
st->duration = st->nb_frames;
|
||||
@ -797,7 +796,11 @@ static int read_gab2_sub(AVStream *st, AVPacket *pkt) {
|
||||
if (!(sub_demuxer = av_probe_input_format2(&pd, 1, &score)))
|
||||
goto error;
|
||||
|
||||
if (!av_open_input_stream(&ast->sub_ctx, pb, "", sub_demuxer, NULL)) {
|
||||
if (!(ast->sub_ctx = avformat_alloc_context()))
|
||||
goto error;
|
||||
|
||||
ast->sub_ctx->pb = pb;
|
||||
if (!avformat_open_input(&ast->sub_ctx, "", sub_demuxer, NULL)) {
|
||||
av_read_packet(ast->sub_ctx, &ast->sub_pkt);
|
||||
*st->codec = *ast->sub_ctx->streams[0]->codec;
|
||||
ast->sub_ctx->streams[0]->codec->extradata = NULL;
|
||||
@ -1349,7 +1352,7 @@ static int avi_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp
|
||||
index = av_index_search_timestamp(
|
||||
st2,
|
||||
av_rescale_q(timestamp, st->time_base, st2->time_base) * FFMAX(ast2->sample_size, 1),
|
||||
flags | AVSEEK_FLAG_BACKWARD);
|
||||
flags | AVSEEK_FLAG_BACKWARD | (st2->codec->codec_type != AVMEDIA_TYPE_VIDEO ? AVSEEK_FLAG_ANY : 0));
|
||||
if(index<0)
|
||||
index=0;
|
||||
ast2->seek_pos= st2->index_entries[index].pos;
|
||||
@ -1365,7 +1368,7 @@ static int avi_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp
|
||||
index = av_index_search_timestamp(
|
||||
st2,
|
||||
av_rescale_q(timestamp, st->time_base, st2->time_base) * FFMAX(ast2->sample_size, 1),
|
||||
flags | AVSEEK_FLAG_BACKWARD);
|
||||
flags | AVSEEK_FLAG_BACKWARD | (st2->codec->codec_type != AVMEDIA_TYPE_VIDEO ? AVSEEK_FLAG_ANY : 0));
|
||||
if(index<0)
|
||||
index=0;
|
||||
while(index>0 && st2->index_entries[index-1].pos >= pos_min)
|
||||
@ -1391,7 +1394,7 @@ static int avi_read_close(AVFormatContext *s)
|
||||
if (ast) {
|
||||
if (ast->sub_ctx) {
|
||||
av_freep(&ast->sub_ctx->pb);
|
||||
av_close_input_stream(ast->sub_ctx);
|
||||
av_close_input_file(ast->sub_ctx);
|
||||
}
|
||||
av_free(ast->sub_buffer);
|
||||
av_free_packet(&ast->sub_pkt);
|
||||
|
@ -134,13 +134,15 @@ static int read_header(AVFormatContext *s, AVFormatParameters *ap)
|
||||
if (!ast)
|
||||
return AVERROR(ENOMEM);
|
||||
ast->codec->codec_type = AVMEDIA_TYPE_AUDIO;
|
||||
ast->codec->codec_tag = vst->codec->codec_tag;
|
||||
ast->codec->sample_rate = avio_rl16(pb);
|
||||
av_set_pts_info(ast, 64, 1, ast->codec->sample_rate);
|
||||
flags = avio_rl16(pb);
|
||||
ast->codec->codec_id = flags & BINK_AUD_USEDCT ?
|
||||
CODEC_ID_BINKAUDIO_DCT : CODEC_ID_BINKAUDIO_RDFT;
|
||||
ast->codec->channels = flags & BINK_AUD_STEREO ? 2 : 1;
|
||||
ast->codec->extradata = av_mallocz(1 + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
ast->codec->extradata_size = 1;
|
||||
ast->codec->extradata[0] = vst->codec->codec_tag == MKTAG('B','I','K','b');
|
||||
}
|
||||
|
||||
for (i = 0; i < bink->num_audio_tracks; i++)
|
||||
|
@ -57,6 +57,7 @@ const AVCodecTag ff_mp4_obj_type[] = {
|
||||
{ CODEC_ID_VC1 , 0xA3 },
|
||||
{ CODEC_ID_DIRAC , 0xA4 },
|
||||
{ CODEC_ID_AC3 , 0xA5 },
|
||||
{ CODEC_ID_DTS , 0xA9 }, /* mp4ra.org */
|
||||
{ CODEC_ID_VORBIS , 0xDD }, /* non standard, gpac uses it */
|
||||
{ CODEC_ID_DVD_SUBTITLE, 0xE0 }, /* non standard, see unsupported-embedded-subs-2.mp4 */
|
||||
{ CODEC_ID_QCELP , 0xE1 },
|
||||
@ -244,6 +245,8 @@ const AVCodecTag codec_movaudio_tags[] = {
|
||||
{ CODEC_ID_AAC, MKTAG('m', 'p', '4', 'a') }, /* MPEG-4 AAC */
|
||||
{ CODEC_ID_AC3, MKTAG('a', 'c', '-', '3') }, /* ETSI TS 102 366 Annex F */
|
||||
{ CODEC_ID_AC3, MKTAG('s', 'a', 'c', '3') }, /* Nero Recode */
|
||||
{ CODEC_ID_DTS, MKTAG('d', 't', 's', 'c') }, /* mp4ra.org */
|
||||
{ CODEC_ID_DTS, MKTAG('D', 'T', 'S', ' ') }, /* non standard */
|
||||
|
||||
{ CODEC_ID_AMR_NB, MKTAG('s', 'a', 'm', 'r') }, /* AMR-NB 3gp */
|
||||
{ CODEC_ID_AMR_WB, MKTAG('s', 'a', 'w', 'b') }, /* AMR-WB 3gp */
|
||||
|
@ -527,7 +527,7 @@ static int mkv_write_tracks(AVFormatContext *s)
        AVDictionaryEntry *tag;

        if (!bit_depth)
            bit_depth = av_get_bits_per_sample_fmt(codec->sample_fmt);
            bit_depth = av_get_bytes_per_sample(codec->sample_fmt) << 3;

        if (codec->codec_id == CODEC_ID_AAC)
            get_aac_sample_rates(s, codec, &sample_rate, &output_sample_rate);

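The replacement computes the same value through the byte-size helper, for example:

    /* 16-bit PCM: av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) == 2, so << 3 gives 16 */
    int bit_depth = av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) << 3;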
@ -244,7 +244,7 @@ static int mmsh_open(URLContext *h, const char *uri, int flags)
|
||||
"Pragma: no-cache,rate=1.000000,stream-time=0,"
|
||||
"stream-offset=0:0,request-context=%u,max-duration=0\r\n"
|
||||
CLIENTGUID
|
||||
"Connection: Close\r\n\r\n",
|
||||
"Connection: Close\r\n",
|
||||
host, port, mmsh->request_seq++);
|
||||
ff_http_set_headers(mms->mms_hd, headers);
|
||||
|
||||
@ -284,7 +284,7 @@ static int mmsh_open(URLContext *h, const char *uri, int flags)
|
||||
CLIENTGUID
|
||||
"Pragma: stream-switch-count=%d\r\n"
|
||||
"Pragma: stream-switch-entry=%s\r\n"
|
||||
"Connection: Close\r\n\r\n",
|
||||
"Connection: Close\r\n",
|
||||
host, port, mmsh->request_seq++, mms->stream_num, stream_selection);
|
||||
av_freep(&stream_selection);
|
||||
if (err < 0) {
|
||||
|
@ -2203,7 +2203,7 @@ static int mov_read_elst(MOVContext *c, AVIOContext *pb, MOVAtom atom)
            time = avio_rb64(pb);
        } else {
            duration = avio_rb32(pb); /* segment duration */
            time = avio_rb32(pb); /* media time */
            time = (int32_t)avio_rb32(pb); /* media time */
        }
        avio_rb32(pb); /* Media rate */
        if (i == 0 && time >= -1) {

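The cast matters because an "empty edit" stores a media time of -1, which avio_rb32() hands back as an unsigned 0xFFFFFFFF; sign-extending through int32_t restores the negative value the time >= -1 check relies on:

    int64_t media_time = (int32_t)0xFFFFFFFFu;   /* == -1 after the cast; 4294967295 without it */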
@ -33,6 +33,33 @@ static const char* format_to_name(void* ptr)
|
||||
else return "NULL";
|
||||
}
|
||||
|
||||
static const AVOption *opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
|
||||
{
|
||||
AVFormatContext *s = obj;
|
||||
AVInputFormat *ifmt = NULL;
|
||||
AVOutputFormat *ofmt = NULL;
|
||||
if (s->priv_data) {
|
||||
if ((s->iformat && !s->iformat->priv_class) ||
|
||||
(s->oformat && !s->oformat->priv_class))
|
||||
return NULL;
|
||||
return av_opt_find(s->priv_data, name, unit, opt_flags, search_flags);
|
||||
}
|
||||
|
||||
while ((ifmt = av_iformat_next(ifmt))) {
|
||||
const AVOption *o;
|
||||
|
||||
if (ifmt->priv_class && (o = av_opt_find(&ifmt->priv_class, name, unit, opt_flags, search_flags)))
|
||||
return o;
|
||||
}
|
||||
while ((ofmt = av_oformat_next(ofmt))) {
|
||||
const AVOption *o;
|
||||
|
||||
if (ofmt->priv_class && (o = av_opt_find(&ofmt->priv_class, name, unit, opt_flags, search_flags)))
|
||||
return o;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#define OFFSET(x) offsetof(AVFormatContext,x)
|
||||
#define DEFAULT 0 //should be NAN but it does not work as it is not a constant in glibc as required by ANSI/ISO C
|
||||
//these names are too long to be readable
|
||||
@ -75,6 +102,7 @@ static const AVClass av_format_context_class = {
|
||||
.item_name = format_to_name,
|
||||
.option = options,
|
||||
.version = LIBAVUTIL_VERSION_INT,
|
||||
.opt_find = opt_find,
|
||||
};
|
||||
|
||||
static void avformat_get_context_defaults(AVFormatContext *s)
|
||||
|
@ -523,7 +523,7 @@ rdt_new_context (void)
|
||||
{
|
||||
PayloadContext *rdt = av_mallocz(sizeof(PayloadContext));
|
||||
|
||||
av_open_input_stream(&rdt->rmctx, NULL, "", &ff_rdt_demuxer, NULL);
|
||||
avformat_open_input(&rdt->rmctx, "", &ff_rdt_demuxer, NULL);
|
||||
|
||||
return rdt;
|
||||
}
|
||||
@ -539,7 +539,7 @@ rdt_free_context (PayloadContext *rdt)
|
||||
av_freep(&rdt->rmst[i]);
|
||||
}
|
||||
if (rdt->rmctx)
|
||||
av_close_input_stream(rdt->rmctx);
|
||||
av_close_input_file(rdt->rmctx);
|
||||
av_freep(&rdt->mlti_data);
|
||||
av_freep(&rdt->rmst);
|
||||
av_free(rdt);
|
||||
|
@ -107,10 +107,13 @@ int ff_wms_parse_sdp_a_line(AVFormatContext *s, const char *p)
|
||||
"Failed to fix invalid RTSP-MS/ASF min_pktsize\n");
|
||||
init_packetizer(&pb, buf, len);
|
||||
if (rt->asf_ctx) {
|
||||
av_close_input_stream(rt->asf_ctx);
|
||||
av_close_input_file(rt->asf_ctx);
|
||||
rt->asf_ctx = NULL;
|
||||
}
|
||||
ret = av_open_input_stream(&rt->asf_ctx, &pb, "", &ff_asf_demuxer, NULL);
|
||||
if (!(rt->asf_ctx = avformat_alloc_context()))
|
||||
return AVERROR(ENOMEM);
|
||||
rt->asf_ctx->pb = &pb;
|
||||
ret = avformat_open_input(&rt->asf_ctx, "", &ff_asf_demuxer, NULL);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
av_dict_copy(&s->metadata, rt->asf_ctx->metadata, 0);
|
||||
|
@ -67,7 +67,7 @@ AVFormatContext *ff_rtp_chain_mux_open(AVFormatContext *s, AVStream *st,
|
||||
ffio_fdopen(&rtpctx->pb, handle);
|
||||
} else
|
||||
ffio_open_dyn_packet_buf(&rtpctx->pb, packet_size);
|
||||
ret = av_write_header(rtpctx);
|
||||
ret = avformat_write_header(rtpctx, NULL);
|
||||
|
||||
if (ret) {
|
||||
if (handle) {
|
||||
|
@ -9,13 +9,13 @@
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* Libav is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* License along with Libav; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
|
@ -1234,10 +1234,10 @@ int ff_rtsp_make_setup_request(AVFormatContext *s, const char *host, int port,
|
||||
if (reply->transports[0].source[0]) {
|
||||
ff_url_join(url, sizeof(url), "rtp", NULL,
|
||||
reply->transports[0].source,
|
||||
reply->transports[0].server_port_min, options);
|
||||
reply->transports[0].server_port_min, "%s", options);
|
||||
} else {
|
||||
ff_url_join(url, sizeof(url), "rtp", NULL, host,
|
||||
reply->transports[0].server_port_min, options);
|
||||
reply->transports[0].server_port_min, "%s", options);
|
||||
}
|
||||
if (!(rt->server_type == RTSP_SERVER_WMS && i > 1) &&
|
||||
rtp_set_remote_url(rtsp_st->rtp_handle, url) < 0) {
|
||||
|
@ -52,7 +52,7 @@ static int sap_read_close(AVFormatContext *s)
|
||||
{
|
||||
struct SAPState *sap = s->priv_data;
|
||||
if (sap->sdp_ctx)
|
||||
av_close_input_stream(sap->sdp_ctx);
|
||||
av_close_input_file(sap->sdp_ctx);
|
||||
if (sap->ann_fd)
|
||||
ffurl_close(sap->ann_fd);
|
||||
av_freep(&sap->sdp);
|
||||
@ -156,9 +156,8 @@ static int sap_read_header(AVFormatContext *s,
|
||||
goto fail;
|
||||
}
|
||||
sap->sdp_ctx->max_delay = s->max_delay;
|
||||
ap->prealloced_context = 1;
|
||||
ret = av_open_input_stream(&sap->sdp_ctx, &sap->sdp_pb, "temp.sdp",
|
||||
infmt, ap);
|
||||
sap->sdp_ctx->pb = &sap->sdp_pb;
|
||||
ret = avformat_open_input(&sap->sdp_ctx, "temp.sdp", infmt, NULL);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
if (sap->sdp_ctx->ctx_flags & AVFMTCTX_NOHEADER)
|
||||
|
@ -28,6 +28,7 @@
|
||||
#include "libavcodec/raw.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include "libavutil/dict.h"
|
||||
#include "libavutil/pixdesc.h"
|
||||
#include "metadata.h"
|
||||
#include "id3v2.h"
|
||||
#include "libavutil/avstring.h"
|
||||
@ -460,6 +461,50 @@ static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeDa
|
||||
/************************************************************/
|
||||
/* input media file */
|
||||
|
||||
#if FF_API_FORMAT_PARAMETERS
|
||||
static AVDictionary *convert_format_parameters(AVFormatParameters *ap)
|
||||
{
|
||||
char buf[1024];
|
||||
AVDictionary *opts = NULL;
|
||||
|
||||
if (!ap)
|
||||
return NULL;
|
||||
|
||||
if (ap->time_base.num) {
|
||||
snprintf(buf, sizeof(buf), "%d/%d", ap->time_base.den, ap->time_base.num);
|
||||
av_dict_set(&opts, "framerate", buf, 0);
|
||||
}
|
||||
if (ap->sample_rate) {
|
||||
snprintf(buf, sizeof(buf), "%d", ap->sample_rate);
|
||||
av_dict_set(&opts, "sample_rate", buf, 0);
|
||||
}
|
||||
if (ap->channels) {
|
||||
snprintf(buf, sizeof(buf), "%d", ap->channels);
|
||||
av_dict_set(&opts, "channels", buf, 0);
|
||||
}
|
||||
if (ap->width || ap->height) {
|
||||
snprintf(buf, sizeof(buf), "%dx%d", ap->width, ap->height);
|
||||
av_dict_set(&opts, "video_size", buf, 0);
|
||||
}
|
||||
if (ap->pix_fmt != PIX_FMT_NONE) {
|
||||
av_dict_set(&opts, "pixel_format", av_get_pix_fmt_name(ap->pix_fmt), 0);
|
||||
}
|
||||
if (ap->channel) {
|
||||
snprintf(buf, sizeof(buf), "%d", ap->channel);
|
||||
av_dict_set(&opts, "channel", buf, 0);
|
||||
}
|
||||
if (ap->standard) {
|
||||
av_dict_set(&opts, "standard", ap->standard, 0);
|
||||
}
|
||||
if (ap->mpeg2ts_compute_pcr) {
|
||||
av_dict_set(&opts, "mpeg2ts_compute_pcr", "1", 0);
|
||||
}
|
||||
if (ap->initial_pause) {
|
||||
av_dict_set(&opts, "initial_pause", "1", 0);
|
||||
}
|
||||
return opts;
|
||||
}
|
||||
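To illustrate what this compatibility shim produces for a legacy caller (values are made up; pix_fmt is set to PIX_FMT_NONE so no pixel_format entry is emitted):

    AVFormatParameters ap = { .time_base = {1, 25}, .sample_rate = 44100, .pix_fmt = PIX_FMT_NONE };
    AVDictionary *opts = convert_format_parameters(&ap);
    /* opts now holds framerate="25/1" and sample_rate="44100" */
    av_dict_free(&opts);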
|
||||
/**
 * Open a media file from an IO stream. 'fmt' must be specified.
 */
@ -468,6 +513,7 @@ int av_open_input_stream(AVFormatContext **ic_ptr,
                         AVInputFormat *fmt, AVFormatParameters *ap)
{
    int err;
    AVDictionary *opts;
    AVFormatContext *ic;
    AVFormatParameters default_ap;

@ -475,6 +521,7 @@ int av_open_input_stream(AVFormatContext **ic_ptr,
        ap=&default_ap;
        memset(ap, 0, sizeof(default_ap));
    }
    opts = convert_format_parameters(ap);

    if(!ap->prealloced_context)
        ic = avformat_alloc_context();
@ -484,67 +531,18 @@ int av_open_input_stream(AVFormatContext **ic_ptr,
        err = AVERROR(ENOMEM);
        goto fail;
    }
    ic->iformat = fmt;
    ic->pb = pb;
    ic->duration = AV_NOPTS_VALUE;
    ic->start_time = AV_NOPTS_VALUE;
    av_strlcpy(ic->filename, filename, sizeof(ic->filename));

    /* allocate private data */
    if (fmt->priv_data_size > 0) {
        ic->priv_data = av_mallocz(fmt->priv_data_size);
        if (!ic->priv_data) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
        if (fmt->priv_class) {
            *(const AVClass**)ic->priv_data= fmt->priv_class;
            av_opt_set_defaults(ic->priv_data);
        }
    } else {
        ic->priv_data = NULL;
    }

    // e.g. AVFMT_NOFILE formats will not have a AVIOContext
    if (ic->pb)
        ff_id3v2_read(ic, ID3v2_DEFAULT_MAGIC);

    if (!(ic->flags&AVFMT_FLAG_PRIV_OPT) && ic->iformat->read_header) {
        err = ic->iformat->read_header(ic, ap);
        if (err < 0)
            goto fail;
    }

    if (!(ic->flags&AVFMT_FLAG_PRIV_OPT) && pb && !ic->data_offset)
        ic->data_offset = avio_tell(ic->pb);
    err = avformat_open_input(ic_ptr, filename, fmt, &opts);

#if FF_API_OLD_METADATA
    ff_metadata_demux_compat(ic);
#endif

    ic->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;

    *ic_ptr = ic;
    return 0;
 fail:
    if (ic) {
        int i;
        av_freep(&ic->priv_data);
        for(i=0;i<ic->nb_streams;i++) {
            AVStream *st = ic->streams[i];
            if (st) {
                av_free(st->priv_data);
                av_free(st->codec->extradata);
                av_free(st->codec);
                av_free(st->info);
            }
            av_free(st);
        }
    }
    av_free(ic);
    *ic_ptr = NULL;
 fail:
    av_dict_free(&opts);
    return err;
}
#endif

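The hunk above turns av_open_input_stream() into a thin compatibility wrapper: the legacy AVFormatParameters are converted to an AVDictionary and handed to the new avformat_open_input() entry point. A minimal caller-side sketch of the dictionary-based open path follows; the file name and option values are illustrative, and "video_size"/"framerate" are private options that only some demuxers (raw video, capture devices) accept.

#include <libavformat/avformat.h>
#include <libavutil/dict.h>

/* Sketch: open an input with per-demuxer options passed as an AVDictionary
 * instead of the deprecated AVFormatParameters struct. */
static int open_with_options(const char *filename)
{
    AVFormatContext *ic = NULL;     /* allocated by avformat_open_input() */
    AVDictionary *opts = NULL;
    int ret;

    av_register_all();

    /* hypothetical option values */
    av_dict_set(&opts, "video_size", "640x480", 0);
    av_dict_set(&opts, "framerate", "25/1", 0);

    ret = avformat_open_input(&ic, filename, NULL, &opts);

    /* entries still present in opts were not consumed by any option */
    av_dict_free(&opts);
    if (ret < 0)
        return ret;

    av_close_input_file(ic);
    return 0;
}

Options that no matching AVOption consumes are left in the dictionary, so a caller can inspect it after the call to warn about unrecognized keys before freeing it.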
int av_demuxer_open(AVFormatContext *ic, AVFormatParameters *ap){
    int err;
@ -633,68 +631,124 @@ int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
    return ret;
}

#if FF_API_FORMAT_PARAMETERS
int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
                       AVInputFormat *fmt,
                       int buf_size,
                       AVFormatParameters *ap)
{
    int err;
    AVProbeData probe_data, *pd = &probe_data;
    AVIOContext *pb = NULL;
    void *logctx= ap && ap->prealloced_context ? *ic_ptr : NULL;
    AVDictionary *opts = convert_format_parameters(ap);

    pd->filename = "";
    if (filename)
        pd->filename = filename;
    pd->buf = NULL;
    pd->buf_size = 0;
    if (!ap || !ap->prealloced_context)
        *ic_ptr = NULL;

    if (!fmt) {
        /* guess format if no file can be opened */
        fmt = av_probe_input_format(pd, 0);
    err = avformat_open_input(ic_ptr, filename, fmt, &opts);

    av_dict_free(&opts);
    return err;
}
#endif

/* open input file and probe the format if necessary */
static int init_input(AVFormatContext *s, const char *filename)
{
    int ret;
    AVProbeData pd = {filename, NULL, 0};

    if (s->pb) {
        s->flags |= AVFMT_FLAG_CUSTOM_IO;
        if (!s->iformat)
            return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0);
        else if (s->iformat->flags & AVFMT_NOFILE)
            return AVERROR(EINVAL);
        return 0;
    }

    /* Do not open file if the format does not need it. XXX: specific
       hack needed to handle RTSP/TCP */
    if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
        /* if no file needed do not try to open one */
        if ((err=avio_open(&pb, filename, AVIO_RDONLY)) < 0) {
            goto fail;
        }
        if (buf_size > 0) {
            ffio_set_buf_size(pb, buf_size);
        }
        if (!fmt && (err = av_probe_input_buffer(pb, &fmt, filename, logctx, 0, logctx ? (*ic_ptr)->probesize : 0)) < 0) {
            goto fail;
        }
    }
    if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
        (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
        return 0;

    /* if still no format found, error */
    if (!fmt) {
        err = AVERROR_INVALIDDATA;
    if ((ret = avio_open(&s->pb, filename, AVIO_RDONLY)) < 0)
        return ret;
    if (s->iformat)
        return 0;
    return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0);
}

int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
{
    AVFormatContext *s = *ps;
    int ret = 0;
    AVFormatParameters ap = { 0 };
    AVDictionary *tmp = NULL;

    if (!s && !(s = avformat_alloc_context()))
        return AVERROR(ENOMEM);
    if (fmt)
        s->iformat = fmt;

    if (options)
        av_dict_copy(&tmp, *options, 0);

    if ((ret = av_opt_set_dict(s, &tmp)) < 0)
        goto fail;

    if ((ret = init_input(s, filename)) < 0)
        goto fail;
    }

    /* check filename in case an image number is expected */
    if (fmt->flags & AVFMT_NEEDNUMBER) {
    if (s->iformat->flags & AVFMT_NEEDNUMBER) {
        if (!av_filename_number_test(filename)) {
            err = AVERROR_NUMEXPECTED;
            ret = AVERROR(EINVAL);
            goto fail;
        }
    }
    err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
    if (err)
        goto fail;
    return 0;
 fail:
    av_freep(&pd->buf);
    if (pb)
        avio_close(pb);
    if (ap && ap->prealloced_context)
        av_free(*ic_ptr);
    *ic_ptr = NULL;
    return err;

    s->duration = s->start_time = AV_NOPTS_VALUE;
    av_strlcpy(s->filename, filename, sizeof(s->filename));

    /* allocate private data */
    if (s->iformat->priv_data_size > 0) {
        if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        if (s->iformat->priv_class) {
            *(const AVClass**)s->priv_data = s->iformat->priv_class;
            av_opt_set_defaults(s->priv_data);
            if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
                goto fail;
        }
    }

    /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
    if (s->pb)
        ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC);

    if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
        if ((ret = s->iformat->read_header(s, &ap)) < 0)
            goto fail;

    if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset)
        s->data_offset = avio_tell(s->pb);

    s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;

    if (options) {
        av_dict_free(options);
        *options = tmp;
    }
    *ps = s;
    return 0;

fail:
    av_dict_free(&tmp);
    if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
        avio_close(s->pb);
    avformat_free_context(s);
    *ps = NULL;
    return ret;
}

/*******************************************************/
@ -2694,7 +2748,8 @@ void avformat_free_context(AVFormatContext *s)

void av_close_input_file(AVFormatContext *s)
{
    AVIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
    AVIOContext *pb = (s->iformat->flags & AVFMT_NOFILE) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ?
                      NULL : s->pb;
    av_close_input_stream(s);
    if (pb)
        avio_close(pb);
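This change makes av_close_input_file() leave the AVIOContext alone when AVFMT_FLAG_CUSTOM_IO is set, i.e. when the caller supplied its own I/O context rather than letting lavf open the file. A rough sketch of the caller-owned-I/O pattern this protects; the buffer size, read callback, and cleanup details are illustrative assumptions, not code from the commit.

#include <libavformat/avformat.h>

/* Hypothetical read callback: pull bytes from some caller-owned source. */
static int read_cb(void *opaque, uint8_t *buf, int buf_size)
{
    /* fill buf from opaque here; return bytes read, or a negative AVERROR */
    return AVERROR_EOF;
}

static int open_with_custom_io(void *my_source)
{
    AVFormatContext *ic = avformat_alloc_context();
    unsigned char *iobuf = av_malloc(4096);
    AVIOContext *pb;
    int ret;

    if (!ic || !iobuf)
        return AVERROR(ENOMEM);

    /* caller-created I/O context; init_input() flags it as AVFMT_FLAG_CUSTOM_IO */
    pb = avio_alloc_context(iobuf, 4096, 0, my_source, read_cb, NULL, NULL);
    ic->pb = pb;

    if ((ret = avformat_open_input(&ic, "", NULL, NULL)) < 0)
        return ret;

    av_close_input_file(ic);   /* with this patch, pb is left for the caller */
    av_free(pb->buffer);       /* ...so the caller frees its own AVIOContext */
    av_free(pb);
    return 0;
}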
@ -2812,6 +2867,7 @@ AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int6
/************************************************************/
/* output media file */

#if FF_API_FORMAT_PARAMETERS
int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
{
    if (s->oformat->priv_data_size > 0) {
@ -2827,6 +2883,7 @@ int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)

    return 0;
}
#endif

int avformat_alloc_output_context2(AVFormatContext **avctx, AVOutputFormat *oformat,
                                   const char *format, const char *filename)
@ -2924,15 +2981,29 @@ static int validate_codec_tag(AVFormatContext *s, AVStream *st)
    return 1;
}

#if FF_API_FORMAT_PARAMETERS
int av_write_header(AVFormatContext *s)
{
    int ret, i;
    return avformat_write_header(s, NULL);
}
#endif

int avformat_write_header(AVFormatContext *s, AVDictionary **options)
{
    int ret = 0, i;
    AVStream *st;
    AVDictionary *tmp = NULL;

    if (options)
        av_dict_copy(&tmp, *options, 0);
    if ((ret = av_opt_set_dict(s, &tmp)) < 0)
        goto fail;

    // some sanity checks
    if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) {
        av_log(s, AV_LOG_ERROR, "no streams\n");
        return AVERROR(EINVAL);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    for(i=0;i<s->nb_streams;i++) {
@ -2942,7 +3013,8 @@ int av_write_header(AVFormatContext *s)
        case AVMEDIA_TYPE_AUDIO:
            if(st->codec->sample_rate<=0){
                av_log(s, AV_LOG_ERROR, "sample rate not set\n");
                return AVERROR(EINVAL);
                ret = AVERROR(EINVAL);
                goto fail;
            }
            if(!st->codec->block_align)
                st->codec->block_align = st->codec->channels *
@ -2951,15 +3023,18 @@ int av_write_header(AVFormatContext *s)
        case AVMEDIA_TYPE_VIDEO:
            if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
                av_log(s, AV_LOG_ERROR, "time base not set\n");
                return AVERROR(EINVAL);
                ret = AVERROR(EINVAL);
                goto fail;
            }
            if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
                av_log(s, AV_LOG_ERROR, "dimensions not set\n");
                return AVERROR(EINVAL);
                ret = AVERROR(EINVAL);
                goto fail;
            }
            if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
                av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
                return AVERROR(EINVAL);
                ret = AVERROR(EINVAL);
                goto fail;
            }
            break;
        }
@ -2976,7 +3051,8 @@ int av_write_header(AVFormatContext *s)
                av_log(s, AV_LOG_ERROR,
                       "Tag %s/0x%08x incompatible with output codec id '%d'\n",
                       tagbuf, st->codec->codec_tag, st->codec->codec_id);
                return AVERROR_INVALIDDATA;
                ret = AVERROR_INVALIDDATA;
                goto fail;
            }
        }else
            st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
@ -2989,8 +3065,16 @@ int av_write_header(AVFormatContext *s)

    if (!s->priv_data && s->oformat->priv_data_size > 0) {
        s->priv_data = av_mallocz(s->oformat->priv_data_size);
        if (!s->priv_data)
            return AVERROR(ENOMEM);
        if (!s->priv_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        if (s->oformat->priv_class) {
            *(const AVClass**)s->priv_data= s->oformat->priv_class;
            av_opt_set_defaults(s->priv_data);
            if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
                goto fail;
        }
    }

#if FF_API_OLD_METADATA
@ -3005,7 +3089,7 @@ int av_write_header(AVFormatContext *s)
    if(s->oformat->write_header){
        ret = s->oformat->write_header(s);
        if (ret < 0)
            return ret;
            goto fail;
    }

    /* init PTS generation */
@ -3024,12 +3108,22 @@ int av_write_header(AVFormatContext *s)
            break;
        }
        if (den != AV_NOPTS_VALUE) {
            if (den <= 0)
                return AVERROR_INVALIDDATA;
            if (den <= 0) {
                ret = AVERROR_INVALIDDATA;
                goto fail;
            }
            av_frac_init(&st->pts, 0, 0, den);
        }
    }

    if (options) {
        av_dict_free(options);
        *options = tmp;
    }
    return 0;
fail:
    av_dict_free(&tmp);
    return ret;
}

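avformat_write_header() is the dictionary-aware replacement for av_write_header(): muxer-level and muxer-private AVOptions are taken from the passed dictionary, and consumed entries are removed from it before it is handed back. A small muxing-side sketch under that assumption; the option name and value are illustrative (a private option of a particular muxer), not something every output format accepts.

#include <libavformat/avformat.h>
#include <libavutil/dict.h>

/* Sketch: pass muxer options through the new avformat_write_header(). */
static int write_header_with_opts(AVFormatContext *oc)
{
    AVDictionary *opts = NULL;
    int ret;

    /* hypothetical private option of the target muxer */
    av_dict_set(&opts, "mpegts_transport_stream_id", "0x42", 0);

    ret = avformat_write_header(oc, &opts);

    /* anything left in opts was not recognized by this muxer */
    if (av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))
        av_log(oc, AV_LOG_WARNING, "some muxer options were not used\n");

    av_dict_free(&opts);
    return ret;
}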
//FIXME merge with compute_pkt_fields
@ -3328,8 +3422,13 @@ static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)

        av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
        while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
            if(strcmp("language", tag->key))
                av_log(ctx, AV_LOG_INFO, "%s %-16s: %s\n", indent, tag->key, tag->value);
            if(strcmp("language", tag->key)){
                char tmp[256];
                int i;
                av_strlcpy(tmp, tag->value, sizeof(tmp));
                for(i=0; i<strlen(tmp); i++) if(tmp[i]==0xd) tmp[i]=' ';
                av_log(ctx, AV_LOG_INFO, "%s %-16s: %s\n", indent, tag->key, tmp);
            }
        }
    }
}
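The dump_metadata() change sanitizes tag values before logging: carriage returns (0x0d), which occur in multi-line metadata values, are replaced by spaces so av_dump_format() output stays on one line per tag. The same filtering in isolation, as a standalone sketch with a made-up value:

#include <stdio.h>
#include <string.h>

int main(void)
{
    char value[] = "line one\rline two\rline three";
    size_t i;

    /* mirror the loop added to dump_metadata(): flatten CRs to spaces */
    for (i = 0; i < strlen(value); i++)
        if (value[i] == 0x0d)
            value[i] = ' ';

    printf("%-16s: %s\n", "comment", value);
    return 0;
}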
@ -24,7 +24,7 @@
|
||||
#include "libavutil/avutil.h"
|
||||
|
||||
#define LIBAVFORMAT_VERSION_MAJOR 52
|
||||
#define LIBAVFORMAT_VERSION_MINOR 109
|
||||
#define LIBAVFORMAT_VERSION_MINOR 110
|
||||
#define LIBAVFORMAT_VERSION_MICRO 0
|
||||
|
||||
#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \
|
||||
|
@ -40,7 +40,7 @@
|
||||
#define AV_VERSION(a, b, c) AV_VERSION_DOT(a, b, c)
|
||||
|
||||
#define LIBAVUTIL_VERSION_MAJOR 50
|
||||
#define LIBAVUTIL_VERSION_MINOR 42
|
||||
#define LIBAVUTIL_VERSION_MINOR 43
|
||||
#define LIBAVUTIL_VERSION_MICRO 0
|
||||
|
||||
#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
|
||||
@ -64,6 +64,9 @@
|
||||
#ifndef FF_API_GET_BITS_PER_SAMPLE_FMT
|
||||
#define FF_API_GET_BITS_PER_SAMPLE_FMT (LIBAVUTIL_VERSION_MAJOR < 52)
|
||||
#endif
|
||||
#ifndef FF_API_FIND_OPT
|
||||
#define FF_API_FIND_OPT (LIBAVUTIL_VERSION_MAJOR < 52)
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Return the LIBAVUTIL_VERSION_INT constant.
|
||||
|
@ -19,6 +19,7 @@
 */

#include <strings.h>
#include "avstring.h"
#include "dict.h"
#include "internal.h"
#include "mem.h"
@ -51,6 +52,7 @@ int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags
{
    AVDictionary *m = *pm;
    AVDictionaryEntry *tag = av_dict_get(m, key, NULL, flags);
    char *oldval = NULL;

    if(!m)
        m = *pm = av_mallocz(sizeof(*m));
@ -58,7 +60,10 @@ int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags
    if(tag) {
        if (flags & AV_DICT_DONT_OVERWRITE)
            return 0;
        av_free(tag->value);
        if (flags & AV_DICT_APPEND)
            oldval = tag->value;
        else
            av_free(tag->value);
        av_free(tag->key);
        *tag = m->elems[--m->count];
    } else {
@ -75,6 +80,12 @@ int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags
        m->elems[m->count].key = av_strdup(key );
        if (flags & AV_DICT_DONT_STRDUP_VAL) {
            m->elems[m->count].value = value;
        } else if (oldval && flags & AV_DICT_APPEND) {
            int len = strlen(oldval) + strlen(value) + 1;
            if (!(oldval = av_realloc(oldval, len)))
                return AVERROR(ENOMEM);
            av_strlcat(oldval, value, len);
            m->elems[m->count].value = oldval;
        } else
            m->elems[m->count].value = av_strdup(value);
        m->count++;
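This hunk teaches av_dict_set() a new AV_DICT_APPEND flag: instead of freeing the old value, it reallocates it and concatenates the new string onto it, with no separator inserted. A quick usage sketch (key and values are arbitrary examples):

#include <libavutil/dict.h>
#include <stdio.h>

int main(void)
{
    AVDictionary *d = NULL;
    AVDictionaryEntry *e;

    av_dict_set(&d, "comment", "first", 0);
    /* AV_DICT_APPEND concatenates; any delimiter must be part of the value */
    av_dict_set(&d, "comment", ", second", AV_DICT_APPEND);

    e = av_dict_get(d, "comment", NULL, 0);
    printf("%s\n", e ? e->value : "(none)");   /* prints "first, second" */

    av_dict_free(&d);
    return 0;
}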
@ -29,6 +29,8 @@
#define AV_DICT_DONT_STRDUP_KEY 4
#define AV_DICT_DONT_STRDUP_VAL 8
#define AV_DICT_DONT_OVERWRITE 16   ///< Don't overwrite existing entries.
#define AV_DICT_APPEND         32   /**< If the entry already exists, append to it. Note that no
                                         delimiter is added, the strings are simply concatenated. */

typedef struct {
    char *key;
@ -76,7 +76,7 @@ double av_strtod(const char *numstr, char **tail)
    double d;
    char *next;
    if(numstr[0]=='0' && (numstr[1]|0x20)=='x') {
        d = strtol(numstr, &next, 16);
        d = strtoul(numstr, &next, 16);
    } else
        d = strtod(numstr, &next);
    /* if parsing succeeded, check for and interpret postfixes */
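Switching the hexadecimal branch of av_strtod() from strtol() to strtoul() avoids clamping at LONG_MAX on platforms where long is 32 bits: a constant such as 0xFFFFFFFF now converts to 4294967295.0 instead of saturating at 2147483647.0. A standalone sketch of the difference (not code from the commit; on a 64-bit long both calls agree):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const char *s = "0xFFFFFFFF";

    /* with a 32-bit long, strtol() saturates at LONG_MAX (2147483647) */
    double via_strtol  = (double)strtol(s, NULL, 16);
    /* strtoul() keeps the full unsigned range, as av_strtod() now does */
    double via_strtoul = (double)strtoul(s, NULL, 16);

    printf("strtol:  %.0f\nstrtoul: %.0f\n", via_strtol, via_strtoul);
    return 0;
}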
@ -229,11 +229,11 @@ union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias;
#endif

#ifndef AV_RB32
#   define AV_RB32(x)                      \
    ((((const uint8_t*)(x))[0] << 24) |    \
     (((const uint8_t*)(x))[1] << 16) |    \
     (((const uint8_t*)(x))[2] <<  8) |    \
      ((const uint8_t*)(x))[3])
#   define AV_RB32(x)                                \
    (((uint32_t)((const uint8_t*)(x))[0] << 24) |    \
               (((const uint8_t*)(x))[1] << 16) |    \
               (((const uint8_t*)(x))[2] <<  8) |    \
                ((const uint8_t*)(x))[3])
#endif
#ifndef AV_WB32
#   define AV_WB32(p, d) do {              \
@ -245,11 +245,11 @@ union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias;
#endif

#ifndef AV_RL32
#   define AV_RL32(x)                      \
    ((((const uint8_t*)(x))[3] << 24) |    \
     (((const uint8_t*)(x))[2] << 16) |    \
     (((const uint8_t*)(x))[1] <<  8) |    \
      ((const uint8_t*)(x))[0])
#   define AV_RL32(x)                                \
    (((uint32_t)((const uint8_t*)(x))[3] << 24) |    \
               (((const uint8_t*)(x))[2] << 16) |    \
               (((const uint8_t*)(x))[1] <<  8) |    \
                ((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL32
#   define AV_WL32(p, d) do {              \
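The AV_RB32/AV_RL32 change casts the byte that lands in bits 24-31 to uint32_t before shifting. Without the cast the uint8_t is promoted to a signed int, so a byte of 0x80 or more shifted left by 24 lands in the sign bit, which is undefined behaviour and also makes the macro expand to a negative int. A small illustration of the same idea (standalone sketch, not taken from the header):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint8_t buf[4] = { 0xFF, 0x00, 0x00, 0x01 };

    /* old pattern: buf[0] promotes to int, 0xFF << 24 overflows a signed int */
    int32_t  risky = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];

    /* patched pattern: force the high byte into an unsigned 32-bit value first */
    uint32_t safe  = ((uint32_t)buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];

    printf("risky: %d\nsafe:  %u\n", risky, safe);   /* safe prints 4278190081 */
    return 0;
}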