Merge branch 'master' into release/5.0

This is necessary to have the recent DOVI additions
in the 5.0 release.

Merged-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
Andreas Rheinhardt 2022-01-04 13:43:52 +01:00
commit 312060ecfc
126 changed files with 2800 additions and 1430 deletions


@ -90,7 +90,8 @@ SUBDIR_VARS := CLEANFILES FFLIBS HOSTPROGS TESTPROGS TOOLS \
ARMV5TE-OBJS ARMV6-OBJS ARMV8-OBJS VFP-OBJS NEON-OBJS \
ALTIVEC-OBJS VSX-OBJS MMX-OBJS X86ASM-OBJS \
MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSP-OBJS MSA-OBJS \
MMI-OBJS LSX-OBJS LASX-OBJS OBJS SLIBOBJS HOSTOBJS TESTOBJS
MMI-OBJS LSX-OBJS LASX-OBJS OBJS SLIBOBJS SHLIBOBJS \
STLIBOBJS HOSTOBJS TESTOBJS
define RESET
$(1) :=

configure

@ -2434,6 +2434,7 @@ CONFIG_EXTRA="
cbs_vp9
dirac_parse
dnn
dovi_rpu
dvprofile
exif
faandct
@ -2475,6 +2476,7 @@ CONFIG_EXTRA="
mpegaudio
mpegaudiodsp
mpegaudioheader
mpeg4audio
mpegvideo
mpegvideoenc
mss34dsp
@ -2706,6 +2708,7 @@ cbs_mpeg2_select="cbs"
cbs_vp9_select="cbs"
dct_select="rdft"
dirac_parse_select="golomb"
dovi_rpu_select="golomb"
dnn_suggest="libtensorflow libopenvino"
dnn_deps="avformat swscale"
error_resilience_select="me_cmp"
@ -2717,6 +2720,7 @@ h264dsp_select="startcode"
hevcparse_select="atsc_a53 golomb"
frame_thread_encoder_deps="encoders threads"
intrax8_select="blockdsp idctdsp"
iso_media_select="mpeg4audio"
mdct_select="fft"
mdct15_select="fft"
me_cmp_select="fdctdsp idctdsp pixblockdsp"
@ -2729,8 +2733,8 @@ vc1dsp_select="h264chroma qpeldsp startcode"
rdft_select="fft"
# decoders / encoders
aac_decoder_select="adts_header mdct15 mdct sinewin"
aac_fixed_decoder_select="adts_header mdct"
aac_decoder_select="adts_header mdct15 mdct mpeg4audio sinewin"
aac_fixed_decoder_select="adts_header mdct mpeg4audio"
aac_encoder_select="audio_frame_queue iirfilter lpc mdct sinewin"
aac_latm_decoder_select="aac_decoder aac_latm_parser"
ac3_decoder_select="ac3_parser ac3dsp bswapdsp fmtconvert mdct"
@ -2742,7 +2746,7 @@ adpcm_g722_decoder_select="g722dsp"
adpcm_g722_encoder_select="g722dsp"
aic_decoder_select="golomb idctdsp"
alac_encoder_select="lpc"
als_decoder_select="bswapdsp"
als_decoder_select="bswapdsp mpeg4audio"
amrnb_decoder_select="lsp"
amrwb_decoder_select="lsp"
amv_decoder_select="sp5x_decoder exif"
@ -2824,7 +2828,7 @@ h264_decoder_suggest="error_resilience"
hap_decoder_select="snappy texturedsp"
hap_encoder_deps="libsnappy"
hap_encoder_select="texturedspenc"
hevc_decoder_select="atsc_a53 bswapdsp cabac golomb hevcparse videodsp"
hevc_decoder_select="atsc_a53 bswapdsp cabac dovi_rpu golomb hevcparse videodsp"
huffyuv_decoder_select="bswapdsp huffyuvdsp llviddsp"
huffyuv_encoder_select="bswapdsp huffman huffyuvencdsp llvidencdsp"
hymt_decoder_select="huffyuv_decoder"
@ -2862,8 +2866,8 @@ mp3_decoder_select="mpegaudio"
mp3adu_decoder_select="mpegaudio"
mp3adufloat_decoder_select="mpegaudio"
mp3float_decoder_select="mpegaudio"
mp3on4_decoder_select="mpegaudio"
mp3on4float_decoder_select="mpegaudio"
mp3on4_decoder_select="mpegaudio mpeg4audio"
mp3on4float_decoder_select="mpegaudio mpeg4audio"
mpc7_decoder_select="bswapdsp mpegaudiodsp"
mpc8_decoder_select="mpegaudiodsp"
mpegvideo_decoder_select="mpegvideo"
@ -3240,7 +3244,7 @@ wmv3_crystalhd_decoder_select="crystalhd"
av1_qsv_decoder_select="qsvdec"
# parsers
aac_parser_select="adts_header"
aac_parser_select="adts_header mpeg4audio"
av1_parser_select="cbs_av1"
h264_parser_select="atsc_a53 golomb h264dsp h264parse"
hevc_parser_select="hevcparse"
@ -3250,7 +3254,7 @@ mpeg4video_parser_select="h263dsp mpegvideo qpeldsp"
vc1_parser_select="vc1dsp"
# bitstream_filters
aac_adtstoasc_bsf_select="adts_header"
aac_adtstoasc_bsf_select="adts_header mpeg4audio"
av1_frame_merge_bsf_select="cbs_av1"
av1_frame_split_bsf_select="cbs_av1"
av1_metadata_bsf_select="cbs_av1"
@ -3385,6 +3389,7 @@ videotoolbox_encoder_deps="videotoolbox VTCompressionSessionPrepareToEncodeFrame
# demuxers / muxers
ac3_demuxer_select="ac3_parser"
act_demuxer_select="riffdec"
adts_muxer_select="mpeg4audio"
aiff_muxer_select="iso_media"
asf_demuxer_select="riffdec"
asf_o_demuxer_select="riffdec"
@ -3420,11 +3425,11 @@ imf_demuxer_select="mxf_demuxer"
ipod_muxer_select="mov_muxer"
ismv_muxer_select="mov_muxer"
ivf_muxer_select="av1_metadata_bsf vp9_superframe_bsf"
latm_muxer_select="aac_adtstoasc_bsf"
latm_muxer_select="aac_adtstoasc_bsf mpeg4audio"
matroska_audio_muxer_select="matroska_muxer"
matroska_demuxer_select="riffdec"
matroska_demuxer_suggest="bzlib lzo zlib"
matroska_muxer_select="riffenc vp9_superframe_bsf aac_adtstoasc_bsf"
matroska_muxer_select="mpeg4audio riffenc vp9_superframe_bsf aac_adtstoasc_bsf"
mlp_demuxer_select="mlp_parser"
mmf_muxer_select="riffenc"
mov_demuxer_select="iso_media riffdec"
@ -3436,7 +3441,7 @@ mp4_muxer_select="mov_muxer"
mpegts_demuxer_select="iso_media"
mpegts_muxer_select="ac3_parser adts_muxer latm_muxer h264_mp4toannexb_bsf hevc_mp4toannexb_bsf"
mpegtsraw_demuxer_select="mpegts_demuxer"
mxf_muxer_select="golomb pcm_rechunk_bsf"
mxf_muxer_select="pcm_rechunk_bsf"
mxf_d10_muxer_select="mxf_muxer"
mxf_opatom_muxer_select="mxf_muxer"
nut_muxer_select="riffenc"
@ -3449,9 +3454,8 @@ ogv_muxer_select="ogg_muxer"
opus_muxer_select="ogg_muxer"
psp_muxer_select="mov_muxer"
rtp_demuxer_select="sdp_demuxer"
rtp_muxer_select="golomb jpegtables"
rtp_mpegts_muxer_select="mpegts_muxer rtp_muxer"
rtpdec_select="asf_demuxer jpegtables mov_demuxer mpegts_demuxer rm_demuxer rtp_protocol srtp"
rtpdec_select="asf_demuxer mov_demuxer mpegts_demuxer rm_demuxer rtp_protocol srtp"
rtsp_demuxer_select="http_protocol rtpdec"
rtsp_muxer_select="rtp_muxer http_protocol rtp_protocol rtpenc_chain"
sap_demuxer_select="sdp_demuxer"
@ -3472,7 +3476,7 @@ w64_muxer_select="wav_muxer"
wav_demuxer_select="riffdec"
wav_muxer_select="riffenc"
webm_chunk_muxer_select="webm_muxer"
webm_muxer_select="riffenc"
webm_muxer_select="mpeg4audio riffenc"
webm_dash_manifest_demuxer_select="matroska_demuxer"
wtv_demuxer_select="mpegts_demuxer riffdec"
wtv_muxer_select="mpegts_muxer riffenc"


@ -14,6 +14,9 @@ libavutil: 2021-04-27
API changes, most recent first:
2022-01-04 - 78dc21b123e - lavu 57.16.100 - frame.h
Add AV_FRAME_DATA_DOVI_METADATA.
2022-01-03 - 70f318e6b6c - lavf 59.13.100 - avformat.h
Add AVFMT_EXPERIMENTAL flag.


@ -157,6 +157,8 @@ include $(SRC_PATH)/ffbuild/arch.mak
OBJS += $(OBJS-yes)
SLIBOBJS += $(SLIBOBJS-yes)
SHLIBOBJS += $(SHLIBOBJS-yes)
STLIBOBJS += $(STLIBOBJS-yes)
FFLIBS := $($(NAME)_FFLIBS) $(FFLIBS-yes) $(FFLIBS)
TESTPROGS += $(TESTPROGS-yes)
@ -165,6 +167,8 @@ FFEXTRALIBS := $(LDLIBS:%=$(LD_LIB)) $(foreach lib,EXTRALIBS-$(NAME) $(FFLIBS:%=
OBJS := $(sort $(OBJS:%=$(SUBDIR)%))
SLIBOBJS := $(sort $(SLIBOBJS:%=$(SUBDIR)%))
SHLIBOBJS := $(sort $(SHLIBOBJS:%=$(SUBDIR)%))
STLIBOBJS := $(sort $(STLIBOBJS:%=$(SUBDIR)%))
TESTOBJS := $(TESTOBJS:%=$(SUBDIR)tests/%) $(TESTPROGS:%=$(SUBDIR)tests/%.o)
TESTPROGS := $(TESTPROGS:%=$(SUBDIR)tests/%$(EXESUF))
HOSTOBJS := $(HOSTPROGS:%=$(SUBDIR)%.o)
@ -200,10 +204,12 @@ $(OBJS): | $(sort $(dir $(OBJS)))
$(HOBJS): | $(sort $(dir $(HOBJS)))
$(HOSTOBJS): | $(sort $(dir $(HOSTOBJS)))
$(SLIBOBJS): | $(sort $(dir $(SLIBOBJS)))
$(SHLIBOBJS): | $(sort $(dir $(SHLIBOBJS)))
$(STLIBOBJS): | $(sort $(dir $(STLIBOBJS)))
$(TESTOBJS): | $(sort $(dir $(TESTOBJS)))
$(TOOLOBJS): | tools
OUTDIRS := $(OUTDIRS) $(dir $(OBJS) $(HOBJS) $(HOSTOBJS) $(SLIBOBJS) $(TESTOBJS))
OUTDIRS := $(OUTDIRS) $(dir $(OBJS) $(HOBJS) $(HOSTOBJS) $(SLIBOBJS) $(SHLIBOBJS) $(STLIBOBJS) $(TESTOBJS))
CLEANSUFFIXES = *.d *.gcda *.gcno *.h.c *.ho *.map *.o *.pc *.ptx *.ptx.gz *.ptx.c *.ver *.version *$(DEFAULT_X86ASMD).asm *~ *.ilk *.pdb
LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a
@ -215,4 +221,4 @@ endef
$(eval $(RULES))
-include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d) $(SLIBOBJS:.o=.d)) $(OBJS:.o=$(DEFAULT_X86ASMD).d)
-include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d) $(SHLIBOBJS:.o=.d) $(STLIBOBJS:.o=.d) $(SLIBOBJS:.o=.d)) $(OBJS:.o=$(DEFAULT_X86ASMD).d)


@ -14,10 +14,26 @@ INSTHEADERS := $(INSTHEADERS) $(HEADERS:%=$(SUBDIR)%)
all-$(CONFIG_STATIC): $(SUBDIR)$(LIBNAME) $(SUBDIR)lib$(FULLNAME).pc
all-$(CONFIG_SHARED): $(SUBDIR)$(SLIBNAME) $(SUBDIR)lib$(FULLNAME).pc
LIBOBJS := $(OBJS) $(SUBDIR)%.h.o $(TESTOBJS)
LIBOBJS := $(OBJS) $(SHLIBOBJS) $(STLIBOBJS) $(SUBDIR)%.h.o $(TESTOBJS)
$(LIBOBJS) $(LIBOBJS:.o=.s) $(LIBOBJS:.o=.i): CPPFLAGS += -DHAVE_AV_CONFIG_H
$(SUBDIR)$(LIBNAME): $(OBJS)
ifdef CONFIG_SHARED
# In case both shared libs and static libs are enabled, it can happen
# that a user might want to link e.g. libavformat statically, but
# libavcodec and the other libs dynamically. In this case
# libavformat won't be able to access libavcodec's internal symbols,
# so that they have to be duplicated into the archive just like
# for purely shared builds.
# Test programs are always statically linked against their library
# to be able to access their library's internals, even with shared builds.
# Yet linking against dependent libraries still uses dynamic linking.
# This means that we are in the scenario described above.
# In case only static libs are used, the linker will only use
# one of these copies; this depends on the duplicated object files
# containing exactly the same symbols.
OBJS += $(SHLIBOBJS)
endif
$(SUBDIR)$(LIBNAME): $(OBJS) $(STLIBOBJS)
$(RM) $@
$(AR) $(ARFLAGS) $(AR_O) $^
$(RANLIB) $@
@ -48,7 +64,7 @@ $(SUBDIR)lib$(NAME).ver: $(SUBDIR)lib$(NAME).v $(OBJS)
$(SUBDIR)$(SLIBNAME): $(SUBDIR)$(SLIBNAME_WITH_MAJOR)
$(Q)cd ./$(SUBDIR) && $(LN_S) $(SLIBNAME_WITH_MAJOR) $(SLIBNAME)
$(SUBDIR)$(SLIBNAME_WITH_MAJOR): $(OBJS) $(SLIBOBJS) $(SUBDIR)lib$(NAME).ver
$(SUBDIR)$(SLIBNAME_WITH_MAJOR): $(OBJS) $(SHLIBOBJS) $(SLIBOBJS) $(SUBDIR)lib$(NAME).ver
$(SLIB_CREATE_DEF_CMD)
$$(LD) $(SHFLAGS) $(LDFLAGS) $(LDSOFLAGS) $$(LD_O) $$(filter %.o,$$^) $(FFEXTRALIBS)
$(SLIB_EXTRA_CMD)


@ -175,6 +175,10 @@ typedef enum {
SECTION_ID_FRAME_SIDE_DATA,
SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST,
SECTION_ID_FRAME_SIDE_DATA_TIMECODE,
SECTION_ID_FRAME_SIDE_DATA_COMPONENT_LIST,
SECTION_ID_FRAME_SIDE_DATA_COMPONENT,
SECTION_ID_FRAME_SIDE_DATA_PIECE_LIST,
SECTION_ID_FRAME_SIDE_DATA_PIECE,
SECTION_ID_FRAME_LOG,
SECTION_ID_FRAME_LOGS,
SECTION_ID_LIBRARY_VERSION,
@ -219,9 +223,13 @@ static struct section sections[] = {
[SECTION_ID_FRAME] = { SECTION_ID_FRAME, "frame", 0, { SECTION_ID_FRAME_TAGS, SECTION_ID_FRAME_SIDE_DATA_LIST, SECTION_ID_FRAME_LOGS, -1 } },
[SECTION_ID_FRAME_TAGS] = { SECTION_ID_FRAME_TAGS, "tags", SECTION_FLAG_HAS_VARIABLE_FIELDS, { -1 }, .element_name = "tag", .unique_name = "frame_tags" },
[SECTION_ID_FRAME_SIDE_DATA_LIST] ={ SECTION_ID_FRAME_SIDE_DATA_LIST, "side_data_list", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME_SIDE_DATA, -1 }, .element_name = "side_data", .unique_name = "frame_side_data_list" },
[SECTION_ID_FRAME_SIDE_DATA] = { SECTION_ID_FRAME_SIDE_DATA, "side_data", 0, { SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST, -1 } },
[SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST] = { SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST, "timecodes", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME_SIDE_DATA_TIMECODE, -1 } },
[SECTION_ID_FRAME_SIDE_DATA_TIMECODE] = { SECTION_ID_FRAME_SIDE_DATA_TIMECODE, "timecode", 0, { -1 } },
[SECTION_ID_FRAME_SIDE_DATA] = { SECTION_ID_FRAME_SIDE_DATA, "side_data", 0, { SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST, SECTION_ID_FRAME_SIDE_DATA_COMPONENT_LIST, -1 } },
[SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST] = { SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST, "timecodes", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME_SIDE_DATA_TIMECODE, -1 } },
[SECTION_ID_FRAME_SIDE_DATA_TIMECODE] = { SECTION_ID_FRAME_SIDE_DATA_TIMECODE, "timecode", 0, { -1 } },
[SECTION_ID_FRAME_SIDE_DATA_COMPONENT_LIST] = { SECTION_ID_FRAME_SIDE_DATA_COMPONENT_LIST, "components", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME_SIDE_DATA_COMPONENT, -1 } },
[SECTION_ID_FRAME_SIDE_DATA_COMPONENT] = { SECTION_ID_FRAME_SIDE_DATA_COMPONENT, "component", 0, { SECTION_ID_FRAME_SIDE_DATA_PIECE_LIST, -1 } },
[SECTION_ID_FRAME_SIDE_DATA_PIECE_LIST] = { SECTION_ID_FRAME_SIDE_DATA_PIECE_LIST, "pieces", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME_SIDE_DATA_PIECE, -1 } },
[SECTION_ID_FRAME_SIDE_DATA_PIECE] = { SECTION_ID_FRAME_SIDE_DATA_PIECE, "section", 0, { -1 } },
[SECTION_ID_FRAME_LOGS] = { SECTION_ID_FRAME_LOGS, "logs", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME_LOG, -1 } },
[SECTION_ID_FRAME_LOG] = { SECTION_ID_FRAME_LOG, "log", 0, { -1 }, },
[SECTION_ID_LIBRARY_VERSIONS] = { SECTION_ID_LIBRARY_VERSIONS, "library_versions", SECTION_FLAG_IS_ARRAY, { SECTION_ID_LIBRARY_VERSION, -1 } },
@ -1809,6 +1817,16 @@ static void writer_register_all(void)
writer_print_string(w, k, pbuf.str, 0); \
} while (0)
#define print_list_fmt(k, f, n, ...) do { \
av_bprint_clear(&pbuf); \
for (int idx = 0; idx < n; idx++) { \
if (idx > 0) \
av_bprint_chars(&pbuf, ' ', 1); \
av_bprintf(&pbuf, f, __VA_ARGS__); \
} \
writer_print_string(w, k, pbuf.str, 0); \
} while (0)
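/* Illustration, not part of this diff: print_list_fmt() evaluates its variadic
 * arguments inside the loop, so callers index their array with the macro's own
 * loop variable "idx". For example, the later call
 *     print_list_fmt("pivots", "%"PRIu16, curve->num_pivots, curve->pivots[idx]);
 * expands roughly to:
 *
 *     av_bprint_clear(&pbuf);
 *     for (int idx = 0; idx < curve->num_pivots; idx++) {
 *         if (idx > 0)
 *             av_bprint_chars(&pbuf, ' ', 1);
 *         av_bprintf(&pbuf, "%"PRIu16, curve->pivots[idx]);
 *     }
 *     writer_print_string(w, "pivots", pbuf.str, 0);
 */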
#define print_int(k, v) writer_print_integer(w, k, v)
#define print_q(k, v, s) writer_print_rational(w, k, v, s)
#define print_str(k, v) writer_print_string(w, k, v, 0)
@ -1854,6 +1872,153 @@ static inline int show_tags(WriterContext *w, AVDictionary *tags, int section_id
return ret;
}
static void print_dovi_metadata(WriterContext *w, const AVDOVIMetadata *dovi)
{
if (!dovi)
return;
{
const AVDOVIRpuDataHeader *hdr = av_dovi_get_header(dovi);
const AVDOVIDataMapping *mapping = av_dovi_get_mapping(dovi);
const AVDOVIColorMetadata *color = av_dovi_get_color(dovi);
AVBPrint pbuf;
av_bprint_init(&pbuf, 1, AV_BPRINT_SIZE_UNLIMITED);
// header
print_int("rpu_type", hdr->rpu_type);
print_int("rpu_format", hdr->rpu_format);
print_int("vdr_rpu_profile", hdr->vdr_rpu_profile);
print_int("vdr_rpu_level", hdr->vdr_rpu_level);
print_int("chroma_resampling_explicit_filter_flag",
hdr->chroma_resampling_explicit_filter_flag);
print_int("coef_data_type", hdr->coef_data_type);
print_int("coef_log2_denom", hdr->coef_log2_denom);
print_int("vdr_rpu_normalized_idc", hdr->vdr_rpu_normalized_idc);
print_int("bl_video_full_range_flag", hdr->bl_video_full_range_flag);
print_int("bl_bit_depth", hdr->bl_bit_depth);
print_int("el_bit_depth", hdr->el_bit_depth);
print_int("vdr_bit_depth", hdr->vdr_bit_depth);
print_int("spatial_resampling_filter_flag",
hdr->spatial_resampling_filter_flag);
print_int("el_spatial_resampling_filter_flag",
hdr->el_spatial_resampling_filter_flag);
print_int("disable_residual_flag", hdr->disable_residual_flag);
// data mapping values
print_int("vdr_rpu_id", mapping->vdr_rpu_id);
print_int("mapping_color_space", mapping->mapping_color_space);
print_int("mapping_chroma_format_idc",
mapping->mapping_chroma_format_idc);
print_int("nlq_method_idc", mapping->nlq_method_idc);
switch (mapping->nlq_method_idc) {
case AV_DOVI_NLQ_NONE:
print_str("nlq_method_idc_name", "none");
break;
case AV_DOVI_NLQ_LINEAR_DZ:
print_str("nlq_method_idc_name", "linear_dz");
break;
default:
print_str("nlq_method_idc_name", "unknown");
break;
}
print_int("num_x_partitions", mapping->num_x_partitions);
print_int("num_y_partitions", mapping->num_y_partitions);
writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA_COMPONENT_LIST);
for (int c = 0; c < 3; c++) {
const AVDOVIReshapingCurve *curve = &mapping->curves[c];
writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA_COMPONENT);
print_list_fmt("pivots", "%"PRIu16, curve->num_pivots, curve->pivots[idx]);
writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA_PIECE_LIST);
for (int i = 0; i < curve->num_pivots - 1; i++) {
writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA_PIECE);
print_int("mapping_idc", curve->mapping_idc[i]);
switch (curve->mapping_idc[i]) {
case AV_DOVI_MAPPING_POLYNOMIAL:
print_str("mapping_idc_name", "polynomial");
print_int("poly_order", curve->poly_order[i]);
print_list_fmt("poly_coef", "%"PRIi64,
curve->poly_order[i] + 1,
curve->poly_coef[i][idx]);
break;
case AV_DOVI_MAPPING_MMR:
print_str("mapping_idc_name", "mmr");
print_int("mmr_order", curve->mmr_order[i]);
print_int("mmr_constant", curve->mmr_constant[i]);
print_list_fmt("mmr_coef", "%"PRIi64,
curve->mmr_order[i] * 7,
curve->mmr_coef[i][0][idx]);
break;
default:
print_str("mapping_idc_name", "unknown");
break;
}
// SECTION_ID_FRAME_SIDE_DATA_PIECE
writer_print_section_footer(w);
}
// SECTION_ID_FRAME_SIDE_DATA_PIECE_LIST
writer_print_section_footer(w);
if (mapping->nlq_method_idc != AV_DOVI_NLQ_NONE) {
const AVDOVINLQParams *nlq = &mapping->nlq[c];
print_int("nlq_offset", nlq->nlq_offset);
print_int("vdr_in_max", nlq->vdr_in_max);
switch (mapping->nlq_method_idc) {
case AV_DOVI_NLQ_LINEAR_DZ:
print_int("linear_deadzone_slope", nlq->linear_deadzone_slope);
print_int("linear_deadzone_threshold", nlq->linear_deadzone_threshold);
break;
}
}
// SECTION_ID_FRAME_SIDE_DATA_COMPONENT
writer_print_section_footer(w);
}
// SECTION_ID_FRAME_SIDE_DATA_COMPONENT_LIST
writer_print_section_footer(w);
// color metadata
print_int("dm_metadata_id", color->dm_metadata_id);
print_int("scene_refresh_flag", color->scene_refresh_flag);
print_list_fmt("ycc_to_rgb_matrix", "%d/%d",
FF_ARRAY_ELEMS(color->ycc_to_rgb_matrix),
color->ycc_to_rgb_matrix[idx].num,
color->ycc_to_rgb_matrix[idx].den);
print_list_fmt("ycc_to_rgb_offset", "%d/%d",
FF_ARRAY_ELEMS(color->ycc_to_rgb_offset),
color->ycc_to_rgb_offset[idx].num,
color->ycc_to_rgb_offset[idx].den);
print_list_fmt("rgb_to_lms_matrix", "%d/%d",
FF_ARRAY_ELEMS(color->rgb_to_lms_matrix),
color->rgb_to_lms_matrix[idx].num,
color->rgb_to_lms_matrix[idx].den);
print_int("signal_eotf", color->signal_eotf);
print_int("signal_eotf_param0", color->signal_eotf_param0);
print_int("signal_eotf_param1", color->signal_eotf_param1);
print_int("signal_eotf_param2", color->signal_eotf_param2);
print_int("signal_bit_depth", color->signal_bit_depth);
print_int("signal_color_space", color->signal_color_space);
print_int("signal_chroma_format", color->signal_chroma_format);
print_int("signal_full_range_flag", color->signal_full_range_flag);
print_int("source_min_pq", color->source_min_pq);
print_int("source_max_pq", color->source_max_pq);
print_int("source_diagonal", color->source_diagonal);
av_bprint_finalize(&pbuf, NULL);
}
}
static void print_dynamic_hdr10_plus(WriterContext *w, const AVDynamicHDRPlus *metadata)
{
if (!metadata)
@ -2370,6 +2535,8 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
if (tag)
print_str(tag->key, tag->value);
print_int("size", sd->size);
} else if (sd->type == AV_FRAME_DATA_DOVI_METADATA) {
print_dovi_metadata(w, (const AVDOVIMetadata *)sd->data);
}
writer_print_section_footer(w);
}


@ -60,7 +60,7 @@ OBJS = ac3_parser.o \
# subsystems
OBJS-$(CONFIG_AANDCTTABLES) += aandcttab.o
OBJS-$(CONFIG_AC3DSP) += ac3dsp.o ac3.o ac3tab.o
OBJS-$(CONFIG_ADTS_HEADER) += adts_header.o mpeg4audio.o
OBJS-$(CONFIG_ADTS_HEADER) += adts_header.o mpeg4audio_sample_rates.o
OBJS-$(CONFIG_AMF) += amfenc.o
OBJS-$(CONFIG_AUDIO_FRAME_QUEUE) += audio_frame_queue.o
OBJS-$(CONFIG_ATSC_A53) += atsc_a53.o
@ -77,6 +77,7 @@ OBJS-$(CONFIG_CBS_MPEG2) += cbs_mpeg2.o
OBJS-$(CONFIG_CBS_VP9) += cbs_vp9.o
OBJS-$(CONFIG_CRYSTALHD) += crystalhd.o
OBJS-$(CONFIG_DCT) += dct.o dct32_fixed.o dct32_float.o
OBJS-$(CONFIG_DOVI_RPU) += dovi_rpu.o
OBJS-$(CONFIG_ERROR_RESILIENCE) += error_resilience.o
OBJS-$(CONFIG_EXIF) += exif.o tiff_common.o
OBJS-$(CONFIG_FAANDCT) += faandct.o
@ -122,10 +123,12 @@ OBJS-$(CONFIG_MPEGAUDIODSP) += mpegaudiodsp.o \
mpegaudiodsp_data.o \
mpegaudiodsp_fixed.o \
mpegaudiodsp_float.o
OBJS-$(CONFIG_MPEGAUDIOHEADER) += mpegaudiodecheader.o mpegaudiodata.o
OBJS-$(CONFIG_MPEGAUDIOHEADER) += mpegaudiodecheader.o mpegaudiotabs.o
OBJS-$(CONFIG_MPEG4AUDIO) += mpeg4audio.o mpeg4audio_sample_rates.o
OBJS-$(CONFIG_MPEGVIDEO) += mpegvideo.o mpegvideodsp.o rl.o \
mpegvideo_motion.o mpegutils.o \
mpegvideodata.o mpegpicture.o
mpegvideodata.o mpegpicture.o \
to_upper4.o
OBJS-$(CONFIG_MPEGVIDEOENC) += mpegvideo_enc.o mpeg12data.o \
motion_est.o ratecontrol.o \
mpegvideoencdsp.o
@ -139,7 +142,6 @@ OBJS-$(CONFIG_QSVENC) += qsvenc.o
OBJS-$(CONFIG_RANGECODER) += rangecoder.o
OBJS-$(CONFIG_RDFT) += rdft.o
OBJS-$(CONFIG_RV34DSP) += rv34dsp.o
OBJS-$(CONFIG_SHARED) += log2_tab.o reverse.o
OBJS-$(CONFIG_SINEWIN) += sinewin.o
OBJS-$(CONFIG_SNAPPY) += snappy.o
OBJS-$(CONFIG_STARTCODE) += startcode.o
@ -161,10 +163,10 @@ OBJS-$(CONFIG_ZERO12V_DECODER) += 012v.o
OBJS-$(CONFIG_A64MULTI_ENCODER) += a64multienc.o elbg.o
OBJS-$(CONFIG_A64MULTI5_ENCODER) += a64multienc.o elbg.o
OBJS-$(CONFIG_AAC_DECODER) += aacdec.o aactab.o aacsbr.o aacps_common.o aacps_float.o \
mpeg4audio.o kbdwin.o \
kbdwin.o \
sbrdsp.o aacpsdsp_float.o cbrt_data.o
OBJS-$(CONFIG_AAC_FIXED_DECODER) += aacdec_fixed.o aactab.o aacsbr_fixed.o aacps_common.o aacps_fixed.o \
mpeg4audio.o kbdwin.o \
kbdwin.o \
sbrdsp_fixed.o aacpsdsp_fixed.o cbrt_data_fixed.o
OBJS-$(CONFIG_AAC_ENCODER) += aacenc.o aaccoder.o aacenctab.o \
aacpsy.o aactab.o \
@ -172,11 +174,14 @@ OBJS-$(CONFIG_AAC_ENCODER) += aacenc.o aaccoder.o aacenctab.o \
aacenc_tns.o \
aacenc_ltp.o \
aacenc_pred.o \
psymodel.o mpeg4audio.o kbdwin.o
psymodel.o kbdwin.o \
mpeg4audio_sample_rates.o
OBJS-$(CONFIG_AAC_MF_ENCODER) += mfenc.o mf_utils.o
OBJS-$(CONFIG_AASC_DECODER) += aasc.o msrledec.o
OBJS-$(CONFIG_AC3_DECODER) += ac3dec_float.o ac3dec_data.o ac3.o kbdwin.o ac3tab.o
OBJS-$(CONFIG_AC3_FIXED_DECODER) += ac3dec_fixed.o ac3dec_data.o ac3.o kbdwin.o ac3tab.o
OBJS-$(CONFIG_AC3_DECODER) += ac3dec_float.o ac3dec_data.o ac3.o \
kbdwin.o ac3tab.o ac3_channel_layout_tab.o
OBJS-$(CONFIG_AC3_FIXED_DECODER) += ac3dec_fixed.o ac3dec_data.o ac3.o \
kbdwin.o ac3tab.o ac3_channel_layout_tab.o
OBJS-$(CONFIG_AC3_ENCODER) += ac3enc_float.o ac3enc.o ac3tab.o \
ac3.o kbdwin.o
OBJS-$(CONFIG_AC3_FIXED_ENCODER) += ac3enc_fixed.o ac3enc.o ac3tab.o ac3.o kbdwin.o
@ -188,7 +193,7 @@ OBJS-$(CONFIG_ALAC_DECODER) += alac.o alac_data.o alacdsp.o
OBJS-$(CONFIG_ALAC_ENCODER) += alacenc.o alac_data.o
OBJS-$(CONFIG_ALIAS_PIX_DECODER) += aliaspixdec.o
OBJS-$(CONFIG_ALIAS_PIX_ENCODER) += aliaspixenc.o
OBJS-$(CONFIG_ALS_DECODER) += alsdec.o bgmc.o mlz.o mpeg4audio.o
OBJS-$(CONFIG_ALS_DECODER) += alsdec.o bgmc.o mlz.o
OBJS-$(CONFIG_AMRNB_DECODER) += amrnbdec.o celp_filters.o \
celp_math.o acelp_filters.o \
acelp_vectors.o \
@ -274,7 +279,8 @@ OBJS-$(CONFIG_CSCD_DECODER) += cscd.o
OBJS-$(CONFIG_CYUV_DECODER) += cyuv.o
OBJS-$(CONFIG_DCA_DECODER) += dcadec.o dca.o dcadata.o dcahuff.o \
dca_core.o dca_exss.o dca_xll.o dca_lbr.o \
dcadsp.o dcadct.o synth_filter.o
dcadsp.o dcadct.o dca_sample_rate_tab.o \
synth_filter.o
OBJS-$(CONFIG_DCA_ENCODER) += dcaenc.o dcadata.o dcahuff.o \
dcaadpcm.o
OBJS-$(CONFIG_DDS_DECODER) += dds.o
@ -469,17 +475,19 @@ OBJS-$(CONFIG_MP1_DECODER) += mpegaudiodec_fixed.o
OBJS-$(CONFIG_MP1FLOAT_DECODER) += mpegaudiodec_float.o
OBJS-$(CONFIG_MP2_DECODER) += mpegaudiodec_fixed.o
OBJS-$(CONFIG_MP2_ENCODER) += mpegaudioenc_float.o mpegaudio.o \
mpegaudiodata.o mpegaudiodsp_data.o
mpegaudiodata.o mpegaudiodsp_data.o \
mpegaudiotabs.o
OBJS-$(CONFIG_MP2FIXED_ENCODER) += mpegaudioenc_fixed.o mpegaudio.o \
mpegaudiodata.o mpegaudiodsp_data.o
mpegaudiodata.o mpegaudiodsp_data.o \
mpegaudiotabs.o
OBJS-$(CONFIG_MP2FLOAT_DECODER) += mpegaudiodec_float.o
OBJS-$(CONFIG_MP3_DECODER) += mpegaudiodec_fixed.o
OBJS-$(CONFIG_MP3_MF_ENCODER) += mfenc.o mf_utils.o
OBJS-$(CONFIG_MP3ADU_DECODER) += mpegaudiodec_fixed.o
OBJS-$(CONFIG_MP3ADUFLOAT_DECODER) += mpegaudiodec_float.o
OBJS-$(CONFIG_MP3FLOAT_DECODER) += mpegaudiodec_float.o
OBJS-$(CONFIG_MP3ON4_DECODER) += mpegaudiodec_fixed.o mpeg4audio.o
OBJS-$(CONFIG_MP3ON4FLOAT_DECODER) += mpegaudiodec_float.o mpeg4audio.o
OBJS-$(CONFIG_MP3ON4_DECODER) += mpegaudiodec_fixed.o
OBJS-$(CONFIG_MP3ON4FLOAT_DECODER) += mpegaudiodec_float.o
OBJS-$(CONFIG_MPC7_DECODER) += mpc7.o mpc.o
OBJS-$(CONFIG_MPC8_DECODER) += mpc8.o mpc.o
OBJS-$(CONFIG_MPEGVIDEO_DECODER) += mpeg12dec.o mpeg12.o mpeg12data.o
@ -981,21 +989,27 @@ OBJS-$(CONFIG_VP9_VDPAU_HWACCEL) += vdpau_vp9.o
OBJS-$(CONFIG_VP9_VIDEOTOOLBOX_HWACCEL) += videotoolbox_vp9.o
OBJS-$(CONFIG_VP8_QSV_HWACCEL) += qsvdec.o
# libavformat dependencies
OBJS-$(CONFIG_ISO_MEDIA) += mpeg4audio.o mpegaudiodata.o
# Objects duplicated from other libraries for shared builds
SHLIBOBJS += log2_tab.o reverse.o
OBJS-$(CONFIG_ADTS_MUXER) += mpeg4audio.o
# General libavformat dependencies
OBJS-$(CONFIG_FITS_DEMUXER) += fits.o
OBJS-$(CONFIG_LATM_MUXER) += mpeg4audio.o
OBJS-$(CONFIG_MATROSKA_AUDIO_MUXER) += mpeg4audio.o
OBJS-$(CONFIG_MATROSKA_MUXER) += mpeg4audio.o
OBJS-$(CONFIG_MOV_DEMUXER) += ac3tab.o
OBJS-$(CONFIG_MATROSKA_DEMUXER) += mpeg4audio.o
OBJS-$(CONFIG_NUT_MUXER) += mpegaudiodata.o
OBJS-$(CONFIG_RTP_MUXER) += mpeg4audio.o
OBJS-$(CONFIG_SPDIF_MUXER) += dca.o
OBJS-$(CONFIG_TAK_DEMUXER) += tak.o
OBJS-$(CONFIG_WEBM_MUXER) += mpeg4audio.o
# libavformat dependencies for static builds
STLIBOBJS-$(CONFIG_AVFORMAT) += to_upper4.o
STLIBOBJS-$(CONFIG_ISO_MEDIA) += mpegaudiotabs.o
STLIBOBJS-$(CONFIG_FLV_MUXER) += mpeg4audio_sample_rates.o
STLIBOBJS-$(CONFIG_HLS_DEMUXER) += ac3_channel_layout_tab.o
STLIBOBJS-$(CONFIG_MATROSKA_DEMUXER) += mpeg4audio_sample_rates.o
STLIBOBJS-$(CONFIG_MOV_DEMUXER) += ac3_channel_layout_tab.o
STLIBOBJS-$(CONFIG_MXF_MUXER) += golomb.o
STLIBOBJS-$(CONFIG_MP3_MUXER) += mpegaudiotabs.o
STLIBOBJS-$(CONFIG_NUT_MUXER) += mpegaudiotabs.o
STLIBOBJS-$(CONFIG_RTPDEC) += jpegtables.o
STLIBOBJS-$(CONFIG_RTP_MUXER) += golomb.o jpegtables.o \
mpeg4audio_sample_rates.o
STLIBOBJS-$(CONFIG_SPDIF_MUXER) += dca_sample_rate_tab.o
# libavfilter dependencies
OBJS-$(CONFIG_ELBG_FILTER) += elbg.o
@ -1078,9 +1092,9 @@ OBJS-$(CONFIG_LIBZVBI_TELETEXT_DECODER) += libzvbi-teletextdec.o ass.o
# parsers
OBJS-$(CONFIG_AAC_LATM_PARSER) += latm_parser.o
OBJS-$(CONFIG_AAC_PARSER) += aac_parser.o aac_ac3_parser.o \
mpeg4audio.o
OBJS-$(CONFIG_AC3_PARSER) += ac3tab.o aac_ac3_parser.o
OBJS-$(CONFIG_AAC_PARSER) += aac_parser.o aac_ac3_parser.o
OBJS-$(CONFIG_AC3_PARSER) += aac_ac3_parser.o ac3tab.o \
ac3_channel_layout_tab.o
OBJS-$(CONFIG_ADX_PARSER) += adx_parser.o adx.o
OBJS-$(CONFIG_AMR_PARSER) += amr_parser.o
OBJS-$(CONFIG_AV1_PARSER) += av1_parser.o
@ -1090,7 +1104,8 @@ OBJS-$(CONFIG_BMP_PARSER) += bmp_parser.o
OBJS-$(CONFIG_CAVSVIDEO_PARSER) += cavs_parser.o
OBJS-$(CONFIG_COOK_PARSER) += cook_parser.o
OBJS-$(CONFIG_CRI_PARSER) += cri_parser.o
OBJS-$(CONFIG_DCA_PARSER) += dca_parser.o dca_exss.o dca.o
OBJS-$(CONFIG_DCA_PARSER) += dca_parser.o dca_exss.o dca.o \
dca_sample_rate_tab.o
OBJS-$(CONFIG_DIRAC_PARSER) += dirac_parser.o
OBJS-$(CONFIG_DNXHD_PARSER) += dnxhd_parser.o dnxhddata.o
OBJS-$(CONFIG_DOLBY_E_PARSER) += dolby_e_parser.o dolby_e_parse.o
@ -1137,7 +1152,7 @@ OBJS-$(CONFIG_XBM_PARSER) += xbm_parser.o
OBJS-$(CONFIG_XMA_PARSER) += xma_parser.o
# bitstream filters
OBJS-$(CONFIG_AAC_ADTSTOASC_BSF) += aac_adtstoasc_bsf.o mpeg4audio.o
OBJS-$(CONFIG_AAC_ADTSTOASC_BSF) += aac_adtstoasc_bsf.o
OBJS-$(CONFIG_AV1_METADATA_BSF) += av1_metadata_bsf.o
OBJS-$(CONFIG_AV1_FRAME_MERGE_BSF) += av1_frame_merge_bsf.o
OBJS-$(CONFIG_AV1_FRAME_SPLIT_BSF) += av1_frame_split_bsf.o
@ -1160,7 +1175,7 @@ OBJS-$(CONFIG_MJPEGA_DUMP_HEADER_BSF) += mjpega_dump_header_bsf.o
OBJS-$(CONFIG_MPEG4_UNPACK_BFRAMES_BSF) += mpeg4_unpack_bframes_bsf.o
OBJS-$(CONFIG_MOV2TEXTSUB_BSF) += movsub_bsf.o
OBJS-$(CONFIG_MP3_HEADER_DECOMPRESS_BSF) += mp3_header_decompress_bsf.o \
mpegaudiodata.o
mpegaudiotabs.o
OBJS-$(CONFIG_MPEG2_METADATA_BSF) += mpeg2_metadata_bsf.o
OBJS-$(CONFIG_NOISE_BSF) += noise_bsf.o
OBJS-$(CONFIG_NULL_BSF) += null_bsf.o


@ -998,7 +998,7 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
/* Samplerate */
for (i = 0; i < 16; i++)
if (avctx->sample_rate == avpriv_mpeg4audio_sample_rates[i])
if (avctx->sample_rate == ff_mpeg4audio_sample_rates[i])
break;
s->samplerate_index = i;
ERROR_IF(s->samplerate_index == 16 ||
@ -1143,7 +1143,7 @@ const AVCodec ff_aac_encoder = {
.encode2 = aac_encode_frame,
.close = aac_encode_end,
.defaults = aac_encode_defaults,
.supported_samplerates = mpeg4audio_sample_rates,
.supported_samplerates = ff_mpeg4audio_sample_rates,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME | AV_CODEC_CAP_DELAY,
.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLTP,


@ -81,13 +81,6 @@ static const uint8_t aac_chan_maps[AAC_MAX_CHANNELS][AAC_MAX_CHANNELS] = {
{ 2, 0, 1, 6, 7, 4, 5, 3 },
};
/* duplicated from avpriv_mpeg4audio_sample_rates to avoid shared build
* failures */
static const int mpeg4audio_sample_rates[16] = {
96000, 88200, 64000, 48000, 44100, 32000,
24000, 22050, 16000, 12000, 11025, 8000, 7350
};
/** bits needed to code codebook run value for long windows */
static const uint8_t run_value_bits_long[64] = {
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,


@ -0,0 +1,22 @@
/*
* AC-3 channel layout table
* copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "ac3_channel_layout_tab.h"


@ -0,0 +1,41 @@
/*
* AC-3 channel layout table
* copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_AC3_CHANNEL_LAYOUT_TAB_H
#define AVCODEC_AC3_CHANNEL_LAYOUT_TAB_H
#include <stdint.h>
#include "libavutil/channel_layout.h"
/**
* Map audio coding mode (acmod) to channel layout mask.
*/
const uint16_t ff_ac3_channel_layout_tab[8] = {
AV_CH_LAYOUT_STEREO,
AV_CH_LAYOUT_MONO,
AV_CH_LAYOUT_STEREO,
AV_CH_LAYOUT_SURROUND,
AV_CH_LAYOUT_2_1,
AV_CH_LAYOUT_4POINT0,
AV_CH_LAYOUT_2_2,
AV_CH_LAYOUT_5POINT0
};
#endif


@ -141,7 +141,7 @@ int ff_ac3_parse_header(GetBitContext *gbc, AC3HeaderInfo *hdr)
(hdr->num_blocks * 256);
hdr->channels = ff_ac3_channels_tab[hdr->channel_mode] + hdr->lfe_on;
}
hdr->channel_layout = avpriv_ac3_channel_layout_tab[hdr->channel_mode];
hdr->channel_layout = ff_ac3_channel_layout_tab[hdr->channel_mode];
if (hdr->lfe_on)
hdr->channel_layout |= AV_CH_LOW_FREQUENCY;


@ -1616,7 +1616,7 @@ dependent_frame:
return AVERROR_INVALIDDATA;
}
avctx->channels = s->out_channels;
avctx->channel_layout = avpriv_ac3_channel_layout_tab[s->output_mode & ~AC3_OUTPUT_LFEON];
avctx->channel_layout = ff_ac3_channel_layout_tab[s->output_mode & ~AC3_OUTPUT_LFEON];
if (s->output_mode & AC3_OUTPUT_LFEON)
avctx->channel_layout |= AV_CH_LOW_FREQUENCY;
@ -1700,7 +1700,7 @@ skip:
extended_channel_map[ch] = ch;
if (s->frame_type == EAC3_FRAME_TYPE_DEPENDENT) {
uint64_t ich_layout = avpriv_ac3_channel_layout_tab[s->prev_output_mode & ~AC3_OUTPUT_LFEON];
uint64_t ich_layout = ff_ac3_channel_layout_tab[s->prev_output_mode & ~AC3_OUTPUT_LFEON];
int channel_map_size = ff_ac3_channels_tab[s->output_mode & ~AC3_OUTPUT_LFEON] + s->lfe_on;
uint64_t channel_layout;
int extend = 0;


@ -35,6 +35,7 @@
#include "ac3dsp.h"
#include "avcodec.h"
#include "fft.h"
#include "internal.h"
#include "mathops.h"
#include "me_cmp.h"
#include "put_bits.h"


@ -82,20 +82,6 @@ const uint8_t ff_ac3_channels_tab[8] = {
2, 1, 2, 3, 3, 4, 4, 5
};
/**
* Map audio coding mode (acmod) to channel layout mask.
*/
const uint16_t avpriv_ac3_channel_layout_tab[8] = {
AV_CH_LAYOUT_STEREO,
AV_CH_LAYOUT_MONO,
AV_CH_LAYOUT_STEREO,
AV_CH_LAYOUT_SURROUND,
AV_CH_LAYOUT_2_1,
AV_CH_LAYOUT_4POINT0,
AV_CH_LAYOUT_2_2,
AV_CH_LAYOUT_5POINT0
};
/**
* Table to remap channels from AC-3 order to SMPTE order.
* [channel_mode][lfe][ch]


@ -24,13 +24,11 @@
#include <stdint.h>
#include "libavutil/internal.h"
#include "ac3.h"
#include "internal.h"
extern const uint16_t ff_ac3_frame_size_tab[38][3];
extern const uint8_t ff_ac3_channels_tab[8];
extern av_export_avcodec const uint16_t avpriv_ac3_channel_layout_tab[8];
extern const uint16_t ff_ac3_channel_layout_tab[8];
extern const uint8_t ff_ac3_dec_channel_map[8][2][6];
extern const int ff_ac3_sample_rate_tab[];
extern const uint16_t ff_ac3_bitrate_tab[19];


@ -40,7 +40,7 @@ int ff_adts_header_parse(GetBitContext *gbc, AACADTSHeaderInfo *hdr)
crc_abs = get_bits1(gbc); /* protection_absent */
aot = get_bits(gbc, 2); /* profile_objecttype */
sr = get_bits(gbc, 4); /* sample_frequency_index */
if (!avpriv_mpeg4audio_sample_rates[sr])
if (!ff_mpeg4audio_sample_rates[sr])
return AAC_AC3_PARSE_ERROR_SAMPLE_RATE;
skip_bits1(gbc); /* private_bit */
ch = get_bits(gbc, 3); /* channel_configuration */
@ -63,7 +63,7 @@ int ff_adts_header_parse(GetBitContext *gbc, AACADTSHeaderInfo *hdr)
hdr->crc_absent = crc_abs;
hdr->num_aac_frames = rdb + 1;
hdr->sampling_index = sr;
hdr->sample_rate = avpriv_mpeg4audio_sample_rates[sr];
hdr->sample_rate = ff_mpeg4audio_sample_rates[sr];
hdr->samples = (rdb + 1) * 1024;
hdr->bit_rate = size * 8 * hdr->sample_rate / hdr->samples;
hdr->frame_length = size;


@ -535,13 +535,12 @@ void av_packet_rescale_ts(AVPacket *pkt, AVRational src_tb, AVRational dst_tb)
pkt->duration = av_rescale_q(pkt->duration, src_tb, dst_tb);
}
int avpriv_packet_list_put(PacketList **packet_buffer,
PacketList **plast_pktl,
int avpriv_packet_list_put(PacketList *packet_buffer,
AVPacket *pkt,
int (*copy)(AVPacket *dst, const AVPacket *src),
int flags)
{
PacketList *pktl = av_malloc(sizeof(PacketList));
PacketListEntry *pktl = av_malloc(sizeof(*pktl));
int ret;
if (!pktl)
@ -565,44 +564,41 @@ int avpriv_packet_list_put(PacketList **packet_buffer,
pktl->next = NULL;
if (*packet_buffer)
(*plast_pktl)->next = pktl;
if (packet_buffer->head)
packet_buffer->tail->next = pktl;
else
*packet_buffer = pktl;
packet_buffer->head = pktl;
/* Add the packet in the buffered packet list. */
*plast_pktl = pktl;
packet_buffer->tail = pktl;
return 0;
}
int avpriv_packet_list_get(PacketList **pkt_buffer,
PacketList **pkt_buffer_end,
int avpriv_packet_list_get(PacketList *pkt_buffer,
AVPacket *pkt)
{
PacketList *pktl;
if (!*pkt_buffer)
PacketListEntry *pktl = pkt_buffer->head;
if (!pktl)
return AVERROR(EAGAIN);
pktl = *pkt_buffer;
*pkt = pktl->pkt;
*pkt_buffer = pktl->next;
if (!pktl->next)
*pkt_buffer_end = NULL;
pkt_buffer->head = pktl->next;
if (!pkt_buffer->head)
pkt_buffer->tail = NULL;
av_freep(&pktl);
return 0;
}
void avpriv_packet_list_free(PacketList **pkt_buf, PacketList **pkt_buf_end)
void avpriv_packet_list_free(PacketList *pkt_buf)
{
PacketList *tmp = *pkt_buf;
PacketListEntry *tmp = pkt_buf->head;
while (tmp) {
PacketList *pktl = tmp;
PacketListEntry *pktl = tmp;
tmp = pktl->next;
av_packet_unref(&pktl->pkt);
av_freep(&pktl);
}
*pkt_buf = NULL;
*pkt_buf_end = NULL;
pkt_buf->head = pkt_buf->tail = NULL;
}
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
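The packet.c hunk above switches the avpriv_packet_list_*() helpers from a head/tail pair of PacketList** arguments to a single PacketList object holding both pointers. A minimal sketch of the two types implied by the new code follows; the field names are taken from the hunk, but the exact definitions (kept in libavcodec's private packet header) are assumed rather than quoted.

/* Sketch only, not part of this commit; assumes AVPacket from libavcodec. */
typedef struct PacketListEntry {
    struct PacketListEntry *next;   /* singly linked list */
    AVPacket pkt;
} PacketListEntry;

typedef struct PacketList {
    PacketListEntry *head;          /* first queued packet, NULL when empty */
    PacketListEntry *tail;          /* lets avpriv_packet_list_put() append in O(1) */
} PacketList;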


@ -33,11 +33,6 @@
#include "get_bits.h"
#include "put_bits.h"
const uint32_t avpriv_dca_sample_rates[16] = {
0, 8000, 16000, 32000, 0, 0, 11025, 22050, 44100, 0, 0,
12000, 24000, 48000, 96000, 192000
};
const uint32_t ff_dca_sampling_freqs[16] = {
8000, 16000, 32000, 64000, 128000, 22050, 44100, 88200,
176400, 352800, 12000, 24000, 48000, 96000, 192000, 384000,
@ -112,7 +107,7 @@ int ff_dca_parse_core_frame_header(DCACoreFrameHeader *h, GetBitContext *gb)
return DCA_PARSE_ERROR_AMODE;
h->sr_code = get_bits(gb, 4);
if (!avpriv_dca_sample_rates[h->sr_code])
if (!ff_dca_sample_rates[h->sr_code])
return DCA_PARSE_ERROR_SAMPLE_RATE;
h->br_code = get_bits(gb, 5);


@ -32,7 +32,6 @@
#include "libavutil/intreadwrite.h"
#include "get_bits.h"
#include "internal.h"
#define DCA_CORE_FRAME_HEADER_SIZE 18
@ -195,8 +194,7 @@ enum DCADownMixType {
DCA_DMIX_TYPE_COUNT
};
extern av_export_avcodec const uint32_t avpriv_dca_sample_rates[16];
extern const uint32_t ff_dca_sample_rates[16];
extern const uint32_t ff_dca_sampling_freqs[16];
extern const uint8_t ff_dca_freq_ranges[16];
extern const uint8_t ff_dca_bits_per_sample[8];


@ -129,7 +129,7 @@ static int parse_frame_header(DCACoreDecoder *s)
s->npcmblocks = h.npcmblocks;
s->frame_size = h.frame_size;
s->audio_mode = h.audio_mode;
s->sample_rate = avpriv_dca_sample_rates[h.sr_code];
s->sample_rate = ff_dca_sample_rates[h.sr_code];
s->bit_rate = ff_dca_bit_rates[h.br_code];
s->drc_present = h.drc_present;
s->ts_present = h.ts_present;


@ -267,7 +267,7 @@ static int dca_parse_params(DCAParseContext *pc1, const uint8_t *buf,
return AVERROR_INVALIDDATA;
*duration = h.npcmblocks * DCA_PCMBLOCK_SAMPLES;
*sample_rate = avpriv_dca_sample_rates[h.sr_code];
*sample_rate = ff_dca_sample_rates[h.sr_code];
if (*profile != FF_PROFILE_UNKNOWN)
return 0;


@ -0,0 +1,25 @@
/*
* DCA sample rates
* Copyright (C) 2004 Gildas Bazin
* Copyright (C) 2004 Benjamin Zores
* Copyright (C) 2006 Benjamin Larsson
* Copyright (C) 2007 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "dca_sample_rate_tab.h"


@ -0,0 +1,33 @@
/*
* DCA sample rates
* Copyright (C) 2004 Gildas Bazin
* Copyright (C) 2004 Benjamin Zores
* Copyright (C) 2006 Benjamin Larsson
* Copyright (C) 2007 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_DCA_SAMPLE_RATE_TAB_H
#define AVCODEC_DCA_SAMPLE_RATE_TAB_H
#include <stdint.h>
const uint32_t ff_dca_sample_rates[16] = {
0, 8000, 16000, 32000, 0, 0, 11025, 22050, 44100, 0, 0,
12000, 24000, 48000, 96000, 192000
};
#endif

libavcodec/dovi_rpu.c

@ -0,0 +1,449 @@
/*
* Dolby Vision RPU decoder
*
* Copyright (C) 2021 Jan Ekström
* Copyright (C) 2021 Niklas Haas
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/buffer.h"
#include "dovi_rpu.h"
#include "golomb.h"
#include "get_bits.h"
enum {
RPU_COEFF_FIXED = 0,
RPU_COEFF_FLOAT = 1,
};
/**
* Private contents of vdr_ref.
*/
typedef struct DOVIVdrRef {
AVDOVIDataMapping mapping;
AVDOVIColorMetadata color;
} DOVIVdrRef;
void ff_dovi_ctx_unref(DOVIContext *s)
{
for (int i = 0; i < FF_ARRAY_ELEMS(s->vdr_ref); i++)
av_buffer_unref(&s->vdr_ref[i]);
*s = (DOVIContext) {
.logctx = s->logctx,
};
}
void ff_dovi_ctx_flush(DOVIContext *s)
{
for (int i = 0; i < FF_ARRAY_ELEMS(s->vdr_ref); i++)
av_buffer_unref(&s->vdr_ref[i]);
*s = (DOVIContext) {
.logctx = s->logctx,
.dv_profile = s->dv_profile,
};
}
int ff_dovi_ctx_replace(DOVIContext *s, const DOVIContext *s0)
{
int ret;
s->logctx = s0->logctx;
s->mapping = s0->mapping;
s->color = s0->color;
s->dv_profile = s0->dv_profile;
for (int i = 0; i < DOVI_MAX_DM_ID; i++) {
if ((ret = av_buffer_replace(&s->vdr_ref[i], s0->vdr_ref[i])) < 0)
goto fail;
}
return 0;
fail:
ff_dovi_ctx_unref(s);
return ret;
}
void ff_dovi_update_cfg(DOVIContext *s, const AVDOVIDecoderConfigurationRecord *cfg)
{
if (!cfg)
return;
s->dv_profile = cfg->dv_profile;
}
int ff_dovi_attach_side_data(DOVIContext *s, AVFrame *frame)
{
AVFrameSideData *sd;
AVBufferRef *buf;
AVDOVIMetadata *dovi;
size_t dovi_size;
if (!s->mapping || !s->color)
return 0; /* incomplete dovi metadata */
dovi = av_dovi_metadata_alloc(&dovi_size);
if (!dovi)
return AVERROR(ENOMEM);
buf = av_buffer_create((uint8_t *) dovi, dovi_size, NULL, NULL, 0);
if (!buf) {
av_free(dovi);
return AVERROR(ENOMEM);
}
sd = av_frame_new_side_data_from_buf(frame, AV_FRAME_DATA_DOVI_METADATA, buf);
if (!sd) {
av_buffer_unref(&buf);
return AVERROR(ENOMEM);
}
/* Copy only the parts of these structs known to us at compile time. */
#define COPY(t, a, b, last) memcpy(a, b, offsetof(t, last) + sizeof((b)->last))
COPY(AVDOVIRpuDataHeader, av_dovi_get_header(dovi), &s->header, disable_residual_flag);
COPY(AVDOVIDataMapping, av_dovi_get_mapping(dovi), s->mapping, nlq[2].linear_deadzone_threshold);
COPY(AVDOVIColorMetadata, av_dovi_get_color(dovi), s->color, source_diagonal);
return 0;
}
static int guess_profile(const AVDOVIRpuDataHeader *hdr)
{
switch (hdr->vdr_rpu_profile) {
case 0:
if (hdr->bl_video_full_range_flag)
return 5;
break;
case 1:
if (hdr->el_spatial_resampling_filter_flag && !hdr->disable_residual_flag) {
if (hdr->vdr_bit_depth == 12) {
return 7;
} else {
return 4;
}
} else {
return 8;
}
}
return 0; /* unknown */
}
static inline uint64_t get_ue_coef(GetBitContext *gb, const AVDOVIRpuDataHeader *hdr)
{
uint64_t ipart;
union { uint32_t u32; float f32; } fpart;
switch (hdr->coef_data_type) {
case RPU_COEFF_FIXED:
ipart = get_ue_golomb_long(gb);
fpart.u32 = get_bits_long(gb, hdr->coef_log2_denom);
return (ipart << hdr->coef_log2_denom) + fpart.u32;
case RPU_COEFF_FLOAT:
fpart.u32 = get_bits_long(gb, 32);
return fpart.f32 * (1 << hdr->coef_log2_denom);
}
return 0; /* unreachable */
}
static inline int64_t get_se_coef(GetBitContext *gb, const AVDOVIRpuDataHeader *hdr)
{
int64_t ipart;
union { uint32_t u32; float f32; } fpart;
switch (hdr->coef_data_type) {
case RPU_COEFF_FIXED:
ipart = get_se_golomb_long(gb);
fpart.u32 = get_bits_long(gb, hdr->coef_log2_denom);
return (ipart << hdr->coef_log2_denom) + fpart.u32;
case RPU_COEFF_FLOAT:
fpart.u32 = get_bits_long(gb, 32);
return fpart.f32 * (1 << hdr->coef_log2_denom);
}
return 0; /* unreachable */
}
#define VALIDATE(VAR, MIN, MAX) \
do { \
if (VAR < MIN || VAR > MAX) { \
av_log(s->logctx, AV_LOG_ERROR, "RPU validation failed: " \
#MIN" <= "#VAR" = %d <= "#MAX"\n", (int) VAR); \
goto fail; \
} \
} while (0)
int ff_dovi_rpu_parse(DOVIContext *s, const uint8_t *rpu, size_t rpu_size)
{
AVDOVIRpuDataHeader *hdr = &s->header;
GetBitContext *gb = &(GetBitContext){0};
DOVIVdrRef *vdr;
int ret;
uint8_t nal_prefix;
uint8_t rpu_type;
uint8_t vdr_seq_info_present;
uint8_t vdr_dm_metadata_present;
uint8_t use_prev_vdr_rpu;
uint8_t use_nlq;
uint8_t profile;
if ((ret = init_get_bits8(gb, rpu, rpu_size)) < 0)
return ret;
/* RPU header, common values */
nal_prefix = get_bits(gb, 8);
VALIDATE(nal_prefix, 25, 25);
rpu_type = get_bits(gb, 6);
if (rpu_type != 2) {
av_log(s->logctx, AV_LOG_WARNING, "Unrecognized RPU type "
"%"PRIu8", ignoring\n", rpu_type);
return 0;
}
hdr->rpu_type = rpu_type;
hdr->rpu_format = get_bits(gb, 11);
/* Values specific to RPU type 2 */
hdr->vdr_rpu_profile = get_bits(gb, 4);
hdr->vdr_rpu_level = get_bits(gb, 4);
vdr_seq_info_present = get_bits1(gb);
if (vdr_seq_info_present) {
hdr->chroma_resampling_explicit_filter_flag = get_bits1(gb);
hdr->coef_data_type = get_bits(gb, 2);
VALIDATE(hdr->coef_data_type, RPU_COEFF_FIXED, RPU_COEFF_FLOAT);
switch (hdr->coef_data_type) {
case RPU_COEFF_FIXED:
hdr->coef_log2_denom = get_ue_golomb(gb);
VALIDATE(hdr->coef_log2_denom, 13, 32);
break;
case RPU_COEFF_FLOAT:
hdr->coef_log2_denom = 32; /* arbitrary, choose maximum precision */
break;
}
hdr->vdr_rpu_normalized_idc = get_bits(gb, 2);
hdr->bl_video_full_range_flag = get_bits1(gb);
if ((hdr->rpu_format & 0x700) == 0) {
int bl_bit_depth_minus8 = get_ue_golomb_31(gb);
int el_bit_depth_minus8 = get_ue_golomb_31(gb);
int vdr_bit_depth_minus8 = get_ue_golomb_31(gb);
VALIDATE(bl_bit_depth_minus8, 0, 8);
VALIDATE(el_bit_depth_minus8, 0, 8);
VALIDATE(vdr_bit_depth_minus8, 0, 8);
hdr->bl_bit_depth = bl_bit_depth_minus8 + 8;
hdr->el_bit_depth = el_bit_depth_minus8 + 8;
hdr->vdr_bit_depth = vdr_bit_depth_minus8 + 8;
hdr->spatial_resampling_filter_flag = get_bits1(gb);
skip_bits(gb, 3); /* reserved_zero_3bits */
hdr->el_spatial_resampling_filter_flag = get_bits1(gb);
hdr->disable_residual_flag = get_bits1(gb);
}
}
if (!hdr->bl_bit_depth) {
av_log(s->logctx, AV_LOG_ERROR, "Missing RPU VDR sequence info?\n");
goto fail;
}
vdr_dm_metadata_present = get_bits1(gb);
use_prev_vdr_rpu = get_bits1(gb);
use_nlq = (hdr->rpu_format & 0x700) == 0 && !hdr->disable_residual_flag;
profile = s->dv_profile ? s->dv_profile : guess_profile(hdr);
if (profile == 5 && use_nlq) {
av_log(s->logctx, AV_LOG_ERROR, "Profile 5 RPUs should not use NLQ\n");
goto fail;
}
if (use_prev_vdr_rpu) {
int prev_vdr_rpu_id = get_ue_golomb_31(gb);
VALIDATE(prev_vdr_rpu_id, 0, DOVI_MAX_DM_ID);
if (!s->vdr_ref[prev_vdr_rpu_id]) {
av_log(s->logctx, AV_LOG_ERROR, "Unknown previous RPU ID: %u\n",
prev_vdr_rpu_id);
goto fail;
}
vdr = (DOVIVdrRef *) s->vdr_ref[prev_vdr_rpu_id]->data;
s->mapping = &vdr->mapping;
} else {
int vdr_rpu_id = get_ue_golomb_31(gb);
VALIDATE(vdr_rpu_id, 0, DOVI_MAX_DM_ID);
if (!s->vdr_ref[vdr_rpu_id]) {
s->vdr_ref[vdr_rpu_id] = av_buffer_allocz(sizeof(DOVIVdrRef));
if (!s->vdr_ref[vdr_rpu_id])
return AVERROR(ENOMEM);
}
vdr = (DOVIVdrRef *) s->vdr_ref[vdr_rpu_id]->data;
s->mapping = &vdr->mapping;
vdr->mapping.vdr_rpu_id = vdr_rpu_id;
vdr->mapping.mapping_color_space = get_ue_golomb_31(gb);
vdr->mapping.mapping_chroma_format_idc = get_ue_golomb_31(gb);
for (int c = 0; c < 3; c++) {
AVDOVIReshapingCurve *curve = &vdr->mapping.curves[c];
int num_pivots_minus_2 = get_ue_golomb_31(gb);
int pivot = 0;
VALIDATE(num_pivots_minus_2, 0, AV_DOVI_MAX_PIECES - 1);
curve->num_pivots = num_pivots_minus_2 + 2;
for (int i = 0; i < curve->num_pivots; i++) {
pivot += get_bits(gb, hdr->bl_bit_depth);
curve->pivots[i] = av_clip_uint16(pivot);
}
}
if (use_nlq) {
vdr->mapping.nlq_method_idc = get_bits(gb, 3);
/**
* The patent mentions another legal value, NLQ_MU_LAW, but it's
* not documented anywhere how to parse or apply that type of NLQ.
*/
VALIDATE(vdr->mapping.nlq_method_idc, 0, AV_DOVI_NLQ_LINEAR_DZ);
} else {
vdr->mapping.nlq_method_idc = AV_DOVI_NLQ_NONE;
}
vdr->mapping.num_x_partitions = get_ue_golomb_long(gb) + 1;
vdr->mapping.num_y_partitions = get_ue_golomb_long(gb) + 1;
/* End of rpu_data_header(), start of vdr_rpu_data_payload() */
for (int c = 0; c < 3; c++) {
AVDOVIReshapingCurve *curve = &vdr->mapping.curves[c];
for (int i = 0; i < curve->num_pivots - 1; i++) {
int mapping_idc = get_ue_golomb_31(gb);
VALIDATE(mapping_idc, 0, 1);
curve->mapping_idc[i] = mapping_idc;
switch (mapping_idc) {
case AV_DOVI_MAPPING_POLYNOMIAL: {
int poly_order_minus1 = get_ue_golomb_31(gb);
VALIDATE(poly_order_minus1, 0, 1);
curve->poly_order[i] = poly_order_minus1 + 1;
if (poly_order_minus1 == 0) {
int linear_interp_flag = get_bits1(gb);
if (linear_interp_flag) {
/* lack of documentation/samples */
avpriv_request_sample(s->logctx, "Dolby Vision "
"linear interpolation");
ff_dovi_ctx_unref(s);
return AVERROR_PATCHWELCOME;
}
}
for (int k = 0; k <= curve->poly_order[i]; k++)
curve->poly_coef[i][k] = get_se_coef(gb, hdr);
break;
}
case AV_DOVI_MAPPING_MMR: {
int mmr_order_minus1 = get_bits(gb, 2);
VALIDATE(mmr_order_minus1, 0, 2);
curve->mmr_order[i] = mmr_order_minus1 + 1;
curve->mmr_constant[i] = get_se_coef(gb, hdr);
for (int j = 0; j < curve->mmr_order[i]; j++) {
for (int k = 0; k < 7; k++)
curve->mmr_coef[i][j][k] = get_se_coef(gb, hdr);
}
break;
}
}
}
}
if (use_nlq) {
for (int c = 0; c < 3; c++) {
AVDOVINLQParams *nlq = &vdr->mapping.nlq[c];
nlq->nlq_offset = get_bits(gb, hdr->el_bit_depth);
nlq->vdr_in_max = get_ue_coef(gb, hdr);
switch (vdr->mapping.nlq_method_idc) {
case AV_DOVI_NLQ_LINEAR_DZ:
nlq->linear_deadzone_slope = get_ue_coef(gb, hdr);
nlq->linear_deadzone_threshold = get_ue_coef(gb, hdr);
break;
}
}
}
}
if (vdr_dm_metadata_present) {
AVDOVIColorMetadata *color;
int affected_dm_id = get_ue_golomb_31(gb);
int current_dm_id = get_ue_golomb_31(gb);
VALIDATE(affected_dm_id, 0, DOVI_MAX_DM_ID);
VALIDATE(current_dm_id, 0, DOVI_MAX_DM_ID);
if (!s->vdr_ref[affected_dm_id]) {
s->vdr_ref[affected_dm_id] = av_buffer_allocz(sizeof(DOVIVdrRef));
if (!s->vdr_ref[affected_dm_id])
return AVERROR(ENOMEM);
}
if (!s->vdr_ref[current_dm_id]) {
av_log(s->logctx, AV_LOG_ERROR, "Unknown previous RPU DM ID: %u\n",
current_dm_id);
goto fail;
}
/* Update current pointer based on current_dm_id */
vdr = (DOVIVdrRef *) s->vdr_ref[current_dm_id]->data;
s->color = &vdr->color;
/* Update values of affected_dm_id */
vdr = (DOVIVdrRef *) s->vdr_ref[affected_dm_id]->data;
color = &vdr->color;
color->dm_metadata_id = affected_dm_id;
color->scene_refresh_flag = get_ue_golomb_31(gb);
for (int i = 0; i < 9; i++)
color->ycc_to_rgb_matrix[i] = av_make_q(get_sbits(gb, 16), 1 << 13);
for (int i = 0; i < 3; i++) {
int denom = profile == 4 ? (1 << 30) : (1 << 28);
unsigned offset = get_bits_long(gb, 32);
if (offset > INT_MAX) {
/* Ensure the result fits inside AVRational */
offset >>= 1;
denom >>= 1;
}
color->ycc_to_rgb_offset[i] = av_make_q(offset, denom);
}
for (int i = 0; i < 9; i++)
color->rgb_to_lms_matrix[i] = av_make_q(get_sbits(gb, 16), 1 << 14);
color->signal_eotf = get_bits(gb, 16);
color->signal_eotf_param0 = get_bits(gb, 16);
color->signal_eotf_param1 = get_bits(gb, 16);
color->signal_eotf_param2 = get_bits_long(gb, 32);
color->signal_bit_depth = get_bits(gb, 5);
VALIDATE(color->signal_bit_depth, 8, 16);
color->signal_color_space = get_bits(gb, 2);
color->signal_chroma_format = get_bits(gb, 2);
color->signal_full_range_flag = get_bits(gb, 2);
color->source_min_pq = get_bits(gb, 12);
color->source_max_pq = get_bits(gb, 12);
color->source_diagonal = get_bits(gb, 10);
}
/* FIXME: verify CRC32, requires implementation of AV_CRC_32_MPEG_2 */
return 0;
fail:
ff_dovi_ctx_unref(s); /* don't leak potentially invalid state */
return AVERROR(EINVAL);
}

libavcodec/dovi_rpu.h

@ -0,0 +1,87 @@
/*
* Dolby Vision RPU decoder
*
* Copyright (C) 2021 Jan Ekström
* Copyright (C) 2021 Niklas Haas
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_DOVI_RPU_H
#define AVCODEC_DOVI_RPU_H
#include "libavutil/dovi_meta.h"
#include "libavutil/frame.h"
#define DOVI_MAX_DM_ID 15
typedef struct DOVIContext {
void *logctx;
/**
* Currently active RPU data header, updates on every dovi_rpu_parse().
*/
AVDOVIRpuDataHeader header;
/**
* Currently active data mappings, or NULL. Points into memory owned by the
* corresponding rpu/vdr_ref, which becomes invalid on the next call to
* dovi_rpu_parse.
*/
const AVDOVIDataMapping *mapping;
const AVDOVIColorMetadata *color;
/**
* Private fields internal to dovi_rpu.c
*/
AVBufferRef *vdr_ref[DOVI_MAX_DM_ID+1];
uint8_t dv_profile;
} DOVIContext;
int ff_dovi_ctx_replace(DOVIContext *s, const DOVIContext *s0);
/**
* Completely reset a DOVIContext, preserving only logctx.
*/
void ff_dovi_ctx_unref(DOVIContext *s);
/**
* Partially reset the internal state. Resets per-frame state while preserving
* fields parsed from the configuration record.
*/
void ff_dovi_ctx_flush(DOVIContext *s);
/**
* Read the contents of an AVDOVIDecoderConfigurationRecord (usually provided
* by stream side data) and update internal state accordingly.
*/
void ff_dovi_update_cfg(DOVIContext *s, const AVDOVIDecoderConfigurationRecord *cfg);
/**
* Parse the contents of a Dovi RPU NAL and update the parsed values in the
* DOVIContext struct.
*
* Returns 0 or an error code.
*/
int ff_dovi_rpu_parse(DOVIContext *s, const uint8_t *rpu, size_t rpu_size);
/**
* Attach the decoded AVDOVIMetadata as side data to an AVFrame.
*/
int ff_dovi_attach_side_data(DOVIContext *s, AVFrame *frame);
#endif /* AVCODEC_DOVI_RPU_H */
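dovi_rpu.h above is the whole decoder-facing API added by this merge. As a hedged usage sketch (not FFmpeg code; it merely mirrors the hevcdec.c hunks further down, and the wrapper name plus the rpu/rpu_size arguments are placeholders), a decoder is expected to drive it roughly like this:

/* Sketch only; assumes the usual decoder includes (avcodec.h, dovi_rpu.h,
 * libavutil/dovi_meta.h) and that rpu/rpu_size point at an unescaped RPU NAL. */
static int attach_dovi(AVCodecContext *avctx, DOVIContext *dovi,
                       const AVPacket *avpkt, AVFrame *out,
                       const uint8_t *rpu, size_t rpu_size)
{
    size_t sd_size;
    uint8_t *sd = av_packet_get_side_data(avpkt, AV_PKT_DATA_DOVI_CONF, &sd_size);

    dovi->logctx = avctx;   /* hevcdec.c sets this once in hevc_init_context() */
    if (sd && sd_size > 0)  /* configuration record exported as packet side data */
        ff_dovi_update_cfg(dovi, (AVDOVIDecoderConfigurationRecord *) sd);

    if (ff_dovi_rpu_parse(dovi, rpu, rpu_size) < 0) {
        /* hevcdec.c only warns here and keeps decoding the frame */
    }

    /* attaches AV_FRAME_DATA_DOVI_METADATA side data to the output frame */
    return ff_dovi_attach_side_data(dovi, out);
}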


@ -24,6 +24,8 @@
* Tables taken directly from the E-AC-3 spec.
*/
#include <stddef.h>
#include "eac3_data.h"
#include "ac3.h"


@ -164,20 +164,20 @@ static av_cold int jpg_init(AVCodecContext *avctx, JPGContext *c)
{
int ret;
ret = ff_mjpeg_build_vlc(&c->dc_vlc[0], avpriv_mjpeg_bits_dc_luminance,
avpriv_mjpeg_val_dc, 0, avctx);
ret = ff_mjpeg_build_vlc(&c->dc_vlc[0], ff_mjpeg_bits_dc_luminance,
ff_mjpeg_val_dc, 0, avctx);
if (ret)
return ret;
ret = ff_mjpeg_build_vlc(&c->dc_vlc[1], avpriv_mjpeg_bits_dc_chrominance,
avpriv_mjpeg_val_dc, 0, avctx);
ret = ff_mjpeg_build_vlc(&c->dc_vlc[1], ff_mjpeg_bits_dc_chrominance,
ff_mjpeg_val_dc, 0, avctx);
if (ret)
return ret;
ret = ff_mjpeg_build_vlc(&c->ac_vlc[0], avpriv_mjpeg_bits_ac_luminance,
avpriv_mjpeg_val_ac_luminance, 1, avctx);
ret = ff_mjpeg_build_vlc(&c->ac_vlc[0], ff_mjpeg_bits_ac_luminance,
ff_mjpeg_val_ac_luminance, 1, avctx);
if (ret)
return ret;
ret = ff_mjpeg_build_vlc(&c->ac_vlc[1], avpriv_mjpeg_bits_ac_chrominance,
avpriv_mjpeg_val_ac_chrominance, 1, avctx);
ret = ff_mjpeg_build_vlc(&c->ac_vlc[1], ff_mjpeg_bits_ac_chrominance,
ff_mjpeg_val_ac_chrominance, 1, avctx);
if (ret)
return ret;


@ -2723,6 +2723,7 @@ error:
static int set_side_data(HEVCContext *s)
{
AVFrame *out = s->ref->frame;
int ret;
if (s->sei.frame_packing.present &&
s->sei.frame_packing.arrangement_type >= 3 &&
@ -2977,6 +2978,9 @@ static int set_side_data(HEVCContext *s)
s->rpu_buf = NULL;
}
if ((ret = ff_dovi_attach_side_data(&s->dovi_ctx, out)) < 0)
return ret;
return 0;
}
@ -3308,16 +3312,23 @@ static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
if (s->pkt.nb_nals > 1 && s->pkt.nals[s->pkt.nb_nals - 1].type == HEVC_NAL_UNSPEC62 &&
s->pkt.nals[s->pkt.nb_nals - 1].size > 2 && !s->pkt.nals[s->pkt.nb_nals - 1].nuh_layer_id
&& !s->pkt.nals[s->pkt.nb_nals - 1].temporal_id) {
H2645NAL *nal = &s->pkt.nals[s->pkt.nb_nals - 1];
if (s->rpu_buf) {
av_buffer_unref(&s->rpu_buf);
av_log(s->avctx, AV_LOG_WARNING, "Multiple Dolby Vision RPUs found in one AU. Skipping previous.\n");
}
s->rpu_buf = av_buffer_alloc(s->pkt.nals[s->pkt.nb_nals - 1].raw_size - 2);
s->rpu_buf = av_buffer_alloc(nal->raw_size - 2);
if (!s->rpu_buf)
return AVERROR(ENOMEM);
memcpy(s->rpu_buf->data, nal->raw_data + 2, nal->raw_size - 2);
memcpy(s->rpu_buf->data, s->pkt.nals[s->pkt.nb_nals - 1].raw_data + 2, s->pkt.nals[s->pkt.nb_nals - 1].raw_size - 2);
ret = ff_dovi_rpu_parse(&s->dovi_ctx, nal->data + 2, nal->size - 2);
if (ret < 0) {
av_buffer_unref(&s->rpu_buf);
av_log(s->avctx, AV_LOG_WARNING, "Error parsing DOVI NAL unit.\n");
/* ignore */
}
}
/* decode the NAL units */
@ -3450,8 +3461,8 @@ static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output,
AVPacket *avpkt)
{
int ret;
size_t new_extradata_size;
uint8_t *new_extradata;
uint8_t *sd;
size_t sd_size;
HEVCContext *s = avctx->priv_data;
if (!avpkt->size) {
@ -3463,14 +3474,17 @@ static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output,
return 0;
}
new_extradata = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA,
&new_extradata_size);
if (new_extradata && new_extradata_size > 0) {
ret = hevc_decode_extradata(s, new_extradata, new_extradata_size, 0);
sd = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &sd_size);
if (sd && sd_size > 0) {
ret = hevc_decode_extradata(s, sd, sd_size, 0);
if (ret < 0)
return ret;
}
sd = av_packet_get_side_data(avpkt, AV_PKT_DATA_DOVI_CONF, &sd_size);
if (sd && sd_size > 0)
ff_dovi_update_cfg(&s->dovi_ctx, (AVDOVIDecoderConfigurationRecord *) sd);
s->ref = NULL;
ret = decode_nal_units(s, avpkt->data, avpkt->size);
if (ret < 0)
@ -3563,6 +3577,7 @@ static av_cold int hevc_decode_free(AVCodecContext *avctx)
pic_arrays_free(s);
ff_dovi_ctx_unref(&s->dovi_ctx);
av_buffer_unref(&s->rpu_buf);
av_freep(&s->md5_ctx);
@ -3647,6 +3662,7 @@ static av_cold int hevc_init_context(AVCodecContext *avctx)
ff_bswapdsp_init(&s->bdsp);
s->dovi_ctx.logctx = avctx;
s->context_initialized = 1;
s->eos = 0;
@ -3755,6 +3771,10 @@ static int hevc_update_thread_context(AVCodecContext *dst,
if (ret < 0)
return ret;
ret = ff_dovi_ctx_replace(&s->dovi_ctx, &s0->dovi_ctx);
if (ret < 0)
return ret;
s->sei.frame_packing = s0->sei.frame_packing;
s->sei.display_orientation = s0->sei.display_orientation;
s->sei.mastering_display = s0->sei.mastering_display;
@ -3811,6 +3831,7 @@ static void hevc_decode_flush(AVCodecContext *avctx)
HEVCContext *s = avctx->priv_data;
ff_hevc_flush_dpb(s);
ff_hevc_reset_sei(&s->sei);
ff_dovi_ctx_flush(&s->dovi_ctx);
av_buffer_unref(&s->rpu_buf);
s->max_ra = INT_MAX;
s->eos = 1;

View File

@ -32,6 +32,7 @@
#include "avcodec.h"
#include "bswapdsp.h"
#include "cabac.h"
#include "dovi_rpu.h"
#include "get_bits.h"
#include "hevcpred.h"
#include "h2645_parse.h"
@ -574,6 +575,7 @@ typedef struct HEVCContext {
int nuh_layer_id;
AVBufferRef *rpu_buf; ///< 0 or 1 Dolby Vision RPUs.
DOVIContext dovi_ctx; ///< Dolby Vision decoding context
} HEVCContext;
/**

View File

@ -225,7 +225,7 @@ extern const uint8_t ff_log2_run[41];
*/
int ff_match_2uint16(const uint16_t (*tab)[2], int size, int a, int b);
unsigned int avpriv_toupper4(unsigned int x);
unsigned int ff_toupper4(unsigned int x);
void ff_color_frame(AVFrame *frame, const int color[4]);
@ -363,10 +363,4 @@ int ff_int_from_list_or_default(void *ctx, const char * val_name, int val,
void ff_dvdsub_parse_palette(uint32_t *palette, const char *p);
#if defined(_WIN32) && CONFIG_SHARED && !defined(BUILDING_avcodec)
# define av_export_avcodec __declspec(dllimport)
#else
# define av_export_avcodec
#endif
#endif /* AVCODEC_INTERNAL_H */

View File

@ -30,8 +30,7 @@
* MJPEG encoder and decoder.
*/
#include "jpegtables.h"
#include "jpegtabs.h"
#if 0
/* These are the sample quantization tables given in JPEG spec section K.1.
@ -59,66 +58,3 @@ static const unsigned char std_chrominance_quant_tbl[64] = {
99, 99, 99, 99, 99, 99, 99, 99
};
#endif
/* Set up the standard Huffman tables (cf. JPEG standard section K.3) */
/* IMPORTANT: these are only valid for 8-bit data precision! */
const uint8_t avpriv_mjpeg_bits_dc_luminance[17] =
{ /* 0-base */ 0, 0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0 };
const uint8_t avpriv_mjpeg_val_dc[12] =
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };
const uint8_t avpriv_mjpeg_bits_dc_chrominance[17] =
{ /* 0-base */ 0, 0, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 };
const uint8_t avpriv_mjpeg_bits_ac_luminance[17] =
{ /* 0-base */ 0, 0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d };
const uint8_t avpriv_mjpeg_val_ac_luminance[] =
{ 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
0xf9, 0xfa
};
const uint8_t avpriv_mjpeg_bits_ac_chrominance[17] =
{ /* 0-base */ 0, 0, 2, 1, 2, 4, 4, 3, 4, 7, 5, 4, 4, 0, 1, 2, 0x77 };
const uint8_t avpriv_mjpeg_val_ac_chrominance[] =
{ 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21,
0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0,
0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34,
0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26,
0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38,
0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96,
0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5,
0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4,
0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3,
0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2,
0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda,
0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9,
0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
0xf9, 0xfa
};

View File

@ -23,17 +23,15 @@
#include <stdint.h>
#include "internal.h"
extern const uint8_t ff_mjpeg_bits_dc_luminance[];
extern const uint8_t ff_mjpeg_val_dc[];
extern av_export_avcodec const uint8_t avpriv_mjpeg_bits_dc_luminance[];
extern av_export_avcodec const uint8_t avpriv_mjpeg_val_dc[];
extern const uint8_t ff_mjpeg_bits_dc_chrominance[];
extern av_export_avcodec const uint8_t avpriv_mjpeg_bits_dc_chrominance[];
extern const uint8_t ff_mjpeg_bits_ac_luminance[];
extern const uint8_t ff_mjpeg_val_ac_luminance[];
extern av_export_avcodec const uint8_t avpriv_mjpeg_bits_ac_luminance[];
extern av_export_avcodec const uint8_t avpriv_mjpeg_val_ac_luminance[];
extern av_export_avcodec const uint8_t avpriv_mjpeg_bits_ac_chrominance[];
extern av_export_avcodec const uint8_t avpriv_mjpeg_val_ac_chrominance[];
extern const uint8_t ff_mjpeg_bits_ac_chrominance[];
extern const uint8_t ff_mjpeg_val_ac_chrominance[];
#endif /* AVCODEC_JPEGTABLES_H */

libavcodec/jpegtabs.h Normal file
View File

@ -0,0 +1,92 @@
/*
* MJPEG tables
* Copyright (c) 2000, 2001 Fabrice Bellard
* Copyright (c) 2003 Alex Beregszaszi
* Copyright (c) 2003-2004 Michael Niedermayer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_JPEGTABS_H
#define AVCODEC_JPEGTABS_H
#include <stdint.h>
#include "jpegtables.h"
/* Set up the standard Huffman tables (cf. JPEG standard section K.3) */
/* IMPORTANT: these are only valid for 8-bit data precision! */
const uint8_t ff_mjpeg_bits_dc_luminance[17] =
{ /* 0-base */ 0, 0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0 };
const uint8_t ff_mjpeg_val_dc[12] =
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };
const uint8_t ff_mjpeg_bits_dc_chrominance[17] =
{ /* 0-base */ 0, 0, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 };
const uint8_t ff_mjpeg_bits_ac_luminance[17] =
{ /* 0-base */ 0, 0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d };
const uint8_t ff_mjpeg_val_ac_luminance[] =
{ 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
0xf9, 0xfa
};
const uint8_t ff_mjpeg_bits_ac_chrominance[17] =
{ /* 0-base */ 0, 0, 2, 1, 2, 4, 4, 3, 4, 7, 5, 4, 4, 0, 1, 2, 0x77 };
const uint8_t ff_mjpeg_val_ac_chrominance[] =
{ 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21,
0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0,
0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34,
0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26,
0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38,
0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96,
0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5,
0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4,
0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3,
0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2,
0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda,
0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9,
0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
0xf9, 0xfa
};
#endif
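A quick sanity check on the tables that were just moved into this header: each bits[] array gives, for code lengths 1 through 16, the number of Huffman codes of that length, and its sum equals the size of the matching val[] array — which is where the 12 and 162 constants used by the consumers below come from. A small standalone check, assuming it is built inside the configured tree:
#include <stdio.h>
#include "libavcodec/jpegtabs.h"
int main(void)
{
    int dc = 0, ac = 0;
    for (int i = 1; i <= 16; i++) {
        dc += ff_mjpeg_bits_dc_luminance[i];
        ac += ff_mjpeg_bits_ac_luminance[i];
    }
    printf("DC codes: %d, AC codes: %d\n", dc, ac); /* prints 12 and 162 */
    return 0;
}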

View File

@ -293,12 +293,12 @@ static av_cold int ljpeg_encode_init(AVCodecContext *avctx)
ff_mjpeg_build_huffman_codes(s->huff_size_dc_luminance,
s->huff_code_dc_luminance,
avpriv_mjpeg_bits_dc_luminance,
avpriv_mjpeg_val_dc);
ff_mjpeg_bits_dc_luminance,
ff_mjpeg_val_dc);
ff_mjpeg_build_huffman_codes(s->huff_size_dc_chrominance,
s->huff_code_dc_chrominance,
avpriv_mjpeg_bits_dc_chrominance,
avpriv_mjpeg_val_dc);
ff_mjpeg_bits_dc_chrominance,
ff_mjpeg_val_dc);
return 0;
}

View File

@ -65,15 +65,15 @@ static uint8_t *append(uint8_t *buf, const uint8_t *src, int size)
static uint8_t *append_dht_segment(uint8_t *buf)
{
buf = append(buf, dht_segment_head, sizeof(dht_segment_head));
buf = append(buf, avpriv_mjpeg_bits_dc_luminance + 1, 16);
buf = append(buf, ff_mjpeg_bits_dc_luminance + 1, 16);
buf = append(buf, dht_segment_frag, sizeof(dht_segment_frag));
buf = append(buf, avpriv_mjpeg_val_dc, 12);
buf = append(buf, ff_mjpeg_val_dc, 12);
*(buf++) = 0x10;
buf = append(buf, avpriv_mjpeg_bits_ac_luminance + 1, 16);
buf = append(buf, avpriv_mjpeg_val_ac_luminance, 162);
buf = append(buf, ff_mjpeg_bits_ac_luminance + 1, 16);
buf = append(buf, ff_mjpeg_val_ac_luminance, 162);
*(buf++) = 0x11;
buf = append(buf, avpriv_mjpeg_bits_ac_chrominance + 1, 16);
buf = append(buf, avpriv_mjpeg_val_ac_chrominance, 162);
buf = append(buf, ff_mjpeg_bits_ac_chrominance + 1, 16);
buf = append(buf, ff_mjpeg_val_ac_chrominance, 162);
return buf;
}

View File

@ -61,18 +61,18 @@ static int init_default_huffman_tables(MJpegDecodeContext *s)
const uint8_t *values;
int length;
} ht[] = {
{ 0, 0, avpriv_mjpeg_bits_dc_luminance,
avpriv_mjpeg_val_dc, 12 },
{ 0, 1, avpriv_mjpeg_bits_dc_chrominance,
avpriv_mjpeg_val_dc, 12 },
{ 1, 0, avpriv_mjpeg_bits_ac_luminance,
avpriv_mjpeg_val_ac_luminance, 162 },
{ 1, 1, avpriv_mjpeg_bits_ac_chrominance,
avpriv_mjpeg_val_ac_chrominance, 162 },
{ 2, 0, avpriv_mjpeg_bits_ac_luminance,
avpriv_mjpeg_val_ac_luminance, 162 },
{ 2, 1, avpriv_mjpeg_bits_ac_chrominance,
avpriv_mjpeg_val_ac_chrominance, 162 },
{ 0, 0, ff_mjpeg_bits_dc_luminance,
ff_mjpeg_val_dc, 12 },
{ 0, 1, ff_mjpeg_bits_dc_chrominance,
ff_mjpeg_val_dc, 12 },
{ 1, 0, ff_mjpeg_bits_ac_luminance,
ff_mjpeg_val_ac_luminance, 162 },
{ 1, 1, ff_mjpeg_bits_ac_chrominance,
ff_mjpeg_val_ac_chrominance, 162 },
{ 2, 0, ff_mjpeg_bits_ac_luminance,
ff_mjpeg_val_ac_luminance, 162 },
{ 2, 1, ff_mjpeg_bits_ac_chrominance,
ff_mjpeg_val_ac_chrominance, 162 },
};
int i, ret;

View File

@ -288,20 +288,20 @@ av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
// they are needed at least right now for some processes like trellis.
ff_mjpeg_build_huffman_codes(m->huff_size_dc_luminance,
m->huff_code_dc_luminance,
avpriv_mjpeg_bits_dc_luminance,
avpriv_mjpeg_val_dc);
ff_mjpeg_bits_dc_luminance,
ff_mjpeg_val_dc);
ff_mjpeg_build_huffman_codes(m->huff_size_dc_chrominance,
m->huff_code_dc_chrominance,
avpriv_mjpeg_bits_dc_chrominance,
avpriv_mjpeg_val_dc);
ff_mjpeg_bits_dc_chrominance,
ff_mjpeg_val_dc);
ff_mjpeg_build_huffman_codes(m->huff_size_ac_luminance,
m->huff_code_ac_luminance,
avpriv_mjpeg_bits_ac_luminance,
avpriv_mjpeg_val_ac_luminance);
ff_mjpeg_bits_ac_luminance,
ff_mjpeg_val_ac_luminance);
ff_mjpeg_build_huffman_codes(m->huff_size_ac_chrominance,
m->huff_code_ac_chrominance,
avpriv_mjpeg_bits_ac_chrominance,
avpriv_mjpeg_val_ac_chrominance);
ff_mjpeg_bits_ac_chrominance,
ff_mjpeg_val_ac_chrominance);
init_uni_ac_vlc(m->huff_size_ac_luminance, m->uni_ac_vlc_len);
init_uni_ac_vlc(m->huff_size_ac_chrominance, m->uni_chroma_ac_vlc_len);

View File

@ -121,15 +121,15 @@ static void jpeg_table_header(AVCodecContext *avctx, PutBitContext *p,
size += put_huffman_table(p, 1, 1, s->mjpeg_ctx->bits_ac_chrominance,
s->mjpeg_ctx->val_ac_chrominance);
} else {
size += put_huffman_table(p, 0, 0, avpriv_mjpeg_bits_dc_luminance,
avpriv_mjpeg_val_dc);
size += put_huffman_table(p, 0, 1, avpriv_mjpeg_bits_dc_chrominance,
avpriv_mjpeg_val_dc);
size += put_huffman_table(p, 0, 0, ff_mjpeg_bits_dc_luminance,
ff_mjpeg_val_dc);
size += put_huffman_table(p, 0, 1, ff_mjpeg_bits_dc_chrominance,
ff_mjpeg_val_dc);
size += put_huffman_table(p, 1, 0, avpriv_mjpeg_bits_ac_luminance,
avpriv_mjpeg_val_ac_luminance);
size += put_huffman_table(p, 1, 1, avpriv_mjpeg_bits_ac_chrominance,
avpriv_mjpeg_val_ac_chrominance);
size += put_huffman_table(p, 1, 0, ff_mjpeg_bits_ac_luminance,
ff_mjpeg_val_ac_luminance);
size += put_huffman_table(p, 1, 1, ff_mjpeg_bits_ac_chrominance,
ff_mjpeg_val_ac_chrominance);
}
AV_WB16(ptr, size);
}

View File

@ -22,6 +22,7 @@
#include "libavutil/intreadwrite.h"
#include "bsf.h"
#include "bsf_internal.h"
#include "defs.h"
#include "mpegaudiodecheader.h"
#include "mpegaudiodata.h"
@ -67,10 +68,10 @@ static int mp3_header_decompress(AVBSFContext *ctx, AVPacket *out)
goto fail;
}
sample_rate= avpriv_mpa_freq_tab[sample_rate_index] >> (lsf + mpeg25); //in case sample rate is a little off
sample_rate = ff_mpa_freq_tab[sample_rate_index] >> (lsf + mpeg25); //in case sample rate is a little off
for(bitrate_index=2; bitrate_index<30; bitrate_index++){
frame_size = avpriv_mpa_bitrate_tab[lsf][2][bitrate_index>>1];
frame_size = ff_mpa_bitrate_tab[lsf][2][bitrate_index>>1];
frame_size = (frame_size * 144000) / (sample_rate << lsf) + (bitrate_index&1);
if(frame_size == buf_size + 4)
break;

View File

@ -2832,7 +2832,7 @@ static int mpeg_decode_frame(AVCodecContext *avctx, void *data,
}
#endif
s2->codec_tag = avpriv_toupper4(avctx->codec_tag);
s2->codec_tag = ff_toupper4(avctx->codec_tag);
if (s->mpeg_enc_ctx_allocated == 0 && ( s2->codec_tag == AV_RL32("VCR2")
|| s2->codec_tag == AV_RL32("BW10")
))

View File

@ -57,13 +57,6 @@ static int parse_config_ALS(GetBitContext *gb, MPEG4AudioConfig *c, void *logctx
return 0;
}
/* XXX: make sure to update the copies in the different encoders if you change
* this table */
const int avpriv_mpeg4audio_sample_rates[16] = {
96000, 88200, 64000, 48000, 44100, 32000,
24000, 22050, 16000, 12000, 11025, 8000, 7350
};
const uint8_t ff_mpeg4audio_channels[14] = {
0,
1, // mono (1/0)
@ -93,7 +86,7 @@ static inline int get_sample_rate(GetBitContext *gb, int *index)
{
*index = get_bits(gb, 4);
return *index == 0x0f ? get_bits(gb, 24) :
avpriv_mpeg4audio_sample_rates[*index];
ff_mpeg4audio_sample_rates[*index];
}
int ff_mpeg4audio_get_config_gb(MPEG4AudioConfig *c, GetBitContext *gb,

View File

@ -27,7 +27,6 @@
#include "libavutil/attributes.h"
#include "get_bits.h"
#include "internal.h"
#include "put_bits.h"
typedef struct MPEG4AudioConfig {
@ -45,7 +44,7 @@ typedef struct MPEG4AudioConfig {
int frame_length_short;
} MPEG4AudioConfig;
extern av_export_avcodec const int avpriv_mpeg4audio_sample_rates[16];
extern const int ff_mpeg4audio_sample_rates[16];
extern const uint8_t ff_mpeg4audio_channels[14];
/**

View File

@ -0,0 +1,23 @@
/*
* MPEG-4 Audio sample rates
* Copyright (c) 2008 Baptiste Coudurier <baptiste.coudurier@free.fr>
* Copyright (c) 2009 Alex Converse <alex.converse@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "mpeg4audio_sample_rates.h"

View File

@ -0,0 +1,30 @@
/*
* MPEG-4 Audio sample rates
* Copyright (c) 2008 Baptiste Coudurier <baptiste.coudurier@free.fr>
* Copyright (c) 2009 Alex Converse <alex.converse@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_MPEG4AUDIO_SAMPLE_RATES_H
#define AVCODEC_MPEG4AUDIO_SAMPLE_RATES_H
const int ff_mpeg4audio_sample_rates[16] = {
96000, 88200, 64000, 48000, 44100, 32000,
24000, 22050, 16000, 12000, 11025, 8000, 7350
};
#endif

View File

@ -26,19 +26,6 @@
#include "mpegaudiodata.h"
const uint16_t avpriv_mpa_bitrate_tab[2][3][15] = {
{ {0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448 },
{0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384 },
{0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320 } },
{ {0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256},
{0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160},
{0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160}
}
};
const uint16_t avpriv_mpa_freq_tab[3] = { 44100, 48000, 32000 };
/*******************************************************/
/* layer 2 tables */

View File

@ -31,14 +31,13 @@
#include "config.h"
#include "internal.h"
#include "vlc.h"
#define MODE_EXT_MS_STEREO 2
#define MODE_EXT_I_STEREO 1
extern av_export_avcodec const uint16_t avpriv_mpa_bitrate_tab[2][3][15];
extern av_export_avcodec const uint16_t avpriv_mpa_freq_tab[3];
extern const uint16_t ff_mpa_bitrate_tab[2][3][15];
extern const uint16_t ff_mpa_freq_tab[3];
extern const int ff_mpa_sblimit_table[5];
extern const int ff_mpa_quant_steps[17];
extern const int ff_mpa_quant_bits[17];

View File

@ -52,9 +52,9 @@ int avpriv_mpegaudio_decode_header(MPADecodeHeader *s, uint32_t header)
s->layer = 4 - ((header >> 17) & 3);
/* extract frequency */
sample_rate_index = (header >> 10) & 3;
if (sample_rate_index >= FF_ARRAY_ELEMS(avpriv_mpa_freq_tab))
if (sample_rate_index >= FF_ARRAY_ELEMS(ff_mpa_freq_tab))
sample_rate_index = 0;
sample_rate = avpriv_mpa_freq_tab[sample_rate_index] >> (s->lsf + mpeg25);
sample_rate = ff_mpa_freq_tab[sample_rate_index] >> (s->lsf + mpeg25);
sample_rate_index += 3 * (s->lsf + mpeg25);
s->sample_rate_index = sample_rate_index;
s->error_protection = ((header >> 16) & 1) ^ 1;
@ -75,7 +75,7 @@ int avpriv_mpegaudio_decode_header(MPADecodeHeader *s, uint32_t header)
s->nb_channels = 2;
if (bitrate_index != 0) {
frame_size = avpriv_mpa_bitrate_tab[s->lsf][s->layer - 1][bitrate_index];
frame_size = ff_mpa_bitrate_tab[s->lsf][s->layer - 1][bitrate_index];
s->bit_rate = frame_size * 1000;
switch(s->layer) {
case 1:

View File

@ -95,9 +95,9 @@ static av_cold int MPA_encode_init(AVCodecContext *avctx)
/* encoding freq */
s->lsf = 0;
for(i=0;i<3;i++) {
if (avpriv_mpa_freq_tab[i] == freq)
if (ff_mpa_freq_tab[i] == freq)
break;
if ((avpriv_mpa_freq_tab[i] / 2) == freq) {
if ((ff_mpa_freq_tab[i] / 2) == freq) {
s->lsf = 1;
break;
}
@ -110,12 +110,12 @@ static av_cold int MPA_encode_init(AVCodecContext *avctx)
/* encoding bitrate & frequency */
for(i=1;i<15;i++) {
if (avpriv_mpa_bitrate_tab[s->lsf][1][i] == bitrate)
if (ff_mpa_bitrate_tab[s->lsf][1][i] == bitrate)
break;
}
if (i == 15 && !avctx->bit_rate) {
i = 14;
bitrate = avpriv_mpa_bitrate_tab[s->lsf][1][i];
bitrate = ff_mpa_bitrate_tab[s->lsf][1][i];
avctx->bit_rate = bitrate * 1000;
}
if (i == 15){

View File

@ -0,0 +1,22 @@
/*
* MPEG Audio common tables
* copyright (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "mpegaudiotabs.h"

View File

@ -0,0 +1,39 @@
/*
* MPEG Audio common tables
* copyright (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_MPEGAUDIOTABS_H
#define AVCODEC_MPEGAUDIOTABS_H
#include <stdint.h>
const uint16_t ff_mpa_bitrate_tab[2][3][15] = {
{ { 0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448 },
{ 0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384 },
{ 0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320 } },
{ { 0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256 },
{ 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160 },
{ 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160 }
}
};
const uint16_t ff_mpa_freq_tab[3] = { 44100, 48000, 32000 };
#endif
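These tables are defined in a header (with a one-line .c stub above) so that other libraries can build their own copy for shared builds — the same "objects duplicated from other libraries" mechanism the avdevice Makefile below uses; including the header from two objects of the same library would duplicate the symbols. As a worked example of the layer-3 frame-size arithmetic that the mp3 BSF above applies to these tables (MPEG-1, so lsf = 0; a hypothetical standalone test built inside the configured tree):
#include <stdio.h>
#include "libavcodec/mpegaudiotabs.h"
int main(void)
{
    const int lsf     = 0;                              /* MPEG-1 */
    const int bitrate = ff_mpa_bitrate_tab[lsf][2][9];  /* layer 3, index 9: 128 kbit/s */
    const int rate    = ff_mpa_freq_tab[0];             /* 44100 Hz */
    /* same formula as mp3_header_decompress() above, ignoring the padding bit */
    printf("%d kbit/s @ %d Hz -> %d bytes per frame\n",
           bitrate, rate, bitrate * 144000 / rate);     /* 417 */
    return 0;
}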

View File

@ -701,7 +701,7 @@ void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
s->workaround_bugs = avctx->workaround_bugs;
/* convert fourcc to upper case */
s->codec_tag = avpriv_toupper4(avctx->codec_tag);
s->codec_tag = ff_toupper4(avctx->codec_tag);
}
/**

View File

@ -124,10 +124,10 @@ static av_cold void mss4_init_vlcs(void)
for (unsigned i = 0, offset = 0; i < 2; i++) {
mss4_init_vlc(&dc_vlc[i], &offset, mss4_dc_vlc_lens[i], NULL);
mss4_init_vlc(&ac_vlc[i], &offset,
i ? avpriv_mjpeg_bits_ac_chrominance + 1
: avpriv_mjpeg_bits_ac_luminance + 1,
i ? avpriv_mjpeg_val_ac_chrominance
: avpriv_mjpeg_val_ac_luminance);
i ? ff_mjpeg_bits_ac_chrominance + 1
: ff_mjpeg_bits_ac_luminance + 1,
i ? ff_mjpeg_val_ac_chrominance
: ff_mjpeg_val_ac_luminance);
mss4_init_vlc(&vec_entry_vlc[i], &offset, mss4_vec_entry_vlc_lens[i],
mss4_vec_entry_vlc_syms[i]);
}

View File

@ -23,16 +23,19 @@
#include "packet.h"
typedef struct PacketList {
struct PacketList *next;
typedef struct PacketListEntry {
struct PacketListEntry *next;
AVPacket pkt;
} PacketListEntry;
typedef struct PacketList {
PacketListEntry *head, *tail;
} PacketList;
/**
* Append an AVPacket to the list.
*
* @param head List head element
* @param tail List tail element
* @param list A PacketList
* @param pkt The packet being appended. The data described in it will
* be made reference counted if it isn't already.
* @param copy A callback to copy the contents of the packet to the list.
@ -41,8 +44,7 @@ typedef struct PacketList {
* @return 0 on success, negative AVERROR value on failure. On failure,
the packet and the list are unchanged.
*/
int avpriv_packet_list_put(PacketList **head, PacketList **tail,
AVPacket *pkt,
int avpriv_packet_list_put(PacketList *list, AVPacket *pkt,
int (*copy)(AVPacket *dst, const AVPacket *src),
int flags);
@ -52,22 +54,17 @@ int avpriv_packet_list_put(PacketList **head, PacketList **tail,
* @note The pkt will be overwritten completely on success. The caller
* owns the packet and must unref it by itself.
*
* @param head List head element
* @param tail List tail element
* @param list A pointer to a PacketList struct
* @param pkt Pointer to an AVPacket struct
* @return 0 on success, and a packet is returned. AVERROR(EAGAIN) if
* the list was empty.
*/
int avpriv_packet_list_get(PacketList **head, PacketList **tail,
AVPacket *pkt);
int avpriv_packet_list_get(PacketList *list, AVPacket *pkt);
/**
* Wipe the list and unref all the packets in it.
*
* @param head List head element
* @param tail List tail element
*/
void avpriv_packet_list_free(PacketList **head, PacketList **tail);
void avpriv_packet_list_free(PacketList *list);
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type);
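A minimal usage sketch of the reworked list API: callers now hold a single PacketList value instead of separate head/tail pointers. The helper below is hypothetical; as I read the implementation, a NULL copy callback makes the packet refcounted (per the documentation above) and moves it into the list, so the caller's AVPacket is emptied.
#include "libavutil/error.h"
#include "packet.h"
#include "packet_internal.h"
static int sketch_queue_and_drain(AVPacket *in)
{
    PacketList list = { NULL, NULL };
    AVPacket out;
    int ret;
    ret = avpriv_packet_list_put(&list, in, NULL, 0); /* "in" is moved into the list */
    if (ret < 0)
        return ret;
    while ((ret = avpriv_packet_list_get(&list, &out)) == 0)
        av_packet_unref(&out); /* the caller owns every packet it gets back */
    avpriv_packet_list_free(&list); /* safe even though the list is already empty */
    return ret == AVERROR(EAGAIN) ? 0 : ret;
}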

View File

@ -28,7 +28,7 @@
#include "raw.h"
#include "libavutil/common.h"
const PixelFormatTag ff_raw_pix_fmt_tags[] = {
static const PixelFormatTag raw_pix_fmt_tags[] = {
{ AV_PIX_FMT_YUV420P, MKTAG('I', '4', '2', '0') }, /* Planar formats */
{ AV_PIX_FMT_YUV420P, MKTAG('I', 'Y', 'U', 'V') },
{ AV_PIX_FMT_YUV420P, MKTAG('y', 'v', '1', '2') },
@ -299,12 +299,12 @@ const PixelFormatTag ff_raw_pix_fmt_tags[] = {
const struct PixelFormatTag *avpriv_get_raw_pix_fmt_tags(void)
{
return ff_raw_pix_fmt_tags;
return raw_pix_fmt_tags;
}
unsigned int avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat fmt)
{
const PixelFormatTag *tags = ff_raw_pix_fmt_tags;
const PixelFormatTag *tags = raw_pix_fmt_tags;
while (tags->pix_fmt >= 0) {
if (tags->pix_fmt == fmt)
return tags->fourcc;
@ -313,7 +313,7 @@ unsigned int avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat fmt)
return 0;
}
const PixelFormatTag avpriv_pix_fmt_bps_avi[] = {
static const PixelFormatTag pix_fmt_bps_avi[] = {
{ AV_PIX_FMT_PAL8, 1 },
{ AV_PIX_FMT_PAL8, 2 },
{ AV_PIX_FMT_PAL8, 4 },
@ -326,7 +326,7 @@ const PixelFormatTag avpriv_pix_fmt_bps_avi[] = {
{ AV_PIX_FMT_NONE, 0 },
};
const PixelFormatTag avpriv_pix_fmt_bps_mov[] = {
static const PixelFormatTag pix_fmt_bps_mov[] = {
{ AV_PIX_FMT_PAL8, 1 },
{ AV_PIX_FMT_PAL8, 2 },
{ AV_PIX_FMT_PAL8, 4 },
@ -337,3 +337,33 @@ const PixelFormatTag avpriv_pix_fmt_bps_mov[] = {
{ AV_PIX_FMT_PAL8, 33 },
{ AV_PIX_FMT_NONE, 0 },
};
static enum AVPixelFormat find_pix_fmt(const PixelFormatTag *tags,
unsigned int fourcc)
{
while (tags->pix_fmt != AV_PIX_FMT_NONE) {
if (tags->fourcc == fourcc)
return tags->pix_fmt;
tags++;
}
return AV_PIX_FMT_NONE;
}
enum AVPixelFormat avpriv_pix_fmt_find(enum PixelFormatTagLists list,
unsigned fourcc)
{
const PixelFormatTag *tags;
switch (list) {
case PIX_FMT_LIST_RAW:
tags = raw_pix_fmt_tags;
break;
case PIX_FMT_LIST_AVI:
tags = pix_fmt_bps_avi;
break;
case PIX_FMT_LIST_MOV:
tags = pix_fmt_bps_mov;
break;
}
return find_pix_fmt(tags, fourcc);
}

View File

@ -28,20 +28,21 @@
#define AVCODEC_RAW_H
#include "libavutil/pixfmt.h"
#include "internal.h"
typedef struct PixelFormatTag {
enum AVPixelFormat pix_fmt;
unsigned int fourcc;
} PixelFormatTag;
extern const PixelFormatTag ff_raw_pix_fmt_tags[]; // exposed through avpriv_get_raw_pix_fmt_tags()
const struct PixelFormatTag *avpriv_get_raw_pix_fmt_tags(void);
enum AVPixelFormat avpriv_find_pix_fmt(const PixelFormatTag *tags, unsigned int fourcc);
enum PixelFormatTagLists {
PIX_FMT_LIST_RAW,
PIX_FMT_LIST_AVI,
PIX_FMT_LIST_MOV,
};
extern av_export_avcodec const PixelFormatTag avpriv_pix_fmt_bps_avi[];
extern av_export_avcodec const PixelFormatTag avpriv_pix_fmt_bps_mov[];
enum AVPixelFormat avpriv_pix_fmt_find(enum PixelFormatTagLists list,
unsigned fourcc);
#endif /* AVCODEC_RAW_H */
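A short sketch of the new lookup entry point, following the pattern the raw video decoder below switches to: callers select one of the three tables by enum instead of reaching into the now-static arrays. The helper is hypothetical; MKTAG comes from libavutil/macros.h.
#include "libavutil/macros.h"
#include "raw.h"
static enum AVPixelFormat sketch_guess_fmt(unsigned fourcc, int bits_per_coded_sample)
{
    enum AVPixelFormat fmt = avpriv_pix_fmt_find(PIX_FMT_LIST_RAW, fourcc);
    if (fmt == AV_PIX_FMT_NONE && bits_per_coded_sample) /* fall back to the bit-depth table */
        fmt = avpriv_pix_fmt_find(PIX_FMT_LIST_AVI, bits_per_coded_sample);
    return fmt;
}
/* e.g. sketch_guess_fmt(MKTAG('I','4','2','0'), 0) yields AV_PIX_FMT_YUV420P */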

View File

@ -76,15 +76,15 @@ static av_cold int raw_init_decoder(AVCodecContext *avctx)
if ( avctx->codec_tag == MKTAG('r','a','w',' ')
|| avctx->codec_tag == MKTAG('N','O','1','6'))
avctx->pix_fmt = avpriv_find_pix_fmt(avpriv_pix_fmt_bps_mov,
avctx->pix_fmt = avpriv_pix_fmt_find(PIX_FMT_LIST_MOV,
avctx->bits_per_coded_sample);
else if (avctx->codec_tag == MKTAG('W', 'R', 'A', 'W'))
avctx->pix_fmt = avpriv_find_pix_fmt(avpriv_pix_fmt_bps_avi,
avctx->pix_fmt = avpriv_pix_fmt_find(PIX_FMT_LIST_AVI,
avctx->bits_per_coded_sample);
else if (avctx->codec_tag && (avctx->codec_tag & 0xFFFFFF) != MKTAG('B','I','T', 0))
avctx->pix_fmt = avpriv_find_pix_fmt(ff_raw_pix_fmt_tags, avctx->codec_tag);
avctx->pix_fmt = avpriv_pix_fmt_find(PIX_FMT_LIST_RAW, avctx->codec_tag);
else if (avctx->pix_fmt == AV_PIX_FMT_NONE && avctx->bits_per_coded_sample)
avctx->pix_fmt = avpriv_find_pix_fmt(avpriv_pix_fmt_bps_avi,
avctx->pix_fmt = avpriv_pix_fmt_find(PIX_FMT_LIST_AVI,
avctx->bits_per_coded_sample);
desc = av_pix_fmt_desc_get(avctx->pix_fmt);

libavcodec/to_upper4.c Normal file
View File

@ -0,0 +1,23 @@
/*
* Converting FOURCCs to uppercase
* Copyright (c) 2001 Fabrice Bellard
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "to_upper4.h"

libavcodec/to_upper4.h Normal file
View File

@ -0,0 +1,37 @@
/*
* Converting FOURCCs to uppercase
* Copyright (c) 2001 Fabrice Bellard
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_TO_UPPER4_H
#define AVCODEC_TO_UPPER4_H
#include "libavutil/avstring.h"
#include "internal.h"
unsigned int ff_toupper4(unsigned int x)
{
return av_toupper(x & 0xFF) |
(av_toupper((x >> 8) & 0xFF) << 8) |
(av_toupper((x >> 16) & 0xFF) << 16) |
((unsigned)av_toupper((x >> 24) & 0xFF) << 24);
}
#endif
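ff_toupper4() upper-cases the four bytes of a FOURCC independently, which is what the codec_tag normalisation in the mpeg12dec.c and mpegvideo hunks above relies on. A trivial standalone check (built inside the configured tree; note the header defines the function, so include it from one object only):
#include <stdio.h>
#include "libavutil/macros.h"
#include "libavcodec/to_upper4.h"
int main(void)
{
    unsigned tag = MKTAG('y', 'v', '1', '2');
    /* prints 32317679 -> 32315659, i.e. "yv12" -> "YV12" */
    printf("%08x -> %08x\n", tag, ff_toupper4(tag));
    return 0;
}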

View File

@ -436,17 +436,6 @@ void ff_color_frame(AVFrame *frame, const int c[4])
}
}
enum AVPixelFormat avpriv_find_pix_fmt(const PixelFormatTag *tags,
unsigned int fourcc)
{
while (tags->pix_fmt >= 0) {
if (tags->fourcc == fourcc)
return tags->pix_fmt;
tags++;
}
return AV_PIX_FMT_NONE;
}
int avpriv_codec_get_cap_skip_frame_fill_param(const AVCodec *codec){
return !!(codec->caps_internal & FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM);
}
@ -874,14 +863,6 @@ const AVCodecHWConfig *avcodec_get_hw_config(const AVCodec *codec, int index)
return &codec->hw_configs[index]->public;
}
unsigned int avpriv_toupper4(unsigned int x)
{
return av_toupper(x & 0xFF) +
(av_toupper((x >> 8) & 0xFF) << 8) +
(av_toupper((x >> 16) & 0xFF) << 16) +
((unsigned)av_toupper((x >> 24) & 0xFF) << 24);
}
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
{
int ret;

View File

@ -327,20 +327,20 @@ static int vaapi_encode_mjpeg_init_picture_params(AVCodecContext *avctx,
switch (t) {
case 0:
lengths = avpriv_mjpeg_bits_dc_luminance + 1;
values = avpriv_mjpeg_val_dc;
lengths = ff_mjpeg_bits_dc_luminance + 1;
values = ff_mjpeg_val_dc;
break;
case 1:
lengths = avpriv_mjpeg_bits_ac_luminance + 1;
values = avpriv_mjpeg_val_ac_luminance;
lengths = ff_mjpeg_bits_ac_luminance + 1;
values = ff_mjpeg_val_ac_luminance;
break;
case 2:
lengths = avpriv_mjpeg_bits_dc_chrominance + 1;
values = avpriv_mjpeg_val_dc;
lengths = ff_mjpeg_bits_dc_chrominance + 1;
values = ff_mjpeg_val_dc;
break;
case 3:
lengths = avpriv_mjpeg_bits_ac_chrominance + 1;
values = avpriv_mjpeg_val_ac_chrominance;
lengths = ff_mjpeg_bits_ac_chrominance + 1;
values = ff_mjpeg_val_ac_chrominance;
break;
}

View File

@ -28,7 +28,7 @@
#include "libavutil/version.h"
#define LIBAVCODEC_VERSION_MAJOR 59
#define LIBAVCODEC_VERSION_MINOR 16
#define LIBAVCODEC_VERSION_MINOR 18
#define LIBAVCODEC_VERSION_MICRO 100
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \

View File

@ -9,7 +9,6 @@ OBJS = alldevices.o \
utils.o \
OBJS-$(HAVE_LIBC_MSVCRT) += file_open.o
OBJS-$(CONFIG_SHARED) += reverse.o
# input/output devices
OBJS-$(CONFIG_ALSA_INDEV) += alsa_dec.o alsa.o timefilter.o
@ -54,6 +53,9 @@ OBJS-$(CONFIG_XV_OUTDEV) += xv.o
OBJS-$(CONFIG_LIBCDIO_INDEV) += libcdio.o
OBJS-$(CONFIG_LIBDC1394_INDEV) += libdc1394.o
# Objects duplicated from other libraries for shared builds
SHLIBOBJS-$(CONFIG_DECKLINK_INDEV) += reverse.o
# Windows resource file
SLIBOBJS-$(HAVE_GNU_WINDRES) += avdeviceres.o

View File

@ -18,8 +18,6 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "libavutil/thread.h"
#include "libavformat/internal.h"
#include "avdevice.h"

View File

@ -29,6 +29,9 @@
#define IDeckLinkProfileAttributes IDeckLinkAttributes
#endif
extern "C" {
#include "libavcodec/packet_internal.h"
}
#include "libavutil/thread.h"
#include "decklink_common_c.h"
#if CONFIG_LIBKLVANC
@ -75,7 +78,7 @@ class decklink_output_callback;
class decklink_input_callback;
typedef struct AVPacketQueue {
PacketList *first_pkt, *last_pkt;
PacketList pkt_list;
int nb_packets;
unsigned long long size;
int abort_request;

View File

@ -483,16 +483,16 @@ static void avpacket_queue_init(AVFormatContext *avctx, AVPacketQueue *q)
static void avpacket_queue_flush(AVPacketQueue *q)
{
PacketList *pkt, *pkt1;
PacketListEntry *pkt, *pkt1;
pthread_mutex_lock(&q->mutex);
for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
for (pkt = q->pkt_list.head; pkt != NULL; pkt = pkt1) {
pkt1 = pkt->next;
av_packet_unref(&pkt->pkt);
av_freep(&pkt);
}
q->last_pkt = NULL;
q->first_pkt = NULL;
q->pkt_list.head = NULL;
q->pkt_list.tail = NULL;
q->nb_packets = 0;
q->size = 0;
pthread_mutex_unlock(&q->mutex);
@ -516,7 +516,7 @@ static unsigned long long avpacket_queue_size(AVPacketQueue *q)
static int avpacket_queue_put(AVPacketQueue *q, AVPacket *pkt)
{
PacketList *pkt1;
PacketListEntry *pkt1;
// Drop Packet if queue size is > maximum queue size
if (avpacket_queue_size(q) > (uint64_t)q->max_q_size) {
@ -530,7 +530,7 @@ static int avpacket_queue_put(AVPacketQueue *q, AVPacket *pkt)
return -1;
}
pkt1 = (PacketList *)av_malloc(sizeof(PacketList));
pkt1 = (PacketListEntry *)av_malloc(sizeof(*pkt1));
if (!pkt1) {
av_packet_unref(pkt);
return -1;
@ -540,13 +540,13 @@ static int avpacket_queue_put(AVPacketQueue *q, AVPacket *pkt)
pthread_mutex_lock(&q->mutex);
if (!q->last_pkt) {
q->first_pkt = pkt1;
if (!q->pkt_list.tail) {
q->pkt_list.head = pkt1;
} else {
q->last_pkt->next = pkt1;
q->pkt_list.tail->next = pkt1;
}
q->last_pkt = pkt1;
q->pkt_list.tail = pkt1;
q->nb_packets++;
q->size += pkt1->pkt.size + sizeof(*pkt1);
@ -558,17 +558,16 @@ static int avpacket_queue_put(AVPacketQueue *q, AVPacket *pkt)
static int avpacket_queue_get(AVPacketQueue *q, AVPacket *pkt, int block)
{
PacketList *pkt1;
int ret;
pthread_mutex_lock(&q->mutex);
for (;; ) {
pkt1 = q->first_pkt;
PacketListEntry *pkt1 = q->pkt_list.head;
if (pkt1) {
q->first_pkt = pkt1->next;
if (!q->first_pkt) {
q->last_pkt = NULL;
q->pkt_list.head = pkt1->next;
if (!q->pkt_list.head) {
q->pkt_list.tail = NULL;
}
q->nb_packets--;
q->size -= pkt1->pkt.size + sizeof(*pkt1);

View File

@ -76,7 +76,7 @@ static enum AVPixelFormat dshow_pixfmt(DWORD biCompression, WORD biBitCount)
return AV_PIX_FMT_0RGB32;
}
}
return avpriv_find_pix_fmt(avpriv_get_raw_pix_fmt_tags(), biCompression); // all others
return avpriv_pix_fmt_find(PIX_FMT_LIST_RAW, biCompression); // all others
}
static enum AVColorRange dshow_color_range(DXVA2_ExtendedFormat *fmt_info)
@ -238,7 +238,7 @@ static int
dshow_read_close(AVFormatContext *s)
{
struct dshow_ctx *ctx = s->priv_data;
PacketList *pktl;
PacketListEntry *pktl;
if (ctx->control) {
IMediaControl_Stop(ctx->control);
@ -298,7 +298,7 @@ dshow_read_close(AVFormatContext *s)
pktl = ctx->pktl;
while (pktl) {
PacketList *next = pktl->next;
PacketListEntry *next = pktl->next;
av_packet_unref(&pktl->pkt);
av_free(pktl);
pktl = next;
@ -342,7 +342,7 @@ callback(void *priv_data, int index, uint8_t *buf, int buf_size, int64_t time, e
{
AVFormatContext *s = priv_data;
struct dshow_ctx *ctx = s->priv_data;
PacketList **ppktl, *pktl_next;
PacketListEntry **ppktl, *pktl_next;
// dump_videohdr(s, vdhdr);
@ -351,7 +351,7 @@ callback(void *priv_data, int index, uint8_t *buf, int buf_size, int64_t time, e
if(shall_we_drop(s, index, devtype))
goto fail;
pktl_next = av_mallocz(sizeof(PacketList));
pktl_next = av_mallocz(sizeof(*pktl_next));
if(!pktl_next)
goto fail;
@ -758,6 +758,31 @@ static struct dshow_format_info *dshow_get_format_info(AM_MEDIA_TYPE *type)
return fmt_info;
}
static void dshow_get_default_format(IPin *pin, IAMStreamConfig *config, enum dshowDeviceType devtype, AM_MEDIA_TYPE **type)
{
HRESULT hr;
if ((hr = IAMStreamConfig_GetFormat(config, type)) != S_OK) {
if (hr == E_NOTIMPL || !IsEqualGUID(&(*type)->majortype, devtype == VideoDevice ? &MEDIATYPE_Video : &MEDIATYPE_Audio)) {
// default not available or of wrong type,
// fall back to iterating exposed formats
// until one of the right type is found
IEnumMediaTypes* types = NULL;
if (IPin_EnumMediaTypes(pin, &types) != S_OK)
return;
IEnumMediaTypes_Reset(types);
while (IEnumMediaTypes_Next(types, 1, type, NULL) == S_OK) {
if (IsEqualGUID(&(*type)->majortype, devtype == VideoDevice ? &MEDIATYPE_Video : &MEDIATYPE_Audio)) {
break;
}
CoTaskMemFree(*type);
*type = NULL;
}
IEnumMediaTypes_Release(types);
}
}
}
/**
* Cycle through available formats available from the specified pin,
* try to set parameters specified through AVOptions, or the pin's
@ -813,32 +838,11 @@ dshow_cycle_formats(AVFormatContext *avctx, enum dshowDeviceType devtype,
use_default = !dshow_should_set_format(avctx, devtype);
if (use_default && pformat_set)
{
HRESULT hr;
// get default
if ((hr = IAMStreamConfig_GetFormat(config, &type)) != S_OK) {
if (hr == E_NOTIMPL || !IsEqualGUID(&type->majortype, devtype==VideoDevice ? &MEDIATYPE_Video : &MEDIATYPE_Audio)) {
// default not available or of wrong type,
// fall back to iterating exposed formats
// until one of the right type is found
IEnumMediaTypes *types = NULL;
if (IPin_EnumMediaTypes(pin, &types) != S_OK)
goto end;
IEnumMediaTypes_Reset(types);
while (IEnumMediaTypes_Next(types, 1, &type, NULL) == S_OK) {
if (IsEqualGUID(&type->majortype, devtype==VideoDevice ? &MEDIATYPE_Video : &MEDIATYPE_Audio)) {
break;
}
CoTaskMemFree(type);
type = NULL;
}
IEnumMediaTypes_Release(types);
}
if (!type)
// this pin does not expose any formats of the expected type
goto end;
}
dshow_get_default_format(pin, config, devtype, &type);
if (!type)
// this pin does not expose any formats of the expected type
goto end;
if (type) {
// interrogate default format, so we know what to search for below
@ -953,7 +957,7 @@ dshow_cycle_formats(AVFormatContext *avctx, enum dshowDeviceType devtype,
av_log(avctx, AV_LOG_INFO, "(%s)", chroma ? chroma : "unknown");
av_log(avctx, AV_LOG_INFO, "\n");
continue;
goto next;
}
if (requested_video_codec_id != AV_CODEC_ID_RAWVIDEO) {
if (requested_video_codec_id != fmt_info->codec_id)
@ -1038,16 +1042,33 @@ next:
if (type && type->pbFormat)
CoTaskMemFree(type->pbFormat);
CoTaskMemFree(type);
type = NULL;
}
// previously found a matching VIDEOINFOHEADER format and stored
// it for safe keeping. Searching further for a matching
// VIDEOINFOHEADER2 format yielded nothing. So set the pin's
// format based on the VIDEOINFOHEADER format.
// NB: this never applies to an audio format because
// previous_match_type is always NULL in that case
if (!format_set && previous_match_type) {
if (IAMStreamConfig_SetFormat(config, previous_match_type) == S_OK)
format_set = 1;
// set the pin's format, if wanted
if (pformat_set && !format_set) {
if (previous_match_type) {
// previously found a matching VIDEOINFOHEADER format and stored
// it for safe keeping. Searching further for a matching
// VIDEOINFOHEADER2 format yielded nothing. So set the pin's
// format based on the VIDEOINFOHEADER format.
// NB: this never applies to an audio format because
// previous_match_type is always NULL in that case
if (IAMStreamConfig_SetFormat(config, previous_match_type) == S_OK)
format_set = 1;
}
else if (use_default) {
// default format returned by device apparently was not contained
// in the capabilities of any of the formats returned by the device
// (sic?). Fall back to directly setting the default format
dshow_get_default_format(pin, config, devtype, &type);
if (IAMStreamConfig_SetFormat(config, type) == S_OK)
format_set = 1;
if (type && type->pbFormat)
CoTaskMemFree(type->pbFormat);
CoTaskMemFree(type);
type = NULL;
}
}
end:
@ -1847,7 +1868,7 @@ static int dshow_check_event_queue(IMediaEvent *media_event)
static int dshow_read_packet(AVFormatContext *s, AVPacket *pkt)
{
struct dshow_ctx *ctx = s->priv_data;
PacketList *pktl = NULL;
PacketListEntry *pktl = NULL;
while (!ctx->eof && !pktl) {
WaitForSingleObject(ctx->mutex, INFINITE);

View File

@ -70,7 +70,7 @@ enum dshowSourceFilterType {
};
#define DECLARE_QUERYINTERFACE(prefix, class, ...) \
long \
long WINAPI \
ff_dshow_##prefix##_QueryInterface(class *this, const GUID *riid, void **ppvObject) \
{ \
struct GUIDoffset ifaces[] = __VA_ARGS__; \
@ -93,14 +93,14 @@ ff_dshow_##prefix##_QueryInterface(class *this, const GUID *riid, void **ppvObje
return E_NOINTERFACE; \
}
#define DECLARE_ADDREF(prefix, class) \
unsigned long \
unsigned long WINAPI \
ff_dshow_##prefix##_AddRef(class *this) \
{ \
dshowdebug("ff_dshow_"AV_STRINGIFY(prefix)"_AddRef(%p)\t%ld\n", this, this->ref+1); \
return InterlockedIncrement(&this->ref); \
}
#define DECLARE_RELEASE(prefix, class) \
unsigned long \
unsigned long WINAPI \
ff_dshow_##prefix##_Release(class *this) \
{ \
long ref = InterlockedDecrement(&this->ref); \
@ -167,34 +167,34 @@ struct DShowPin {
IMemInputPinVtbl *imemvtbl;
};
long ff_dshow_pin_QueryInterface (DShowPin *, const GUID *, void **);
unsigned long ff_dshow_pin_AddRef (DShowPin *);
unsigned long ff_dshow_pin_Release (DShowPin *);
long ff_dshow_pin_Connect (DShowPin *, IPin *, const AM_MEDIA_TYPE *);
long ff_dshow_pin_ReceiveConnection (DShowPin *, IPin *, const AM_MEDIA_TYPE *);
long ff_dshow_pin_Disconnect (DShowPin *);
long ff_dshow_pin_ConnectedTo (DShowPin *, IPin **);
long ff_dshow_pin_ConnectionMediaType (DShowPin *, AM_MEDIA_TYPE *);
long ff_dshow_pin_QueryPinInfo (DShowPin *, PIN_INFO *);
long ff_dshow_pin_QueryDirection (DShowPin *, PIN_DIRECTION *);
long ff_dshow_pin_QueryId (DShowPin *, wchar_t **);
long ff_dshow_pin_QueryAccept (DShowPin *, const AM_MEDIA_TYPE *);
long ff_dshow_pin_EnumMediaTypes (DShowPin *, IEnumMediaTypes **);
long ff_dshow_pin_QueryInternalConnections(DShowPin *, IPin **, unsigned long *);
long ff_dshow_pin_EndOfStream (DShowPin *);
long ff_dshow_pin_BeginFlush (DShowPin *);
long ff_dshow_pin_EndFlush (DShowPin *);
long ff_dshow_pin_NewSegment (DShowPin *, REFERENCE_TIME, REFERENCE_TIME, double);
long WINAPI ff_dshow_pin_QueryInterface (DShowPin *, const GUID *, void **);
unsigned long WINAPI ff_dshow_pin_AddRef (DShowPin *);
unsigned long WINAPI ff_dshow_pin_Release (DShowPin *);
long WINAPI ff_dshow_pin_Connect (DShowPin *, IPin *, const AM_MEDIA_TYPE *);
long WINAPI ff_dshow_pin_ReceiveConnection (DShowPin *, IPin *, const AM_MEDIA_TYPE *);
long WINAPI ff_dshow_pin_Disconnect (DShowPin *);
long WINAPI ff_dshow_pin_ConnectedTo (DShowPin *, IPin **);
long WINAPI ff_dshow_pin_ConnectionMediaType (DShowPin *, AM_MEDIA_TYPE *);
long WINAPI ff_dshow_pin_QueryPinInfo (DShowPin *, PIN_INFO *);
long WINAPI ff_dshow_pin_QueryDirection (DShowPin *, PIN_DIRECTION *);
long WINAPI ff_dshow_pin_QueryId (DShowPin *, wchar_t **);
long WINAPI ff_dshow_pin_QueryAccept (DShowPin *, const AM_MEDIA_TYPE *);
long WINAPI ff_dshow_pin_EnumMediaTypes (DShowPin *, IEnumMediaTypes **);
long WINAPI ff_dshow_pin_QueryInternalConnections(DShowPin *, IPin **, unsigned long *);
long WINAPI ff_dshow_pin_EndOfStream (DShowPin *);
long WINAPI ff_dshow_pin_BeginFlush (DShowPin *);
long WINAPI ff_dshow_pin_EndFlush (DShowPin *);
long WINAPI ff_dshow_pin_NewSegment (DShowPin *, REFERENCE_TIME, REFERENCE_TIME, double);
long ff_dshow_meminputpin_QueryInterface (DShowMemInputPin *, const GUID *, void **);
unsigned long ff_dshow_meminputpin_AddRef (DShowMemInputPin *);
unsigned long ff_dshow_meminputpin_Release (DShowMemInputPin *);
long ff_dshow_meminputpin_GetAllocator (DShowMemInputPin *, IMemAllocator **);
long ff_dshow_meminputpin_NotifyAllocator (DShowMemInputPin *, IMemAllocator *, BOOL);
long ff_dshow_meminputpin_GetAllocatorRequirements(DShowMemInputPin *, ALLOCATOR_PROPERTIES *);
long ff_dshow_meminputpin_Receive (DShowMemInputPin *, IMediaSample *);
long ff_dshow_meminputpin_ReceiveMultiple (DShowMemInputPin *, IMediaSample **, long, long *);
long ff_dshow_meminputpin_ReceiveCanBlock (DShowMemInputPin *);
long WINAPI ff_dshow_meminputpin_QueryInterface (DShowMemInputPin *, const GUID *, void **);
unsigned long WINAPI ff_dshow_meminputpin_AddRef (DShowMemInputPin *);
unsigned long WINAPI ff_dshow_meminputpin_Release (DShowMemInputPin *);
long WINAPI ff_dshow_meminputpin_GetAllocator (DShowMemInputPin *, IMemAllocator **);
long WINAPI ff_dshow_meminputpin_NotifyAllocator (DShowMemInputPin *, IMemAllocator *, BOOL);
long WINAPI ff_dshow_meminputpin_GetAllocatorRequirements(DShowMemInputPin *, ALLOCATOR_PROPERTIES *);
long WINAPI ff_dshow_meminputpin_Receive (DShowMemInputPin *, IMediaSample *);
long WINAPI ff_dshow_meminputpin_ReceiveMultiple (DShowMemInputPin *, IMediaSample **, long, long *);
long WINAPI ff_dshow_meminputpin_ReceiveCanBlock (DShowMemInputPin *);
void ff_dshow_pin_Destroy(DShowPin *);
DShowPin *ff_dshow_pin_Create (DShowFilter *filter);
@ -212,13 +212,13 @@ struct DShowEnumPins {
DShowFilter *filter;
};
long ff_dshow_enumpins_QueryInterface(DShowEnumPins *, const GUID *, void **);
unsigned long ff_dshow_enumpins_AddRef (DShowEnumPins *);
unsigned long ff_dshow_enumpins_Release (DShowEnumPins *);
long ff_dshow_enumpins_Next (DShowEnumPins *, unsigned long, IPin **, unsigned long *);
long ff_dshow_enumpins_Skip (DShowEnumPins *, unsigned long);
long ff_dshow_enumpins_Reset (DShowEnumPins *);
long ff_dshow_enumpins_Clone (DShowEnumPins *, DShowEnumPins **);
long WINAPI ff_dshow_enumpins_QueryInterface(DShowEnumPins *, const GUID *, void **);
unsigned long WINAPI ff_dshow_enumpins_AddRef (DShowEnumPins *);
unsigned long WINAPI ff_dshow_enumpins_Release (DShowEnumPins *);
long WINAPI ff_dshow_enumpins_Next (DShowEnumPins *, unsigned long, IPin **, unsigned long *);
long WINAPI ff_dshow_enumpins_Skip (DShowEnumPins *, unsigned long);
long WINAPI ff_dshow_enumpins_Reset (DShowEnumPins *);
long WINAPI ff_dshow_enumpins_Clone (DShowEnumPins *, DShowEnumPins **);
void ff_dshow_enumpins_Destroy(DShowEnumPins *);
DShowEnumPins *ff_dshow_enumpins_Create (DShowPin *pin, DShowFilter *filter);
@ -233,13 +233,13 @@ struct DShowEnumMediaTypes {
AM_MEDIA_TYPE type;
};
long ff_dshow_enummediatypes_QueryInterface(DShowEnumMediaTypes *, const GUID *, void **);
unsigned long ff_dshow_enummediatypes_AddRef (DShowEnumMediaTypes *);
unsigned long ff_dshow_enummediatypes_Release (DShowEnumMediaTypes *);
long ff_dshow_enummediatypes_Next (DShowEnumMediaTypes *, unsigned long, AM_MEDIA_TYPE **, unsigned long *);
long ff_dshow_enummediatypes_Skip (DShowEnumMediaTypes *, unsigned long);
long ff_dshow_enummediatypes_Reset (DShowEnumMediaTypes *);
long ff_dshow_enummediatypes_Clone (DShowEnumMediaTypes *, DShowEnumMediaTypes **);
long WINAPI ff_dshow_enummediatypes_QueryInterface(DShowEnumMediaTypes *, const GUID *, void **);
unsigned long WINAPI ff_dshow_enummediatypes_AddRef (DShowEnumMediaTypes *);
unsigned long WINAPI ff_dshow_enummediatypes_Release (DShowEnumMediaTypes *);
long WINAPI ff_dshow_enummediatypes_Next (DShowEnumMediaTypes *, unsigned long, AM_MEDIA_TYPE **, unsigned long *);
long WINAPI ff_dshow_enummediatypes_Skip (DShowEnumMediaTypes *, unsigned long);
long WINAPI ff_dshow_enummediatypes_Reset (DShowEnumMediaTypes *);
long WINAPI ff_dshow_enummediatypes_Clone (DShowEnumMediaTypes *, DShowEnumMediaTypes **);
void ff_dshow_enummediatypes_Destroy(DShowEnumMediaTypes *);
DShowEnumMediaTypes *ff_dshow_enummediatypes_Create(const AM_MEDIA_TYPE *type);
@ -262,21 +262,21 @@ struct DShowFilter {
void (*callback)(void *priv_data, int index, uint8_t *buf, int buf_size, int64_t time, enum dshowDeviceType type);
};
long ff_dshow_filter_QueryInterface (DShowFilter *, const GUID *, void **);
unsigned long ff_dshow_filter_AddRef (DShowFilter *);
unsigned long ff_dshow_filter_Release (DShowFilter *);
long ff_dshow_filter_GetClassID (DShowFilter *, CLSID *);
long ff_dshow_filter_Stop (DShowFilter *);
long ff_dshow_filter_Pause (DShowFilter *);
long ff_dshow_filter_Run (DShowFilter *, REFERENCE_TIME);
long ff_dshow_filter_GetState (DShowFilter *, DWORD, FILTER_STATE *);
long ff_dshow_filter_SetSyncSource (DShowFilter *, IReferenceClock *);
long ff_dshow_filter_GetSyncSource (DShowFilter *, IReferenceClock **);
long ff_dshow_filter_EnumPins (DShowFilter *, IEnumPins **);
long ff_dshow_filter_FindPin (DShowFilter *, const wchar_t *, IPin **);
long ff_dshow_filter_QueryFilterInfo(DShowFilter *, FILTER_INFO *);
long ff_dshow_filter_JoinFilterGraph(DShowFilter *, IFilterGraph *, const wchar_t *);
long ff_dshow_filter_QueryVendorInfo(DShowFilter *, wchar_t **);
long WINAPI ff_dshow_filter_QueryInterface (DShowFilter *, const GUID *, void **);
unsigned long WINAPI ff_dshow_filter_AddRef (DShowFilter *);
unsigned long WINAPI ff_dshow_filter_Release (DShowFilter *);
long WINAPI ff_dshow_filter_GetClassID (DShowFilter *, CLSID *);
long WINAPI ff_dshow_filter_Stop (DShowFilter *);
long WINAPI ff_dshow_filter_Pause (DShowFilter *);
long WINAPI ff_dshow_filter_Run (DShowFilter *, REFERENCE_TIME);
long WINAPI ff_dshow_filter_GetState (DShowFilter *, DWORD, FILTER_STATE *);
long WINAPI ff_dshow_filter_SetSyncSource (DShowFilter *, IReferenceClock *);
long WINAPI ff_dshow_filter_GetSyncSource (DShowFilter *, IReferenceClock **);
long WINAPI ff_dshow_filter_EnumPins (DShowFilter *, IEnumPins **);
long WINAPI ff_dshow_filter_FindPin (DShowFilter *, const wchar_t *, IPin **);
long WINAPI ff_dshow_filter_QueryFilterInfo(DShowFilter *, FILTER_INFO *);
long WINAPI ff_dshow_filter_JoinFilterGraph(DShowFilter *, IFilterGraph *, const wchar_t *);
long WINAPI ff_dshow_filter_QueryVendorInfo(DShowFilter *, wchar_t **);
void ff_dshow_filter_Destroy(DShowFilter *);
DShowFilter *ff_dshow_filter_Create (void *, void *, enum dshowDeviceType);
@ -322,7 +322,7 @@ struct dshow_ctx {
HANDLE mutex;
HANDLE event[2]; /* event[0] is set by DirectShow
* event[1] is set by callback() */
PacketList *pktl;
PacketListEntry *pktl;
int eof;

View File

@ -204,5 +204,9 @@ end:
IAMTVTuner_Release(tv_tuner_filter);
if (tv_tuner_base_filter)
IBaseFilter_Release(tv_tuner_base_filter);
if (tv_audio_filter)
IAMAudioInputMixer_Release(tv_audio_filter);
if (tv_audio_base_filter)
IBaseFilter_Release(tv_audio_base_filter);
return hr;
}

View File

@ -26,7 +26,7 @@ DECLARE_QUERYINTERFACE(enummediatypes, DShowEnumMediaTypes,
DECLARE_ADDREF(enummediatypes, DShowEnumMediaTypes)
DECLARE_RELEASE(enummediatypes, DShowEnumMediaTypes)
long ff_dshow_enummediatypes_Next(DShowEnumMediaTypes *this, unsigned long n,
long WINAPI ff_dshow_enummediatypes_Next(DShowEnumMediaTypes *this, unsigned long n,
AM_MEDIA_TYPE **types, unsigned long *fetched)
{
int count = 0;
@ -50,20 +50,20 @@ long ff_dshow_enummediatypes_Next(DShowEnumMediaTypes *this, unsigned long n,
return S_FALSE;
return S_OK;
}
long ff_dshow_enummediatypes_Skip(DShowEnumMediaTypes *this, unsigned long n)
long WINAPI ff_dshow_enummediatypes_Skip(DShowEnumMediaTypes *this, unsigned long n)
{
dshowdebug("ff_dshow_enummediatypes_Skip(%p)\n", this);
if (n) /* Any skip will always fall outside of the only valid type. */
return S_FALSE;
return S_OK;
}
long ff_dshow_enummediatypes_Reset(DShowEnumMediaTypes *this)
long WINAPI ff_dshow_enummediatypes_Reset(DShowEnumMediaTypes *this)
{
dshowdebug("ff_dshow_enummediatypes_Reset(%p)\n", this);
this->pos = 0;
return S_OK;
}
long ff_dshow_enummediatypes_Clone(DShowEnumMediaTypes *this, DShowEnumMediaTypes **enums)
long WINAPI ff_dshow_enummediatypes_Clone(DShowEnumMediaTypes *this, DShowEnumMediaTypes **enums)
{
DShowEnumMediaTypes *new;
dshowdebug("ff_dshow_enummediatypes_Clone(%p)\n", this);

View File

@ -26,7 +26,7 @@ DECLARE_QUERYINTERFACE(enumpins, DShowEnumPins,
DECLARE_ADDREF(enumpins, DShowEnumPins)
DECLARE_RELEASE(enumpins, DShowEnumPins)
long ff_dshow_enumpins_Next(DShowEnumPins *this, unsigned long n, IPin **pins,
long WINAPI ff_dshow_enumpins_Next(DShowEnumPins *this, unsigned long n, IPin **pins,
unsigned long *fetched)
{
int count = 0;
@ -45,20 +45,20 @@ long ff_dshow_enumpins_Next(DShowEnumPins *this, unsigned long n, IPin **pins,
return S_FALSE;
return S_OK;
}
long ff_dshow_enumpins_Skip(DShowEnumPins *this, unsigned long n)
long WINAPI ff_dshow_enumpins_Skip(DShowEnumPins *this, unsigned long n)
{
dshowdebug("ff_dshow_enumpins_Skip(%p)\n", this);
if (n) /* Any skip will always fall outside of the only valid pin. */
return S_FALSE;
return S_OK;
}
long ff_dshow_enumpins_Reset(DShowEnumPins *this)
long WINAPI ff_dshow_enumpins_Reset(DShowEnumPins *this)
{
dshowdebug("ff_dshow_enumpins_Reset(%p)\n", this);
this->pos = 0;
return S_OK;
}
long ff_dshow_enumpins_Clone(DShowEnumPins *this, DShowEnumPins **pins)
long WINAPI ff_dshow_enumpins_Clone(DShowEnumPins *this, DShowEnumPins **pins)
{
DShowEnumPins *new;
dshowdebug("ff_dshow_enumpins_Clone(%p)\n", this);

View File

@ -26,32 +26,32 @@ DECLARE_QUERYINTERFACE(filter, DShowFilter,
DECLARE_ADDREF(filter, DShowFilter)
DECLARE_RELEASE(filter, DShowFilter)
long ff_dshow_filter_GetClassID(DShowFilter *this, CLSID *id)
long WINAPI ff_dshow_filter_GetClassID(DShowFilter *this, CLSID *id)
{
dshowdebug("ff_dshow_filter_GetClassID(%p)\n", this);
/* I'm not creating a ClassID just for this. */
return E_FAIL;
}
long ff_dshow_filter_Stop(DShowFilter *this)
long WINAPI ff_dshow_filter_Stop(DShowFilter *this)
{
dshowdebug("ff_dshow_filter_Stop(%p)\n", this);
this->state = State_Stopped;
return S_OK;
}
long ff_dshow_filter_Pause(DShowFilter *this)
long WINAPI ff_dshow_filter_Pause(DShowFilter *this)
{
dshowdebug("ff_dshow_filter_Pause(%p)\n", this);
this->state = State_Paused;
return S_OK;
}
long ff_dshow_filter_Run(DShowFilter *this, REFERENCE_TIME start)
long WINAPI ff_dshow_filter_Run(DShowFilter *this, REFERENCE_TIME start)
{
dshowdebug("ff_dshow_filter_Run(%p) %"PRId64"\n", this, start);
this->state = State_Running;
this->start_time = start;
return S_OK;
}
long ff_dshow_filter_GetState(DShowFilter *this, DWORD ms, FILTER_STATE *state)
long WINAPI ff_dshow_filter_GetState(DShowFilter *this, DWORD ms, FILTER_STATE *state)
{
dshowdebug("ff_dshow_filter_GetState(%p)\n", this);
if (!state)
@ -59,7 +59,7 @@ long ff_dshow_filter_GetState(DShowFilter *this, DWORD ms, FILTER_STATE *state)
*state = this->state;
return S_OK;
}
long ff_dshow_filter_SetSyncSource(DShowFilter *this, IReferenceClock *clock)
long WINAPI ff_dshow_filter_SetSyncSource(DShowFilter *this, IReferenceClock *clock)
{
dshowdebug("ff_dshow_filter_SetSyncSource(%p)\n", this);
@ -73,7 +73,7 @@ long ff_dshow_filter_SetSyncSource(DShowFilter *this, IReferenceClock *clock)
return S_OK;
}
long ff_dshow_filter_GetSyncSource(DShowFilter *this, IReferenceClock **clock)
long WINAPI ff_dshow_filter_GetSyncSource(DShowFilter *this, IReferenceClock **clock)
{
dshowdebug("ff_dshow_filter_GetSyncSource(%p)\n", this);
@ -85,7 +85,7 @@ long ff_dshow_filter_GetSyncSource(DShowFilter *this, IReferenceClock **clock)
return S_OK;
}
long ff_dshow_filter_EnumPins(DShowFilter *this, IEnumPins **enumpin)
long WINAPI ff_dshow_filter_EnumPins(DShowFilter *this, IEnumPins **enumpin)
{
DShowEnumPins *new;
dshowdebug("ff_dshow_filter_EnumPins(%p)\n", this);
@ -99,7 +99,7 @@ long ff_dshow_filter_EnumPins(DShowFilter *this, IEnumPins **enumpin)
*enumpin = (IEnumPins *) new;
return S_OK;
}
long ff_dshow_filter_FindPin(DShowFilter *this, const wchar_t *id, IPin **pin)
long WINAPI ff_dshow_filter_FindPin(DShowFilter *this, const wchar_t *id, IPin **pin)
{
DShowPin *found = NULL;
dshowdebug("ff_dshow_filter_FindPin(%p)\n", this);
@ -116,7 +116,7 @@ long ff_dshow_filter_FindPin(DShowFilter *this, const wchar_t *id, IPin **pin)
return S_OK;
}
long ff_dshow_filter_QueryFilterInfo(DShowFilter *this, FILTER_INFO *info)
long WINAPI ff_dshow_filter_QueryFilterInfo(DShowFilter *this, FILTER_INFO *info)
{
dshowdebug("ff_dshow_filter_QueryFilterInfo(%p)\n", this);
@ -128,7 +128,7 @@ long ff_dshow_filter_QueryFilterInfo(DShowFilter *this, FILTER_INFO *info)
return S_OK;
}
long ff_dshow_filter_JoinFilterGraph(DShowFilter *this, IFilterGraph *graph,
long WINAPI ff_dshow_filter_JoinFilterGraph(DShowFilter *this, IFilterGraph *graph,
const wchar_t *name)
{
dshowdebug("ff_dshow_filter_JoinFilterGraph(%p)\n", this);
@ -139,7 +139,7 @@ long ff_dshow_filter_JoinFilterGraph(DShowFilter *this, IFilterGraph *graph,
return S_OK;
}
long ff_dshow_filter_QueryVendorInfo(DShowFilter *this, wchar_t **info)
long WINAPI ff_dshow_filter_QueryVendorInfo(DShowFilter *this, wchar_t **info)
{
dshowdebug("ff_dshow_filter_QueryVendorInfo(%p)\n", this);

View File

@ -29,13 +29,13 @@ DECLARE_QUERYINTERFACE(pin, DShowPin,
DECLARE_ADDREF(pin, DShowPin)
DECLARE_RELEASE(pin, DShowPin)
long ff_dshow_pin_Connect(DShowPin *this, IPin *pin, const AM_MEDIA_TYPE *type)
long WINAPI ff_dshow_pin_Connect(DShowPin *this, IPin *pin, const AM_MEDIA_TYPE *type)
{
dshowdebug("ff_dshow_pin_Connect(%p, %p, %p)\n", this, pin, type);
/* Input pins receive connections. */
return S_FALSE;
}
long ff_dshow_pin_ReceiveConnection(DShowPin *this, IPin *pin,
long WINAPI ff_dshow_pin_ReceiveConnection(DShowPin *this, IPin *pin,
const AM_MEDIA_TYPE *type)
{
enum dshowDeviceType devtype = this->filter->type;
@ -62,7 +62,7 @@ long ff_dshow_pin_ReceiveConnection(DShowPin *this, IPin *pin,
return S_OK;
}
long ff_dshow_pin_Disconnect(DShowPin *this)
long WINAPI ff_dshow_pin_Disconnect(DShowPin *this)
{
dshowdebug("ff_dshow_pin_Disconnect(%p)\n", this);
@ -75,7 +75,7 @@ long ff_dshow_pin_Disconnect(DShowPin *this)
return S_OK;
}
long ff_dshow_pin_ConnectedTo(DShowPin *this, IPin **pin)
long WINAPI ff_dshow_pin_ConnectedTo(DShowPin *this, IPin **pin)
{
dshowdebug("ff_dshow_pin_ConnectedTo(%p)\n", this);
@ -88,7 +88,7 @@ long ff_dshow_pin_ConnectedTo(DShowPin *this, IPin **pin)
return S_OK;
}
long ff_dshow_pin_ConnectionMediaType(DShowPin *this, AM_MEDIA_TYPE *type)
long WINAPI ff_dshow_pin_ConnectionMediaType(DShowPin *this, AM_MEDIA_TYPE *type)
{
dshowdebug("ff_dshow_pin_ConnectionMediaType(%p)\n", this);
@ -99,7 +99,7 @@ long ff_dshow_pin_ConnectionMediaType(DShowPin *this, AM_MEDIA_TYPE *type)
return ff_copy_dshow_media_type(type, &this->type);
}
long ff_dshow_pin_QueryPinInfo(DShowPin *this, PIN_INFO *info)
long WINAPI ff_dshow_pin_QueryPinInfo(DShowPin *this, PIN_INFO *info)
{
dshowdebug("ff_dshow_pin_QueryPinInfo(%p)\n", this);
@ -115,7 +115,7 @@ long ff_dshow_pin_QueryPinInfo(DShowPin *this, PIN_INFO *info)
return S_OK;
}
long ff_dshow_pin_QueryDirection(DShowPin *this, PIN_DIRECTION *dir)
long WINAPI ff_dshow_pin_QueryDirection(DShowPin *this, PIN_DIRECTION *dir)
{
dshowdebug("ff_dshow_pin_QueryDirection(%p)\n", this);
if (!dir)
@ -123,7 +123,7 @@ long ff_dshow_pin_QueryDirection(DShowPin *this, PIN_DIRECTION *dir)
*dir = PINDIR_INPUT;
return S_OK;
}
long ff_dshow_pin_QueryId(DShowPin *this, wchar_t **id)
long WINAPI ff_dshow_pin_QueryId(DShowPin *this, wchar_t **id)
{
dshowdebug("ff_dshow_pin_QueryId(%p)\n", this);
@ -134,12 +134,12 @@ long ff_dshow_pin_QueryId(DShowPin *this, wchar_t **id)
return S_OK;
}
long ff_dshow_pin_QueryAccept(DShowPin *this, const AM_MEDIA_TYPE *type)
long WINAPI ff_dshow_pin_QueryAccept(DShowPin *this, const AM_MEDIA_TYPE *type)
{
dshowdebug("ff_dshow_pin_QueryAccept(%p)\n", this);
return S_FALSE;
}
long ff_dshow_pin_EnumMediaTypes(DShowPin *this, IEnumMediaTypes **enumtypes)
long WINAPI ff_dshow_pin_EnumMediaTypes(DShowPin *this, IEnumMediaTypes **enumtypes)
{
const AM_MEDIA_TYPE *type = NULL;
DShowEnumMediaTypes *new;
@ -154,31 +154,31 @@ long ff_dshow_pin_EnumMediaTypes(DShowPin *this, IEnumMediaTypes **enumtypes)
*enumtypes = (IEnumMediaTypes *) new;
return S_OK;
}
long ff_dshow_pin_QueryInternalConnections(DShowPin *this, IPin **pin,
long WINAPI ff_dshow_pin_QueryInternalConnections(DShowPin *this, IPin **pin,
unsigned long *npin)
{
dshowdebug("ff_dshow_pin_QueryInternalConnections(%p)\n", this);
return E_NOTIMPL;
}
long ff_dshow_pin_EndOfStream(DShowPin *this)
long WINAPI ff_dshow_pin_EndOfStream(DShowPin *this)
{
dshowdebug("ff_dshow_pin_EndOfStream(%p)\n", this);
/* I don't care. */
return S_OK;
}
long ff_dshow_pin_BeginFlush(DShowPin *this)
long WINAPI ff_dshow_pin_BeginFlush(DShowPin *this)
{
dshowdebug("ff_dshow_pin_BeginFlush(%p)\n", this);
/* I don't care. */
return S_OK;
}
long ff_dshow_pin_EndFlush(DShowPin *this)
long WINAPI ff_dshow_pin_EndFlush(DShowPin *this)
{
dshowdebug("ff_dshow_pin_EndFlush(%p)\n", this);
/* I don't care. */
return S_OK;
}
long ff_dshow_pin_NewSegment(DShowPin *this, REFERENCE_TIME start, REFERENCE_TIME stop,
long WINAPI ff_dshow_pin_NewSegment(DShowPin *this, REFERENCE_TIME start, REFERENCE_TIME stop,
double rate)
{
dshowdebug("ff_dshow_pin_NewSegment(%p)\n", this);
@ -250,43 +250,43 @@ DECLARE_DESTROY(pin, DShowPin, ff_dshow_pin_Free)
/*****************************************************************************
* DShowMemInputPin
****************************************************************************/
long ff_dshow_meminputpin_QueryInterface(DShowMemInputPin *this, const GUID *riid,
long WINAPI ff_dshow_meminputpin_QueryInterface(DShowMemInputPin *this, const GUID *riid,
void **ppvObject)
{
DShowPin *pin = (DShowPin *) ((uint8_t *) this - imemoffset);
dshowdebug("ff_dshow_meminputpin_QueryInterface(%p)\n", this);
return ff_dshow_pin_QueryInterface(pin, riid, ppvObject);
}
unsigned long ff_dshow_meminputpin_AddRef(DShowMemInputPin *this)
unsigned long WINAPI ff_dshow_meminputpin_AddRef(DShowMemInputPin *this)
{
DShowPin *pin = (DShowPin *) ((uint8_t *) this - imemoffset);
dshowdebug("ff_dshow_meminputpin_AddRef(%p)\n", this);
return ff_dshow_pin_AddRef(pin);
}
unsigned long ff_dshow_meminputpin_Release(DShowMemInputPin *this)
unsigned long WINAPI ff_dshow_meminputpin_Release(DShowMemInputPin *this)
{
DShowPin *pin = (DShowPin *) ((uint8_t *) this - imemoffset);
dshowdebug("ff_dshow_meminputpin_Release(%p)\n", this);
return ff_dshow_pin_Release(pin);
}
long ff_dshow_meminputpin_GetAllocator(DShowMemInputPin *this, IMemAllocator **alloc)
long WINAPI ff_dshow_meminputpin_GetAllocator(DShowMemInputPin *this, IMemAllocator **alloc)
{
dshowdebug("ff_dshow_meminputpin_GetAllocator(%p)\n", this);
return VFW_E_NO_ALLOCATOR;
}
long ff_dshow_meminputpin_NotifyAllocator(DShowMemInputPin *this, IMemAllocator *alloc,
long WINAPI ff_dshow_meminputpin_NotifyAllocator(DShowMemInputPin *this, IMemAllocator *alloc,
BOOL rdwr)
{
dshowdebug("ff_dshow_meminputpin_NotifyAllocator(%p)\n", this);
return S_OK;
}
long ff_dshow_meminputpin_GetAllocatorRequirements(DShowMemInputPin *this,
long WINAPI ff_dshow_meminputpin_GetAllocatorRequirements(DShowMemInputPin *this,
ALLOCATOR_PROPERTIES *props)
{
dshowdebug("ff_dshow_meminputpin_GetAllocatorRequirements(%p)\n", this);
return E_NOTIMPL;
}
long ff_dshow_meminputpin_Receive(DShowMemInputPin *this, IMediaSample *sample)
long WINAPI ff_dshow_meminputpin_Receive(DShowMemInputPin *this, IMediaSample *sample)
{
DShowPin *pin = (DShowPin *) ((uint8_t *) this - imemoffset);
enum dshowDeviceType devtype = pin->filter->type;
@ -354,7 +354,7 @@ long ff_dshow_meminputpin_Receive(DShowMemInputPin *this, IMediaSample *sample)
return S_OK;
}
long ff_dshow_meminputpin_ReceiveMultiple(DShowMemInputPin *this,
long WINAPI ff_dshow_meminputpin_ReceiveMultiple(DShowMemInputPin *this,
IMediaSample **samples, long n, long *nproc)
{
int i;
@ -366,7 +366,7 @@ long ff_dshow_meminputpin_ReceiveMultiple(DShowMemInputPin *this,
*nproc = n;
return S_OK;
}
long ff_dshow_meminputpin_ReceiveCanBlock(DShowMemInputPin *this)
long WINAPI ff_dshow_meminputpin_ReceiveCanBlock(DShowMemInputPin *this)
{
dshowdebug("ff_dshow_meminputpin_ReceiveCanBlock(%p)\n", this);
/* I swear I will not block. */
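A note on the WINAPI annotations added throughout the dshow hunks above: these functions are installed into DirectShow COM vtables and called back by the system, and COM methods on 32-bit Windows use the stdcall convention, so each implementation must carry the same annotation as the function-pointer slot it fills. A minimal, self-contained sketch of that pattern (stop_fn, MiniVtbl and mini_stop are made-up illustration names, not part of the patch):

    #ifdef _WIN32
    #include <windows.h>        /* defines WINAPI as __stdcall */
    #else
    #define WINAPI              /* the convention only matters on Windows */
    #endif

    /* The vtable slot type carries the calling convention... */
    typedef long (WINAPI *stop_fn)(void *self);

    typedef struct MiniVtbl {
        stop_fn Stop;
    } MiniVtbl;

    /* ...and the implementation must be declared with the same convention,
     * otherwise caller and callee disagree about stack cleanup on x86. */
    static long WINAPI mini_stop(void *self)
    {
        (void)self;
        return 0;               /* S_OK */
    }

    static const MiniVtbl mini_vtbl = { mini_stop };

    int main(void)
    {
        return (int)mini_vtbl.Stop(0);
    }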

View File

@ -28,7 +28,7 @@
#include "libavutil/version.h"
#define LIBAVDEVICE_VERSION_MAJOR 59
#define LIBAVDEVICE_VERSION_MINOR 2
#define LIBAVDEVICE_VERSION_MINOR 4
#define LIBAVDEVICE_VERSION_MICRO 100
#define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \

View File

@ -45,7 +45,7 @@ struct vfw_ctx {
HWND hwnd;
HANDLE mutex;
HANDLE event;
PacketList *pktl;
PacketListEntry *pktl;
unsigned int curbufsize;
unsigned int frame_num;
char *video_size; /**< A string describing video size, set by a private option. */
@ -179,7 +179,7 @@ static LRESULT CALLBACK videostream_cb(HWND hwnd, LPVIDEOHDR vdhdr)
{
AVFormatContext *s;
struct vfw_ctx *ctx;
PacketList **ppktl, *pktl_next;
PacketListEntry **ppktl, *pktl_next;
s = (AVFormatContext *) GetWindowLongPtr(hwnd, GWLP_USERDATA);
ctx = s->priv_data;
@ -191,7 +191,7 @@ static LRESULT CALLBACK videostream_cb(HWND hwnd, LPVIDEOHDR vdhdr)
WaitForSingleObject(ctx->mutex, INFINITE);
pktl_next = av_mallocz(sizeof(PacketList));
pktl_next = av_mallocz(sizeof(*pktl_next));
if(!pktl_next)
goto fail;
@ -220,7 +220,7 @@ fail:
static int vfw_read_close(AVFormatContext *s)
{
struct vfw_ctx *ctx = s->priv_data;
PacketList *pktl;
PacketListEntry *pktl;
if(ctx->hwnd) {
SendMessage(ctx->hwnd, WM_CAP_SET_CALLBACK_VIDEOSTREAM, 0, 0);
@ -234,7 +234,7 @@ static int vfw_read_close(AVFormatContext *s)
pktl = ctx->pktl;
while (pktl) {
PacketList *next = pktl->next;
PacketListEntry *next = pktl->next;
av_packet_unref(&pktl->pkt);
av_free(pktl);
pktl = next;
@ -440,7 +440,7 @@ fail:
static int vfw_read_packet(AVFormatContext *s, AVPacket *pkt)
{
struct vfw_ctx *ctx = s->priv_data;
PacketList *pktl = NULL;
PacketListEntry *pktl = NULL;
while(!pktl) {
WaitForSingleObject(ctx->mutex, INFINITE);
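The PacketList churn in this hunk (and in the libavformat hunks further down) follows an internal API change: the per-node type is now PacketListEntry, and the former pairs of head/tail pointers are bundled into a single PacketList object handed to avpriv_packet_list_put()/get()/free(). A simplified stand-in for that list shape, with made-up names (EntrySketch, ListSketch, list_put) and an int payload instead of an AVPacket; the real definitions live in libavcodec/packet_internal.h:

    #include <stdlib.h>

    typedef struct EntrySketch {
        struct EntrySketch *next;
        int payload;                            /* stands in for the AVPacket */
    } EntrySketch;

    typedef struct ListSketch {
        EntrySketch *head;
        EntrySketch *tail;
    } ListSketch;

    static int list_put(ListSketch *list, int payload)
    {
        EntrySketch *e = calloc(1, sizeof(*e)); /* sizeof(*e), as in the hunk above */
        if (!e)
            return -1;
        e->payload = payload;
        if (list->tail)
            list->tail->next = e;               /* append after the current tail */
        else
            list->head = e;                     /* first entry */
        list->tail = e;
        return 0;
    }

    int main(void)
    {
        ListSketch q = { 0 };
        return list_put(&q, 42);                /* leaks the node; it is only a sketch */
    }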

View File

@ -577,13 +577,14 @@ OBJS-$(CONFIG_SPECTRUMSYNTH_FILTER) += vaf_spectrumsynth.o
OBJS-$(CONFIG_AMOVIE_FILTER) += src_movie.o
OBJS-$(CONFIG_MOVIE_FILTER) += src_movie.o
# Objects duplicated from other libraries for shared builds
SHLIBOBJS += log2_tab.o
# Windows resource file
SLIBOBJS-$(HAVE_GNU_WINDRES) += avfilterres.o
SKIPHEADERS-$(CONFIG_LIBVIDSTAB) += vidstabutils.h
OBJS-$(CONFIG_SHARED) += log2_tab.o
SKIPHEADERS-$(CONFIG_QSVVPP) += qsvvpp.h
SKIPHEADERS-$(CONFIG_OPENCL) += opencl.h
SKIPHEADERS-$(CONFIG_VAAPI) += vaapi_vpp.h

View File

@ -30,7 +30,7 @@
#include "libavutil/version.h"
#define LIBAVFILTER_VERSION_MAJOR 8
#define LIBAVFILTER_VERSION_MINOR 22
#define LIBAVFILTER_VERSION_MINOR 24
#define LIBAVFILTER_VERSION_MICRO 100

View File

@ -27,6 +27,7 @@
#include "libavutil/bswap.h"
#include "libavutil/adler32.h"
#include "libavutil/display.h"
#include "libavutil/dovi_meta.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/film_grain_params.h"
@ -429,6 +430,110 @@ static void dump_sei_film_grain_params_metadata(AVFilterContext *ctx, const AVFr
}
}
static void dump_dovi_metadata(AVFilterContext *ctx, const AVFrameSideData *sd)
{
const AVDOVIMetadata *dovi = (AVDOVIMetadata *) sd->data;
const AVDOVIRpuDataHeader *hdr = av_dovi_get_header(dovi);
const AVDOVIDataMapping *mapping = av_dovi_get_mapping(dovi);
const AVDOVIColorMetadata *color = av_dovi_get_color(dovi);
av_log(ctx, AV_LOG_INFO, "Dolby Vision Metadata:\n");
av_log(ctx, AV_LOG_INFO, " rpu_type=%"PRIu8"; ", hdr->rpu_type);
av_log(ctx, AV_LOG_INFO, "rpu_format=%"PRIu16"; ", hdr->rpu_format);
av_log(ctx, AV_LOG_INFO, "vdr_rpu_profile=%"PRIu8"; ", hdr->vdr_rpu_profile);
av_log(ctx, AV_LOG_INFO, "vdr_rpu_level=%"PRIu8"; ", hdr->vdr_rpu_level);
av_log(ctx, AV_LOG_INFO, "chroma_resampling_explicit_filter_flag=%"PRIu8"; ", hdr->chroma_resampling_explicit_filter_flag);
av_log(ctx, AV_LOG_INFO, "coef_data_type=%"PRIu8"; ", hdr->coef_data_type);
av_log(ctx, AV_LOG_INFO, "coef_log2_denom=%"PRIu8"; ", hdr->coef_log2_denom);
av_log(ctx, AV_LOG_INFO, "vdr_rpu_normalized_idc=%"PRIu8"; ", hdr->vdr_rpu_normalized_idc);
av_log(ctx, AV_LOG_INFO, "bl_video_full_range_flag=%"PRIu8"; ", hdr->bl_video_full_range_flag);
av_log(ctx, AV_LOG_INFO, "bl_bit_depth=%"PRIu8"; ", hdr->bl_bit_depth);
av_log(ctx, AV_LOG_INFO, "el_bit_depth=%"PRIu8"; ", hdr->el_bit_depth);
av_log(ctx, AV_LOG_INFO, "vdr_bit_depth=%"PRIu8"; ", hdr->vdr_bit_depth);
av_log(ctx, AV_LOG_INFO, "spatial_resampling_filter_flag=%"PRIu8"; ", hdr->spatial_resampling_filter_flag);
av_log(ctx, AV_LOG_INFO, "el_spatial_resampling_filter_flag=%"PRIu8"; ", hdr->el_spatial_resampling_filter_flag);
av_log(ctx, AV_LOG_INFO, "disable_residual_flag=%"PRIu8"\n", hdr->disable_residual_flag);
av_log(ctx, AV_LOG_INFO, " data mapping: ");
av_log(ctx, AV_LOG_INFO, "vdr_rpu_id=%"PRIu8"; ", mapping->vdr_rpu_id);
av_log(ctx, AV_LOG_INFO, "mapping_color_space=%"PRIu8"; ", mapping->mapping_color_space);
av_log(ctx, AV_LOG_INFO, "mapping_chroma_format_idc=%"PRIu8"; ", mapping->mapping_chroma_format_idc);
av_log(ctx, AV_LOG_INFO, "nlq_method_idc=%d; ", (int) mapping->nlq_method_idc);
av_log(ctx, AV_LOG_INFO, "num_x_partitions=%"PRIu32"; ", mapping->num_x_partitions);
av_log(ctx, AV_LOG_INFO, "num_y_partitions=%"PRIu32"\n", mapping->num_y_partitions);
for (int c = 0; c < 3; c++) {
const AVDOVIReshapingCurve *curve = &mapping->curves[c];
const AVDOVINLQParams *nlq = &mapping->nlq[c];
av_log(ctx, AV_LOG_INFO, " channel %d: ", c);
av_log(ctx, AV_LOG_INFO, "pivots={ ");
for (int i = 0; i < curve->num_pivots; i++)
av_log(ctx, AV_LOG_INFO, "%"PRIu16" ", curve->pivots[i]);
av_log(ctx, AV_LOG_INFO, "}; mapping_idc={ ");
for (int i = 0; i < curve->num_pivots - 1; i++)
av_log(ctx, AV_LOG_INFO, "%d ", (int) curve->mapping_idc[i]);
av_log(ctx, AV_LOG_INFO, "}; poly_order={ ");
for (int i = 0; i < curve->num_pivots - 1; i++)
av_log(ctx, AV_LOG_INFO, "%"PRIu8" ", curve->poly_order[i]);
av_log(ctx, AV_LOG_INFO, "}; poly_coef={ ");
for (int i = 0; i < curve->num_pivots - 1; i++) {
av_log(ctx, AV_LOG_INFO, "{%"PRIi64", %"PRIi64", %"PRIi64"} ",
curve->poly_coef[i][0],
curve->poly_coef[i][1],
curve->poly_coef[i][2]);
}
av_log(ctx, AV_LOG_INFO, "}; mmr_order={ ");
for (int i = 0; i < curve->num_pivots - 1; i++)
av_log(ctx, AV_LOG_INFO, "%"PRIu8" ", curve->mmr_order[i]);
av_log(ctx, AV_LOG_INFO, "}; mmr_constant={ ");
for (int i = 0; i < curve->num_pivots - 1; i++)
av_log(ctx, AV_LOG_INFO, "%"PRIi64" ", curve->mmr_constant[i]);
av_log(ctx, AV_LOG_INFO, "}; mmr_coef={ ");
for (int i = 0; i < curve->num_pivots - 1; i++) {
av_log(ctx, AV_LOG_INFO, "{");
for (int j = 0; j < curve->mmr_order[i]; j++) {
for (int k = 0; k < 7; k++)
av_log(ctx, AV_LOG_INFO, "%"PRIi64" ", curve->mmr_coef[i][j][k]);
}
av_log(ctx, AV_LOG_INFO, "} ");
}
av_log(ctx, AV_LOG_INFO, "}; nlq_offset=%"PRIu64"; ", nlq->nlq_offset);
av_log(ctx, AV_LOG_INFO, "vdr_in_max=%"PRIu64"; ", nlq->vdr_in_max);
switch (mapping->nlq_method_idc) {
case AV_DOVI_NLQ_LINEAR_DZ:
av_log(ctx, AV_LOG_INFO, "linear_deadzone_slope=%"PRIu64"; ", nlq->linear_deadzone_slope);
av_log(ctx, AV_LOG_INFO, "linear_deadzone_threshold=%"PRIu64"\n", nlq->linear_deadzone_threshold);
break;
}
}
av_log(ctx, AV_LOG_INFO, " color metadata: ");
av_log(ctx, AV_LOG_INFO, "dm_metadata_id=%"PRIu8"; ", color->dm_metadata_id);
av_log(ctx, AV_LOG_INFO, "scene_refresh_flag=%"PRIu8"; ", color->scene_refresh_flag);
av_log(ctx, AV_LOG_INFO, "ycc_to_rgb_matrix={ ");
for (int i = 0; i < 9; i++)
av_log(ctx, AV_LOG_INFO, "%f ", av_q2d(color->ycc_to_rgb_matrix[i]));
av_log(ctx, AV_LOG_INFO, "}; ycc_to_rgb_offset={ ");
for (int i = 0; i < 3; i++)
av_log(ctx, AV_LOG_INFO, "%f ", av_q2d(color->ycc_to_rgb_offset[i]));
av_log(ctx, AV_LOG_INFO, "}; rgb_to_lms_matrix={ ");
for (int i = 0; i < 9; i++)
av_log(ctx, AV_LOG_INFO, "%f ", av_q2d(color->rgb_to_lms_matrix[i]));
av_log(ctx, AV_LOG_INFO, "}; signal_eotf=%"PRIu16"; ", color->signal_eotf);
av_log(ctx, AV_LOG_INFO, "signal_eotf_param0=%"PRIu16"; ", color->signal_eotf_param0);
av_log(ctx, AV_LOG_INFO, "signal_eotf_param1=%"PRIu16"; ", color->signal_eotf_param1);
av_log(ctx, AV_LOG_INFO, "signal_eotf_param2=%"PRIu32"; ", color->signal_eotf_param2);
av_log(ctx, AV_LOG_INFO, "signal_bit_depth=%"PRIu8"; ", color->signal_bit_depth);
av_log(ctx, AV_LOG_INFO, "signal_color_space=%"PRIu8"; ", color->signal_color_space);
av_log(ctx, AV_LOG_INFO, "signal_chroma_format=%"PRIu8"; ", color->signal_chroma_format);
av_log(ctx, AV_LOG_INFO, "signal_full_range_flag=%"PRIu8"; ", color->signal_full_range_flag);
av_log(ctx, AV_LOG_INFO, "source_min_pq=%"PRIu16"; ", color->source_min_pq);
av_log(ctx, AV_LOG_INFO, "source_max_pq=%"PRIu16"; ", color->source_max_pq);
av_log(ctx, AV_LOG_INFO, "source_diagonal=%"PRIu16"; ", color->source_diagonal);
}
static void dump_color_property(AVFilterContext *ctx, AVFrame *frame)
{
const char *color_range_str = av_color_range_name(frame->color_range);
@ -617,6 +722,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
case AV_FRAME_DATA_FILM_GRAIN_PARAMS:
dump_sei_film_grain_params_metadata(ctx, sd);
break;
case AV_FRAME_DATA_DOVI_METADATA:
dump_dovi_metadata(ctx, sd);
break;
default:
av_log(ctx, AV_LOG_WARNING, "unknown side data type %d "
"(%"SIZE_SPECIFIER" bytes)\n", sd->type, sd->size);

View File

@ -61,7 +61,6 @@ OBJS-$(CONFIG_RTPDEC) += rdt.o \
rtpdec_vp9.o \
rtpdec_xiph.o
OBJS-$(CONFIG_RTPENC_CHAIN) += rtpenc_chain.o rtp.o
OBJS-$(CONFIG_SHARED) += log2_tab.o golomb_tab.o
OBJS-$(CONFIG_SRTP) += srtp.o
# muxers/demuxers
@ -316,11 +315,11 @@ OBJS-$(CONFIG_M4V_MUXER) += rawenc.o
OBJS-$(CONFIG_MATROSKA_DEMUXER) += matroskadec.o matroska.o \
flac_picture.o isom_tags.o rmsipr.o \
oggparsevorbis.o vorbiscomment.o \
qtpalette.o replaygain.o
qtpalette.o replaygain.o dovi_isom.o
OBJS-$(CONFIG_MATROSKA_MUXER) += matroskaenc.o matroska.o \
av1.o avc.o hevc.o isom_tags.o \
flacenc_header.o avlanguage.o \
vorbiscomment.o wv.o
vorbiscomment.o wv.o dovi_isom.o
OBJS-$(CONFIG_MCA_DEMUXER) += mca.o
OBJS-$(CONFIG_MCC_DEMUXER) += mccdec.o subtitles.o
OBJS-$(CONFIG_MD5_MUXER) += hashenc.o
@ -339,10 +338,11 @@ OBJS-$(CONFIG_MMF_MUXER) += mmf.o rawenc.o
OBJS-$(CONFIG_MODS_DEMUXER) += mods.o
OBJS-$(CONFIG_MOFLEX_DEMUXER) += moflex.o
OBJS-$(CONFIG_MOV_DEMUXER) += mov.o mov_chan.o mov_esds.o \
qtpalette.o replaygain.o
qtpalette.o replaygain.o dovi_isom.o
OBJS-$(CONFIG_MOV_MUXER) += movenc.o av1.o avc.o hevc.o vpcc.o \
movenchint.o mov_chan.o rtp.o \
movenccenc.o movenc_ttml.o rawutils.o
movenccenc.o movenc_ttml.o rawutils.o \
dovi_isom.o
OBJS-$(CONFIG_MP2_MUXER) += rawenc.o
OBJS-$(CONFIG_MP3_DEMUXER) += mp3dec.o replaygain.o
OBJS-$(CONFIG_MP3_MUXER) += mp3enc.o rawenc.o id3v2enc.o
@ -679,6 +679,21 @@ OBJS-$(CONFIG_LIBSRT_PROTOCOL) += libsrt.o
OBJS-$(CONFIG_LIBSSH_PROTOCOL) += libssh.o
OBJS-$(CONFIG_LIBZMQ_PROTOCOL) += libzmq.o
# Objects duplicated from other libraries for shared builds
SHLIBOBJS += log2_tab.o to_upper4.o
SHLIBOBJS-$(CONFIG_ISO_MEDIA) += mpegaudiotabs.o
SHLIBOBJS-$(CONFIG_FLV_MUXER) += mpeg4audio_sample_rates.o
SHLIBOBJS-$(CONFIG_HLS_DEMUXER) += ac3_channel_layout_tab.o
SHLIBOBJS-$(CONFIG_MATROSKA_DEMUXER) += mpeg4audio_sample_rates.o
SHLIBOBJS-$(CONFIG_MOV_DEMUXER) += ac3_channel_layout_tab.o
SHLIBOBJS-$(CONFIG_MP3_MUXER) += mpegaudiotabs.o
SHLIBOBJS-$(CONFIG_MXF_MUXER) += golomb_tab.o
SHLIBOBJS-$(CONFIG_NUT_MUXER) += mpegaudiotabs.o
SHLIBOBJS-$(CONFIG_RTPDEC) += jpegtables.o
SHLIBOBJS-$(CONFIG_RTP_MUXER) += golomb_tab.o jpegtables.o \
mpeg4audio_sample_rates.o
SHLIBOBJS-$(CONFIG_SPDIF_MUXER) += dca_sample_rate_tab.o
# libavdevice dependencies
OBJS-$(CONFIG_IEC61883_INDEV) += dv.o

View File

@ -0,0 +1,22 @@
/*
* AC-3 channel layout table
* copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavcodec/ac3_channel_layout_tab.h"

View File

@ -37,7 +37,7 @@ typedef struct AIFFOutputContext {
int64_t frames;
int64_t ssnd;
int audio_stream_idx;
PacketList *pict_list, *pict_list_end;
PacketList pict_list;
int write_id3v2;
int id3v2_version;
} AIFFOutputContext;
@ -48,9 +48,9 @@ static int put_id3v2_tags(AVFormatContext *s, AIFFOutputContext *aiff)
uint64_t pos, end, size;
ID3v2EncContext id3v2 = { 0 };
AVIOContext *pb = s->pb;
PacketList *pict_list = aiff->pict_list;
PacketListEntry *list_entry = aiff->pict_list.head;
if (!s->metadata && !s->nb_chapters && !aiff->pict_list)
if (!s->metadata && !s->nb_chapters && !list_entry)
return 0;
avio_wl32(pb, MKTAG('I', 'D', '3', ' '));
@ -59,10 +59,10 @@ static int put_id3v2_tags(AVFormatContext *s, AIFFOutputContext *aiff)
ff_id3v2_start(&id3v2, pb, aiff->id3v2_version, ID3v2_DEFAULT_MAGIC);
ff_id3v2_write_metadata(s, &id3v2);
while (pict_list) {
if ((ret = ff_id3v2_write_apic(s, &id3v2, &pict_list->pkt)) < 0)
while (list_entry) {
if ((ret = ff_id3v2_write_apic(s, &id3v2, &list_entry->pkt)) < 0)
return ret;
pict_list = pict_list->next;
list_entry = list_entry->next;
}
ff_id3v2_finish(&id3v2, pb, s->metadata_header_padding);
@ -218,8 +218,7 @@ static int aiff_write_packet(AVFormatContext *s, AVPacket *pkt)
if (s->streams[pkt->stream_index]->nb_frames >= 1)
return 0;
return avpriv_packet_list_put(&aiff->pict_list, &aiff->pict_list_end,
pkt, av_packet_ref, 0);
return avpriv_packet_list_put(&aiff->pict_list, pkt, NULL, 0);
}
return 0;
@ -265,7 +264,7 @@ static void aiff_deinit(AVFormatContext *s)
{
AIFFOutputContext *aiff = s->priv_data;
avpriv_packet_list_free(&aiff->pict_list, &aiff->pict_list_end);
avpriv_packet_list_free(&aiff->pict_list);
}
#define OFFSET(x) offsetof(AIFFOutputContext, x)

View File

@ -448,7 +448,7 @@ static int avi_write_header(AVFormatContext *s)
par->bits_per_coded_sample = 16;
avist->pal_offset = avio_tell(pb) + 40;
ff_put_bmp_header(pb, par, 0, 0, avi->flipped_raw_rgb);
pix_fmt = avpriv_find_pix_fmt(avpriv_pix_fmt_bps_avi,
pix_fmt = avpriv_pix_fmt_find(PIX_FMT_LIST_AVI,
par->bits_per_coded_sample);
if ( !par->codec_tag
&& par->codec_id == AV_CODEC_ID_RAWVIDEO

View File

@ -0,0 +1,25 @@
/*
* DCA sample rates
* Copyright (C) 2004 Gildas Bazin
* Copyright (C) 2004 Benjamin Zores
* Copyright (C) 2006 Benjamin Larsson
* Copyright (C) 2007 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavcodec/dca_sample_rate_tab.h"

View File

@ -537,7 +537,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
#endif
for (;;) {
PacketList *pktl = si->raw_packet_buffer;
PacketListEntry *pktl = si->raw_packet_buffer.head;
AVStream *st;
FFStream *sti;
const AVPacket *pkt1;
@ -548,8 +548,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
if ((err = probe_codec(s, st, NULL)) < 0)
return err;
if (ffstream(st)->request_probe <= 0) {
avpriv_packet_list_get(&si->raw_packet_buffer,
&si->raw_packet_buffer_end, pkt);
avpriv_packet_list_get(&si->raw_packet_buffer, pkt);
si->raw_packet_buffer_size -= pkt->size;
return 0;
}
@ -624,13 +623,12 @@ FF_ENABLE_DEPRECATION_WARNINGS
return 0;
err = avpriv_packet_list_put(&si->raw_packet_buffer,
&si->raw_packet_buffer_end,
pkt, NULL, 0);
if (err < 0) {
av_packet_unref(pkt);
return err;
}
pkt1 = &si->raw_packet_buffer_end->pkt;
pkt1 = &si->raw_packet_buffer.tail->pkt;
si->raw_packet_buffer_size += pkt1->size;
if ((err = probe_codec(s, st, pkt1)) < 0)
@ -716,13 +714,14 @@ static int has_decode_delay_been_guessed(AVStream *st)
return sti->nb_decoded_frames >= 20;
}
static PacketList *get_next_pkt(AVFormatContext *s, AVStream *st, PacketList *pktl)
static PacketListEntry *get_next_pkt(AVFormatContext *s, AVStream *st,
PacketListEntry *pktl)
{
FFFormatContext *const si = ffformatcontext(s);
if (pktl->next)
return pktl->next;
if (pktl == si->packet_buffer_end)
return si->parse_queue;
if (pktl == si->packet_buffer.tail)
return si->parse_queue.head;
return NULL;
}
@ -774,7 +773,7 @@ static int64_t select_from_pts_buffer(AVStream *st, int64_t *pts_buffer, int64_t
* of the packets in a window.
*/
static void update_dts_from_pts(AVFormatContext *s, int stream_index,
PacketList *pkt_buffer)
PacketListEntry *pkt_buffer)
{
AVStream *const st = s->streams[stream_index];
int delay = ffstream(st)->avctx->has_b_frames;
@ -804,7 +803,7 @@ static void update_initial_timestamps(AVFormatContext *s, int stream_index,
FFFormatContext *const si = ffformatcontext(s);
AVStream *const st = s->streams[stream_index];
FFStream *const sti = ffstream(st);
PacketList *pktl = si->packet_buffer ? si->packet_buffer : si->parse_queue;
PacketListEntry *pktl = si->packet_buffer.head ? si->packet_buffer.head : si->parse_queue.head;
uint64_t shift;
@ -823,7 +822,7 @@ static void update_initial_timestamps(AVFormatContext *s, int stream_index,
if (is_relative(pts))
pts += shift;
for (PacketList *pktl_it = pktl; pktl_it; pktl_it = get_next_pkt(s, st, pktl_it)) {
for (PacketListEntry *pktl_it = pktl; pktl_it; pktl_it = get_next_pkt(s, st, pktl_it)) {
if (pktl_it->pkt.stream_index != stream_index)
continue;
if (is_relative(pktl_it->pkt.pts))
@ -856,7 +855,7 @@ static void update_initial_durations(AVFormatContext *s, AVStream *st,
{
FFFormatContext *const si = ffformatcontext(s);
FFStream *const sti = ffstream(st);
PacketList *pktl = si->packet_buffer ? si->packet_buffer : si->parse_queue;
PacketListEntry *pktl = si->packet_buffer.head ? si->packet_buffer.head : si->parse_queue.head;
int64_t cur_dts = RELATIVE_TS_BASE;
if (sti->first_dts != AV_NOPTS_VALUE) {
@ -882,7 +881,7 @@ static void update_initial_durations(AVFormatContext *s, AVStream *st,
av_log(s, AV_LOG_DEBUG, "first_dts %s but no packet with dts in the queue\n", av_ts2str(sti->first_dts));
return;
}
pktl = si->packet_buffer ? si->packet_buffer : si->parse_queue;
pktl = si->packet_buffer.head ? si->packet_buffer.head : si->parse_queue.head;
sti->first_dts = cur_dts;
} else if (sti->cur_dts != RELATIVE_TS_BASE)
return;
@ -998,7 +997,7 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
}
}
if (pkt->duration > 0 && (si->packet_buffer || si->parse_queue))
if (pkt->duration > 0 && (si->packet_buffer.head || si->parse_queue.head))
update_initial_durations(s, st, pkt->stream_index, pkt->duration);
/* Correct timestamps with byte offset if demuxers only have timestamps
@ -1195,7 +1194,6 @@ static int parse_packet(AVFormatContext *s, AVPacket *pkt,
compute_pkt_fields(s, st, sti->parser, out_pkt, next_dts, next_pts);
ret = avpriv_packet_list_put(&si->parse_queue,
&si->parse_queue_end,
out_pkt, NULL, 0);
if (ret < 0)
goto fail;
@ -1225,7 +1223,7 @@ static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
int ret, got_packet = 0;
AVDictionary *metadata = NULL;
while (!got_packet && !si->parse_queue) {
while (!got_packet && !si->parse_queue.head) {
AVStream *st;
FFStream *sti;
@ -1338,8 +1336,8 @@ static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
}
}
if (!got_packet && si->parse_queue)
ret = avpriv_packet_list_get(&si->parse_queue, &si->parse_queue_end, pkt);
if (!got_packet && si->parse_queue.head)
ret = avpriv_packet_list_get(&si->parse_queue, pkt);
if (ret >= 0) {
AVStream *const st = s->streams[pkt->stream_index];
@ -1420,9 +1418,8 @@ int av_read_frame(AVFormatContext *s, AVPacket *pkt)
AVStream *st;
if (!genpts) {
ret = si->packet_buffer
? avpriv_packet_list_get(&si->packet_buffer,
&si->packet_buffer_end, pkt)
ret = si->packet_buffer.head
? avpriv_packet_list_get(&si->packet_buffer, pkt)
: read_frame_internal(s, pkt);
if (ret < 0)
return ret;
@ -1430,7 +1427,7 @@ int av_read_frame(AVFormatContext *s, AVPacket *pkt)
}
for (;;) {
PacketList *pktl = si->packet_buffer;
PacketListEntry *pktl = si->packet_buffer.head;
if (pktl) {
AVPacket *next_pkt = &pktl->pkt;
@ -1463,15 +1460,14 @@ int av_read_frame(AVFormatContext *s, AVPacket *pkt)
// 3. the packets for this stream at the end of the files had valid dts.
next_pkt->pts = last_dts + next_pkt->duration;
}
pktl = si->packet_buffer;
pktl = si->packet_buffer.head;
}
/* read packet from packet buffer, if there is data */
st = s->streams[next_pkt->stream_index];
if (!(next_pkt->pts == AV_NOPTS_VALUE && st->discard < AVDISCARD_ALL &&
next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
ret = avpriv_packet_list_get(&si->packet_buffer,
&si->packet_buffer_end, pkt);
ret = avpriv_packet_list_get(&si->packet_buffer, pkt);
goto return_packet;
}
}
@ -1486,7 +1482,6 @@ int av_read_frame(AVFormatContext *s, AVPacket *pkt)
}
ret = avpriv_packet_list_put(&si->packet_buffer,
&si->packet_buffer_end,
pkt, NULL, 0);
if (ret < 0) {
av_packet_unref(pkt);
@ -2598,12 +2593,11 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
if (!(ic->flags & AVFMT_FLAG_NOBUFFER)) {
ret = avpriv_packet_list_put(&si->packet_buffer,
&si->packet_buffer_end,
pkt1, NULL, 0);
if (ret < 0)
goto unref_then_goto_end;
pkt = &si->packet_buffer_end->pkt;
pkt = &si->packet_buffer.tail->pkt;
} else {
pkt = pkt1;
}
@ -2751,8 +2745,8 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
// EOF already reached while reading the stream above.
// So continue with reordering DTS with whatever delay we have.
if (si->packet_buffer && !has_decode_delay_been_guessed(st)) {
update_dts_from_pts(ic, stream_index, si->packet_buffer);
if (si->packet_buffer.head && !has_decode_delay_been_guessed(st)) {
update_dts_from_pts(ic, stream_index, si->packet_buffer.head);
}
}
}
@ -2790,7 +2784,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
if (avctx->codec_id == AV_CODEC_ID_RAWVIDEO && !avctx->codec_tag && !avctx->bits_per_coded_sample) {
uint32_t tag= avcodec_pix_fmt_to_codec_tag(avctx->pix_fmt);
if (avpriv_find_pix_fmt(avpriv_get_raw_pix_fmt_tags(), tag) == avctx->pix_fmt)
if (avpriv_pix_fmt_find(PIX_FMT_LIST_RAW, tag) == avctx->pix_fmt)
avctx->codec_tag= tag;
}

118
libavformat/dovi_isom.c Normal file
View File

@ -0,0 +1,118 @@
/*
* DOVI ISO Media common code
*
* Copyright (c) 2020 Vacing Fang <vacingfang@tencent.com>
* Copyright (c) 2021 quietvoid
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/dovi_meta.h"
#include "libavcodec/put_bits.h"
#include "avformat.h"
#include "dovi_isom.h"
int ff_isom_parse_dvcc_dvvc(AVFormatContext *s, AVStream *st, const uint8_t *buf_ptr, uint64_t size)
{
uint32_t buf;
AVDOVIDecoderConfigurationRecord *dovi;
size_t dovi_size;
int ret;
if (size > (1 << 30) || size < 4)
return AVERROR_INVALIDDATA;
dovi = av_dovi_alloc(&dovi_size);
if (!dovi)
return AVERROR(ENOMEM);
dovi->dv_version_major = *buf_ptr++; // 8 bits
dovi->dv_version_minor = *buf_ptr++; // 8 bits
buf = *buf_ptr++ << 8;
buf |= *buf_ptr++;
dovi->dv_profile = (buf >> 9) & 0x7f; // 7 bits
dovi->dv_level = (buf >> 3) & 0x3f; // 6 bits
dovi->rpu_present_flag = (buf >> 2) & 0x01; // 1 bit
dovi->el_present_flag = (buf >> 1) & 0x01; // 1 bit
dovi->bl_present_flag = buf & 0x01; // 1 bit
// Has enough remaining data
if (size >= 5) {
dovi->dv_bl_signal_compatibility_id = ((*buf_ptr++) >> 4) & 0x0f; // 4 bits
} else {
// 0 stands for None
// Dolby Vision V1.2.93 profiles and levels
dovi->dv_bl_signal_compatibility_id = 0;
}
ret = av_stream_add_side_data(st, AV_PKT_DATA_DOVI_CONF,
(uint8_t *)dovi, dovi_size);
if (ret < 0) {
av_free(dovi);
return ret;
}
av_log(s, AV_LOG_TRACE, "DOVI in dvcC/dvvC/dvwC box, version: %d.%d, profile: %d, level: %d, "
"rpu flag: %d, el flag: %d, bl flag: %d, compatibility id: %d\n",
dovi->dv_version_major, dovi->dv_version_minor,
dovi->dv_profile, dovi->dv_level,
dovi->rpu_present_flag,
dovi->el_present_flag,
dovi->bl_present_flag,
dovi->dv_bl_signal_compatibility_id);
return 0;
}
void ff_isom_put_dvcc_dvvc(AVFormatContext *s, uint8_t out[ISOM_DVCC_DVVC_SIZE],
AVDOVIDecoderConfigurationRecord *dovi)
{
PutBitContext pb;
init_put_bits(&pb, out, ISOM_DVCC_DVVC_SIZE);
put_bits(&pb, 8, dovi->dv_version_major);
put_bits(&pb, 8, dovi->dv_version_minor);
put_bits(&pb, 7, dovi->dv_profile & 0x7f);
put_bits(&pb, 6, dovi->dv_level & 0x3f);
put_bits(&pb, 1, !!dovi->rpu_present_flag);
put_bits(&pb, 1, !!dovi->el_present_flag);
put_bits(&pb, 1, !!dovi->bl_present_flag);
put_bits(&pb, 4, dovi->dv_bl_signal_compatibility_id & 0x0f);
put_bits(&pb, 28, 0); /* reserved */
put_bits32(&pb, 0); /* reserved */
put_bits32(&pb, 0); /* reserved */
put_bits32(&pb, 0); /* reserved */
put_bits32(&pb, 0); /* reserved */
flush_put_bits(&pb);
av_log(s, AV_LOG_DEBUG, "DOVI in %s box, version: %d.%d, profile: %d, level: %d, "
"rpu flag: %d, el flag: %d, bl flag: %d, compatibility id: %d\n",
dovi->dv_profile > 10 ? "dvwC" : (dovi->dv_profile > 7 ? "dvvC" : "dvcC"),
dovi->dv_version_major, dovi->dv_version_minor,
dovi->dv_profile, dovi->dv_level,
dovi->rpu_present_flag,
dovi->el_present_flag,
dovi->bl_present_flag,
dovi->dv_bl_signal_compatibility_id);
}
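The unpacking in ff_isom_parse_dvcc_dvvc() above splits the 16 bits that follow the two version bytes into dv_profile (7 bits), dv_level (6 bits) and the three presence flags. A standalone sanity check of that layout, using made-up sample values (profile 8, level 6, rpu and bl set):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Hypothetical 16-bit field: profile 8, level 6, rpu=1, el=0, bl=1. */
        uint16_t buf = (8u << 9) | (6u << 3) | (1u << 2) | (0u << 1) | 1u;

        assert(((buf >> 9) & 0x7f) == 8);   /* dv_profile, 7 bits */
        assert(((buf >> 3) & 0x3f) == 6);   /* dv_level, 6 bits   */
        assert(((buf >> 2) & 0x01) == 1);   /* rpu_present_flag   */
        assert(((buf >> 1) & 0x01) == 0);   /* el_present_flag    */
        assert(( buf       & 0x01) == 1);   /* bl_present_flag    */
        return 0;
    }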

35
libavformat/dovi_isom.h Normal file
View File

@ -0,0 +1,35 @@
/*
* DOVI ISO Media common code
* Copyright (c) 2021 quietvoid
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_DOVI_ISOM_H
#define AVFORMAT_DOVI_ISOM_H
#include "libavutil/dovi_meta.h"
#include "avformat.h"
#define ISOM_DVCC_DVVC_SIZE 24
int ff_isom_parse_dvcc_dvvc(AVFormatContext *s, AVStream *st, const uint8_t *buf_ptr, uint64_t size);
void ff_isom_put_dvcc_dvvc(AVFormatContext *s, uint8_t out[ISOM_DVCC_DVVC_SIZE],
AVDOVIDecoderConfigurationRecord *dovi);
#endif /* AVFORMAT_DOVI_ISOM_H */

View File

@ -40,7 +40,7 @@ typedef struct FlacMuxerContext {
int audio_stream_idx;
int waiting_pics;
/* audio packets are queued here until we get all the attached pictures */
PacketList *queue, *queue_end;
PacketList queue;
/* updated streaminfo sent by the encoder at the end */
uint8_t streaminfo[FLAC_STREAMINFO_SIZE];
@ -306,8 +306,8 @@ static int flac_queue_flush(AVFormatContext *s)
if (ret < 0)
write = 0;
while (c->queue) {
avpriv_packet_list_get(&c->queue, &c->queue_end, pkt);
while (c->queue.head) {
avpriv_packet_list_get(&c->queue, pkt);
if (write && (ret = flac_write_audio_packet(s, pkt)) < 0)
write = 0;
av_packet_unref(pkt);
@ -347,7 +347,7 @@ static void flac_deinit(struct AVFormatContext *s)
{
FlacMuxerContext *c = s->priv_data;
avpriv_packet_list_free(&c->queue, &c->queue_end);
avpriv_packet_list_free(&c->queue);
for (unsigned i = 0; i < s->nb_streams; i++)
av_packet_free((AVPacket **)&s->streams[i]->priv_data);
}
@ -360,7 +360,7 @@ static int flac_write_packet(struct AVFormatContext *s, AVPacket *pkt)
if (pkt->stream_index == c->audio_stream_idx) {
if (c->waiting_pics) {
/* buffer audio packets until we get all the pictures */
ret = avpriv_packet_list_put(&c->queue, &c->queue_end, pkt, av_packet_ref, 0);
ret = avpriv_packet_list_put(&c->queue, pkt, NULL, 0);
if (ret < 0) {
av_log(s, AV_LOG_ERROR, "Out of memory in packet queue; skipping attached pictures\n");
c->waiting_pics = 0;

View File

@ -24,6 +24,7 @@
#include "libavutil/intfloat.h"
#include "libavutil/avassert.h"
#include "libavutil/mathematics.h"
#include "libavcodec/mpeg4audio.h"
#include "avio_internal.h"
#include "avio.h"
#include "avc.h"
@ -33,7 +34,6 @@
#include "metadata.h"
#include "libavutil/opt.h"
#include "libavcodec/put_bits.h"
#include "libavcodec/aacenctab.h"
static const AVCodecTag flv_video_codec_ids[] = {
@ -514,7 +514,7 @@ static void flv_write_codec_header(AVFormatContext* s, AVCodecParameters* par, i
for (samplerate_index = 0; samplerate_index < 16;
samplerate_index++)
if (flv->audio_par->sample_rate
== mpeg4audio_sample_rates[samplerate_index])
== ff_mpeg4audio_sample_rates[samplerate_index])
break;
init_put_bits(&pbc, data, sizeof(data));
@ -576,15 +576,9 @@ static int flv_append_keyframe_info(AVFormatContext *s, FLVContext *flv, double
static int shift_data(AVFormatContext *s)
{
int ret = 0;
int n = 0;
int ret;
int64_t metadata_size = 0;
FLVContext *flv = s->priv_data;
int64_t pos, pos_end = avio_tell(s->pb); /* Save the pre-shift size. */
uint8_t *buf, *read_buf[2];
int read_buf_id = 0;
int read_size[2];
AVIOContext *read_pb;
metadata_size = flv->filepositions_count * 9 * 2 + 10; /* filepositions and times value */
metadata_size += 2 + 13; /* filepositions String */
@ -596,58 +590,17 @@ static int shift_data(AVFormatContext *s)
if (metadata_size < 0)
return metadata_size;
buf = av_malloc_array(metadata_size, 2);
if (!buf) {
return AVERROR(ENOMEM);
}
read_buf[0] = buf;
read_buf[1] = buf + metadata_size;
ret = ff_format_shift_data(s, flv->keyframes_info_offset, metadata_size);
if (ret < 0)
return ret;
avio_seek(s->pb, flv->metadata_size_pos, SEEK_SET);
avio_wb24(s->pb, flv->metadata_totalsize + metadata_size);
avio_seek(s->pb, flv->metadata_totalsize_pos, SEEK_SET);
avio_seek(s->pb, flv->metadata_totalsize_pos + metadata_size, SEEK_SET);
avio_wb32(s->pb, flv->metadata_totalsize + 11 + metadata_size);
/* Shift the data: the AVIO context of the output can only be used for
* writing, so we re-open the same output, but for reading. It also avoids
* a read/seek/write/seek back and forth. */
avio_flush(s->pb);
ret = s->io_open(s, &read_pb, s->url, AVIO_FLAG_READ, NULL);
if (ret < 0) {
av_log(s, AV_LOG_ERROR, "Unable to re-open %s output file for "
"the second pass (add_keyframe_index)\n", s->url);
goto end;
}
/* Get ready for writing. */
avio_seek(s->pb, flv->keyframes_info_offset + metadata_size, SEEK_SET);
/* start reading at where the keyframe index information will be placed */
avio_seek(read_pb, flv->keyframes_info_offset, SEEK_SET);
pos = avio_tell(read_pb);
#define READ_BLOCK do { \
read_size[read_buf_id] = avio_read(read_pb, read_buf[read_buf_id], metadata_size); \
read_buf_id ^= 1; \
} while (0)
/* shift data by chunk of at most keyframe *filepositions* and *times* size */
READ_BLOCK;
do {
READ_BLOCK;
n = read_size[read_buf_id];
if (n < 0)
break;
avio_write(s->pb, read_buf[read_buf_id], n);
pos += n;
} while (pos <= pos_end);
ff_format_io_close(s, &read_pb);
end:
av_free(buf);
return ret;
return 0;
}
static int flv_init(struct AVFormatContext *s)

View File

@ -26,6 +26,8 @@
* https://developer.apple.com/library/ios/documentation/AudioVideo/Conceptual/HLS_Sample_Encryption
*/
#include "libavutil/channel_layout.h"
#include "hls_sample_encryption.h"
#include "libavcodec/adts_header.h"
@ -129,7 +131,7 @@ int ff_hls_senc_parse_audio_setup_info(AVStream *st, HLSAudioSetupInfo *info)
st->codecpar->sample_rate = eac3_sample_rate_tab[fscod];
st->codecpar->channel_layout = avpriv_ac3_channel_layout_tab[acmod];
st->codecpar->channel_layout = ff_ac3_channel_layout_tab[acmod];
if (lfeon)
st->codecpar->channel_layout |= AV_CH_LOW_FREQUENCY;

View File

@ -25,6 +25,7 @@
#include "libavcodec/avcodec.h"
#include "libavcodec/bsf.h"
#include "libavcodec/packet_internal.h"
#include "avformat.h"
#include "os_support.h"
@ -92,8 +93,7 @@ typedef struct FFFormatContext {
* not decoded, for example to get the codec parameters in MPEG
* streams.
*/
struct PacketList *packet_buffer;
struct PacketList *packet_buffer_end;
PacketList packet_buffer;
/* av_seek_frame() support */
int64_t data_offset; /**< offset of the first packet */
@ -104,13 +104,11 @@ typedef struct FFFormatContext {
* be identified, as parsing cannot be done without knowing the
* codec.
*/
struct PacketList *raw_packet_buffer;
struct PacketList *raw_packet_buffer_end;
PacketList raw_packet_buffer;
/**
* Packets split by the parser get queued here.
*/
struct PacketList *parse_queue;
struct PacketList *parse_queue_end;
PacketList parse_queue;
/**
* The generic code uses this as a temporary packet
* to parse packets or for muxing, especially flushing.
@ -393,7 +391,7 @@ typedef struct FFStream {
/**
* last packet in packet_buffer for this stream when muxing.
*/
struct PacketList *last_in_packet_buffer;
PacketListEntry *last_in_packet_buffer;
int64_t last_IP_pts;
int last_IP_duration;
@ -1019,4 +1017,11 @@ void ff_format_set_url(AVFormatContext *s, char *url);
void avpriv_register_devices(const AVOutputFormat * const o[], const AVInputFormat * const i[]);
/**
* Make shift_size amount of space at read_start by shifting data in the output
* at read_start until the current IO position. The underlying IO context must
* be seekable.
*/
int ff_format_shift_data(AVFormatContext *s, int64_t read_start, int shift_size);
#endif /* AVFORMAT_INTERNAL_H */
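ff_format_shift_data() declared above is the helper that the rewritten flvenc shift_data() now delegates to; judging by the code removed from flvenc, it streams the tail of the output forward through a second read context opened on the muxer's output. A minimal stdio sketch of the same idea, here copying from the end of the file towards read_start so no byte is overwritten before it has been moved (shift_tail is a made-up name; the file must be opened for update, e.g. "rb+"):

    #include <stdio.h>

    /* Make `shift` bytes of room at `read_start` by moving everything from
     * read_start to EOF forward, chunk by chunk, starting at the end. */
    static int shift_tail(FILE *f, long read_start, long shift)
    {
        char buf[4096];
        long end, pos, chunk;

        if (fseek(f, 0, SEEK_END) != 0)
            return -1;
        end = ftell(f);
        if (end < read_start)
            return -1;

        pos = end;
        while (pos > read_start) {
            chunk = pos - read_start;
            if (chunk > (long)sizeof(buf))
                chunk = (long)sizeof(buf);
            pos -= chunk;
            if (fseek(f, pos, SEEK_SET) != 0 ||
                fread(buf, 1, (size_t)chunk, f) != (size_t)chunk ||
                fseek(f, pos + shift, SEEK_SET) != 0 ||
                fwrite(buf, 1, (size_t)chunk, f) != (size_t)chunk)
                return -1;
        }
        return 0;   /* bytes [read_start, read_start + shift) may now be rewritten */
    }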

View File

@ -362,7 +362,7 @@ int ff_mp4_read_dec_config_descr(AVFormatContext *fc, AVStream *st, AVIOContext
return ret;
st->codecpar->channels = cfg.channels;
if (cfg.object_type == 29 && cfg.sampling_index < 3) // old mp3on4
st->codecpar->sample_rate = avpriv_mpa_freq_tab[cfg.sampling_index];
st->codecpar->sample_rate = ff_mpa_freq_tab[cfg.sampling_index];
else if (cfg.ext_sample_rate)
st->codecpar->sample_rate = cfg.ext_sample_rate;
else

24
libavformat/jpegtables.c Normal file
View File

@ -0,0 +1,24 @@
/*
* MJPEG tables
* Copyright (c) 2000, 2001 Fabrice Bellard
* Copyright (c) 2003 Alex Beregszaszi
* Copyright (c) 2003-2004 Michael Niedermayer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavcodec/jpegtabs.h"

View File

@ -111,6 +111,7 @@
#define MATROSKA_ID_TRACKCONTENTENCODING 0x6240
#define MATROSKA_ID_TRACKTIMECODESCALE 0x23314F
#define MATROSKA_ID_TRACKMAXBLKADDID 0x55EE
#define MATROSKA_ID_TRACKBLKADDMAPPING 0x41E4
/* IDs in the trackvideo master */
#define MATROSKA_ID_VIDEOFRAMERATE 0x2383E3
@ -189,6 +190,12 @@
#define MATROSKA_ID_ENCODINGSIGKEYID 0x47E4
#define MATROSKA_ID_ENCODINGSIGNATURE 0x47E3
/* IDs in the block addition mapping master */
#define MATROSKA_ID_BLKADDIDVALUE 0x41F0
#define MATROSKA_ID_BLKADDIDNAME 0x41A4
#define MATROSKA_ID_BLKADDIDTYPE 0x41E7
#define MATROSKA_ID_BLKADDIDEXTRADATA 0x41ED
/* ID in the cues master */
#define MATROSKA_ID_POINTENTRY 0xBB
@ -385,4 +392,6 @@ extern const char * const ff_matroska_video_stereo_plane[MATROSKA_VIDEO_STEREO_P
int ff_mkv_stereo3d_conv(AVStream *st, MatroskaVideoStereoModeType stereo_mode);
#define DVCC_DVVC_BLOCK_TYPE_NAME "Dolby Vision configuration"
#endif /* AVFORMAT_MATROSKA_H */

View File

@ -53,6 +53,7 @@
#include "avformat.h"
#include "avio_internal.h"
#include "dovi_isom.h"
#include "internal.h"
#include "isom.h"
#include "matroska.h"
@ -239,6 +240,13 @@ typedef struct MatroskaTrackOperation {
EbmlList combine_planes;
} MatroskaTrackOperation;
typedef struct MatroskaBlockAdditionMapping {
uint64_t value;
char *name;
uint64_t type;
EbmlBin extradata;
} MatroskaBlockAdditionMapping;
typedef struct MatroskaTrack {
uint64_t num;
uint64_t uid;
@ -269,6 +277,7 @@ typedef struct MatroskaTrack {
int ms_compat;
int needs_decoding;
uint64_t max_block_additional_id;
EbmlList block_addition_mappings;
uint32_t palette[AVPALETTE_COUNT];
int has_palette;
@ -387,8 +396,7 @@ typedef struct MatroskaDemuxContext {
AVPacket *pkt;
/* the packet queue */
PacketList *queue;
PacketList *queue_end;
PacketList queue;
int done;
@ -419,8 +427,8 @@ typedef struct MatroskaDemuxContext {
// incomplete type (6.7.2 in C90, 6.9.2 in C99).
// Removing the sizes breaks MSVC.
static EbmlSyntax ebml_syntax[3], matroska_segment[9], matroska_track_video_color[15], matroska_track_video[19],
matroska_track[32], matroska_track_encoding[6], matroska_track_encodings[2],
matroska_track_combine_planes[2], matroska_track_operation[2], matroska_tracks[2],
matroska_track[33], matroska_track_encoding[6], matroska_track_encodings[2],
matroska_track_combine_planes[2], matroska_track_operation[2], matroska_block_addition_mapping[5], matroska_tracks[2],
matroska_attachments[2], matroska_chapter_entry[9], matroska_chapter[6], matroska_chapters[2],
matroska_index_entry[3], matroska_index[2], matroska_tag[3], matroska_tags[2], matroska_seekhead[2],
matroska_blockadditions[2], matroska_blockgroup[8], matroska_cluster_parsing[8];
@ -570,6 +578,14 @@ static EbmlSyntax matroska_track_operation[] = {
CHILD_OF(matroska_track)
};
static EbmlSyntax matroska_block_addition_mapping[] = {
{ MATROSKA_ID_BLKADDIDVALUE, EBML_UINT, 0, 0, offsetof(MatroskaBlockAdditionMapping, value) },
{ MATROSKA_ID_BLKADDIDNAME, EBML_STR, 0, 0, offsetof(MatroskaBlockAdditionMapping, name) },
{ MATROSKA_ID_BLKADDIDTYPE, EBML_UINT, 0, 0, offsetof(MatroskaBlockAdditionMapping, type) },
{ MATROSKA_ID_BLKADDIDEXTRADATA, EBML_BIN, 0, 0, offsetof(MatroskaBlockAdditionMapping, extradata) },
CHILD_OF(matroska_track)
};
static EbmlSyntax matroska_track[] = {
{ MATROSKA_ID_TRACKNUMBER, EBML_UINT, 0, 0, offsetof(MatroskaTrack, num) },
{ MATROSKA_ID_TRACKNAME, EBML_UTF8, 0, 0, offsetof(MatroskaTrack, name) },
@ -593,6 +609,7 @@ static EbmlSyntax matroska_track[] = {
{ MATROSKA_ID_TRACKOPERATION, EBML_NEST, 0, 0, offsetof(MatroskaTrack, operation), { .n = matroska_track_operation } },
{ MATROSKA_ID_TRACKCONTENTENCODINGS, EBML_NEST, 0, 0, 0, { .n = matroska_track_encodings } },
{ MATROSKA_ID_TRACKMAXBLKADDID, EBML_UINT, 0, 0, offsetof(MatroskaTrack, max_block_additional_id), { .u = 0 } },
{ MATROSKA_ID_TRACKBLKADDMAPPING, EBML_NEST, 0, sizeof(MatroskaBlockAdditionMapping), offsetof(MatroskaTrack, block_addition_mappings), { .n = matroska_block_addition_mapping } },
{ MATROSKA_ID_SEEKPREROLL, EBML_UINT, 0, 0, offsetof(MatroskaTrack, seek_preroll), { .u = 0 } },
{ MATROSKA_ID_TRACKFLAGENABLED, EBML_NONE },
{ MATROSKA_ID_TRACKFLAGLACING, EBML_NONE },
@ -2011,8 +2028,8 @@ static int matroska_aac_sri(int samplerate)
{
int sri;
for (sri = 0; sri < FF_ARRAY_ELEMS(avpriv_mpeg4audio_sample_rates); sri++)
if (avpriv_mpeg4audio_sample_rates[sri] == samplerate)
for (sri = 0; sri < FF_ARRAY_ELEMS(ff_mpeg4audio_sample_rates); sri++)
if (ff_mpeg4audio_sample_rates[sri] == samplerate)
break;
return sri;
}
@ -2311,6 +2328,38 @@ static int mkv_parse_video_projection(AVStream *st, const MatroskaTrack *track,
return 0;
}
static int mkv_parse_dvcc_dvvc(AVFormatContext *s, AVStream *st, const MatroskaTrack *track,
EbmlBin *bin)
{
return ff_isom_parse_dvcc_dvvc(s, st, bin->data, bin->size);
}
static int mkv_parse_block_addition_mappings(AVFormatContext *s, AVStream *st, const MatroskaTrack *track)
{
const EbmlList *mappings_list = &track->block_addition_mappings;
MatroskaBlockAdditionMapping *mappings = mappings_list->elem;
int ret;
for (int i = 0; i < mappings_list->nb_elem; i++) {
MatroskaBlockAdditionMapping *mapping = &mappings[i];
switch (mapping->type) {
case MKBETAG('d','v','c','C'):
case MKBETAG('d','v','v','C'):
if ((ret = mkv_parse_dvcc_dvvc(s, st, track, &mapping->extradata)) < 0)
return ret;
break;
default:
av_log(s, AV_LOG_DEBUG,
"Unknown block additional mapping type 0x%"PRIx64", value %"PRIu64", name \"%s\"\n",
mapping->type, mapping->value, mapping->name ? mapping->name : "");
}
}
return 0;
}
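The switch in mkv_parse_block_addition_mappings() above matches BlockAddIDType against the big-endian fourcc values built by MKBETAG. A quick standalone check of what those two tags evaluate to (MKBETAG_SKETCH mirrors libavutil's MKBETAG; the name is made up for this snippet):

    #include <assert.h>
    #include <stdint.h>

    #define MKBETAG_SKETCH(a, b, c, d) \
        ((uint32_t)(d) | ((uint32_t)(c) << 8) | ((uint32_t)(b) << 16) | ((uint32_t)(a) << 24))

    int main(void)
    {
        assert(MKBETAG_SKETCH('d','v','c','C') == 0x64766343u);  /* "dvcC" */
        assert(MKBETAG_SKETCH('d','v','v','C') == 0x64767643u);  /* "dvvC" */
        return 0;
    }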
static int get_qt_codec(MatroskaTrack *track, uint32_t *fourcc, enum AVCodecID *codec_id)
{
const AVCodecTag *codec_tags;
@ -2898,6 +2947,10 @@ static int matroska_parse_tracks(AVFormatContext *s)
if (track->flag_textdescriptions)
st->disposition |= AV_DISPOSITION_DESCRIPTIONS;
}
ret = mkv_parse_block_addition_mappings(s, st, track);
if (ret < 0)
return ret;
}
return 0;
@ -3058,11 +3111,11 @@ static int matroska_read_header(AVFormatContext *s)
static int matroska_deliver_packet(MatroskaDemuxContext *matroska,
AVPacket *pkt)
{
if (matroska->queue) {
if (matroska->queue.head) {
MatroskaTrack *tracks = matroska->tracks.elem;
MatroskaTrack *track;
avpriv_packet_list_get(&matroska->queue, &matroska->queue_end, pkt);
avpriv_packet_list_get(&matroska->queue, pkt);
track = &tracks[pkt->stream_index];
if (track->has_palette) {
uint8_t *pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE);
@ -3084,7 +3137,7 @@ static int matroska_deliver_packet(MatroskaDemuxContext *matroska,
*/
static void matroska_clear_queue(MatroskaDemuxContext *matroska)
{
avpriv_packet_list_free(&matroska->queue, &matroska->queue_end);
avpriv_packet_list_free(&matroska->queue);
}
static int matroska_parse_laces(MatroskaDemuxContext *matroska, uint8_t **buf,
@ -3250,7 +3303,7 @@ static int matroska_parse_rm_audio(MatroskaDemuxContext *matroska,
track->audio.buf_timecode = AV_NOPTS_VALUE;
pkt->pos = pos;
pkt->stream_index = st->index;
ret = avpriv_packet_list_put(&matroska->queue, &matroska->queue_end, pkt, NULL, 0);
ret = avpriv_packet_list_put(&matroska->queue, pkt, NULL, 0);
if (ret < 0) {
av_packet_unref(pkt);
return AVERROR(ENOMEM);
@ -3472,7 +3525,7 @@ static int matroska_parse_webvtt(MatroskaDemuxContext *matroska,
pkt->duration = duration;
pkt->pos = pos;
err = avpriv_packet_list_put(&matroska->queue, &matroska->queue_end, pkt, NULL, 0);
err = avpriv_packet_list_put(&matroska->queue, pkt, NULL, 0);
if (err < 0) {
av_packet_unref(pkt);
return AVERROR(ENOMEM);
@ -3574,7 +3627,7 @@ static int matroska_parse_frame(MatroskaDemuxContext *matroska,
pkt->pos = pos;
pkt->duration = lace_duration;
res = avpriv_packet_list_put(&matroska->queue, &matroska->queue_end, pkt, NULL, 0);
res = avpriv_packet_list_put(&matroska->queue, pkt, NULL, 0);
if (res < 0) {
av_packet_unref(pkt);
return AVERROR(ENOMEM);
@ -3976,10 +4029,10 @@ static int webm_clusters_start_with_keyframe(AVFormatContext *s)
matroska_reset_status(matroska, 0, cluster_pos);
matroska_clear_queue(matroska);
if (matroska_parse_cluster(matroska) < 0 ||
!matroska->queue) {
!matroska->queue.head) {
break;
}
pkt = &matroska->queue->pkt;
pkt = &matroska->queue.head->pkt;
// 4 + read is the length of the cluster id and the cluster length field.
cluster_pos += 4 + read + cluster_length;
if (!(pkt->flags & AV_PKT_FLAG_KEY)) {

View File

@ -27,6 +27,7 @@
#include "avformat.h"
#include "avio_internal.h"
#include "avlanguage.h"
#include "dovi_isom.h"
#include "flacenc.h"
#include "internal.h"
#include "isom.h"
@ -1120,6 +1121,37 @@ static int mkv_write_stereo_mode(AVFormatContext *s, AVIOContext *pb,
return 0;
}
static void mkv_write_dovi(AVFormatContext *s, AVIOContext *pb, AVStream *st)
{
AVDOVIDecoderConfigurationRecord *dovi = (AVDOVIDecoderConfigurationRecord *)
av_stream_get_side_data(st, AV_PKT_DATA_DOVI_CONF, NULL);
if (dovi && dovi->dv_profile <= 10) {
ebml_master mapping;
uint8_t buf[ISOM_DVCC_DVVC_SIZE];
uint32_t type;
uint64_t expected_size = (2 + 1 + (sizeof(DVCC_DVVC_BLOCK_TYPE_NAME) - 1))
+ (2 + 1 + 4) + (2 + 1 + ISOM_DVCC_DVVC_SIZE);
if (dovi->dv_profile > 7) {
type = MKBETAG('d', 'v', 'v', 'C');
} else {
type = MKBETAG('d', 'v', 'c', 'C');
}
ff_isom_put_dvcc_dvvc(s, buf, dovi);
mapping = start_ebml_master(pb, MATROSKA_ID_TRACKBLKADDMAPPING, expected_size);
put_ebml_string(pb, MATROSKA_ID_BLKADDIDNAME, DVCC_DVVC_BLOCK_TYPE_NAME);
put_ebml_uint(pb, MATROSKA_ID_BLKADDIDTYPE, type);
put_ebml_binary(pb, MATROSKA_ID_BLKADDIDEXTRADATA, buf, sizeof(buf));
end_ebml_master(pb, mapping);
}
}
static int mkv_write_track(AVFormatContext *s, MatroskaMuxContext *mkv,
AVStream *st, mkv_track *track, AVIOContext *pb,
int is_default)
@ -1319,6 +1351,11 @@ static int mkv_write_track(AVFormatContext *s, MatroskaMuxContext *mkv,
mkv_write_video_projection(s, pb, st);
end_ebml_master(pb, subinfo);
if (mkv->mode != MODE_WEBM) {
mkv_write_dovi(s, pb, st);
}
break;
case AVMEDIA_TYPE_AUDIO:
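For reference, the expected_size passed to start_ebml_master() above budgets three child elements, each counted as a 2-byte EBML ID plus a 1-byte size field plus its payload. The small standalone sketch below works through that arithmetic; the values of ISOM_DVCC_DVVC_SIZE (24, as implied elsewhere in this diff) and DVCC_DVVC_BLOCK_TYPE_NAME ("Dolby Vision configuration") are assumptions, since both macros live in dovi_isom.h and are not part of the hunks shown.

#include <stdint.h>
#include <stdio.h>

/* Assumed values; see the note above. */
#define ISOM_DVCC_DVVC_SIZE       24
#define DVCC_DVVC_BLOCK_TYPE_NAME "Dolby Vision configuration"

int main(void)
{
    /* Each child element is budgeted as 2-byte EBML ID + 1-byte size field
     * + payload, matching the expected_size expression in mkv_write_dovi(). */
    uint64_t name_el  = 2 + 1 + (sizeof(DVCC_DVVC_BLOCK_TYPE_NAME) - 1);
    uint64_t type_el  = 2 + 1 + 4;                   /* 32-bit block-add ID type */
    uint64_t extra_el = 2 + 1 + ISOM_DVCC_DVVC_SIZE; /* dvcC/dvvC payload        */

    printf("BlockAdditionMapping payload budget: %llu bytes\n",
           (unsigned long long)(name_el + type_el + extra_el));
    return 0;
}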


@ -55,6 +55,7 @@
#include "avformat.h"
#include "internal.h"
#include "avio_internal.h"
#include "dovi_isom.h"
#include "riff.h"
#include "isom.h"
#include "libavcodec/get_bits.h"
@ -812,7 +813,7 @@ static int mov_read_dac3(MOVContext *c, AVIOContext *pb, MOVAtom atom)
acmod = (ac3info >> 11) & 0x7;
lfeon = (ac3info >> 10) & 0x1;
st->codecpar->channels = ((int[]){2,1,2,3,3,4,4,5})[acmod] + lfeon;
st->codecpar->channel_layout = avpriv_ac3_channel_layout_tab[acmod];
st->codecpar->channel_layout = ff_ac3_channel_layout_tab[acmod];
if (lfeon)
st->codecpar->channel_layout |= AV_CH_LOW_FREQUENCY;
*ast = bsmod;
@ -845,7 +846,7 @@ static int mov_read_dec3(MOVContext *c, AVIOContext *pb, MOVAtom atom)
bsmod = (eac3info >> 12) & 0x1f;
acmod = (eac3info >> 9) & 0x7;
lfeon = (eac3info >> 8) & 0x1;
st->codecpar->channel_layout = avpriv_ac3_channel_layout_tab[acmod];
st->codecpar->channel_layout = ff_ac3_channel_layout_tab[acmod];
if (lfeon)
st->codecpar->channel_layout |= AV_CH_LOW_FREQUENCY;
st->codecpar->channels = av_get_channel_layout_nb_channels(st->codecpar->channel_layout);
@ -7062,58 +7063,21 @@ static int mov_read_dmlp(MOVContext *c, AVIOContext *pb, MOVAtom atom)
static int mov_read_dvcc_dvvc(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
AVStream *st;
uint32_t buf;
AVDOVIDecoderConfigurationRecord *dovi;
size_t dovi_size;
uint8_t buf[ISOM_DVCC_DVVC_SIZE];
int ret;
int64_t read_size = atom.size;
if (c->fc->nb_streams < 1)
return 0;
st = c->fc->streams[c->fc->nb_streams-1];
if ((uint64_t)atom.size > (1<<30) || atom.size < 4)
return AVERROR_INVALIDDATA;
// At most 24 bytes
read_size = FFMIN(read_size, ISOM_DVCC_DVVC_SIZE);
dovi = av_dovi_alloc(&dovi_size);
if (!dovi)
return AVERROR(ENOMEM);
dovi->dv_version_major = avio_r8(pb);
dovi->dv_version_minor = avio_r8(pb);
buf = avio_rb16(pb);
dovi->dv_profile = (buf >> 9) & 0x7f; // 7 bits
dovi->dv_level = (buf >> 3) & 0x3f; // 6 bits
dovi->rpu_present_flag = (buf >> 2) & 0x01; // 1 bit
dovi->el_present_flag = (buf >> 1) & 0x01; // 1 bit
dovi->bl_present_flag = buf & 0x01; // 1 bit
if (atom.size >= 24) { // 4 + 4 + 4 * 4
buf = avio_r8(pb);
dovi->dv_bl_signal_compatibility_id = (buf >> 4) & 0x0f; // 4 bits
} else {
// 0 stands for None
// Dolby Vision V1.2.93 profiles and levels
dovi->dv_bl_signal_compatibility_id = 0;
}
ret = av_stream_add_side_data(st, AV_PKT_DATA_DOVI_CONF,
(uint8_t *)dovi, dovi_size);
if (ret < 0) {
av_free(dovi);
if ((ret = ffio_read_size(pb, buf, read_size)) < 0)
return ret;
}
av_log(c, AV_LOG_TRACE, "DOVI in dvcC/dvvC/dvwC box, version: %d.%d, profile: %d, level: %d, "
"rpu flag: %d, el flag: %d, bl flag: %d, compatibility id: %d\n",
dovi->dv_version_major, dovi->dv_version_minor,
dovi->dv_profile, dovi->dv_level,
dovi->rpu_present_flag,
dovi->el_present_flag,
dovi->bl_present_flag,
dovi->dv_bl_signal_compatibility_id
);
return 0;
return ff_isom_parse_dvcc_dvvc(c->fc, st, buf, read_size);
}
static int mov_read_kind(MOVContext *c, AVIOContext *pb, MOVAtom atom)
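The removed inline parser above documents the layout of the 24-byte dvcC/dvvC payload that ff_isom_parse_dvcc_dvvc() now handles: one byte each for the version major/minor, a 16-bit field packing the 7-bit profile, 6-bit level and the rpu/el/bl presence flags, and the 4-bit backward-compatibility id in the top bits of the next byte, with the remainder reserved. A minimal standalone sketch of that unpacking, using a local stand-in struct instead of AVDOVIDecoderConfigurationRecord:

#include <stdint.h>

/* Stand-in for the parsed configuration record so the sketch compiles
 * without libavutil headers. */
typedef struct DoviConf {
    uint8_t version_major, version_minor;
    uint8_t profile, level;
    uint8_t rpu_present, el_present, bl_present;
    uint8_t bl_signal_compatibility_id;
} DoviConf;

/* buf points at the 24-byte dvcC/dvvC payload (box size minus the 8-byte header). */
static void parse_dvcc_dvvc(const uint8_t buf[24], DoviConf *c)
{
    uint16_t bits = (buf[2] << 8) | buf[3];

    c->version_major = buf[0];
    c->version_minor = buf[1];
    c->profile       = (bits >> 9) & 0x7f;  /* 7 bits */
    c->level         = (bits >> 3) & 0x3f;  /* 6 bits */
    c->rpu_present   = (bits >> 2) & 0x01;
    c->el_present    = (bits >> 1) & 0x01;
    c->bl_present    =  bits       & 0x01;
    /* In the demuxer this byte is only read when the box carries the full
     * 24 bytes; a value of 0 means "none". */
    c->bl_signal_compatibility_id = (buf[4] >> 4) & 0x0f;
}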


@ -27,6 +27,7 @@
#include "movenc.h"
#include "avformat.h"
#include "avio_internal.h"
#include "dovi_isom.h"
#include "riff.h"
#include "avio.h"
#include "isom.h"
@ -1636,7 +1637,7 @@ static int mov_get_rawvideo_codec_tag(AVFormatContext *s, MOVTrack *track)
}
}
pix_fmt = avpriv_find_pix_fmt(avpriv_pix_fmt_bps_mov,
pix_fmt = avpriv_pix_fmt_find(PIX_FMT_LIST_MOV,
track->par->bits_per_coded_sample);
if (tag == MKTAG('r','a','w',' ') &&
track->par->format != pix_fmt &&
@ -1716,7 +1717,7 @@ static unsigned int validate_codec_tag(const AVCodecTag *const *tags,
for (i = 0; tags && tags[i]; i++) {
const AVCodecTag *codec_tags = tags[i];
while (codec_tags->id != AV_CODEC_ID_NONE) {
if (avpriv_toupper4(codec_tags->tag) == avpriv_toupper4(tag) &&
if (ff_toupper4(codec_tags->tag) == ff_toupper4(tag) &&
codec_tags->id == codec_id)
return codec_tags->tag;
codec_tags++;
@ -1911,6 +1912,8 @@ static int mov_write_sv3d_tag(AVFormatContext *s, AVIOContext *pb, AVSphericalMa
static int mov_write_dvcc_dvvc_tag(AVFormatContext *s, AVIOContext *pb, AVDOVIDecoderConfigurationRecord *dovi)
{
uint8_t buf[ISOM_DVCC_DVVC_SIZE];
avio_wb32(pb, 32); /* size = 8 + 24 */
if (dovi->dv_profile > 10)
ffio_wfourcc(pb, "dvwC");
@ -1918,23 +1921,10 @@ static int mov_write_dvcc_dvvc_tag(AVFormatContext *s, AVIOContext *pb, AVDOVIDe
ffio_wfourcc(pb, "dvvC");
else
ffio_wfourcc(pb, "dvcC");
avio_w8(pb, dovi->dv_version_major);
avio_w8(pb, dovi->dv_version_minor);
avio_wb16(pb, (dovi->dv_profile << 9) | (dovi->dv_level << 3) |
(dovi->rpu_present_flag << 2) | (dovi->el_present_flag << 1) |
dovi->bl_present_flag);
avio_wb32(pb, (dovi->dv_bl_signal_compatibility_id << 28) | 0);
ffio_fill(pb, 0, 4 * 4); /* reserved */
av_log(s, AV_LOG_DEBUG, "DOVI in %s box, version: %d.%d, profile: %d, level: %d, "
"rpu flag: %d, el flag: %d, bl flag: %d, compatibility id: %d\n",
dovi->dv_profile > 10 ? "dvwC" : (dovi->dv_profile > 7 ? "dvvC" : "dvcC"),
dovi->dv_version_major, dovi->dv_version_minor,
dovi->dv_profile, dovi->dv_level,
dovi->rpu_present_flag,
dovi->el_present_flag,
dovi->bl_present_flag,
dovi->dv_bl_signal_compatibility_id);
ff_isom_put_dvcc_dvvc(s, buf, dovi);
avio_write(pb, buf, sizeof(buf));
return 32; /* 8 + 24 */
}
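The writing side above is the mirror image: an 8-byte box header (a 32-bit size of 32, then a dvwC, dvvC or dvcC tag selected by profile) followed by the same 24-byte payload, which ff_isom_put_dvcc_dvvc() now fills in. A standalone sketch of that packing, matching the removed inline writer and under the same layout assumptions as the parsing sketch earlier:

#include <stdint.h>
#include <string.h>

static void put_dvcc_dvvc(uint8_t buf[24],
                          uint8_t version_major, uint8_t version_minor,
                          uint8_t profile, uint8_t level,
                          int rpu_present, int el_present, int bl_present,
                          uint8_t bl_signal_compatibility_id)
{
    uint16_t bits = (profile << 9) | (level << 3) |
                    (rpu_present << 2) | (el_present << 1) | bl_present;

    memset(buf, 0, 24);              /* the trailing 4 * 32 bits stay reserved (zero) */
    buf[0] = version_major;
    buf[1] = version_minor;
    buf[2] = bits >> 8;
    buf[3] = bits & 0xff;
    buf[4] = bl_signal_compatibility_id << 4;
}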
@ -5349,7 +5339,7 @@ static int mov_write_squashed_packet(AVFormatContext *s, MOVTrack *track)
switch (track->st->codecpar->codec_id) {
case AV_CODEC_ID_TTML: {
int had_packets = !!track->squashed_packet_queue;
int had_packets = !!track->squashed_packet_queue.head;
if ((ret = ff_mov_generate_squashed_ttml_packet(s, track, squashed_packet)) < 0) {
goto finish_squash;
@ -6197,9 +6187,10 @@ static int mov_write_packet(AVFormatContext *s, AVPacket *pkt)
return AVERROR(EINVAL);
}
/* The following will reset pkt and is only allowed to be used
* because we return immediately afterwards. */
if ((ret = avpriv_packet_list_put(&trk->squashed_packet_queue,
&trk->squashed_packet_queue_end,
pkt, av_packet_ref, 0)) < 0) {
pkt, NULL, 0)) < 0) {
return ret;
}
@ -6465,29 +6456,28 @@ static void mov_free(AVFormatContext *s)
}
for (i = 0; i < mov->nb_streams; i++) {
if (mov->tracks[i].tag == MKTAG('r','t','p',' '))
ff_mov_close_hinting(&mov->tracks[i]);
else if (mov->tracks[i].tag == MKTAG('t','m','c','d') && mov->nb_meta_tmcd)
av_freep(&mov->tracks[i].par);
av_freep(&mov->tracks[i].cluster);
av_freep(&mov->tracks[i].frag_info);
av_packet_free(&mov->tracks[i].cover_image);
MOVTrack *const track = &mov->tracks[i];
if (mov->tracks[i].eac3_priv) {
struct eac3_info *info = mov->tracks[i].eac3_priv;
if (track->tag == MKTAG('r','t','p',' '))
ff_mov_close_hinting(track);
else if (track->tag == MKTAG('t','m','c','d') && mov->nb_meta_tmcd)
av_freep(&track->par);
av_freep(&track->cluster);
av_freep(&track->frag_info);
av_packet_free(&track->cover_image);
if (track->eac3_priv) {
struct eac3_info *info = track->eac3_priv;
av_packet_free(&info->pkt);
av_freep(&mov->tracks[i].eac3_priv);
av_freep(&track->eac3_priv);
}
if (mov->tracks[i].vos_len)
av_freep(&mov->tracks[i].vos_data);
if (track->vos_len)
av_freep(&track->vos_data);
ff_mov_cenc_free(&mov->tracks[i].cenc);
ffio_free_dyn_buf(&mov->tracks[i].mdat_buf);
ff_mov_cenc_free(&track->cenc);
ffio_free_dyn_buf(&track->mdat_buf);
if (mov->tracks[i].squashed_packet_queue) {
avpriv_packet_list_free(&(mov->tracks[i].squashed_packet_queue),
&(mov->tracks[i].squashed_packet_queue_end));
}
avpriv_packet_list_free(&track->squashed_packet_queue);
}
av_freep(&mov->tracks);
@ -7150,13 +7140,8 @@ static int compute_sidx_size(AVFormatContext *s)
static int shift_data(AVFormatContext *s)
{
int ret = 0, moov_size;
int moov_size;
MOVMuxContext *mov = s->priv_data;
int64_t pos, pos_end;
uint8_t *buf, *read_buf[2];
int read_buf_id = 0;
int read_size[2];
AVIOContext *read_pb;
if (mov->flags & FF_MOV_FLAG_FRAGMENT)
moov_size = compute_sidx_size(s);
@ -7165,53 +7150,7 @@ static int shift_data(AVFormatContext *s)
if (moov_size < 0)
return moov_size;
buf = av_malloc(moov_size * 2);
if (!buf)
return AVERROR(ENOMEM);
read_buf[0] = buf;
read_buf[1] = buf + moov_size;
/* Shift the data: the AVIO context of the output can only be used for
* writing, so we re-open the same output, but for reading. It also avoids
* a read/seek/write/seek back and forth. */
avio_flush(s->pb);
ret = s->io_open(s, &read_pb, s->url, AVIO_FLAG_READ, NULL);
if (ret < 0) {
av_log(s, AV_LOG_ERROR, "Unable to re-open %s output file for "
"the second pass (faststart)\n", s->url);
goto end;
}
/* mark the end of the shift to up to the last data we wrote, and get ready
* for writing */
pos_end = avio_tell(s->pb);
avio_seek(s->pb, mov->reserved_header_pos + moov_size, SEEK_SET);
/* start reading at where the new moov will be placed */
avio_seek(read_pb, mov->reserved_header_pos, SEEK_SET);
pos = avio_tell(read_pb);
#define READ_BLOCK do { \
read_size[read_buf_id] = avio_read(read_pb, read_buf[read_buf_id], moov_size); \
read_buf_id ^= 1; \
} while (0)
/* shift data by chunk of at most moov_size */
READ_BLOCK;
do {
int n;
READ_BLOCK;
n = read_size[read_buf_id];
if (n <= 0)
break;
avio_write(s->pb, read_buf[read_buf_id], n);
pos += n;
} while (pos < pos_end);
ff_format_io_close(s, &read_pb);
end:
av_free(buf);
return ret;
return ff_format_shift_data(s, mov->reserved_header_pos, moov_size);
}
static int mov_write_trailer(AVFormatContext *s)
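The faststart shifting that shift_data() used to do inline is now delegated to ff_format_shift_data(), but the removed code above still shows the technique: reopen the output for reading, then move everything after the reserved header position moov_size bytes further down, reading each chunk before the previous one is written so the copy never overwrites data it has not yet read. A minimal standalone sketch of that double-buffered loop, using plain stdio handles in place of the two AVIO contexts:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* rd and wr are two handles on the same file; move the bytes in
 * [start, end) forward by `shift` bytes, in chunks of at most `shift`. */
static int shift_data_sketch(FILE *rd, FILE *wr, long start, long end, long shift)
{
    uint8_t *buf = malloc(2 * shift);
    uint8_t *rb[2] = { buf, buf + shift };      /* ping-pong read buffers */
    size_t   n[2];
    long     remaining = end - start;
    int      id = 0, ret = 0;

    if (!buf)
        return -1;

    fseek(rd, start, SEEK_SET);                 /* read from the old position    */
    fseek(wr, start + shift, SEEK_SET);         /* write at the shifted position */

    n[id] = fread(rb[id], 1, shift, rd);        /* prime the first buffer */
    while (remaining > 0) {
        int cur = id;
        id ^= 1;
        n[id] = fread(rb[id], 1, shift, rd);    /* read ahead before overwriting */
        if (n[cur] == 0)
            break;
        if ((long)n[cur] > remaining)
            n[cur] = (size_t)remaining;
        if (fwrite(rb[cur], 1, n[cur], wr) != n[cur]) {
            ret = -1;
            break;
        }
        remaining -= (long)n[cur];
    }

    free(buf);
    return ret;
}

The read position always stays at least one full chunk ahead of the write position, which is why each block is fetched before the previous one is flushed.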


@ -167,7 +167,7 @@ typedef struct MOVTrack {
unsigned int squash_fragment_samples_to_one; //< flag to note formats where all samples for a fragment are to be squashed
PacketList *squashed_packet_queue, *squashed_packet_queue_end;
PacketList squashed_packet_queue;
} MOVTrack;
typedef enum {


@ -70,9 +70,7 @@ static int mov_write_ttml_document_from_queue(AVFormatContext *s,
return ret;
}
while (!avpriv_packet_list_get(&track->squashed_packet_queue,
&track->squashed_packet_queue_end,
pkt)) {
while (!avpriv_packet_list_get(&track->squashed_packet_queue, pkt)) {
end_ts = FFMAX(end_ts, pkt->pts + pkt->duration);
// in case of the 'dfxp' muxing mode, each written document is offset
@ -121,7 +119,7 @@ int ff_mov_generate_squashed_ttml_packet(AVFormatContext *s,
goto cleanup;
}
if (!track->squashed_packet_queue) {
if (!track->squashed_packet_queue.head) {
// empty queue, write minimal empty document with zero duration
avio_write(ttml_ctx->pb, empty_ttml_document,
sizeof(empty_ttml_document) - 1);


@ -132,7 +132,7 @@ typedef struct MP3Context {
int pics_to_write;
/* audio packets are queued here until we get all the attached pictures */
PacketList *queue, *queue_end;
PacketList queue;
} MP3Context;
static const uint8_t xing_offtbl[2][2] = {{32, 17}, {17, 9}};
@ -159,8 +159,8 @@ static int mp3_write_xing(AVFormatContext *s)
if (!(s->pb->seekable & AVIO_SEEKABLE_NORMAL) || !mp3->write_xing)
return 0;
for (i = 0; i < FF_ARRAY_ELEMS(avpriv_mpa_freq_tab); i++) {
const uint16_t base_freq = avpriv_mpa_freq_tab[i];
for (i = 0; i < FF_ARRAY_ELEMS(ff_mpa_freq_tab); i++) {
const uint16_t base_freq = ff_mpa_freq_tab[i];
if (par->sample_rate == base_freq) ver = 0x3; // MPEG 1
else if (par->sample_rate == base_freq / 2) ver = 0x2; // MPEG 2
@ -170,7 +170,7 @@ static int mp3_write_xing(AVFormatContext *s)
srate_idx = i;
break;
}
if (i == FF_ARRAY_ELEMS(avpriv_mpa_freq_tab)) {
if (i == FF_ARRAY_ELEMS(ff_mpa_freq_tab)) {
av_log(s, AV_LOG_WARNING, "Unsupported sample rate, not writing Xing header.\n");
return -1;
}
@ -190,7 +190,7 @@ static int mp3_write_xing(AVFormatContext *s)
header |= channels << 6;
for (bitrate_idx = 1; bitrate_idx < 15; bitrate_idx++) {
int bit_rate = 1000 * avpriv_mpa_bitrate_tab[ver != 3][3 - 1][bitrate_idx];
int bit_rate = 1000 * ff_mpa_bitrate_tab[ver != 3][3 - 1][bitrate_idx];
int error = FFABS(bit_rate - par->bit_rate);
if (error < best_bitrate_error) {
@ -387,8 +387,8 @@ static int mp3_queue_flush(AVFormatContext *s)
ff_id3v2_finish(&mp3->id3, s->pb, s->metadata_header_padding);
mp3_write_xing(s);
while (mp3->queue) {
avpriv_packet_list_get(&mp3->queue, &mp3->queue_end, pkt);
while (mp3->queue.head) {
avpriv_packet_list_get(&mp3->queue, pkt);
if (write && (ret = mp3_write_audio_packet(s, pkt)) < 0)
write = 0;
av_packet_unref(pkt);
@ -524,7 +524,7 @@ static int mp3_write_packet(AVFormatContext *s, AVPacket *pkt)
if (pkt->stream_index == mp3->audio_stream_idx) {
if (mp3->pics_to_write) {
/* buffer audio packets until we get all the pictures */
int ret = avpriv_packet_list_put(&mp3->queue, &mp3->queue_end, pkt, av_packet_ref, 0);
int ret = avpriv_packet_list_put(&mp3->queue, pkt, NULL, 0);
if (ret < 0) {
av_log(s, AV_LOG_WARNING, "Not enough memory to buffer audio. Skipping picture streams\n");
@ -632,7 +632,7 @@ static void mp3_deinit(struct AVFormatContext *s)
{
MP3Context *mp3 = s->priv_data;
avpriv_packet_list_free(&mp3->queue, &mp3->queue_end);
avpriv_packet_list_free(&mp3->queue);
av_freep(&mp3->xing_frame);
}
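As a side note on the Xing-header hunks above: the sample-rate loop picks both the MPEG version nibble and the sample-rate index from the base frequency table. A small standalone sketch of that lookup follows; the table contents used here (44100, 48000, 32000) are an assumption, since ff_mpa_freq_tab itself is not part of this diff, and only the exact-rate and half-rate cases shown in the hunk are handled.

#include <stdint.h>
#include <stdio.h>

/* Assumed base sample rates; see the note above. */
static const uint16_t mpa_freq_tab[] = { 44100, 48000, 32000 };

/* Mirror of the lookup in mp3_write_xing(): an exact match selects
 * MPEG 1 (0x3), a half-rate match MPEG 2 (0x2). */
static int xing_params(int sample_rate, int *ver, int *srate_idx)
{
    for (size_t i = 0; i < sizeof(mpa_freq_tab) / sizeof(mpa_freq_tab[0]); i++) {
        int base = mpa_freq_tab[i];
        if (sample_rate == base)
            *ver = 0x3;                         /* MPEG 1 */
        else if (sample_rate == base / 2)
            *ver = 0x2;                         /* MPEG 2 */
        else
            continue;
        *srate_idx = (int)i;
        return 0;
    }
    return -1;  /* unsupported rate: the muxer skips the Xing header */
}

int main(void)
{
    int ver, idx;
    if (!xing_params(22050, &ver, &idx))
        printf("ver 0x%x, srate_idx %d\n", ver, idx);   /* 0x2, 0 */
    return 0;
}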

Some files were not shown because too many files have changed in this diff.