De-doxygenize some top-level files
commit c1ef30a6ba
parent d5c62122a7
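The commit replaces Doxygen markup in these files with plain C comments. A minimal before/after sketch of the pattern being applied (the declaration below is hypothetical, not a line taken from the diff):

/* Before, Doxygen style: */
/**
 * Do some work.
 * @return 0 on success, a negative AVERROR code on failure
 */
int do_work(void);

/* After, plain style: */
/* Do some work. Return 0 on success, a negative AVERROR code on failure. */
int do_work(void);

Trailing member comments change the same way: "///< stream index" becomes "// stream index".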
avconv.c (15 changed lines)
@@ -246,7 +246,7 @@ static void assert_codec_experimental(AVCodecContext *c, int encoder)
     }
 }
 
-/**
+/*
  * Update the requested input sample format based on the output sample format.
  * This is currently only used to request float output from decoders which
  * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT.
@@ -660,7 +660,7 @@ static void do_video_stats(AVFormatContext *os, OutputStream *ost,
     }
 }
 
-/**
+/*
  * Read one frame for lavfi output for ost and encode it.
  */
 static int poll_filter(OutputStream *ost)
@@ -723,7 +723,7 @@ static int poll_filter(OutputStream *ost)
     return 0;
 }
 
-/**
+/*
  * Read as many frames from possible from lavfi and encode them.
  *
  * Always read from the active stream with the lowest timestamp. If no frames
@@ -1941,10 +1941,7 @@ static int transcode_init(void)
     return 0;
 }
 
-/**
- * @return 1 if there are still streams where more output is wanted,
- * 0 otherwise
- */
+/* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
 static int need_output(void)
 {
     int i;
@@ -2128,13 +2125,13 @@ static void reset_eagain(void)
         input_files[i]->eagain = 0;
 }
 
-/**
+/*
  * Read one packet from an input file and send it for
  * - decoding -> lavfi (audio/video)
  * - decoding -> encoding -> muxing (subtitles)
  * - muxing (streamcopy)
  *
- * @return
+ * Return
  * - 0 -- one packet was read and processed
  * - AVERROR(EAGAIN) -- no packets were available for selected file,
  *   this function should be called again
avconv.h (14 changed lines)
@@ -51,21 +51,19 @@
 
 /* select an input stream for an output stream */
 typedef struct StreamMap {
-    int disabled;           /** 1 is this mapping is disabled by a negative map */
+    int disabled;           /* 1 is this mapping is disabled by a negative map */
     int file_index;
    int stream_index;
     int sync_file_index;
     int sync_stream_index;
-    char *linklabel;        /** name of an output link, for mapping lavfi outputs */
+    char *linklabel;        /* name of an output link, for mapping lavfi outputs */
 } StreamMap;
 
-/**
- * select an input file for an output file
- */
+/* select an input file for an output file */
 typedef struct MetadataMap {
-    int  file;      ///< file index
-    char type;      ///< type of metadata to copy -- (g)lobal, (s)tream, (c)hapter or (p)rogram
-    int  index;     ///< stream/chapter/program number
+    int  file;      // file index
+    char type;      // type of metadata to copy -- (g)lobal, (s)tream, (c)hapter or (p)rogram
+    int  index;     // stream/chapter/program number
 } MetadataMap;
 
 typedef struct OptionsContext {
@@ -29,10 +29,8 @@
 #include "libavutil/pixfmt.h"
 #include "libavutil/samplefmt.h"
 
-/**
- * Define a function for building a string containing a list of
- * allowed formats,
- */
+/* Define a function for building a string containing a list of
+ * allowed formats. */
 #define DEF_CHOOSE_FORMAT(type, var, supported_list, none, get_name, separator)\
 static char *choose_ ## var ## s(OutputStream *ost)                            \
 {                                                                              \
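The DEF_CHOOSE_FORMAT macro in the hunk above expands to a choose_<var>s() helper that builds a string of the format names in 'supported_list', joined by 'separator'. A hedged sketch of what an instantiation of this macro could look like (the argument values are an assumption chosen for illustration, not taken from this diff):

/* Assumed instantiation: would define choose_sample_fmts(), returning the
 * names of the supported sample formats joined by ",". */
DEF_CHOOSE_FORMAT(enum AVSampleFormat, sample_fmt, sample_fmts,
                  AV_SAMPLE_FMT_NONE, av_get_sample_fmt_name, ",")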
@@ -275,7 +275,7 @@ static int opt_attach(void *optctx, const char *opt, const char *arg)
 }
 
 /**
- * Parse a metadata specifier in arg.
+ * Parse a metadata specifier passed as 'arg' parameter.
  * @param type metadata type is written here -- g(lobal)/s(tream)/c(hapter)/p(rogram)
  * @param index for type c/p, chapter/program index is written here
  * @param stream_spec for type s, the stream specifier is written here
@@ -423,10 +423,8 @@ static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *
         return avcodec_find_decoder(st->codec->codec_id);
 }
 
-/**
- * Add all the streams from the given input file to the global
- * list of input streams.
- */
+/* Add all the streams from the given input file to the global
+ * list of input streams. */
 static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
 {
     int i;
avplay.c (30 changed lines)
@@ -99,9 +99,9 @@ typedef struct PacketQueue {
 #define SUBPICTURE_QUEUE_SIZE 4
 
 typedef struct VideoPicture {
-    double pts;          ///< presentation time stamp for this picture
-    double target_clock; ///< av_gettime() time at which this should be displayed ideally
-    int64_t pos;         ///< byte position in file
+    double pts;          // presentation timestamp for this picture
+    double target_clock; // av_gettime() time at which this should be displayed ideally
+    int64_t pos;         // byte position in file
     SDL_Overlay *bmp;
     int width, height; /* source height & width */
     int allocated;
@@ -191,13 +191,13 @@ typedef struct VideoState {
     double frame_timer;
     double frame_last_pts;
     double frame_last_delay;
-    double video_clock;             ///< pts of last decoded frame / predicted pts of next decoded frame
+    double video_clock;             // pts of last decoded frame / predicted pts of next decoded frame
     int video_stream;
     AVStream *video_st;
     PacketQueue videoq;
-    double video_current_pts;       ///< current displayed pts (different from video_clock if frame fifos are used)
-    double video_current_pts_drift; ///< video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
-    int64_t video_current_pos;      ///< current displayed file pos
+    double video_current_pts;       // current displayed pts (different from video_clock if frame fifos are used)
+    double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
+    int64_t video_current_pos;      // current displayed file pos
     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
     int pictq_size, pictq_rindex, pictq_windex;
     SDL_mutex *pictq_mutex;
@@ -213,8 +213,8 @@ typedef struct VideoState {
     PtsCorrectionContext pts_ctx;
 
 #if CONFIG_AVFILTER
-    AVFilterContext *in_video_filter;  ///< the first filter in the video chain
-    AVFilterContext *out_video_filter; ///< the last filter in the video chain
+    AVFilterContext *in_video_filter;  // the first filter in the video chain
+    AVFilterContext *out_video_filter; // the last filter in the video chain
     int use_dr1;
     FrameBuffer *buffer_pool;
 #endif
@@ -1306,10 +1306,8 @@ static void alloc_picture(void *opaque)
     SDL_UnlockMutex(is->pictq_mutex);
 }
 
-/**
- *
- * @param pts the dts of the pkt / pts of the frame and guessed if not known
- */
+/* The 'pts' parameter is the dts of the packet / pts of the frame and
+ * guessed if not known. */
 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
 {
     VideoPicture *vp;
@@ -1427,10 +1425,8 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t
     return 0;
 }
 
-/**
- * compute the exact PTS for the picture if it is omitted in the stream
- * @param pts1 the dts of the pkt / pts of the frame
- */
+/* Compute the exact PTS for the picture if it is omitted in the stream.
+ * The 'pts1' parameter is the dts of the packet / pts of the frame. */
 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
 {
     double frame_delay, pts;