#define MAX_REFERENCES 8

/* Member of the filter's private context (DeintVAAPIContext) holding the
 * deinterlacing modes reported by the driver: */
VAProcFilterCapDeinterlacing deint_caps[VAProcDeinterlacingCount];

/* Maps a VAProcDeinterlacingType value to its name inside deint_vaapi_mode_name(): */
#define D(name) case VAProcDeinterlacing ## name: return #name
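/* Sketch of how the D() macro is used by deint_vaapi_mode_name() (declared
 * later in this listing); the mode names match the "mode" option constants
 * below, but the body here is illustrative rather than verbatim. */
static const char *deint_vaapi_mode_name(int mode)
{
    switch (mode) {
        D(Bob);
        D(Weave);
        D(MotionAdaptive);
        D(MotionCompensated);
#undef D
    default:
        return "Invalid";
    }
}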
/* deint_vaapi_pipeline_uninit(): release every frame still held in the
 * reference queue before tearing down the shared VPP pipeline. */
    for (i = 0; i < ctx->queue_count; i++)
        av_frame_free(&ctx->frame_queue[i]);
    ctx->queue_count = 0;
/* deint_vaapi_build_filter_params(): ask the driver which deinterlacing
 * modes it supports. */
    VAProcFilterParameterBufferDeinterlacing params;

    ctx->nb_deint_caps = VAProcDeinterlacingCount;
    vas = vaQueryVideoProcFilterCaps(vpp_ctx->hwctx->display,
                                     vpp_ctx->va_context,
                                     VAProcFilterDeinterlacing,
                                     &ctx->deint_caps,
                                     &ctx->nb_deint_caps);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to query deinterlacing "
               "caps: %d (%s).\n", vas, vaErrorStr(vas));
        return AVERROR(EIO);
    }
    if (ctx->mode == VAProcDeinterlacingNone) {
        /* No explicit mode requested: pick the highest-numbered mode the
         * driver reports. */
        for (i = 0; i < ctx->nb_deint_caps; i++) {
            if (ctx->deint_caps[i].type > ctx->mode)
                ctx->mode = ctx->deint_caps[i].type;
        }
        av_log(avctx, AV_LOG_VERBOSE, "Picking %d (%s) as default "
               "deinterlacing mode.\n", ctx->mode,
               deint_vaapi_mode_name(ctx->mode));
    } else {
        /* An explicit mode was requested: make sure the driver supports it. */
        for (i = 0; i < ctx->nb_deint_caps; i++) {
            if (ctx->deint_caps[i].type == ctx->mode)
                break;
        }
        if (i >= ctx->nb_deint_caps) {
            av_log(avctx, AV_LOG_ERROR, "Deinterlacing mode %d (%s) is "
                   "not supported.\n", ctx->mode,
                   deint_vaapi_mode_name(ctx->mode));
            return AVERROR(EINVAL);
        }
    }
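    /* The "highest-numbered" default above relies on libva's ordering of
     * VAProcDeinterlacingType (va_vpp.h), which as of current libva is
     *   None (0) < Bob < Weave < MotionAdaptive < MotionCompensated < Count,
     * so a driver advertising Bob, MotionAdaptive and MotionCompensated gets
     * MotionCompensated by default. */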
    params.type      = VAProcFilterDeinterlacing;
    params.algorithm = ctx->mode;

    err = ff_vaapi_vpp_make_param_buffers(avctx,
                                          VAProcFilterParameterBufferType,
                                          &params, sizeof(params), 1);
    if (err < 0)
        return err;

    vas = vaQueryVideoProcPipelineCaps(vpp_ctx->hwctx->display,
                                       vpp_ctx->va_context,
                                       vpp_ctx->filter_buffers,
                                       vpp_ctx->nb_filter_buffers,
                                       &ctx->pipeline_caps);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to query pipeline "
               "caps: %d (%s).\n", vas, vaErrorStr(vas));
        return AVERROR(EIO);
    }
    ctx->extra_delay_for_timestamps = ctx->field_rate == 2 &&
        ctx->pipeline_caps.num_backward_references == 0;

    ctx->queue_depth = ctx->pipeline_caps.num_backward_references +
                       ctx->pipeline_caps.num_forward_references +
                       ctx->extra_delay_for_timestamps + 1;
    if (ctx->queue_depth > MAX_REFERENCES) {
        av_log(avctx, AV_LOG_ERROR, "Pipeline requires too many "
               "references (%u forward, %u back).\n",
               ctx->pipeline_caps.num_forward_references,
               ctx->pipeline_caps.num_backward_references);
        return AVERROR(ENOSYS);
    }
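    /* Worked example of the queue-depth formula above (illustrative numbers,
     * not from any particular driver):
     *   - 2 forward + 1 backward references, rate=frame:
     *       extra_delay = 0, queue_depth = 1 + 2 + 0 + 1 = 4
     *   - 0 forward + 0 backward references (bob-style), rate=field:
     *       extra_delay = 1, queue_depth = 0 + 0 + 1 + 1 = 2
     *     (the extra queued frame exists only so the second field's pts can
     *      be interpolated from the following frame, see filter_frame below). */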
/* deint_vaapi_filter_frame(): queue the incoming frame and, once enough
 * references are available, run the VPP pipeline on the current frame. */
static int deint_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame)
{
    AVFilterContext   *avctx = inlink->dst;
    VAAPIVPPContext *vpp_ctx = avctx->priv;
    DeintVAAPIContext   *ctx = avctx->priv;  /* same allocation, wider view */
    AVFrame    *output_frame = NULL;
    VASurfaceID input_surface;
    VASurfaceID backward_references[MAX_REFERENCES];
    VASurfaceID forward_references[MAX_REFERENCES];
    VAProcPipelineParameterBuffer params;
    VAProcFilterParameterBufferDeinterlacing *filter_params;
    VAStatus vas;
    void *filter_params_addr = NULL;
    int err, i, field, current_frame_index;
    if (ctx->queue_count < ctx->queue_depth) {
        ctx->frame_queue[ctx->queue_count++] = input_frame;
        if (ctx->queue_count < ctx->queue_depth) {
            /* Need more reference frames before filtering can start. */
            return 0;
        }
    } else {
        /* Queue is full: drop the oldest frame and slide the rest down. */
        av_frame_free(&ctx->frame_queue[0]);
        for (i = 0; i + 1 < ctx->queue_count; i++)
            ctx->frame_queue[i] = ctx->frame_queue[i + 1];
        ctx->frame_queue[i] = input_frame;
    }
    current_frame_index = ctx->pipeline_caps.num_forward_references;

    input_frame   = ctx->frame_queue[current_frame_index];
    input_surface = (VASurfaceID)(uintptr_t)input_frame->data[3];
    for (i = 0; i < ctx->pipeline_caps.num_forward_references; i++)
        forward_references[i] = (VASurfaceID)(uintptr_t)
            ctx->frame_queue[current_frame_index - i - 1]->data[3];
    for (i = 0; i < ctx->pipeline_caps.num_backward_references; i++)
        backward_references[i] = (VASurfaceID)(uintptr_t)
            ctx->frame_queue[current_frame_index + i + 1]->data[3];

    av_log(avctx, AV_LOG_DEBUG, "Using surface %#x for "
           "deinterlace input.\n", input_surface);
    /* Debug-log the backward and forward reference surfaces. */
    for (i = 0; i < ctx->pipeline_caps.num_backward_references; i++)
        av_log(avctx, AV_LOG_DEBUG, " %#x", backward_references[i]);
    for (i = 0; i < ctx->pipeline_caps.num_forward_references; i++)
        av_log(avctx, AV_LOG_DEBUG, " %#x", forward_references[i]);

    /* One pass per output frame: a single pass with rate=frame, or one per
     * field with rate=field (ctx->field_rate == 2). */
    for (field = 0; field < ctx->field_rate; field++) {
        /* ctx->auto_enable maps to the "auto" option below: progressive
         * frames then bypass the deinterlacing filter entirely. */
        if (!ctx->auto_enable || input_frame->interlaced_frame) {
            vas = vaMapBuffer(vpp_ctx->hwctx->display,
                              vpp_ctx->filter_buffers[0],
                              &filter_params_addr);
            if (vas != VA_STATUS_SUCCESS) {
                av_log(avctx, AV_LOG_ERROR, "Failed to map filter parameter "
                       "buffer: %d (%s).\n", vas, vaErrorStr(vas));
                err = AVERROR(EIO);
                goto fail;
            }
            filter_params = filter_params_addr;
            filter_params->flags = 0;
            if (input_frame->top_field_first) {
                filter_params->flags |= field ? VA_DEINTERLACING_BOTTOM_FIELD : 0;
            } else {
                filter_params->flags |= VA_DEINTERLACING_BOTTOM_FIELD_FIRST;
                filter_params->flags |= field ? 0 : VA_DEINTERLACING_BOTTOM_FIELD;
            }
            filter_params_addr = NULL;
            vas = vaUnmapBuffer(vpp_ctx->hwctx->display,
                                vpp_ctx->filter_buffers[0]);
            if (vas != VA_STATUS_SUCCESS)
                av_log(avctx, AV_LOG_ERROR, "Failed to unmap filter parameter "
                       "buffer: %d (%s).\n", vas, vaErrorStr(vas));
            params.filters     = &vpp_ctx->filter_buffers[0];
            params.num_filters = 1;

            params.forward_references = forward_references;
            params.num_forward_references =
                ctx->pipeline_caps.num_forward_references;
            params.backward_references = backward_references;
            params.num_backward_references =
                ctx->pipeline_caps.num_backward_references;
        } else {
            /* Progressive frame with auto enabled: plain passthrough copy. */
            params.filters     = NULL;
            params.num_filters = 0;
        }

        /* ... output_frame is allocated with ff_get_video_buffer(), its
         * properties copied with av_frame_copy_props(), and the pipeline run
         * via ff_vaapi_vpp_init_params() / ff_vaapi_vpp_render_picture() ... */
        if (ctx->field_rate == 2) {
            if (field == 0)
                output_frame->pts = 2 * input_frame->pts;
            else
                output_frame->pts = input_frame->pts +
                    ctx->frame_queue[current_frame_index + 1]->pts;
        }
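        /* Example of the pts handling above with rate=field: the output
         * link's time base is assumed to be half the input's (set up in
         * deint_vaapi_config_output), so for consecutive input frames with
         * pts N and N + d the two fields of the first frame get
         *   field 0: 2 * N
         *   field 1: N + (N + d) = 2N + d,
         * i.e. the second field lands exactly halfway to the next frame. */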
        /* ... each finished output_frame is then passed on with
         * ff_filter_frame(). */
    }

    return err;

fail:
    /* On error make sure a still-mapped filter parameter buffer is released. */
    if (filter_params_addr)
        vaUnmapBuffer(vpp_ctx->hwctx->display, vpp_ctx->filter_buffers[0]);
    av_frame_free(&output_frame);
    return err;
}
#define OFFSET(x) offsetof(DeintVAAPIContext, x)
#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM)
static const AVOption deint_vaapi_options[] = {
    { "mode", "Deinterlacing mode",
      OFFSET(mode), AV_OPT_TYPE_INT, { .i64 = VAProcDeinterlacingNone },
      VAProcDeinterlacingNone, VAProcDeinterlacingCount - 1, FLAGS, "mode" },
    { "default", "Use the highest-numbered (and therefore possibly most advanced) deinterlacing algorithm",
      0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingNone }, 0, 0, FLAGS, "mode" },
    { "bob", "Use the bob deinterlacing algorithm",
      0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingBob }, 0, 0, FLAGS, "mode" },
    { "weave", "Use the weave deinterlacing algorithm",
      0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingWeave }, 0, 0, FLAGS, "mode" },
    { "motion_adaptive", "Use the motion adaptive deinterlacing algorithm",
      0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingMotionAdaptive }, 0, 0, FLAGS, "mode" },
    { "motion_compensated", "Use the motion compensated deinterlacing algorithm",
      0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingMotionCompensated }, 0, 0, FLAGS, "mode" },

    { "rate", "Generate output at frame rate or field rate",
      OFFSET(field_rate), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 2, FLAGS, "rate" },
    { "frame", "Output at frame rate (one frame of output for each field-pair)",
      0, AV_OPT_TYPE_CONST, { .i64 = 1 }, 0, 0, FLAGS, "rate" },
    { "field", "Output at field rate (one frame of output for each field)",
      0, AV_OPT_TYPE_CONST, { .i64 = 2 }, 0, 0, FLAGS, "rate" },

    { "auto", "Only deinterlace fields, passing frames through unchanged",
      OFFSET(auto_enable), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },

    { NULL },
};
AVFilter ff_vf_deinterlace_vaapi = {
    .name           = "deinterlace_vaapi",
    .description    = NULL_IF_CONFIG_SMALL("Deinterlacing of VAAPI surfaces"),
    .priv_size      = sizeof(DeintVAAPIContext),
    .init           = &deint_vaapi_init,
    .uninit         = &ff_vaapi_vpp_ctx_uninit,
    .query_formats  = &ff_vaapi_vpp_query_formats,
    .inputs         = deint_vaapi_inputs,
    .outputs        = deint_vaapi_outputs,
    .priv_class     = &deint_vaapi_class,
    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
};
/*
 * Related declarations referenced above.
 *
 * Shared VA-API VPP context (vaapi_vpp.h) members:
 */
AVVAAPIDeviceContext *hwctx;                  /* hwctx->display is the VADisplay
                                                 handle, filled by the user */
VABufferID filter_buffers[VAProcFilterCount];
enum AVPixelFormat output_format;
void (*pipeline_uninit)(AVFilterContext *avctx);
int  (*build_filter_params)(AVFilterContext *avctx);

/* Shared VPP helpers (vaapi_vpp.h): */
void ff_vaapi_vpp_ctx_init(AVFilterContext *avctx);
void ff_vaapi_vpp_ctx_uninit(AVFilterContext *avctx);
void ff_vaapi_vpp_pipeline_uninit(AVFilterContext *avctx);
int  ff_vaapi_vpp_query_formats(AVFilterContext *avctx);
int  ff_vaapi_vpp_config_input(AVFilterLink *inlink);
int  ff_vaapi_vpp_config_output(AVFilterLink *outlink);
int  ff_vaapi_vpp_make_param_buffers(AVFilterContext *avctx, int type,
                                     const void *data, size_t size, int count);
int  ff_vaapi_vpp_init_params(AVFilterContext *avctx,
                              VAProcPipelineParameterBuffer *params,
                              const AVFrame *input_frame,
                              AVFrame *output_frame);
int  ff_vaapi_vpp_render_picture(AVFilterContext *avctx,
                                 VAProcPipelineParameterBuffer *params,
                                 AVFrame *output_frame);

/* DeintVAAPIContext members used above: */
AVFrame *frame_queue[MAX_REFERENCES];
VAProcFilterCapDeinterlacing deint_caps[VAProcDeinterlacingCount];
VAProcPipelineCaps pipeline_caps;
int extra_delay_for_timestamps;

/* Local definitions in this file: */
static const char *deint_vaapi_mode_name(int mode);
static void deint_vaapi_pipeline_uninit(AVFilterContext *avctx);
static int  deint_vaapi_build_filter_params(AVFilterContext *avctx);
static int  deint_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame);
static int  deint_vaapi_config_output(AVFilterLink *outlink);
static av_cold int deint_vaapi_init(AVFilterContext *avctx);
static const AVOption deint_vaapi_options[];
static const AVClass deint_vaapi_class;
static const AVFilterPad deint_vaapi_inputs[];
static const AVFilterPad deint_vaapi_outputs[];
AVFilter ff_vf_deinterlace_vaapi;

/* libavfilter helper used to allocate output frames: */
AVFrame *ff_get_video_buffer(AVFilterLink *link, int w, int h);
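The pipeline_uninit and build_filter_params hooks listed above are how this
filter plugs into the shared VPP code. Below is a rough sketch, not the
verbatim function, of how deint_vaapi_init() typically wires them up; treating
AV_PIX_FMT_NONE as "keep the input format" is an assumption here.

static av_cold int deint_vaapi_init(AVFilterContext *avctx)
{
    VAAPIVPPContext *vpp_ctx = avctx->priv;  /* VAAPIVPPContext is assumed to be
                                                the first member of DeintVAAPIContext */

    ff_vaapi_vpp_ctx_init(avctx);
    vpp_ctx->pipeline_uninit     = deint_vaapi_pipeline_uninit;
    vpp_ctx->build_filter_params = deint_vaapi_build_filter_params;
    vpp_ctx->output_format       = AV_PIX_FMT_NONE;  /* assumed: keep input format */

    return 0;
}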