#include <mfx/mfxvideo.h>
#define QSV_HAVE_SCALING_CONFIG QSV_VERSION_ATLEAST(1, 19)
#if QSV_HAVE_SCALING_CONFIG
    mfxExtVPPScaling scale_conf;
#endif
    if (!strcmp(s->format_str, "same")) {
        MFXClose(s->session);
    s->nb_mem_ids_in  = 0;
    s->nb_mem_ids_out = 0;

    s->nb_surface_ptrs_in  = 0;
    s->nb_surface_ptrs_out = 0;
static int init_out_pool(AVFilterContext *ctx,
                         int out_width, int out_height)
    if (!ctx->inputs[0]->hw_frames_ctx) {
    in_frames_hwctx = in_frames_ctx->hwctx;
    out_frames_hwctx = out_frames_ctx->hwctx;
        mfxFrameInfo *info = &out_frames_hwctx->surfaces[i].Info;
        info->CropW = out_width;
        info->CropH = out_height;
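/* Note: only the crop rectangle (CropW/CropH) of the output surfaces is set to
 * the scaled dimensions; the surfaces themselves keep the aligned allocation
 * size of the pool, which is the usual QSV convention. */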
static mfxStatus frame_alloc(mfxHDL pthis, mfxFrameAllocRequest *req,
                             mfxFrameAllocResponse *resp)
    if (!(req->Type & MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET) ||
        !(req->Type & (MFX_MEMTYPE_FROM_VPPIN | MFX_MEMTYPE_FROM_VPPOUT)) ||
        !(req->Type & MFX_MEMTYPE_EXTERNAL_FRAME))
        return MFX_ERR_UNSUPPORTED;

    if (req->Type & MFX_MEMTYPE_FROM_VPPIN) {
        resp->mids           = s->mem_ids_in;
        resp->NumFrameActual = s->nb_mem_ids_in;
    } else {
        resp->mids           = s->mem_ids_out;
        resp->NumFrameActual = s->nb_mem_ids_out;
    }
static mfxStatus frame_free(mfxHDL pthis, mfxFrameAllocResponse *resp)
static mfxStatus frame_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
{
    return MFX_ERR_UNSUPPORTED;
}
static mfxStatus frame_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
{
    return MFX_ERR_UNSUPPORTED;
}
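/* Not shown in this excerpt: the matching GetHDL callback. Since the surfaces
 * come pre-allocated from the hwcontext, a pass-through implementation is the
 * usual pattern. A minimal sketch, assuming MemId already stores the native
 * surface handle:
 */
static mfxStatus frame_get_hdl(mfxHDL pthis, mfxMemId mid, mfxHDL *hdl)
{
    *hdl = mid;              /* hand the stored handle straight back to MFX */
    return MFX_ERR_NONE;
}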
static const mfxHandleType handle_types[] = {
    MFX_HANDLE_VA_DISPLAY,
    MFX_HANDLE_D3D9_DEVICE_MANAGER,
    MFX_HANDLE_D3D11_DEVICE,
};
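/* Device handle types probed on the parent session, in order: a VAAPI display
 * (Linux), a D3D9 device manager or a D3D11 device (Windows). Whichever handle
 * is found is later installed on the filter's own session with
 * MFXVideoCORE_SetHandle(), as shown below. */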
    int opaque = !!(in_frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME);
    mfxHDL handle = NULL;
    err = MFXQueryIMPL(device_hwctx->session, &impl);
    if (err == MFX_ERR_NONE)
        err = MFXQueryVersion(device_hwctx->session, &ver);
    if (err != MFX_ERR_NONE) {

        if (err == MFX_ERR_NONE) {
    err = MFXInit(impl, &ver, &s->session);
    if (err != MFX_ERR_NONE) {
    err = MFXVideoCORE_SetHandle(s->session, handle_type, handle);
    if (err != MFX_ERR_NONE)
    err = MFXJoinSession(device_hwctx->session, s->session);
    if (err != MFX_ERR_NONE)
    memset(&par, 0, sizeof(par));
        s->surface_ptrs_in = av_mallocz_array(in_frames_hwctx->nb_surfaces,
                                              sizeof(*s->surface_ptrs_in));
        if (!s->surface_ptrs_in)
            s->surface_ptrs_in[i] = in_frames_hwctx->surfaces + i;
        s->surface_ptrs_out = av_mallocz_array(out_frames_hwctx->nb_surfaces,
                                               sizeof(*s->surface_ptrs_out));
        if (!s->surface_ptrs_out)
            s->surface_ptrs_out[i] = out_frames_hwctx->surfaces + i;
        s->opaque_alloc.In.Surfaces   = s->surface_ptrs_in;
        s->opaque_alloc.In.NumSurface = s->nb_surface_ptrs_in;
        s->opaque_alloc.In.Type       = in_frames_hwctx->frame_type;
        s->opaque_alloc.Out.Surfaces   = s->surface_ptrs_out;
        s->opaque_alloc.Out.NumSurface = s->nb_surface_ptrs_out;
        s->opaque_alloc.Out.Type       = out_frames_hwctx->frame_type;
        s->opaque_alloc.Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION;
        s->opaque_alloc.Header.BufferSz = sizeof(s->opaque_alloc);
        s->ext_buffers[s->num_ext_buf++] = (mfxExtBuffer*)&s->opaque_alloc;
        par.IOPattern = MFX_IOPATTERN_IN_OPAQUE_MEMORY | MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
        mfxFrameAllocator frame_allocator = {
        s->mem_ids_in = av_mallocz_array(in_frames_hwctx->nb_surfaces,
                                         sizeof(*s->mem_ids_in));
            s->mem_ids_in[i] = in_frames_hwctx->surfaces[i].Data.MemId;
        s->mem_ids_out = av_mallocz_array(out_frames_hwctx->nb_surfaces,
                                          sizeof(*s->mem_ids_out));
            s->mem_ids_out[i] = out_frames_hwctx->surfaces[i].Data.MemId;
        err = MFXVideoCORE_SetFrameAllocator(s->session, &frame_allocator);
        if (err != MFX_ERR_NONE)
        par.IOPattern = MFX_IOPATTERN_IN_VIDEO_MEMORY | MFX_IOPATTERN_OUT_VIDEO_MEMORY;
#if QSV_HAVE_SCALING_CONFIG
    memset(&s->scale_conf, 0, sizeof(mfxExtVPPScaling));
    s->scale_conf.Header.BufferId = MFX_EXTBUFF_VPP_SCALING;
    s->scale_conf.Header.BufferSz = sizeof(mfxExtVPPScaling);
    s->scale_conf.ScalingMode     = s->mode;
    s->ext_buffers[s->num_ext_buf++] = (mfxExtBuffer*)&s->scale_conf;
#endif
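/* mfxExtVPPScaling only exists from libmfx API 1.19 onwards (see the
 * QSV_HAVE_SCALING_CONFIG guard above); when available it is attached as an
 * extra buffer so the selected scaling mode (low-power vs. quality) is passed
 * to the VPP. */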
    par.ExtParam    = s->ext_buffers;
    par.NumExtParam = s->num_ext_buf;
    par.vpp.In  = in_frames_hwctx->surfaces[0].Info;
    par.vpp.Out = out_frames_hwctx->surfaces[0].Info;
    par.vpp.In.FrameRateExtN  = 25;
    par.vpp.In.FrameRateExtD  = 1;
    par.vpp.Out.FrameRateExtN = 25;
    par.vpp.Out.FrameRateExtD = 1;
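/* Apparently the VPP requires some frame rate to be set for initialization to
 * succeed, hence the 25/1 placeholder on both input and output; it has no
 * bearing on plain scaling. */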
    err = MFXVideoVPP_Init(s->session, &par);
    if (err < 0)
        return ff_qsvvpp_print_error(ctx, err,
                                     "Error opening the VPP for scaling");
    else if (err > 0)
        ff_qsvvpp_print_warning(ctx, err,
                                "Warning in VPP initialization");
static int init_scale_session(AVFilterContext *ctx, int in_width, int in_height,
                              int out_width, int out_height)
    double var_values[VARS_NB], res;
    var_values[VAR_A] = (double) inlink->w / inlink->h;
    if (w < -1 || h < -1) {
    if (w == -1 && h == -1)
    if (w > INT_MAX || h > INT_MAX ||
        (h * inlink->w) > INT_MAX ||
        (w * inlink->h) > INT_MAX)
           inlink->w, inlink->h, outlink->w, outlink->h);
        outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h * inlink->w,
                                                             outlink->w * inlink->h},
                                                inlink->sample_aspect_ratio);
        av_log(ctx, AV_LOG_ERROR,
               "Error when evaluating the expression '%s'\n", expr);
    mfxSyncPoint sync = NULL;
    do {
        err = MFXVideoVPP_RunFrameVPPAsync(s->session,
                                           (mfxFrameSurface1*)in->data[3],
                                           (mfxFrameSurface1*)out->data[3],
                                           NULL, &sync);
        if (err == MFX_WRN_DEVICE_BUSY)
            av_usleep(1);
    } while (err == MFX_WRN_DEVICE_BUSY);
    do {
        err = MFXVideoCORE_SyncOperation(s->session, sync, 1000);
    } while (err == MFX_WRN_IN_EXECUTION);
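/* Submission/synchronization pattern: the scaling job is queued
 * asynchronously, the filter retries (with a short sleep) while the device
 * reports MFX_WRN_DEVICE_BUSY, then blocks in MFXVideoCORE_SyncOperation()
 * (1000 ms per call) until the output surface is ready. */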
    out->width  = outlink->w;
    out->height = outlink->h;
    av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
              (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
              (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
              INT_MAX);
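/* Worked example: scaling a 1920x1080 input with SAR 1:1 down to 1280x1080
 * gives SAR = (1 * 1080 * 1920) / (1 * 1280 * 1080) = 3/2, so the display
 * aspect ratio (16:9) is preserved after the resize. */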
#define OFFSET(x) offsetof(QSVScaleContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#if QSV_HAVE_SCALING_CONFIG
    { "mode",      "set scaling mode",  OFFSET(mode), AV_OPT_TYPE_INT,   { .i64 = MFX_SCALING_MODE_DEFAULT },  MFX_SCALING_MODE_DEFAULT, MFX_SCALING_MODE_QUALITY, FLAGS, "mode" },
    { "low_power", "low power mode",    0,            AV_OPT_TYPE_CONST, { .i64 = MFX_SCALING_MODE_LOWPOWER }, INT_MIN, INT_MAX,                                  FLAGS, "mode" },
    { "hq",        "high quality mode", 0,            AV_OPT_TYPE_CONST, { .i64 = MFX_SCALING_MODE_QUALITY },  INT_MIN, INT_MAX,                                  FLAGS, "mode" },
#endif
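/* Illustrative use of the resulting filter (a sketch, assuming an FFmpeg build
 * with --enable-libmfx and QSV-capable hardware; it combines the "w"/"h"
 * expression options with the "mode" option defined above):
 *
 *   ffmpeg -hwaccel qsv -c:v h264_qsv -i in.mp4 \
 *          -vf "scale_qsv=w=1280:h=720:mode=hq" \
 *          -c:v h264_qsv out.mp4
 */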