/* Frame sync code read from the VP9 uncompressed header.
 * NOTE(review): presumably the 3-byte sync marker defined by the VP9
 * bitstream specification — confirm against the spec. */
39 #define VP9_SYNCCODE 0x498342
67 for (
i = 0;
i < n;
i++)
76 static void vp9_report_tile_progress(
VP9Context *
s,
int field,
int n) {
83 static void vp9_await_tile_progress(
VP9Context *
s,
int field,
int n) {
109 f->segmentation_map =
NULL;
110 f->hwaccel_picture_private =
NULL;
122 sz = 64 *
s->sb_cols *
s->sb_rows;
123 if (sz !=
s->frame_extradata_pool_size) {
126 if (!
s->frame_extradata_pool) {
127 s->frame_extradata_pool_size = 0;
130 s->frame_extradata_pool_size = sz;
136 memset(
f->extradata->data, 0,
f->extradata->size);
138 f->segmentation_map =
f->extradata->data;
146 if (!
f->hwaccel_priv_buf)
148 f->hwaccel_picture_private =
f->hwaccel_priv_buf->data;
175 if (
src->hwaccel_picture_private) {
/* Maximum number of hwaccel pixel formats that can be offered for one
 * frame (sized by which hwaccels are compiled in).
 * NOTE(review): D3D11VA is counted twice — presumably one slot each for
 * the old (AV_PIX_FMT_D3D11VA_VLD) and new (AV_PIX_FMT_D3D11) APIs,
 * cf. CONFIG_VP9_D3D11VA2_HWACCEL elsewhere in this file; confirm. */
191 #define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + \
192 CONFIG_VP9_D3D11VA_HWACCEL * 2 + \
193 CONFIG_VP9_NVDEC_HWACCEL + \
194 CONFIG_VP9_VAAPI_HWACCEL + \
195 CONFIG_VP9_VDPAU_HWACCEL)
199 int bytesperpixel =
s->bytesperpixel, ret, cols, rows;
205 if (!(
s->pix_fmt ==
s->gf_fmt &&
w ==
s->w &&
h ==
s->h)) {
210 switch (
s->pix_fmt) {
213 #if CONFIG_VP9_DXVA2_HWACCEL
216 #if CONFIG_VP9_D3D11VA_HWACCEL
220 #if CONFIG_VP9_NVDEC_HWACCEL
223 #if CONFIG_VP9_VAAPI_HWACCEL
226 #if CONFIG_VP9_VDPAU_HWACCEL
231 #if CONFIG_VP9_NVDEC_HWACCEL
234 #if CONFIG_VP9_VAAPI_HWACCEL
237 #if CONFIG_VP9_VDPAU_HWACCEL
243 *fmtp++ =
s->pix_fmt;
253 s->gf_fmt =
s->pix_fmt;
261 if (
s->intra_pred_data[0] && cols ==
s->cols && rows ==
s->rows &&
s->pix_fmt ==
s->last_fmt)
264 s->last_fmt =
s->pix_fmt;
265 s->sb_cols = (
w + 63) >> 6;
266 s->sb_rows = (
h + 63) >> 6;
267 s->cols = (
w + 7) >> 3;
268 s->rows = (
h + 7) >> 3;
/* Carve a sub-region of s->sb_cols * (n) * sizeof(*var) bytes out of the
 * running buffer pointer p: point var at the current position, then advance
 * p past the region.
 * NOTE(review): expands to TWO statements (no do/while(0) wrapper), so it
 * must only be used as a full standalone statement, never as the unbraced
 * body of an if/else — verify all call sites respect this. */
271 #define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
275 p =
av_malloc(
s->sb_cols * (128 + 192 * bytesperpixel +
276 lflvl_len *
sizeof(*
s->lflvl) + 16 *
sizeof(*
s->above_mv_ctx)));
299 for (
i = 0;
i <
s->active_tile_cols;
i++)
303 if (
s->s.h.bpp !=
s->last_bpp) {
306 s->last_bpp =
s->s.h.bpp;
317 int chroma_blocks, chroma_eobs, bytesperpixel =
s->bytesperpixel;
320 if (
td->b_base &&
td->block_base &&
s->block_alloc_using_2pass ==
s->s.frames[
CUR_FRAME].uses_2pass)
324 chroma_blocks = 64 * 64 >> (
s->ss_h +
s->ss_v);
325 chroma_eobs = 16 * 16 >> (
s->ss_h +
s->ss_v);
327 int sbs =
s->sb_cols *
s->sb_rows;
330 td->block_base =
av_mallocz(((64 * 64 + 2 * chroma_blocks) * bytesperpixel *
sizeof(int16_t) +
331 16 * 16 + 2 * chroma_eobs) * sbs);
332 if (!
td->b_base || !
td->block_base)
334 td->uvblock_base[0] =
td->block_base + sbs * 64 * 64 * bytesperpixel;
335 td->uvblock_base[1] =
td->uvblock_base[0] + sbs * chroma_blocks * bytesperpixel;
336 td->eob_base = (
uint8_t *) (
td->uvblock_base[1] + sbs * chroma_blocks * bytesperpixel);
337 td->uveob_base[0] =
td->eob_base + 16 * 16 * sbs;
338 td->uveob_base[1] =
td->uveob_base[0] + chroma_eobs * sbs;
342 if (!
td->block_structure)
346 for (
i = 1;
i <
s->active_tile_cols;
i++)
349 for (
i = 0;
i <
s->active_tile_cols;
i++) {
351 s->td[
i].block_base =
av_mallocz((64 * 64 + 2 * chroma_blocks) * bytesperpixel *
sizeof(int16_t) +
352 16 * 16 + 2 * chroma_eobs);
353 if (!
s->td[
i].b_base || !
s->td[
i].block_base)
355 s->td[
i].uvblock_base[0] =
s->td[
i].block_base + 64 * 64 * bytesperpixel;
356 s->td[
i].uvblock_base[1] =
s->td[
i].uvblock_base[0] + chroma_blocks * bytesperpixel;
357 s->td[
i].eob_base = (
uint8_t *) (
s->td[
i].uvblock_base[1] + chroma_blocks * bytesperpixel);
358 s->td[
i].uveob_base[0] =
s->td[
i].eob_base + 16 * 16;
359 s->td[
i].uveob_base[1] =
s->td[
i].uveob_base[0] + chroma_eobs;
363 if (!
s->td[
i].block_structure)
368 s->block_alloc_using_2pass =
s->s.frames[
CUR_FRAME].uses_2pass;
385 return m - ((v + 1) >> 1);
392 static const uint8_t inv_map_table[255] = {
393 7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176,
394 189, 202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9,
395 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24,
396 25, 26, 27, 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39,
397 40, 41, 42, 43, 44, 45, 47, 48, 49, 50, 51, 52, 53, 54,
398 55, 56, 57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
399 70, 71, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
400 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 99, 100,
401 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
402 116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
403 131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
404 146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
405 161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
406 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
407 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
408 207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
409 222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
410 237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
458 s->s.h.bpp = 8 +
bits * 2;
459 s->bytesperpixel = (7 +
s->s.h.bpp) >> 3;
465 s->ss_h =
s->ss_v = 0;
479 static const enum AVPixelFormat pix_fmt_for_ss[3][2 ][2 ] = {
491 s->pix_fmt = pix_fmt_for_ss[
bits][
s->ss_v][
s->ss_h];
502 s->ss_h =
s->ss_v = 1;
503 s->pix_fmt = pix_fmt_for_ss[
bits][1][1];
514 int c,
i, j, k, l, m, n,
w,
h,
max, size2, ret, sharp;
541 s->last_keyframe =
s->s.h.keyframe;
544 last_invisible =
s->s.h.invisible;
547 s->s.h.use_last_frame_mvs = !
s->s.h.errorres && !last_invisible;
549 if (
s->s.h.keyframe) {
557 s->s.h.refreshrefmask = 0xff;
563 s->s.h.intraonly =
s->s.h.invisible ?
get_bits1(&
s->gb) : 0;
564 s->s.h.resetctx =
s->s.h.errorres ? 0 :
get_bits(&
s->gb, 2);
565 if (
s->s.h.intraonly) {
574 s->ss_h =
s->ss_v = 1;
577 s->bytesperpixel = 1;
590 s->s.h.signbias[0] =
get_bits1(&
s->gb) && !
s->s.h.errorres;
592 s->s.h.signbias[1] =
get_bits1(&
s->gb) && !
s->s.h.errorres;
594 s->s.h.signbias[2] =
get_bits1(&
s->gb) && !
s->s.h.errorres;
595 if (!
s->s.refs[
s->s.h.refidx[0]].f->buf[0] ||
596 !
s->s.refs[
s->s.h.refidx[1]].f->buf[0] ||
597 !
s->s.refs[
s->s.h.refidx[2]].f->buf[0]) {
602 w =
s->s.refs[
s->s.h.refidx[0]].f->width;
603 h =
s->s.refs[
s->s.h.refidx[0]].f->height;
605 w =
s->s.refs[
s->s.h.refidx[1]].f->width;
606 h =
s->s.refs[
s->s.h.refidx[1]].f->height;
608 w =
s->s.refs[
s->s.h.refidx[2]].f->width;
609 h =
s->s.refs[
s->s.h.refidx[2]].f->height;
617 s->s.h.use_last_frame_mvs &=
s->s.frames[
CUR_FRAME].tf.f->width ==
w &&
624 s->s.h.allowcompinter =
s->s.h.signbias[0] !=
s->s.h.signbias[1] ||
625 s->s.h.signbias[0] !=
s->s.h.signbias[2];
626 if (
s->s.h.allowcompinter) {
627 if (
s->s.h.signbias[0] ==
s->s.h.signbias[1]) {
628 s->s.h.fixcompref = 2;
629 s->s.h.varcompref[0] = 0;
630 s->s.h.varcompref[1] = 1;
631 }
else if (
s->s.h.signbias[0] ==
s->s.h.signbias[2]) {
632 s->s.h.fixcompref = 1;
633 s->s.h.varcompref[0] = 0;
634 s->s.h.varcompref[1] = 2;
636 s->s.h.fixcompref = 0;
637 s->s.h.varcompref[0] = 1;
638 s->s.h.varcompref[1] = 2;
643 s->s.h.refreshctx =
s->s.h.errorres ? 0 :
get_bits1(&
s->gb);
644 s->s.h.parallelmode =
s->s.h.errorres ? 1 :
get_bits1(&
s->gb);
646 if (
s->s.h.keyframe ||
s->s.h.intraonly)
647 s->s.h.framectxid = 0;
650 if (
s->s.h.keyframe ||
s->s.h.errorres ||
s->s.h.intraonly) {
652 s->s.h.lf_delta.ref[0] = 1;
653 s->s.h.lf_delta.ref[1] = 0;
654 s->s.h.lf_delta.ref[2] = -1;
655 s->s.h.lf_delta.ref[3] = -1;
656 s->s.h.lf_delta.mode[0] = 0;
657 s->s.h.lf_delta.mode[1] = 0;
658 memset(
s->s.h.segmentation.feat, 0,
sizeof(
s->s.h.segmentation.feat));
664 if (
s->s.h.filter.sharpness != sharp) {
665 for (
i = 1;
i <= 63;
i++) {
669 limit >>= (sharp + 3) >> 2;
670 limit =
FFMIN(limit, 9 - sharp);
672 limit =
FFMAX(limit, 1);
674 s->filter_lut.lim_lut[
i] = limit;
675 s->filter_lut.mblim_lut[
i] = 2 * (
i + 2) + limit;
678 s->s.h.filter.sharpness = sharp;
679 if ((
s->s.h.lf_delta.enabled =
get_bits1(&
s->gb))) {
680 if ((
s->s.h.lf_delta.updated =
get_bits1(&
s->gb))) {
681 for (
i = 0;
i < 4;
i++)
684 for (
i = 0;
i < 2;
i++)
695 s->s.h.lossless =
s->s.h.yac_qi == 0 &&
s->s.h.ydc_qdelta == 0 &&
696 s->s.h.uvdc_qdelta == 0 &&
s->s.h.uvac_qdelta == 0;
701 if ((
s->s.h.segmentation.enabled =
get_bits1(&
s->gb))) {
702 if ((
s->s.h.segmentation.update_map =
get_bits1(&
s->gb))) {
703 for (
i = 0;
i < 7;
i++)
706 if ((
s->s.h.segmentation.temporal =
get_bits1(&
s->gb)))
707 for (
i = 0;
i < 3;
i++)
713 s->s.h.segmentation.absolute_vals =
get_bits1(&
s->gb);
714 for (
i = 0;
i < 8;
i++) {
715 if ((
s->s.h.segmentation.feat[
i].q_enabled =
get_bits1(&
s->gb)))
717 if ((
s->s.h.segmentation.feat[
i].lf_enabled =
get_bits1(&
s->gb)))
719 if ((
s->s.h.segmentation.feat[
i].ref_enabled =
get_bits1(&
s->gb)))
720 s->s.h.segmentation.feat[
i].ref_val =
get_bits(&
s->gb, 2);
721 s->s.h.segmentation.feat[
i].skip_enabled =
get_bits1(&
s->gb);
728 s->s.h.segmentation.temporal = 0;
729 s->s.h.segmentation.update_map = 0;
733 for (
i = 0;
i < (
s->s.h.segmentation.enabled ? 8 : 1);
i++) {
734 int qyac, qydc, quvac, quvdc, lflvl, sh;
736 if (
s->s.h.segmentation.enabled &&
s->s.h.segmentation.feat[
i].q_enabled) {
737 if (
s->s.h.segmentation.absolute_vals)
742 qyac =
s->s.h.yac_qi;
754 sh =
s->s.h.filter.level >= 32;
755 if (
s->s.h.segmentation.enabled &&
s->s.h.segmentation.feat[
i].lf_enabled) {
756 if (
s->s.h.segmentation.absolute_vals)
759 lflvl =
av_clip_uintp2(
s->s.h.filter.level +
s->s.h.segmentation.feat[
i].lf_val, 6);
761 lflvl =
s->s.h.filter.level;
763 if (
s->s.h.lf_delta.enabled) {
764 s->s.h.segmentation.feat[
i].lflvl[0][0] =
765 s->s.h.segmentation.feat[
i].lflvl[0][1] =
767 for (j = 1; j < 4; j++) {
768 s->s.h.segmentation.feat[
i].lflvl[j][0] =
770 s->s.h.lf_delta.mode[0]) * (1 << sh)), 6);
771 s->s.h.segmentation.feat[
i].lflvl[j][1] =
773 s->s.h.lf_delta.mode[1]) * (1 << sh)), 6);
776 memset(
s->s.h.segmentation.feat[
i].lflvl, lflvl,
777 sizeof(
s->s.h.segmentation.feat[
i].lflvl));
787 for (
s->s.h.tiling.log2_tile_cols = 0;
788 s->sb_cols > (64 <<
s->s.h.tiling.log2_tile_cols);
789 s->s.h.tiling.log2_tile_cols++) ;
790 for (
max = 0; (
s->sb_cols >>
max) >= 4;
max++) ;
792 while (
max >
s->s.h.tiling.log2_tile_cols) {
794 s->s.h.tiling.log2_tile_cols++;
799 s->s.h.tiling.tile_rows = 1 <<
s->s.h.tiling.log2_tile_rows;
800 if (
s->s.h.tiling.tile_cols != (1 <<
s->s.h.tiling.log2_tile_cols) || changed) {
805 for (
i = 0;
i <
s->active_tile_cols;
i++)
810 s->s.h.tiling.tile_cols = 1 <<
s->s.h.tiling.log2_tile_cols;
813 s->s.h.tiling.tile_cols : 1;
818 n_range_coders =
s->s.h.tiling.tile_cols;
825 for (
i = 0;
i <
s->active_tile_cols;
i++) {
828 rc += n_range_coders;
833 if (!
s->s.h.keyframe && !
s->s.h.intraonly) {
834 int valid_ref_frame = 0;
835 for (
i = 0;
i < 3;
i++) {
837 int refw =
ref->width, refh =
ref->height;
841 "Ref pixfmt (%s) did not match current frame (%s)",
845 }
else if (refw ==
w && refh ==
h) {
846 s->mvscale[
i][0] =
s->mvscale[
i][1] = 0;
850 if (
w * 2 < refw ||
h * 2 < refh ||
w > 16 * refw ||
h > 16 * refh) {
852 "Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
857 s->mvscale[
i][0] = (refw << 14) /
w;
858 s->mvscale[
i][1] = (refh << 14) /
h;
859 s->mvstep[
i][0] = 16 *
s->mvscale[
i][0] >> 14;
860 s->mvstep[
i][1] = 16 *
s->mvscale[
i][1] >> 14;
864 if (!valid_ref_frame) {
865 av_log(avctx,
AV_LOG_ERROR,
"No valid reference frame is found, bitstream not supported\n");
870 if (
s->s.h.keyframe ||
s->s.h.errorres || (
s->s.h.intraonly &&
s->s.h.resetctx == 3)) {
871 s->prob_ctx[0].p =
s->prob_ctx[1].p =
s->prob_ctx[2].p =
881 }
else if (
s->s.h.intraonly &&
s->s.h.resetctx == 2) {
888 s->s.h.compressed_header_size = size2 =
get_bits(&
s->gb, 16);
892 if (size2 >
size - (data2 -
data)) {
905 for (
i = 0;
i <
s->active_tile_cols;
i++) {
906 if (
s->s.h.keyframe ||
s->s.h.intraonly) {
907 memset(
s->td[
i].counts.coef, 0,
sizeof(
s->td[0].counts.coef));
908 memset(
s->td[
i].counts.eob, 0,
sizeof(
s->td[0].counts.eob));
910 memset(&
s->td[
i].counts, 0,
sizeof(
s->td[0].counts));
912 s->td[
i].nb_block_structure = 0;
918 s->prob.p =
s->prob_ctx[
c].p;
921 if (
s->s.h.lossless) {
925 if (
s->s.h.txfmmode == 3)
929 for (
i = 0;
i < 2;
i++)
932 for (
i = 0;
i < 2;
i++)
933 for (j = 0; j < 2; j++)
935 s->prob.p.tx16p[
i][j] =
937 for (
i = 0;
i < 2;
i++)
938 for (j = 0; j < 3; j++)
940 s->prob.p.tx32p[
i][j] =
946 for (
i = 0;
i < 4;
i++) {
949 for (j = 0; j < 2; j++)
950 for (k = 0; k < 2; k++)
951 for (l = 0; l < 6; l++)
952 for (m = 0; m < 6; m++) {
953 uint8_t *p =
s->prob.coef[
i][j][k][l][m];
955 if (m >= 3 && l == 0)
957 for (n = 0; n < 3; n++) {
966 for (j = 0; j < 2; j++)
967 for (k = 0; k < 2; k++)
968 for (l = 0; l < 6; l++)
969 for (m = 0; m < 6; m++) {
970 uint8_t *p =
s->prob.coef[
i][j][k][l][m];
978 if (
s->s.h.txfmmode ==
i)
983 for (
i = 0;
i < 3;
i++)
986 if (!
s->s.h.keyframe && !
s->s.h.intraonly) {
987 for (
i = 0;
i < 7;
i++)
988 for (j = 0; j < 3; j++)
990 s->prob.p.mv_mode[
i][j] =
994 for (
i = 0;
i < 4;
i++)
995 for (j = 0; j < 2; j++)
997 s->prob.p.filter[
i][j] =
1000 for (
i = 0;
i < 4;
i++)
1004 if (
s->s.h.allowcompinter) {
1006 if (
s->s.h.comppredmode)
1009 for (
i = 0;
i < 5;
i++)
1018 for (
i = 0;
i < 5;
i++) {
1020 s->prob.p.single_ref[
i][0] =
1023 s->prob.p.single_ref[
i][1] =
1029 for (
i = 0;
i < 5;
i++)
1031 s->prob.p.comp_ref[
i] =
1035 for (
i = 0;
i < 4;
i++)
1036 for (j = 0; j < 9; j++)
1038 s->prob.p.y_mode[
i][j] =
1041 for (
i = 0;
i < 4;
i++)
1042 for (j = 0; j < 4; j++)
1043 for (k = 0; k < 3; k++)
1045 s->prob.p.partition[3 -
i][j][k] =
1047 s->prob.p.partition[3 -
i][j][k]);
1050 for (
i = 0;
i < 3;
i++)
1054 for (
i = 0;
i < 2;
i++) {
1056 s->prob.p.mv_comp[
i].sign =
1059 for (j = 0; j < 10; j++)
1061 s->prob.p.mv_comp[
i].classes[j] =
1065 s->prob.p.mv_comp[
i].class0 =
1068 for (j = 0; j < 10; j++)
1070 s->prob.p.mv_comp[
i].bits[j] =
1074 for (
i = 0;
i < 2;
i++) {
1075 for (j = 0; j < 2; j++)
1076 for (k = 0; k < 3; k++)
1078 s->prob.p.mv_comp[
i].class0_fp[j][k] =
1081 for (j = 0; j < 3; j++)
1083 s->prob.p.mv_comp[
i].fp[j] =
1087 if (
s->s.h.highprecisionmvs) {
1088 for (
i = 0;
i < 2;
i++) {
1090 s->prob.p.mv_comp[
i].class0_hp =
1094 s->prob.p.mv_comp[
i].hp =
1100 return (data2 -
data) + size2;
1104 ptrdiff_t yoff, ptrdiff_t uvoff,
enum BlockLevel bl)
1107 int c = ((
s->above_partition_ctx[col] >> (3 - bl)) & 1) |
1108 (((
td->left_partition_ctx[row & 0x7] >> (3 - bl)) & 1) << 1);
1110 s->prob.p.partition[bl][
c];
1112 ptrdiff_t hbs = 4 >> bl;
1114 ptrdiff_t y_stride =
f->linesize[0], uv_stride =
f->linesize[1];
1115 int bytesperpixel =
s->bytesperpixel;
1120 }
else if (col + hbs < s->cols) {
1121 if (row + hbs < s->rows) {
1129 yoff += hbs * 8 * y_stride;
1130 uvoff += hbs * 8 * uv_stride >>
s->ss_v;
1135 yoff += hbs * 8 * bytesperpixel;
1136 uvoff += hbs * 8 * bytesperpixel >>
s->ss_h;
1140 decode_sb(
td, row, col, lflvl, yoff, uvoff, bl + 1);
1142 yoff + 8 * hbs * bytesperpixel,
1143 uvoff + (8 * hbs * bytesperpixel >>
s->ss_h), bl + 1);
1144 yoff += hbs * 8 * y_stride;
1145 uvoff += hbs * 8 * uv_stride >>
s->ss_v;
1146 decode_sb(
td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1148 yoff + 8 * hbs * bytesperpixel,
1149 uvoff + (8 * hbs * bytesperpixel >>
s->ss_h), bl + 1);
1156 decode_sb(
td, row, col, lflvl, yoff, uvoff, bl + 1);
1158 yoff + 8 * hbs * bytesperpixel,
1159 uvoff + (8 * hbs * bytesperpixel >>
s->ss_h), bl + 1);
1164 }
else if (row + hbs < s->rows) {
1167 decode_sb(
td, row, col, lflvl, yoff, uvoff, bl + 1);
1168 yoff += hbs * 8 * y_stride;
1169 uvoff += hbs * 8 * uv_stride >>
s->ss_v;
1170 decode_sb(
td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1177 decode_sb(
td, row, col, lflvl, yoff, uvoff, bl + 1);
1179 td->counts.partition[bl][
c][bp]++;
1183 ptrdiff_t yoff, ptrdiff_t uvoff,
enum BlockLevel bl)
1187 ptrdiff_t hbs = 4 >> bl;
1189 ptrdiff_t y_stride =
f->linesize[0], uv_stride =
f->linesize[1];
1190 int bytesperpixel =
s->bytesperpixel;
1195 }
else if (
td->b->bl == bl) {
1198 yoff += hbs * 8 * y_stride;
1199 uvoff += hbs * 8 * uv_stride >>
s->ss_v;
1201 }
else if (
b->bp ==
PARTITION_V && col + hbs < s->cols) {
1202 yoff += hbs * 8 * bytesperpixel;
1203 uvoff += hbs * 8 * bytesperpixel >>
s->ss_h;
1208 if (col + hbs < s->cols) {
1209 if (row + hbs < s->rows) {
1210 decode_sb_mem(
td, row, col + hbs, lflvl, yoff + 8 * hbs * bytesperpixel,
1211 uvoff + (8 * hbs * bytesperpixel >>
s->ss_h), bl + 1);
1212 yoff += hbs * 8 * y_stride;
1213 uvoff += hbs * 8 * uv_stride >>
s->ss_v;
1216 yoff + 8 * hbs * bytesperpixel,
1217 uvoff + (8 * hbs * bytesperpixel >>
s->ss_h), bl + 1);
1219 yoff += hbs * 8 * bytesperpixel;
1220 uvoff += hbs * 8 * bytesperpixel >>
s->ss_h;
1223 }
else if (row + hbs < s->rows) {
1224 yoff += hbs * 8 * y_stride;
1225 uvoff += hbs * 8 * uv_stride >>
s->ss_v;
1233 int sb_start = ( idx * n) >> log2_n;
1234 int sb_end = ((idx + 1) * n) >> log2_n;
1235 *start =
FFMIN(sb_start, n) << 3;
1236 *end =
FFMIN(sb_end, n) << 3;
1244 for (
i = 0;
i <
s->active_tile_cols;
i++)
1253 for (
i = 0;
i < 3;
i++) {
1258 for (
i = 0;
i < 8;
i++) {
1276 int row, col, tile_row, tile_col, ret;
1278 int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
1280 ptrdiff_t yoff, uvoff, ls_y, ls_uv;
1283 ls_y =
f->linesize[0];
1284 ls_uv =
f->linesize[1];
1285 bytesperpixel =
s->bytesperpixel;
1288 for (tile_row = 0; tile_row <
s->s.h.tiling.tile_rows; tile_row++) {
1290 tile_row,
s->s.h.tiling.log2_tile_rows,
s->sb_rows);
1292 for (tile_col = 0; tile_col <
s->s.h.tiling.tile_cols; tile_col++) {
1295 if (tile_col ==
s->s.h.tiling.tile_cols - 1 &&
1296 tile_row ==
s->s.h.tiling.tile_rows - 1) {
1303 if (tile_size >
size) {
1318 for (row = tile_row_start; row < tile_row_end;
1319 row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >>
s->ss_v) {
1321 ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
1323 for (tile_col = 0; tile_col <
s->s.h.tiling.tile_cols; tile_col++) {
1325 tile_col,
s->s.h.tiling.log2_tile_cols,
s->sb_cols);
1326 td->tile_col_start = tile_col_start;
1328 memset(
td->left_partition_ctx, 0, 8);
1329 memset(
td->left_skip_ctx, 0, 8);
1330 if (
s->s.h.keyframe ||
s->s.h.intraonly) {
1335 memset(
td->left_y_nnz_ctx, 0, 16);
1336 memset(
td->left_uv_nnz_ctx, 0, 32);
1337 memset(
td->left_segpred_ctx, 0, 8);
1339 td->c = &
td->c_b[tile_col];
1342 for (col = tile_col_start;
1344 col += 8, yoff2 += 64 * bytesperpixel,
1345 uvoff2 += 64 * bytesperpixel >>
s->ss_h, lflvl_ptr++) {
1349 memset(lflvl_ptr->
mask, 0,
sizeof(lflvl_ptr->
mask));
1370 if (row + 8 <
s->rows) {
1371 memcpy(
s->intra_pred_data[0],
1372 f->data[0] + yoff + 63 * ls_y,
1373 8 *
s->cols * bytesperpixel);
1374 memcpy(
s->intra_pred_data[1],
1375 f->data[1] + uvoff + ((64 >>
s->ss_v) - 1) * ls_uv,
1376 8 *
s->cols * bytesperpixel >>
s->ss_h);
1377 memcpy(
s->intra_pred_data[2],
1378 f->data[2] + uvoff + ((64 >>
s->ss_v) - 1) * ls_uv,
1379 8 *
s->cols * bytesperpixel >>
s->ss_h);
1383 if (
s->s.h.filter.level) {
1386 lflvl_ptr =
s->lflvl;
1387 for (col = 0; col <
s->cols;
1388 col += 8, yoff2 += 64 * bytesperpixel,
1389 uvoff2 += 64 * bytesperpixel >>
s->ss_h, lflvl_ptr++) {
1406 int decode_tiles_mt(
AVCodecContext *avctx,
void *tdata,
int jobnr,
1411 ptrdiff_t uvoff, yoff, ls_y, ls_uv;
1412 int bytesperpixel =
s->bytesperpixel, row, col, tile_row;
1413 unsigned tile_cols_len;
1414 int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
1419 ls_y =
f->linesize[0];
1420 ls_uv =
f->linesize[1];
1423 jobnr,
s->s.h.tiling.log2_tile_cols,
s->sb_cols);
1424 td->tile_col_start = tile_col_start;
1425 uvoff = (64 * bytesperpixel >>
s->ss_h)*(tile_col_start >> 3);
1426 yoff = (64 * bytesperpixel)*(tile_col_start >> 3);
1427 lflvl_ptr_base =
s->lflvl+(tile_col_start >> 3);
1429 for (tile_row = 0; tile_row <
s->s.h.tiling.tile_rows; tile_row++) {
1431 tile_row,
s->s.h.tiling.log2_tile_rows,
s->sb_rows);
1433 td->c = &
td->c_b[tile_row];
1434 for (row = tile_row_start; row < tile_row_end;
1435 row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >>
s->ss_v) {
1436 ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
1437 VP9Filter *lflvl_ptr = lflvl_ptr_base+
s->sb_cols*(row >> 3);
1439 memset(
td->left_partition_ctx, 0, 8);
1440 memset(
td->left_skip_ctx, 0, 8);
1441 if (
s->s.h.keyframe ||
s->s.h.intraonly) {
1446 memset(
td->left_y_nnz_ctx, 0, 16);
1447 memset(
td->left_uv_nnz_ctx, 0, 32);
1448 memset(
td->left_segpred_ctx, 0, 8);
1450 for (col = tile_col_start;
1452 col += 8, yoff2 += 64 * bytesperpixel,
1453 uvoff2 += 64 * bytesperpixel >>
s->ss_h, lflvl_ptr++) {
1456 memset(lflvl_ptr->
mask, 0,
sizeof(lflvl_ptr->
mask));
1463 tile_cols_len = tile_col_end - tile_col_start;
1464 if (row + 8 <
s->rows) {
1465 memcpy(
s->intra_pred_data[0] + (tile_col_start * 8 * bytesperpixel),
1466 f->data[0] + yoff + 63 * ls_y,
1467 8 * tile_cols_len * bytesperpixel);
1468 memcpy(
s->intra_pred_data[1] + (tile_col_start * 8 * bytesperpixel >>
s->ss_h),
1469 f->data[1] + uvoff + ((64 >>
s->ss_v) - 1) * ls_uv,
1470 8 * tile_cols_len * bytesperpixel >>
s->ss_h);
1471 memcpy(
s->intra_pred_data[2] + (tile_col_start * 8 * bytesperpixel >>
s->ss_h),
1472 f->data[2] + uvoff + ((64 >>
s->ss_v) - 1) * ls_uv,
1473 8 * tile_cols_len * bytesperpixel >>
s->ss_h);
1476 vp9_report_tile_progress(
s, row >> 3, 1);
1486 ptrdiff_t uvoff, yoff, ls_y, ls_uv;
1488 int bytesperpixel =
s->bytesperpixel, col,
i;
1492 ls_y =
f->linesize[0];
1493 ls_uv =
f->linesize[1];
1495 for (
i = 0;
i <
s->sb_rows;
i++) {
1496 vp9_await_tile_progress(
s,
i,
s->s.h.tiling.tile_cols);
1498 if (
s->s.h.filter.level) {
1499 yoff = (ls_y * 64)*
i;
1500 uvoff = (ls_uv * 64 >>
s->ss_v)*
i;
1501 lflvl_ptr =
s->lflvl+
s->sb_cols*
i;
1502 for (col = 0; col <
s->cols;
1503 col += 8, yoff += 64 * bytesperpixel,
1504 uvoff += 64 * bytesperpixel >>
s->ss_h, lflvl_ptr++) {
1517 unsigned int tile, nb_blocks = 0;
1519 if (
s->s.h.segmentation.enabled) {
1520 for (tile = 0; tile <
s->active_tile_cols; tile++)
1521 nb_blocks +=
s->td[tile].nb_block_structure;
1529 par->
qp =
s->s.h.yac_qi;
1530 par->
delta_qp[0][0] =
s->s.h.ydc_qdelta;
1531 par->
delta_qp[1][0] =
s->s.h.uvdc_qdelta;
1532 par->
delta_qp[2][0] =
s->s.h.uvdc_qdelta;
1533 par->
delta_qp[1][1] =
s->s.h.uvac_qdelta;
1534 par->
delta_qp[2][1] =
s->s.h.uvac_qdelta;
1537 unsigned int block = 0;
1538 unsigned int tile, block_tile;
1540 for (tile = 0; tile <
s->active_tile_cols; tile++) {
1543 for (block_tile = 0; block_tile <
td->nb_block_structure; block_tile++) {
1545 unsigned int row =
td->block_structure[block_tile].row;
1546 unsigned int col =
td->block_structure[block_tile].col;
1547 uint8_t seg_id =
frame->segmentation_map[row * 8 *
s->sb_cols + col];
1551 b->w = 1 << (3 +
td->block_structure[block_tile].block_size_idx_x);
1552 b->h = 1 << (3 +
td->block_structure[block_tile].block_size_idx_y);
1554 if (
s->s.h.segmentation.feat[seg_id].q_enabled) {
1555 b->delta_qp =
s->s.h.segmentation.feat[seg_id].q_val;
1556 if (
s->s.h.segmentation.absolute_vals)
1557 b->delta_qp -= par->
qp;
1574 (!
s->s.h.segmentation.enabled || !
s->s.h.segmentation.update_map);
1579 }
else if (ret == 0) {
1580 if (!
s->s.refs[
ref].f->buf[0]) {
1595 for (
i = 0;
i < 8;
i++) {
1596 if (
s->next_refs[
i].f->buf[0])
1598 if (
s->s.refs[
i].f->buf[0] &&
1608 if (!retain_segmap_ref ||
s->s.h.keyframe ||
s->s.h.intraonly) {
1611 if (!
s->s.h.keyframe && !
s->s.h.intraonly && !
s->s.h.errorres &&
s->s.frames[
CUR_FRAME].tf.f->buf[0] &&
1617 if (!
s->s.h.intraonly && !
s->s.h.keyframe && !
s->s.h.errorres &&
s->s.frames[
CUR_FRAME].tf.f->buf[0] &&
1625 f->key_frame =
s->s.h.keyframe;
1635 for (
i = 0;
i < 8;
i++) {
1636 if (
s->next_refs[
i].f->buf[0])
1638 if (
s->s.h.refreshrefmask & (1 <<
i)) {
1640 }
else if (
s->s.refs[
i].f->buf[0]) {
1661 memset(
s->above_partition_ctx, 0,
s->cols);
1662 memset(
s->above_skip_ctx, 0,
s->cols);
1663 if (
s->s.h.keyframe ||
s->s.h.intraonly) {
1664 memset(
s->above_mode_ctx,
DC_PRED,
s->cols * 2);
1668 memset(
s->above_y_nnz_ctx, 0,
s->sb_cols * 16);
1669 memset(
s->above_uv_nnz_ctx[0], 0,
s->sb_cols * 16 >>
s->ss_h);
1670 memset(
s->above_uv_nnz_ctx[1], 0,
s->sb_cols * 16 >>
s->ss_h);
1671 memset(
s->above_segpred_ctx, 0,
s->cols);
1676 "Failed to allocate block buffers\n");
1679 if (
s->s.h.refreshctx &&
s->s.h.parallelmode) {
1682 for (
i = 0;
i < 4;
i++) {
1683 for (j = 0; j < 2; j++)
1684 for (k = 0; k < 2; k++)
1685 for (l = 0; l < 6; l++)
1686 for (m = 0; m < 6; m++)
1687 memcpy(
s->prob_ctx[
s->s.h.framectxid].coef[
i][j][k][l][m],
1688 s->prob.coef[
i][j][k][l][m], 3);
1689 if (
s->s.h.txfmmode ==
i)
1692 s->prob_ctx[
s->s.h.framectxid].p =
s->prob.p;
1694 }
else if (!
s->s.h.refreshctx) {
1700 for (
i = 0;
i <
s->sb_rows;
i++)
1706 for (
i = 0;
i <
s->active_tile_cols;
i++) {
1707 s->td[
i].b =
s->td[
i].b_base;
1708 s->td[
i].block =
s->td[
i].block_base;
1709 s->td[
i].uvblock[0] =
s->td[
i].uvblock_base[0];
1710 s->td[
i].uvblock[1] =
s->td[
i].uvblock_base[1];
1711 s->td[
i].eob =
s->td[
i].eob_base;
1712 s->td[
i].uveob[0] =
s->td[
i].uveob_base[0];
1713 s->td[
i].uveob[1] =
s->td[
i].uveob_base[1];
1714 s->td[
i].error_info = 0;
1719 int tile_row, tile_col;
1723 for (tile_row = 0; tile_row <
s->s.h.tiling.tile_rows; tile_row++) {
1724 for (tile_col = 0; tile_col <
s->s.h.tiling.tile_cols; tile_col++) {
1727 if (tile_col ==
s->s.h.tiling.tile_cols - 1 &&
1728 tile_row ==
s->s.h.tiling.tile_rows - 1) {
1735 if (tile_size >
size)
1758 for (
i = 1;
i <
s->s.h.tiling.tile_cols;
i++)
1759 for (j = 0; j <
sizeof(
s->td[
i].counts) /
sizeof(
unsigned); j++)
1760 ((
unsigned *)&
s->td[0].counts)[j] += ((
unsigned *)&
s->td[
i].counts)[j];
1762 if (
s->pass < 2 &&
s->s.h.refreshctx && !
s->s.h.parallelmode) {
1766 }
while (
s->pass++ == 1);
1768 if (
s->td->error_info < 0) {
1770 s->td->error_info = 0;
1783 for (
i = 0;
i < 8;
i++) {
1784 if (
s->s.refs[
i].f->buf[0])
1786 if (
s->next_refs[
i].f->buf[0] &&
1791 if (!
s->s.h.invisible) {
1808 for (
i = 0;
i < 3;
i++)
1810 for (
i = 0;
i < 8;
i++)
1819 for (
i = 0;
i < 3;
i++) {
1821 if (!
s->s.frames[
i].tf.f) {
1827 for (
i = 0;
i < 8;
i++) {
1830 if (!
s->s.refs[
i].f || !
s->next_refs[
i].f) {
1845 s->s.h.filter.sharpness = -1;
1856 for (
i = 0;
i < 3;
i++) {
1857 if (
s->s.frames[
i].tf.f->buf[0])
1859 if (ssrc->s.frames[
i].tf.f->buf[0]) {
1864 for (
i = 0;
i < 8;
i++) {
1865 if (
s->s.refs[
i].f->buf[0])
1867 if (ssrc->next_refs[
i].f->buf[0]) {
1873 s->s.h.invisible = ssrc->s.h.invisible;
1874 s->s.h.keyframe = ssrc->s.h.keyframe;
1875 s->s.h.intraonly = ssrc->s.h.intraonly;
1876 s->ss_v = ssrc->ss_v;
1877 s->ss_h = ssrc->ss_h;
1878 s->s.h.segmentation.enabled = ssrc->s.h.segmentation.enabled;
1879 s->s.h.segmentation.update_map = ssrc->s.h.segmentation.update_map;
1880 s->s.h.segmentation.absolute_vals = ssrc->s.h.segmentation.absolute_vals;
1881 s->bytesperpixel = ssrc->bytesperpixel;
1882 s->gf_fmt = ssrc->gf_fmt;
1885 s->s.h.bpp = ssrc->s.h.bpp;
1886 s->bpp_index = ssrc->bpp_index;
1887 s->pix_fmt = ssrc->pix_fmt;
1888 memcpy(&
s->prob_ctx, &ssrc->prob_ctx,
sizeof(
s->prob_ctx));
1889 memcpy(&
s->s.h.lf_delta, &ssrc->s.h.lf_delta,
sizeof(
s->s.h.lf_delta));
1890 memcpy(&
s->s.h.segmentation.feat, &ssrc->s.h.segmentation.feat,
1891 sizeof(
s->s.h.segmentation.feat));
1912 .bsfs =
"vp9_superframe_split",
1914 #if CONFIG_VP9_DXVA2_HWACCEL
1917 #if CONFIG_VP9_D3D11VA_HWACCEL
1920 #if CONFIG_VP9_D3D11VA2_HWACCEL
1923 #if CONFIG_VP9_NVDEC_HWACCEL
1926 #if CONFIG_VP9_VAAPI_HWACCEL
1929 #if CONFIG_VP9_VDPAU_HWACCEL
static void flush(AVCodecContext *avctx)
Simple assert() macros that are a bit more flexible than ISO C assert().
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Libavcodec external API header.
#define FF_THREAD_FRAME
Decode more than one frame at once.
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
#define FF_CODEC_PROPERTY_LOSSLESS
static av_cold int init(AVCodecContext *avctx)
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
#define atomic_store(object, desired)
#define atomic_fetch_add_explicit(object, operand, order)
#define atomic_load_explicit(object, order)
#define atomic_init(obj, value)
#define pthread_mutex_lock(a)
#define pthread_mutex_unlock(a)
bitstream reader API header.
static int decode012(GetBitContext *gb)
static unsigned int get_bits1(GetBitContext *s)
static void skip_bits(GetBitContext *s, int n)
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
static int get_bits_count(const GetBitContext *s)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
static const uint8_t * align_get_bits(GetBitContext *s)
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
AVBufferRef * av_buffer_allocz(buffer_size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
AVBufferPool * av_buffer_pool_init(buffer_size_t size, AVBufferRef *(*alloc)(buffer_size_t size))
Allocate and initialize a buffer pool.
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
#define AV_LOG_WARNING
Something somehow does not look correct.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available).
void * av_mallocz_array(size_t nmemb, size_t size)
Allocate a memory block for an array with av_mallocz().
@ AV_PICTURE_TYPE_I
Intra.
@ AV_PICTURE_TYPE_P
Predicted.
#define HWACCEL_DXVA2(codec)
#define HWACCEL_VDPAU(codec)
#define HWACCEL_NVDEC(codec)
#define HWACCEL_VAAPI(codec)
#define HWACCEL_D3D11VA(codec)
#define HWACCEL_D3D11VA2(codec)
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
#define FF_CODEC_CAP_SLICE_THREAD_HAS_MF
Codec initializes slice-based threading with a main function.
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
av_cold void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp, int bitexact)
common internal API header
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
#define FF_DISABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
static enum AVPixelFormat pix_fmt_rgb[3]
static enum AVPixelFormat pix_fmts[]
static av_always_inline int pthread_cond_signal(pthread_cond_t *cond)
static av_always_inline int pthread_cond_destroy(pthread_cond_t *cond)
static av_always_inline int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
static av_always_inline int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
static av_always_inline int pthread_mutex_destroy(pthread_mutex_t *mutex)
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
#define AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV440P12
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
@ AVCOL_RANGE_JPEG
Full range content.
#define AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_GBRP12
AVPixelFormat
Pixel format.
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
#define AV_PIX_FMT_YUV440P10
#define AV_PIX_FMT_YUV444P10
AVColorSpace
YUV colorspace type.
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB)
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
@ AVCOL_SPC_SMPTE240M
functionally identical to above
const AVProfile ff_vp9_profiles[]
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() for frame-multithreaded codecs.
enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
int ff_slice_thread_execute_with_mainfunc(AVCodecContext *avctx, action_func2 *func2, main_func *mainfunc, void *arg, int *ret, int job_count)
#define FF_ARRAY_ELEMS(a)
uint8_t * data
The data buffer.
main external API structure.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
enum AVColorRange color_range
MPEG vs JPEG YUV range.
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
int active_thread_type
Which multithreading methods are in use by the codec.
unsigned properties
Properties of the stream that gets decoded.
int export_side_data
Bit set of AV_CODEC_EXPORT_DATA_* flags, which affects the kind of metadata exported in frame,...
enum AVColorSpace colorspace
YUV colorspace type.
int flags
AV_CODEC_FLAG_*.
const char * name
Name of the codec implementation.
This structure describes decoded (raw) audio or video data.
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
This structure stores compressed data.
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Data structure for storing block-level encoding information.
Video encoding parameters for a given frame.
int32_t delta_qp[4][2]
Quantisation parameter offset from the base (per-frame) qp for a given plane (first index) and AC/DC ...
int32_t qp
Base quantisation parameter for the frame.
AVBufferRef * hwaccel_priv_buf
void * hwaccel_picture_private
uint8_t * segmentation_map
#define av_malloc_array(a, b)
static int ref[MAX_W *MAX_W]
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVVideoEncParams plus an array of nb_blocks AVVideoBlockParams, and attaches it to the frame as side data.
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
@ AV_VIDEO_ENC_PARAMS_VP9
VP9 stores:
Core video DSP helper functions.
VP5 and VP6 compatible video decoder (common features)
int ff_vp56_init_range_decoder(VP56RangeCoder *c, const uint8_t *buf, int buf_size)
static av_always_inline int vp56_rac_get_prob_branchy(VP56RangeCoder *c, int prob)
static av_always_inline int vp8_rac_get_tree(VP56RangeCoder *c, const int8_t(*tree)[2], const uint8_t *probs)
static av_always_inline int vp8_rac_get(VP56RangeCoder *c)
static av_always_inline int vpX_rac_is_end(VP56RangeCoder *c)
vp5/6/8/9: returns 1 if the end of the stream has been reached, 0 otherwise.
static int vp8_rac_get_uint(VP56RangeCoder *c, int bits)
static void vp9_tile_data_free(VP9TileData *td)
static int read_colorspace_details(AVCodecContext *avctx)
static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
static void vp9_frame_unref(AVCodecContext *avctx, VP9Frame *f)
static int update_size(AVCodecContext *avctx, int w, int h)
static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
static int update_prob(VP56RangeCoder *c, int p)
static int init_frames(AVCodecContext *avctx)
static int decode_frame_header(AVCodecContext *avctx, const uint8_t *data, int size, int *ref)
static int vp9_frame_ref(AVCodecContext *avctx, VP9Frame *dst, VP9Frame *src)
static void vp9_free_entries(AVCodecContext *avctx)
static av_cold int vp9_decode_init(AVCodecContext *avctx)
static void free_buffers(VP9Context *s)
static int vp9_alloc_entries(AVCodecContext *avctx, int n)
static av_always_inline int inv_recenter_nonneg(int v, int m)
static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
static void vp9_decode_flush(AVCodecContext *avctx)
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
static int decode_tiles(AVCodecContext *avctx, const uint8_t *data, int size)
static int vp9_export_enc_params(VP9Context *s, VP9Frame *frame)
static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
static av_cold int vp9_decode_free(AVCodecContext *avctx)
static int update_block_buffers(AVCodecContext *avctx)
static int vp9_decode_frame(AVCodecContext *avctx, void *frame, int *got_frame, AVPacket *pkt)
#define assign(var, type, n)
void ff_vp9_decode_block(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl, enum BlockPartition bp)
const ProbContext ff_vp9_default_probs
const int16_t ff_vp9_ac_qlookup[3][256]
const uint8_t ff_vp9_default_kf_partition_probs[4][4][3]
const int16_t ff_vp9_dc_qlookup[3][256]
const uint8_t ff_vp9_default_coef_probs[4][2][2][6][6][3]
const uint8_t ff_vp9_model_pareto8[256][8]
const int8_t ff_vp9_partition_tree[3][2]
#define REF_INVALID_SCALE
void ff_vp9_loopfilter_sb(AVCodecContext *avctx, VP9Filter *lflvl, int row, int col, ptrdiff_t yoff, ptrdiff_t uvoff)
void ff_vp9_adapt_probs(VP9Context *s)