#ifndef AVCODEC_H264_MVPRED_H
#define AVCODEC_H264_MVPRED_H
static av_always_inline int fetch_diagonal_mv(const H264Context *h,
                                              H264SliceContext *sl,
                                              const int16_t **C,
                                              int i, int list, int part_width)
{
    const int topright_ref = sl->ref_cache[list][i - 8 + part_width];
#define SET_DIAG_MV(MV_OP, REF_OP, XY, Y4)                              \
    const int xy = XY, y4 = Y4;                                         \
    const int mb_type = mb_types[xy + (y4 >> 2) * h->mb_stride];        \
    if (!USES_LIST(mb_type, list))                                      \
        return LIST_NOT_USED;                                           \
    mv = h->cur_pic_ptr->motion_val[list][h->mb2b_xy[xy] + 3 + y4 * h->b_stride]; \
    sl->mv_cache[list][scan8[0] - 2][0] = mv[0];                        \
    sl->mv_cache[list][scan8[0] - 2][1] = mv[1] MV_OP;                  \
    return h->cur_pic_ptr->ref_index[list][4 * xy + 1 + (y4 & ~1)] REF_OP;
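/* SET_DIAG_MV fetches the motion vector of the rightmost 4x4 column of a
 * neighbouring macroblock, stashes it in the otherwise unused mv_cache
 * column at scan8[0] - 2 (which *C is pointed at below), and returns the
 * matching reference index; MV_OP/REF_OP apply the frame<->field rescaling
 * of the vertical MV component and the reference index. */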
59 &&
i >=
scan8[0] + 8 && (
i & 7) == 4
61 const uint32_t *mb_types =
h->cur_pic_ptr->mb_type;
68 (sl->
mb_y & 1) * 2 + (
i >> 5));
    if (topright_ref != PART_NOT_AVAILABLE) {
        *C = sl->mv_cache[list][i - 8 + part_width];
        return topright_ref;
    } else {
        ff_tlog(h->avctx, "topright MV not available\n");

        *C = sl->mv_cache[list][i - 8 - 1];
        return sl->ref_cache[list][i - 8 - 1];
    }
}
/**
 * Get the predicted MV.
 */
static av_always_inline void pred_motion(const H264Context *const h,
                                         H264SliceContext *sl,
                                         int n,
                                         int part_width, int list, int ref,
                                         int *const mx, int *const my)
{
    const int index8       = scan8[n];
    const int top_ref      = sl->ref_cache[list][index8 - 8];
    const int left_ref     = sl->ref_cache[list][index8 - 1];
    const int16_t *const A = sl->mv_cache[list][index8 - 1];
    const int16_t *const B = sl->mv_cache[list][index8 - 8];
    const int16_t *C;
    int diagonal_ref, match_count;

    av_assert2(part_width == 1 || part_width == 2 || part_width == 4);
    diagonal_ref = fetch_diagonal_mv(h, sl, &C, index8, list, part_width);
    match_count  = (diagonal_ref == ref) + (top_ref == ref) + (left_ref == ref);
    ff_tlog(h->avctx, "pred_motion match_count=%d\n", match_count);
    if (match_count > 1) { // most common
        *mx = mid_pred(A[0], B[0], C[0]);
        *my = mid_pred(A[1], B[1], C[1]);
    } else if (match_count == 1) {
        if (left_ref == ref) {
            *mx = A[0];
            *my = A[1];
        } else if (top_ref == ref) {
            *mx = B[0];
            *my = B[1];
        } else {
            *mx = C[0];
            *my = C[1];
        }
    } else {
        if (top_ref      == PART_NOT_AVAILABLE &&
            diagonal_ref == PART_NOT_AVAILABLE &&
            left_ref     != PART_NOT_AVAILABLE) {
            *mx = A[0];
            *my = A[1];
        } else {
            *mx = mid_pred(A[0], B[0], C[0]);
            *my = mid_pred(A[1], B[1], C[1]);
        }
    }
150 "pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n",
151 top_ref,
B[0],
B[1], diagonal_ref,
C[0],
C[1], left_ref,
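/* Worked example of the median rule (hypothetical values): if A = (4, 0),
 * B = (8, 2) and C = (0, 2) all use the requested reference, match_count
 * is 3 and the predictor is the component-wise median,
 * (mid_pred(4, 8, 0), mid_pred(0, 2, 2)) = (4, 2). */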
/**
 * Get the directionally predicted 16x8 MV.
 */
static av_always_inline void pred_16x8_motion(const H264Context *const h,
                                              H264SliceContext *sl,
                                              int n, int list, int ref,
                                              int *const mx, int *const my)
{
    if (n == 0) {
        const int top_ref      = sl->ref_cache[list][scan8[0] - 8];
        const int16_t *const B = sl->mv_cache[list][scan8[0] - 8];

        ff_tlog(h->avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n",
                top_ref, B[0], B[1], sl->mb_x, sl->mb_y, n, list);

        if (top_ref == ref) {
            *mx = B[0];
            *my = B[1];
            return;
        }
    } else {
        const int left_ref     = sl->ref_cache[list][scan8[8] - 1];
        const int16_t *const A = sl->mv_cache[list][scan8[8] - 1];

        ff_tlog(h->avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n",
                left_ref, A[0], A[1], sl->mb_x, sl->mb_y, n, list);

        if (left_ref == ref) {
            *mx = A[0];
            *my = A[1];
            return;
        }
    }

    // RARE
    pred_motion(h, sl, n, 4, list, ref, mx, my);
}
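/* For 16x8 partitions the upper half predicts directionally from the top
 * neighbour and the lower half from the left neighbour; only when that
 * neighbour uses a different reference does the code fall back to the
 * generic median predictor in pred_motion(). pred_8x16_motion() below is
 * the analogous left/diagonal split for vertical partitions. */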
/**
 * Get the directionally predicted 8x16 MV.
 */
static av_always_inline void pred_8x16_motion(const H264Context *const h,
                                              H264SliceContext *sl,
                                              int n, int list, int ref,
                                              int *const mx, int *const my)
{
    if (n == 0) {
        const int left_ref     = sl->ref_cache[list][scan8[0] - 1];
        const int16_t *const A = sl->mv_cache[list][scan8[0] - 1];

        ff_tlog(h->avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n",
                left_ref, A[0], A[1], sl->mb_x, sl->mb_y, n, list);

        if (left_ref == ref) {
            *mx = A[0];
            *my = A[1];
            return;
        }
    } else {
        const int16_t *C;
        int diagonal_ref;

        diagonal_ref = fetch_diagonal_mv(h, sl, &C, scan8[4], list, 2);

        ff_tlog(h->avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n",
                diagonal_ref, C[0], C[1], sl->mb_x, sl->mb_y, n, list);

        if (diagonal_ref == ref) {
            *mx = C[0];
            *my = C[1];
            return;
        }
    }

    // RARE
    pred_motion(h, sl, n, 2, list, ref, mx, my);
}
#define FIX_MV_MBAFF(type, refn, mvn, idx)      \
    if (FRAME_MBAFF(h)) {                       \
        if (MB_FIELD(sl)) {                     \
            if (!IS_INTERLACED(type)) {         \
                refn <<= 1;                     \
                AV_COPY32(mvbuf[idx], mvn);     \
                mvbuf[idx][1] /= 2;             \
                mvn = mvbuf[idx];               \
            }                                   \
        } else {                                \
            if (IS_INTERLACED(type)) {          \
                refn >>= 1;                     \
                AV_COPY32(mvbuf[idx], mvn);     \
                mvbuf[idx][1] *= 2;             \
                mvn = mvbuf[idx];               \
            }                                   \
        }                                       \
    }
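/* A field macroblock covers twice the vertical picture area of a frame
 * macroblock, so when the current MB and its neighbour differ in coding
 * mode the neighbour's reference index and vertical MV must be rescaled:
 * frame -> field doubles the ref (each field of a frame reference counts
 * separately) and halves the vertical MV; field -> frame is the inverse. */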
static av_always_inline void pred_pskip_motion(const H264Context *const h,
                                               H264SliceContext *sl)
{
    DECLARE_ALIGNED(4, static const int16_t, zeromv)[2] = { 0 };
    DECLARE_ALIGNED(4, int16_t, mvbuf)[3][2];
    int8_t *ref     = h->cur_pic.ref_index[0];
    int16_t(*mv)[2] = h->cur_pic.motion_val[0];
    int top_ref, left_ref, diagonal_ref, match_count, mx, my;
    const int16_t *A, *B, *C;
    int b_stride = h->b_stride;
    ff_tlog(h->avctx, "pred_pskip: (%d) (%d) at %2d %2d\n",
            top_ref, left_ref, sl->mb_x, sl->mb_y);
    match_count = !diagonal_ref + !top_ref + !left_ref;
    ff_tlog(h->avctx, "pred_pskip_motion match_count=%d\n", match_count);
    if (match_count > 1) {
        mx = mid_pred(A[0], B[0], C[0]);
        my = mid_pred(A[1], B[1], C[1]);
    } else if (match_count == 1) {
        if (!left_ref) {
            mx = A[0];
            my = A[1];
        } else if (!top_ref) {
            mx = B[0];
            my = B[1];
        } else {
            mx = C[0];
            my = C[1];
        }
    } else {
        mx = mid_pred(A[0], B[0], C[0]);
        my = mid_pred(A[1], B[1], C[1]);
    }

    fill_rectangle(sl->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mx, my), 4);
}
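/* P_SKIP prediction reuses the 16x16 median machinery, but the candidates
 * are compared against reference index 0 (hence the !ref tests); separate
 * early-exit paths, not shown in this excerpt, force a zero MV when the
 * left or top neighbour is unavailable or is itself a zero-MV ref-0 block. */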
static void fill_decode_neighbors(const H264Context *h, H264SliceContext *sl, int mb_type)
{
    const int mb_xy = sl->mb_xy;
    int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS];
    static const uint8_t left_block_options[4][32] = {
        { 0, 1, 2, 3, 7, 10, 8, 11, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4, 3 + 3 * 4, 1 + 4 * 4, 1 + 8 * 4, 1 + 5 * 4, 1 + 9 * 4 },
        { 2, 2, 3, 3, 8, 11, 8, 11, 3 + 2 * 4, 3 + 2 * 4, 3 + 3 * 4, 3 + 3 * 4, 1 + 5 * 4, 1 + 9 * 4, 1 + 5 * 4, 1 + 9 * 4 },
        { 0, 0, 1, 1, 7, 10, 7, 10, 3 + 0 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 1 * 4, 1 + 4 * 4, 1 + 8 * 4, 1 + 4 * 4, 1 + 8 * 4 },
        { 0, 2, 0, 2, 7, 10, 7, 10, 3 + 0 * 4, 3 + 2 * 4, 3 + 0 * 4, 3 + 2 * 4, 1 + 4 * 4, 1 + 8 * 4, 1 + 4 * 4, 1 + 8 * 4 }
    };
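/* The four rows appear to select which 4x4 sub-blocks of the left
 * neighbour feed the caches, one row per frame/field pairing that MBAFF
 * allows (row 0 is the plain same-polarity case); sl->left_block is
 * pointed at the matching row in the code below. */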
    top_xy = mb_xy - (h->mb_stride << MB_FIELD(sl));

    topleft_xy    = top_xy - 1;
    topright_xy   = top_xy + 1;
    left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
    sl->left_block = left_block_options[0];
    if (FRAME_MBAFF(h)) {
        const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
        const int curr_mb_field_flag = IS_INTERLACED(mb_type);
        if (sl->mb_y & 1) {
            if (left_mb_field_flag != curr_mb_field_flag) {
                left_xy[LBOT] = left_xy[LTOP] = mb_xy - h->mb_stride - 1;
                if (curr_mb_field_flag) {
                    left_xy[LBOT] += h->mb_stride;
                    sl->left_block = left_block_options[3];
                } else {
                    topleft_xy += h->mb_stride;
                    /* take top-left mv from the middle of the MB, as opposed
                     * to all other modes which use the bottom-right partition */
                    sl->left_block = left_block_options[1];
                }
            }
        } else {
            if (curr_mb_field_flag) {
                topleft_xy  += h->mb_stride & (((h->cur_pic.mb_type[top_xy - 1] >> 7) & 1) - 1);
                topright_xy += h->mb_stride & (((h->cur_pic.mb_type[top_xy + 1] >> 7) & 1) - 1);
                top_xy      += h->mb_stride & (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
            }
            if (left_mb_field_flag != curr_mb_field_flag) {
                if (curr_mb_field_flag) {
                    left_xy[LBOT] += h->mb_stride;
                    sl->left_block = left_block_options[3];
                } else {
                    sl->left_block = left_block_options[2];
                }
            }
        }
    }
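/* MB_TYPE_INTERLACED is bit 7 of the mb_type flags, so
 * ((mb_type >> 7) & 1) - 1 evaluates to 0 for a field MB and to -1 (all
 * ones) for a frame MB; masking h->mb_stride with it therefore advances
 * one MB row only when the neighbour above is frame-coded. */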
    sl->topleft_type    = h->cur_pic.mb_type[topleft_xy];
    sl->top_type        = h->cur_pic.mb_type[top_xy];
    sl->topright_type   = h->cur_pic.mb_type[topright_xy];
    sl->left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
    sl->left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];

    if (FMO) {
        if (h->slice_table[topleft_xy] != sl->slice_num)
            sl->topleft_type = 0;
        if (h->slice_table[top_xy] != sl->slice_num)
            sl->top_type = 0;
        if (h->slice_table[left_xy[LTOP]] != sl->slice_num)
            sl->left_type[LTOP] = sl->left_type[LBOT] = 0;
    } else {
        if (h->slice_table[topleft_xy] != sl->slice_num) {
            sl->topleft_type = 0;
            if (h->slice_table[top_xy] != sl->slice_num)
                sl->top_type = 0;
            if (h->slice_table[left_xy[LTOP]] != sl->slice_num)
                sl->left_type[LTOP] = sl->left_type[LBOT] = 0;
        }
    }
    if (h->slice_table[topright_xy] != sl->slice_num)
        sl->topright_type = 0;
}
static void fill_decode_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
{
    int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS];
    int topleft_type, top_type, topright_type, left_type[LEFT_MBS];
    const uint8_t *left_block = sl->left_block;
    int i;
    uint8_t *nnz;
    uint8_t *nnz_cache;
    if (IS_INTRA(mb_type)) {
        int type_mask = h->ps.pps->constrained_intra_pred ? IS_INTRA(-1) : -1;
        sl->topleft_samples_available    =
            sl->top_samples_available    =
            sl->left_samples_available   = 0xFFFF;
        sl->topright_samples_available   = 0xEEEA;
        if (!(top_type & type_mask)) {
            sl->topleft_samples_available  = 0xB3FF;
            sl->top_samples_available      = 0x33FF;
            sl->topright_samples_available = 0x26EA;
        }
        if (IS_INTERLACED(mb_type) != IS_INTERLACED(left_type[LTOP])) {
            if (IS_INTERLACED(mb_type)) {
                if (!(left_type[LTOP] & type_mask)) {
                    sl->topleft_samples_available &= 0xDFFF;
                    sl->left_samples_available    &= 0x5FFF;
                }
                if (!(left_type[LBOT] & type_mask)) {
                    sl->topleft_samples_available &= 0xFF5F;
                    sl->left_samples_available    &= 0xFF5F;
                }
            } else {
                int left_typei = h->cur_pic.mb_type[left_xy[LTOP] + h->mb_stride];
                if (!((left_typei & type_mask) && (left_type[LTOP] & type_mask))) {
                    sl->topleft_samples_available &= 0xDF5F;
                    sl->left_samples_available    &= 0x5F5F;
                }
            }
        } else {
            if (!(left_type[LTOP] & type_mask)) {
                sl->topleft_samples_available &= 0xDF5F;
                sl->left_samples_available    &= 0x5F5F;
            }
        }
        if (!(topleft_type & type_mask))
            sl->topleft_samples_available &= 0x7FFF;
        if (!(topright_type & type_mask))
            sl->topright_samples_available &= 0xFBFF;
            for (i = 0; i < 2; i++) {
    nnz_cache = sl->non_zero_count_cache;
    if (top_type) {
        nnz = h->non_zero_count[top_xy];
        AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[4 * 3]);
        if (!h->chroma_y_shift) {
            AV_COPY32(&nnz_cache[4 + 8 * 5], &nnz[4 * 7]);
            AV_COPY32(&nnz_cache[4 + 8 * 10], &nnz[4 * 11]);
        } else {
            AV_COPY32(&nnz_cache[4 + 8 * 5], &nnz[4 * 5]);
            AV_COPY32(&nnz_cache[4 + 8 * 10], &nnz[4 * 9]);
        }
    } else {
        uint32_t top_empty = CABAC(h) && !IS_INTRA(mb_type) ? 0 : 0x40404040;
        AV_WN32A(&nnz_cache[4 + 8 * 0], top_empty);
        AV_WN32A(&nnz_cache[4 + 8 * 5], top_empty);
        AV_WN32A(&nnz_cache[4 + 8 * 10], top_empty);
    }
    for (i = 0; i < 2; i++) {
        if (left_type[LEFT(i)]) {
            nnz = h->non_zero_count[left_xy[LEFT(i)]];
            nnz_cache[3 + 8 * 1 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i]];
            nnz_cache[3 + 8 * 2 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i]];
            if (CHROMA444(h)) {
                nnz_cache[3 + 8 *  6 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] + 4 * 4];
                nnz_cache[3 + 8 *  7 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] + 4 * 4];
                nnz_cache[3 + 8 * 11 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] + 8 * 4];
                nnz_cache[3 + 8 * 12 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] + 8 * 4];
            } else if (CHROMA422(h)) {
                nnz_cache[3 + 8 *  6 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] - 2 + 4 * 4];
                nnz_cache[3 + 8 *  7 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] - 2 + 4 * 4];
                nnz_cache[3 + 8 * 11 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] - 2 + 8 * 4];
                nnz_cache[3 + 8 * 12 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] - 2 + 8 * 4];
            } else {
                nnz_cache[3 + 8 *  6 + 8 * i] = nnz[left_block[8 + 4 + 2 * i]];
                nnz_cache[3 + 8 * 11 + 8 * i] = nnz[left_block[8 + 5 + 2 * i]];
            }
        } else {
            nnz_cache[3 + 8 *  1 + 2 * 8 * i] =
            nnz_cache[3 + 8 *  2 + 2 * 8 * i] =
            nnz_cache[3 + 8 *  6 + 2 * 8 * i] =
            nnz_cache[3 + 8 *  7 + 2 * 8 * i] =
            nnz_cache[3 + 8 * 11 + 2 * 8 * i] =
            nnz_cache[3 + 8 * 12 + 2 * 8 * i] = CABAC(h) && !IS_INTRA(mb_type) ? 0 : 64;
        }
    }
    if (CABAC(h)) {
        if (left_type[LTOP]) {
            sl->left_cbp =   (h->cbp_table[left_xy[LTOP]] & 0x7F0) |
                            ((h->cbp_table[left_xy[LTOP]] >> (left_block[0] & (~1))) & 2) |
                           (((h->cbp_table[left_xy[LBOT]] >> (left_block[2] & (~1))) & 2) << 2);
        } else {
            sl->left_cbp = IS_INTRA(mb_type) ? 0x7CF : 0x00F;
        }
    }
    if (IS_INTER(mb_type) || (IS_DIRECT(mb_type) && sl->direct_spatial_mv_pred)) {
        int list;
        int b_stride = h->b_stride;
        for (list = 0; list < sl->list_count; list++) {
            int8_t *ref_cache = &sl->ref_cache[list][scan8[0]];
            int8_t *ref       = h->cur_pic.ref_index[list];
            int16_t(*mv_cache)[2] = &sl->mv_cache[list][scan8[0]];
            int16_t(*mv)[2]       = h->cur_pic.motion_val[list];
            if (!USES_LIST(mb_type, list))
                continue;

            if (USES_LIST(top_type, list)) {
                const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
                AV_COPY128(mv_cache[0 - 1 * 8], mv[b_xy + 0]);
                ref_cache[0 - 1 * 8] =
                ref_cache[1 - 1 * 8] = ref[4 * top_xy + 2];
                ref_cache[2 - 1 * 8] =
                ref_cache[3 - 1 * 8] = ref[4 * top_xy + 3];
            } else {
                AV_ZERO128(mv_cache[0 - 1 * 8]);
                AV_WN32A(&ref_cache[0 - 1 * 8],
                         ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE) & 0xFF) * 0x01010101u);
            }
            for (i = 0; i < 2; i++) {
                int cache_idx = -1 + i * 2 * 8;
                if (USES_LIST(left_type[LEFT(i)], list)) {
                    const int b_xy  = h->mb2b_xy[left_xy[LEFT(i)]] + 3;
                    const int b8_xy = 4 * left_xy[LEFT(i)] + 1;
                    AV_COPY32(mv_cache[cache_idx],
                              mv[b_xy + b_stride * left_block[0 + i * 2]]);
                    AV_COPY32(mv_cache[cache_idx + 8],
                              mv[b_xy + b_stride * left_block[1 + i * 2]]);
                    ref_cache[cache_idx]     = ref[b8_xy + (left_block[0 + i * 2] & ~1)];
                    ref_cache[cache_idx + 8] = ref[b8_xy + (left_block[1 + i * 2] & ~1)];
                } else {
                    AV_ZERO32(mv_cache[cache_idx]);
                    AV_ZERO32(mv_cache[cache_idx + 8]);
                    ref_cache[cache_idx]     =
                    ref_cache[cache_idx + 8] = left_type[LEFT(i)] ? LIST_NOT_USED
                                                                  : PART_NOT_AVAILABLE;
                }
            }
            if (USES_LIST(left_type[LTOP], list)) {
                const int b_xy  = h->mb2b_xy[left_xy[LTOP]] + 3;
                const int b8_xy = 4 * left_xy[LTOP] + 1;
                AV_COPY32(mv_cache[-1], mv[b_xy + b_stride * left_block[0]]);
                ref_cache[-1] = ref[b8_xy + (left_block[0] & ~1)];
            } else {
                AV_ZERO32(mv_cache[-1]);
                ref_cache[-1] = left_type[LTOP] ? LIST_NOT_USED
                                                : PART_NOT_AVAILABLE;
            }
            if (USES_LIST(topright_type, list)) {
                const int b_xy = h->mb2b_xy[topright_xy] + 3 * b_stride;
                AV_COPY32(mv_cache[4 - 1 * 8], mv[b_xy]);
                ref_cache[4 - 1 * 8] = ref[4 * topright_xy + 2];
            } else {
                AV_ZERO32(mv_cache[4 - 1 * 8]);
                ref_cache[4 - 1 * 8] = topright_type ? LIST_NOT_USED
                                                     : PART_NOT_AVAILABLE;
            }
            if (ref_cache[2 - 1 * 8] < 0 || ref_cache[4 - 1 * 8] < 0) {
                if (USES_LIST(topleft_type, list)) {
                    const int b_xy  = h->mb2b_xy[topleft_xy] + 3 + b_stride +
                                      (sl->topleft_partition & 2);
                    const int b8_xy = 4 * topleft_xy + 1 + (sl->topleft_partition & 1);
                    AV_COPY32(mv_cache[-1 - 1 * 8], mv[b_xy]);
                    ref_cache[-1 - 1 * 8] = ref[b8_xy];
                } else {
                    AV_ZERO32(mv_cache[-1 - 1 * 8]);
                    ref_cache[-1 - 1 * 8] = topleft_type ? LIST_NOT_USED
                                                         : PART_NOT_AVAILABLE;
                }
            }
            ref_cache[2 + 8 * 0] =
            ref_cache[2 + 8 * 2] = PART_NOT_AVAILABLE;
            if (CABAC(h)) {
                uint8_t(*mvd_cache)[2] = &sl->mvd_cache[list][scan8[0]];
                uint8_t(*mvd)[2]       = sl->mvd_table[list];
                if (USES_LIST(top_type, list)) {
                    const int b_xy = h->mb2br_xy[top_xy];
                    AV_COPY64(mvd_cache[0 - 1 * 8], mvd[b_xy + 0]);
                } else {
                    AV_ZERO64(mvd_cache[0 - 1 * 8]);
                }
                if (USES_LIST(left_type[LTOP], list)) {
                    const int b_xy = h->mb2br_xy[left_xy[LTOP]] + 6;
                    AV_COPY16(mvd_cache[-1 + 0 * 8], mvd[b_xy - left_block[0]]);
                    AV_COPY16(mvd_cache[-1 + 1 * 8], mvd[b_xy - left_block[1]]);
                } else {
                    AV_ZERO16(mvd_cache[-1 + 0 * 8]);
                    AV_ZERO16(mvd_cache[-1 + 1 * 8]);
                }
                if (USES_LIST(left_type[LBOT], list)) {
                    const int b_xy = h->mb2br_xy[left_xy[LBOT]] + 6;
                    AV_COPY16(mvd_cache[-1 + 2 * 8], mvd[b_xy - left_block[2]]);
                    AV_COPY16(mvd_cache[-1 + 3 * 8], mvd[b_xy - left_block[3]]);
                } else {
                    AV_ZERO16(mvd_cache[-1 + 2 * 8]);
                    AV_ZERO16(mvd_cache[-1 + 3 * 8]);
                }
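/* mvd is the coded motion vector difference; CABAC selects the context
 * for the next mvd bin from the magnitudes of the top and left
 * neighbours' mvd values, which is why they are cached here alongside the
 * reconstructed MVs. */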
                if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
                    uint8_t *direct_cache = &sl->direct_cache[scan8[0]];
                    uint8_t *direct_table = h->direct_table;

                    if (IS_DIRECT(top_type)) {
                        AV_WN32A(&direct_cache[-1 * 8],
                                 0x01010101u * (MB_TYPE_DIRECT2 >> 1));
                    } else if (IS_8X8(top_type)) {
                        int b8_xy = 4 * top_xy;
                        direct_cache[0 - 1 * 8] = direct_table[b8_xy + 2];
                        direct_cache[2 - 1 * 8] = direct_table[b8_xy + 3];
                    } else {
                        AV_WN32A(&direct_cache[-1 * 8],
                                 0x01010101 * (MB_TYPE_16x16 >> 1));
                    }

                    if (IS_DIRECT(left_type[LTOP]))
                        direct_cache[-1 + 0 * 8] = MB_TYPE_DIRECT2 >> 1;
                    else if (IS_8X8(left_type[LTOP]))
                        direct_cache[-1 + 0 * 8] = direct_table[4 * left_xy[LTOP] + 1 + (left_block[0] & ~1)];
                    else
                        direct_cache[-1 + 0 * 8] = MB_TYPE_16x16 >> 1;

                    if (IS_DIRECT(left_type[LBOT]))
                        direct_cache[-1 + 2 * 8] = MB_TYPE_DIRECT2 >> 1;
                    else if (IS_8X8(left_type[LBOT]))
                        direct_cache[-1 + 2 * 8] = direct_table[4 * left_xy[LBOT] + 1 + (left_block[2] & ~1)];
                    else
                        direct_cache[-1 + 2 * 8] = MB_TYPE_16x16 >> 1;
                }
#define MAP_MVS                                         \
    MAP_F2F(scan8[0] - 1 - 1 * 8, topleft_type)         \
    MAP_F2F(scan8[0] + 0 - 1 * 8, top_type)             \
    MAP_F2F(scan8[0] + 1 - 1 * 8, top_type)             \
    MAP_F2F(scan8[0] + 2 - 1 * 8, top_type)             \
    MAP_F2F(scan8[0] + 3 - 1 * 8, top_type)             \
    MAP_F2F(scan8[0] + 4 - 1 * 8, topright_type)        \
    MAP_F2F(scan8[0] - 1 + 0 * 8, left_type[LTOP])      \
    MAP_F2F(scan8[0] - 1 + 1 * 8, left_type[LTOP])      \
    MAP_F2F(scan8[0] - 1 + 2 * 8, left_type[LBOT])      \
    MAP_F2F(scan8[0] - 1 + 3 * 8, left_type[LBOT])
            if (FRAME_MBAFF(h)) {
                if (MB_FIELD(sl)) {
#define MAP_F2F(idx, mb_type)                                           \
    if (!IS_INTERLACED(mb_type) && sl->ref_cache[list][idx] >= 0) {     \
        sl->ref_cache[list][idx]    *= 2;                               \
        sl->mv_cache[list][idx][1]  /= 2;                               \
        sl->mvd_cache[list][idx][1] >>= 1;                              \
    }
                    MAP_MVS
#undef MAP_F2F
                } else {
#define MAP_F2F(idx, mb_type)                                           \
    if (IS_INTERLACED(mb_type) && sl->ref_cache[list][idx] >= 0) {      \
        sl->ref_cache[list][idx]    >>= 1;                              \
        sl->mv_cache[list][idx][1]  *= 2;                               \
        sl->mvd_cache[list][idx][1] <<= 1;                              \
    }
                    MAP_MVS
#undef MAP_F2F
                }
            }
        }
    }
}
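/* Worked example of the mapping above (hypothetical values): a frame-coded
 * neighbour with ref = 1 and mv = (3, 6), seen from a field macroblock,
 * becomes ref = 2 (each field of the frame reference is addressed
 * separately) and mv = (3, 3), since one field line spans two frame lines. */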
/**
 * decodes a P_SKIP or B_SKIP macroblock
 */
static void av_unused decode_mb_skip(const H264Context *h, H264SliceContext *sl)
{
    const int mb_xy = sl->mb_xy;
    int mb_type     = 0;

    memset(h->non_zero_count[mb_xy], 0, 48);

    h->cur_pic.mb_type[mb_xy]      = mb_type;
    h->cur_pic.qscale_table[mb_xy] = sl->qscale;
}

#endif /* AVCODEC_H264_MVPRED_H */