#define FULLPEL_MODE  1
#define HALFPEL_MODE  2
#define THIRDPEL_MODE 3
#define PREDICT_MODE  4
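/* Motion-compensation modes used by the SVQ3 decoder: full-, half- and
 * third-pel precision, plus a PREDICT ("direct"-style) mode whose vector is
 * scaled from the co-located block of the next frame.  The tables that follow
 * appear to be the bodies of svq3_scan (the dual scan order),
 * luma_dc_zigzag_scan, svq3_pred_0 and svq3_pred_1; their declarations are
 * elided in this excerpt. */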
    0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
    2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
    0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
    0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,

    0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
    3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
    1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
    3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
    { 0, 2 }, { 1, 1 }, { 2, 0 },
    { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
    { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
    { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
    { 2, 4 }, { 3, 3 }, { 4, 2 },
    { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
      { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
      { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
      { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
      { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
static const struct {
    uint8_t run;
    uint8_t level;
} svq3_dct_tables[2][16] = {
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
      { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
      { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
};
     3881,  4351,  4890,  5481,   6154,   6914,   7761,   8718,
     9781, 10987, 12339, 13828,  15523,  17435,  19561,  21873,
    24552, 27656, 30847, 34870,  38807,  43747,  49103,  54683,
    61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
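/* The 32 values above appear to be svq3_dequant_coeff[], one dequantizer
 * scale per qscale value.  The two loops below are from
 * svq3_luma_dc_dequant_idct_c(): a row pass and a column pass of SVQ3's
 * 13/7/17 integer butterfly, with the dequantizer (qmul) folded into the
 * final 0x80000-biased shift down by 20 bits. */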
    for (i = 0; i < 4; i++) {
        const int z0 = 13 * (input[4 * i + 0] +      input[4 * i + 2]);
        const int z1 = 13 * (input[4 * i + 0] -      input[4 * i + 2]);
        const int z2 =  7 *  input[4 * i + 1] - 17 * input[4 * i + 3];
        const int z3 = 17 *  input[4 * i + 1] +  7 * input[4 * i + 3];

        temp[4 * i + 0] = z0 + z3;
        temp[4 * i + 1] = z1 + z2;
        temp[4 * i + 2] = z1 - z2;
        temp[4 * i + 3] = z0 - z3;
    }
    for (i = 0; i < 4; i++) {
        const int offset = x_offset[i];
        const int z0     = 13 * (temp[4 * 0 + i] +      temp[4 * 2 + i]);
        const int z1     = 13 * (temp[4 * 0 + i] -      temp[4 * 2 + i]);
        const int z2     =  7 *  temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
        const int z3     = 17 *  temp[4 * 1 + i] +  7 * temp[4 * 3 + i];

        output[stride *  0 + offset] = (int)((z0 + z3) * qmul + 0x80000) >> 20;
        output[stride *  2 + offset] = (int)((z1 + z2) * qmul + 0x80000) >> 20;
        output[stride *  8 + offset] = (int)((z1 - z2) * qmul + 0x80000) >> 20;
        output[stride * 10 + offset] = (int)((z0 - z3) * qmul + 0x80000) >> 20;
    }
        dc = 13 * 13 * (dc == 1 ? 1538U * block[0]
                                : qmul * (block[0] >> 3) / 2);
    for (i = 0; i < 4; i++) {
        const int z0 = 13 * (block[0 + 4 * i] +      block[2 + 4 * i]);
        const int z1 = 13 * (block[0 + 4 * i] -      block[2 + 4 * i]);
        const int z2 =  7 *  block[1 + 4 * i] - 17 * block[3 + 4 * i];
        const int z3 = 17 *  block[1 + 4 * i] +  7 * block[3 + 4 * i];

        block[0 + 4 * i] = z0 + z3;
        block[1 + 4 * i] = z1 + z2;
        block[2 + 4 * i] = z1 - z2;
        block[3 + 4 * i] = z0 - z3;
    }
    for (i = 0; i < 4; i++) {
        const unsigned z0 = 13 * (block[i + 4 * 0] +      block[i + 4 * 2]);
        const unsigned z1 = 13 * (block[i + 4 * 0] -      block[i + 4 * 2]);
        const unsigned z2 =  7 *  block[i + 4 * 1] - 17 * block[i + 4 * 3];
        const unsigned z3 = 17 *  block[i + 4 * 1] +  7 * block[i + 4 * 3];
        const int rr = (dc + 0x80000u);
    memset(block, 0, 16 * sizeof(int16_t));
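/* The memset() above ends svq3_add_idct_c(): the coefficient block is cleared
 * once its inverse transform has been added to the prediction.  The fragments
 * below are from svq3_decode_block(): coefficients are read as interleaved
 * Golomb run/level codes; small codes index svq3_dct_tables[], and larger
 * ones fall back to the escape formulas for run and level. */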
    static const uint8_t *const scan_patterns[4] = {

    const int intra = 3 * type >> 2;

    for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {

            sign = (vlc & 1) ? 0 : -1;

                } else if (vlc < 4) {

                    level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));

                    level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
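/* Motion-vector prediction, modelled on H.264: svq3_fetch_diagonal_mv() picks
 * the top-right neighbour and falls back to the top-left one when it is not
 * available, and svq3_pred_motion() medians the left/top/diagonal candidates
 * unless exactly one of them uses the requested reference. */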
static av_always_inline int svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C,
                                                   int i, int list, int part_width)
{
    const int topright_ref = s->ref_cache[list][i - 8 + part_width];

    if (topright_ref != PART_NOT_AVAILABLE) {
        *C = s->mv_cache[list][i - 8 + part_width];
        return topright_ref;
    } else {
        *C = s->mv_cache[list][i - 8 - 1];
        return s->ref_cache[list][i - 8 - 1];
    }
}
static av_always_inline void svq3_pred_motion(const SVQ3Context *s, int n,
                                              int part_width, int list,
                                              int ref, int *const mx, int *const my)
{
    const int index8       = scan8[n];
    const int top_ref      = s->ref_cache[list][index8 - 8];
    const int left_ref     = s->ref_cache[list][index8 - 1];
    const int16_t *const A = s->mv_cache[list][index8 - 1];
    const int16_t *const B = s->mv_cache[list][index8 - 8];
    int diagonal_ref, match_count;

    match_count = (diagonal_ref == ref) + (top_ref == ref) + (left_ref == ref);
    if (match_count > 1) {
    } else if (match_count == 1) {
        if (left_ref == ref) {
        } else if (top_ref == ref) {
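/* svq3_mc_dir_part() performs the motion compensation for one partition:
 * dir selects the reference (last_pic for forward prediction, next_pic
 * otherwise), sources that fall outside the frame go through
 * emulated_edge_mc(), and thirdpel switches between the tpel and the ordinary
 * hpel put/avg routines; chroma is handled by the loop over planes 1 and 2. */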
static void svq3_mc_dir_part(SVQ3Context *s,
                             int x, int y, int width, int height,
                             int mx, int my, int dxy,
                             int thirdpel, int dir, int avg)
{
    const SVQ3Frame *pic = (dir == 0) ? s->last_pic : s->next_pic;

    int blocksize  = 2 - (width >> 3);
    int linesize   = s->cur_pic->f->linesize[0];
    int uvlinesize = s->cur_pic->f->linesize[1];

    if (mx < 0 || mx >= s->h_edge_pos - width  - 1 ||
        my < 0 || my >= s->v_edge_pos - height - 1) {

    dest = s->cur_pic->f->data[0] + x + y * linesize;
    src  = pic->f->data[0] + mx + my * linesize;
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
                                 linesize, linesize,
                                 width + 1, height + 1,
                                 mx, my, s->h_edge_pos, s->v_edge_pos);
        src = s->edge_emu_buffer;
        (avg ? s->tdsp.avg_tpel_pixels_tab
             : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, linesize,

        (avg ? s->hdsp.avg_pixels_tab
             : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, linesize,

        mx = mx + (mx < (int) x) >> 1;
        my = my + (my < (int) y) >> 1;
        for (i = 1; i < 3; i++) {
            dest = s->cur_pic->f->data[i] + (x >> 1) + (y >> 1) * uvlinesize;
            src  = pic->f->data[i] + mx + my * uvlinesize;
                s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
                                         uvlinesize, uvlinesize,
                                         width + 1, height + 1,
                                         mx, my, (s->h_edge_pos >> 1),
                                         s->v_edge_pos >> 1);
                src = s->edge_emu_buffer;
            (avg ? s->tdsp.avg_tpel_pixels_tab
                 : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,

            (avg ? s->hdsp.avg_pixels_tab
                 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
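/* svq3_mc_dir() walks the macroblock in part_width x part_height steps.
 * Motion vectors are kept in sixths of a pel: the predictor is clipped to the
 * frame, a coded differential is added, and the result is rounded to third-,
 * half- or full-pel precision (the 0x30000 / 0x60000 biases keep the unsigned
 * divisions by 3 and 6 well defined for negative vectors) before
 * svq3_mc_dir_part() is invoked. */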
    int i, j, k, mx, my, dx, dy, x, y;
    const int part_width  = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
    const int part_height = 16 >> ((unsigned)(size + 1) / 3);

    const int h_edge_pos  = 6 * (s->h_edge_pos - part_width)  - extra_width;
    const int v_edge_pos  = 6 * (s->v_edge_pos - part_height) - extra_width;
    for (i = 0; i < 16; i += part_height)
        for (j = 0; j < 16; j += part_width) {
            const int b_xy = (4 * s->mb_x + (j >> 2)) +
                             (4 * s->mb_y + (i >> 2)) * s->b_stride;

            x = 16 * s->mb_x + j;
            y = 16 * s->mb_y + i;
            k = (j >> 2 & 1) + (i >> 1 & 2) +
                (j >> 1 & 4) + (i & 8);
                mx = s->next_pic->motion_val[0][b_xy][0] * 2;
                my = s->next_pic->motion_val[0][b_xy][1] * 2;

                    mx = mx * s->frame_num_offset /
                         s->prev_frame_num_offset + 1 >> 1;
                    my = my * s->frame_num_offset /
                         s->prev_frame_num_offset + 1 >> 1;

                    mx = mx * (s->frame_num_offset - s->prev_frame_num_offset) /
                         s->prev_frame_num_offset + 1 >> 1;
                    my = my * (s->frame_num_offset - s->prev_frame_num_offset) /
                         s->prev_frame_num_offset + 1 >> 1;
            mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
            my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);

                if (dx != (int16_t)dx || dy != (int16_t)dy) {

                mx  = (mx + 1 >> 1) + dx;
                my  = (my + 1 >> 1) + dy;
                fx  = (unsigned)(mx + 0x30000) / 3 - 0x10000;
                fy  = (unsigned)(my + 0x30000) / 3 - 0x10000;
                dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 fx, fy, dxy, 1, dir, avg);

                mx  = (unsigned)(mx + 1 + 0x30000) / 3 + dx - 0x10000;
                my  = (unsigned)(my + 1 + 0x30000) / 3 + dy - 0x10000;
                dxy = (mx & 1) + 2 * (my & 1);

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 mx >> 1, my >> 1, dxy, 0, dir, avg);

                mx = (unsigned)(mx + 3 + 0x60000) / 6 + dx - 0x10000;
                my = (unsigned)(my + 3 + 0x60000) / 6 + dy - 0x10000;

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 mx, my, 0, 0, dir, avg);
            if (part_height == 8 && i < 8) {

                if (part_width == 8 && j < 8)

            if (part_width == 8 && j < 8)

            if (part_width == 4 || part_height == 4)

                           part_width >> 2, part_height >> 2, s->b_stride,
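/* Luma reconstruction helpers: hl_decode_mb_idct_luma() adds the inverse
 * transform of every 4x4 block that has coded coefficients, and
 * hl_decode_mb_predict_luma() first runs the 4x4 (or 16x16) intra predictors
 * and then adds the residual on top. */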
static av_always_inline void hl_decode_mb_idct_luma(SVQ3Context *s, int mb_type,
                                                    const int *block_offset,
                                                    int linesize, uint8_t *dest_y)
{
        for (i = 0; i < 16; i++)
            if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
                uint8_t *const ptr = dest_y + block_offset[i];
static av_always_inline void hl_decode_mb_predict_luma(SVQ3Context *s, int mb_type,
                                                       const int *block_offset,
                                                       int linesize, uint8_t *dest_y)
{
    int qscale = s->qscale;

        for (i = 0; i < 16; i++) {
            uint8_t *const ptr = dest_y + block_offset[i];
            const int dir      = s->intra4x4_pred_mode_cache[scan8[i]];

            const int topright_avail = (s->topright_samples_available << i) & 0x8000;

            if (!topright_avail) {
                tr = ptr[3 - linesize] * 0x01010101u;

                topright = ptr + 4 - linesize;

            s->hpc.pred4x4[dir](ptr, topright, linesize);
            nnz = s->non_zero_count_cache[scan8[i]];

        s->hpc.pred16x16[s->intra16x16_pred_mode](dest_y, linesize);
    const int mb_x    = s->mb_x;
    const int mb_y    = s->mb_y;
    const int mb_xy   = s->mb_xy;
    const int mb_type = s->cur_pic->mb_type[mb_xy];
    uint8_t *dest_y, *dest_cb, *dest_cr;
    int linesize, uvlinesize;

    const int *block_offset = &s->block_offset[0];
    const int block_h       = 16 >> 1;

    linesize   = s->cur_pic->f->linesize[0];
    uvlinesize = s->cur_pic->f->linesize[1];

    dest_y  = s->cur_pic->f->data[0] + (mb_x + mb_y * linesize) * 16;
    dest_cb = s->cur_pic->f->data[1] + mb_x * 8 + mb_y * uvlinesize * block_h;
    dest_cr = s->cur_pic->f->data[2] + mb_x * 8 + mb_y * uvlinesize * block_h;

    s->vdsp.prefetch(dest_y + (s->mb_x & 3) * 4 * linesize + 64, linesize, 4);
    s->vdsp.prefetch(dest_cb + (s->mb_x & 7) * uvlinesize + 64, dest_cr - dest_cb, 2);

        s->hpc.pred8x8[s->chroma_pred_mode](dest_cb, uvlinesize);
        s->hpc.pred8x8[s->chroma_pred_mode](dest_cr, uvlinesize);
            uint8_t *dest[2] = { dest_cb, dest_cr };
            s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 1,
                                                   s->dequant4_coeff[4][0]);
            s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 2,
                                                   s->dequant4_coeff[4][0]);
            for (j = 1; j < 3; j++) {
                for (i = j * 16; i < j * 16 + 4; i++)
                    if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
                        uint8_t *const ptr = dest[j - 1] + block_offset[i];
    int i, j, k, m, dir, mode;

    const int mb_xy = s->mb_xy;
    const int b_xy  = 4 * s->mb_x + 4 * s->mb_y * s->b_stride;

    s->top_samples_available      = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
    s->left_samples_available     = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
    s->topright_samples_available = 0xFFFF;

            s->next_pic->mb_type[mb_xy] == -1) {

            mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);

    } else if (mb_type < 8) {
        if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&s->gb_slice))

        else if (s->halfpel_flag &&
        for (m = 0; m < 2; m++) {
            if (s->mb_x > 0 && s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6] != -1) {
                for (i = 0; i < 4; i++)

                              s->cur_pic->motion_val[m][b_xy - 1 + i * s->b_stride]);

                for (i = 0; i < 4; i++)

                memcpy(s->mv_cache[m][scan8[0] - 1 * 8],
                       s->cur_pic->motion_val[m][b_xy - s->b_stride],
                       4 * 2 * sizeof(int16_t));
                memset(&s->ref_cache[m][scan8[0] - 1 * 8],

                if (s->mb_x < s->mb_width - 1) {

                              s->cur_pic->motion_val[m][b_xy - s->b_stride + 4]);
                    s->ref_cache[m][scan8[0] + 4 - 1 * 8] =
                        (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride + 1] + 6] == -1 ||

                              s->cur_pic->motion_val[m][b_xy - s->b_stride - 1]);
                    s->ref_cache[m][scan8[0] - 1 - 1 * 8] =

                memset(&s->ref_cache[m][scan8[0] - 1 * 8 - 1],
            for (i = 0; i < 4; i++)
                memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
                       0, 4 * 2 * sizeof(int16_t));

            for (i = 0; i < 4; i++)
                memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
                       0, 4 * 2 * sizeof(int16_t));
    } else if (mb_type == 8 || mb_type == 33) {
        int8_t *i4x4       = s->intra4x4_pred_mode + s->mb2br_xy[s->mb_xy];
        int8_t *i4x4_cache = s->intra4x4_pred_mode_cache;

        memset(s->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));

            for (i = 0; i < 4; i++)
                s->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6 - i];
            if (s->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
                s->left_samples_available = 0x5F5F;

            s->intra4x4_pred_mode_cache[4 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 0];
            s->intra4x4_pred_mode_cache[5 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 1];
            s->intra4x4_pred_mode_cache[6 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 2];
            s->intra4x4_pred_mode_cache[7 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 3];

            if (s->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
                s->top_samples_available = 0x33FF;
            for (i = 0; i < 16; i += 2) {

                           "luma prediction:%"PRIu32"\n", vlc);

                left = &s->intra4x4_pred_mode_cache[scan8[i] - 1];
                top  = &s->intra4x4_pred_mode_cache[scan8[i] - 8];

                if (left[1] == -1 || left[2] == -1) {

            for (i = 0; i < 4; i++)
                memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);

        i4x4[4] = i4x4_cache[7 + 8 * 3];
        i4x4[5] = i4x4_cache[7 + 8 * 2];
        i4x4[6] = i4x4_cache[7 + 8 * 1];
                                               s->avctx, s->top_samples_available,
                                               s->left_samples_available);

        s->top_samples_available  = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
        s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;

        for (i = 0; i < 4; i++)

            s->top_samples_available  = 0x33FF;
            s->left_samples_available = 0x5F5F;

        dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;

                                                 s->left_samples_available, dir, 0)) < 0) {

            return s->intra16x16_pred_mode;
        for (i = 0; i < 4; i++)
            memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
                   0, 4 * 2 * sizeof(int16_t));

            for (i = 0; i < 4; i++)
                memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
                       0, 4 * 2 * sizeof(int16_t));

        memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy], DC_PRED, 8);

    memset(s->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
        if (s->qscale > 31u) {

                   "error while decoding intra luma dc\n");

        for (i = 0; i < 4; i++)
            if ((cbp & (1 << i))) {
                for (j = 0; j < 4; j++) {
                    k = index ? (1 * (j & 1) + 2 * (i & 1) +
                                 2 * (j & 2) + 4 * (i & 2))

                    s->non_zero_count_cache[scan8[k]] = 1;

                           "error while decoding block\n");

            for (i = 1; i < 3; ++i)

                           "error while decoding chroma dc block\n");

            for (i = 1; i < 3; i++) {
                for (j = 0; j < 4; j++) {

                    s->non_zero_count_cache[scan8[k]] = 1;

                               "error while decoding chroma ac block\n");

    s->cur_pic->mb_type[mb_xy] = mb_type;
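/* svq3_decode_slice_header() fragments: the slice length (whose byte count
 * comes from the header) is peeked from the bitstream, the payload is copied
 * into slice_buf and partly de-scrambled when a watermark key is set, and the
 * intra 4x4 prediction modes along the slice edges are reset to -1
 * ("unavailable"). */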
    const int mb_xy = s->mb_xy;

        int slice_bits, slice_bytes, slice_length;
        int length = header >> 5 & 3;

        slice_length = show_bits(&s->gb, 8 * length);
        slice_bits   = slice_length * 8;
        slice_bytes  = slice_length + length - 1;

        memcpy(s->slice_buf, s->gb.buffer + s->gb.index / 8, slice_bytes);

        if (s->watermark_key) {

        memmove(s->slice_buf, &s->slice_buf[slice_length], length - 1);

    if ((header & 0x9F) == 2) {
        i = (s->mb_num < 64) ? 6 : (1 + av_log2(s->mb_num - 1));

    if (s->has_watermark)

            memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - 1] + 3,
                   -1, 4 * sizeof(int8_t));
            memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_x],
                   -1, 8 * sizeof(int8_t) * s->mb_x);

            memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_stride],
                   -1, 8 * sizeof(int8_t) * (s->mb_width - s->mb_x));

            s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] = -1;
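/* init_dequant4_coeff_table() fills dequant4_coeff[] for every qp up to
 * max_qp; the (x >> 2) | ((x << 2) & 0xF) index simply transposes the 4x4
 * coefficient position. */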
    const int max_qp = 51;

    for (q = 0; q < max_qp + 1; q++) {

        for (x = 0; x < 16; x++)
            s->dequant4_coeff[q][(x >> 2) | ((x << 2) & 0xF)] =
    unsigned char *extradata;
    unsigned char *extradata_end;

    int marker_found = 0;

    s->cur_pic  = &s->frames[0];
    s->last_pic = &s->frames[1];
    s->next_pic = &s->frames[2];

    if (!s->cur_pic->f || !s->last_pic->f || !s->next_pic->f)

    s->halfpel_flag  = 1;
    s->thirdpel_flag = 1;
    s->has_watermark = 0;

    extradata = (unsigned char *)avctx->extradata;

    if (!memcmp(extradata, "SEQH", 4)) {

        int frame_size_code;
        int unk0, unk1, unk2, unk3, unk4;

        if (size > extradata_end - extradata - 8)

        frame_size_code = get_bits(&gb, 3);
        switch (frame_size_code) {

               unk0, unk1, unk2, unk3, unk4);
        if (s->has_watermark) {

            unsigned long buf_len = watermark_width *
                                    watermark_height * 4;

            if (watermark_height <= 0 ||
                (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)

                   watermark_width, watermark_height);

                   "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",

            if (uncompress(buf, &buf_len, extradata + 8 + offset,

                       "could not uncompress watermark logo\n");

            s->watermark_key = s->watermark_key << 16 | s->watermark_key;

                   "watermark key %#"PRIx32"\n", s->watermark_key);

                   "this svq3 file contains watermark which need zlib support compiled in\n");
    s->mb_width   = (avctx->width + 15) / 16;
    s->mb_height  = (avctx->height + 15) / 16;
    s->mb_stride  = s->mb_width + 1;
    s->mb_num     = s->mb_width * s->mb_height;
    s->b_stride   = 4 * s->mb_width;
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->intra4x4_pred_mode = av_mallocz(s->mb_stride * 2 * 8);
    if (!s->intra4x4_pred_mode)

    s->mb2br_xy = av_mallocz(s->mb_stride * (s->mb_height + 1) *
                             sizeof(*s->mb2br_xy));

    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++) {
            const int mb_xy = x + y * s->mb_stride;

            s->mb2br_xy[mb_xy] = 8 * (mb_xy % (2 * s->mb_stride));
    for (i = 0; i < 2; i++) {

    const int big_mb_num    = s->mb_stride * (s->mb_height + 1) + 1;
    const int b4_stride     = s->mb_width * 4 + 1;
    const int b4_array_size = b4_stride * s->mb_height * 4;

    for (i = 0; i < 2; i++) {

    if (!s->edge_emu_buffer) {

        if (!s->edge_emu_buffer)
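/* svq3_decode_frame() fragments: an empty packet flushes the delayed frame,
 * block_offset[] is precomputed from scan8[], missing reference pictures are
 * initialised to black (luma 0, chroma 0x80), and the main loop iterates
 * mb_y / mb_x over the picture, calling svq3_decode_mb() and drawing
 * horizontal bands as rows complete. */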
    int buf_size = avpkt->size;

    if (buf_size == 0) {
        if (s->next_pic->f->data[0] && !s->low_delay && !s->last_frame_output) {

            s->last_frame_output = 1;

    s->mb_x = s->mb_y = s->mb_xy = 0;

    if (s->watermark_key) {

        memcpy(s->buf, avpkt->data, buf_size);

    s->pict_type = s->slice_type;

    s->cur_pic->f->pict_type = s->pict_type;

    for (i = 0; i < 16; i++) {

        s->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
    for (i = 0; i < 16; i++) {
        s->block_offset[16 + i]      =
        s->block_offset[32 + i]      = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
        s->block_offset[48 + 16 + i] =
        s->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
    if (!s->last_pic->f->data[0]) {

        memset(s->last_pic->f->data[0], 0, avctx->height * s->last_pic->f->linesize[0]);
        memset(s->last_pic->f->data[1], 0x80, (avctx->height / 2) *
               s->last_pic->f->linesize[1]);
        memset(s->last_pic->f->data[2], 0x80, (avctx->height / 2) *
               s->last_pic->f->linesize[2]);

        memset(s->next_pic->f->data[0], 0, avctx->height * s->next_pic->f->linesize[0]);
        memset(s->next_pic->f->data[1], 0x80, (avctx->height / 2) *
               s->next_pic->f->linesize[1]);
        memset(s->next_pic->f->data[2], 0x80, (avctx->height / 2) *
               s->next_pic->f->linesize[2]);
               "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",

               s->halfpel_flag, s->thirdpel_flag,
               s->adaptive_quant, s->qscale, s->slice_num);

    if (s->next_p_frame_damaged) {

        s->next_p_frame_damaged = 0;

    s->frame_num_offset = s->slice_num - s->prev_frame_num;

    if (s->frame_num_offset < 0)
        s->frame_num_offset += 256;
    if (s->frame_num_offset == 0 ||
        s->frame_num_offset >= s->prev_frame_num_offset) {

    s->prev_frame_num        = s->frame_num;
    s->frame_num             = s->slice_num;
    s->prev_frame_num_offset = s->frame_num - s->prev_frame_num;

    if (s->prev_frame_num_offset < 0)
        s->prev_frame_num_offset += 256;
    for (m = 0; m < 2; m++) {

        for (i = 0; i < 4; i++) {

            for (j = -1; j < 4; j++)
                s->ref_cache[m][scan8[0] + 8 * i + j] = 1;

    for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
        for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {

            s->mb_xy = s->mb_x + s->mb_y * s->mb_stride;

            if (s->slice_type != s->pict_type) {

                       "error while decoding MB %d %d\n", s->mb_x, s->mb_y);

            if (mb_type != 0 || s->cbp)

            s->cur_pic->mb_type[s->mb_x + s->mb_y * s->mb_stride] =

                               s->last_pic->f->data[0] ? s->last_pic->f : NULL,

    if (s->mb_y != s->mb_height || s->mb_x != s->mb_width) {

    else if (s->last_pic->f->data[0])

    if (s->last_pic->f->data[0] || s->low_delay)