95 int counts[17] = {0}, codes[17];
104 codes[0] = counts[0] = 0;
105 for (
int i = 0;
i < 16;
i++) {
106 codes[
i+1] = (codes[
i] + counts[
i]) << 1;
130 for(j = 0; j < 2; j++){
137 for(k = 0; k < 4; k++){
142 for(j = 0; j < 4; j++){
153 for(j = 0; j < 4; j++){
157 for(j = 0; j < 2; j++){
182 int pattern,
code, cbp=0;
184 static const int cbp_masks[3] = {0x100000, 0x010000, 0x110000};
185 static const int shifts[4] = { 0, 2, 8, 10 };
186 const int *curshift =
shifts;
190 pattern =
code & 0xF;
200 for(
i = 0;
i < 4;
i++){
205 cbp |= cbp_masks[2] <<
i;
220 coef = 22 + ((1 << coef) |
get_bits(gb, coef));
226 *dst = (coef*q + 8) >> 4;
258 int q_dc,
int q_ac1,
int q_ac2)
281 int code, pattern, has_ac = 1;
285 pattern =
code & 0x7;
310 return has_ac | pattern;
325 for(
i = 0;
i < 5;
i++)
350 int mb_pos =
s->mb_x +
s->mb_y *
s->mb_stride;
358 fill_rectangle(intra_types, 4, 4,
r->intra_types_stride, t,
sizeof(intra_types[0]));
367 if(
r->decode_intra_types(
r, gb, intra_types) < 0)
385 int mb_pos =
s->mb_x +
s->mb_y *
s->mb_stride;
388 r->block_type =
r->decode_mb_info(
r);
389 if(
r->block_type == -1)
392 r->mb_type[mb_pos] =
r->block_type;
403 fill_rectangle(intra_types, 4, 4,
r->intra_types_stride, 0,
sizeof(intra_types[0]));
409 if(
IS_INTRA(
s->current_picture_ptr->mb_type[mb_pos])){
412 fill_rectangle(intra_types, 4, 4,
r->intra_types_stride, t,
sizeof(intra_types[0]));
415 if(
r->decode_intra_types(
r, gb, intra_types) < 0)
422 for(
i = 0;
i < 16;
i++)
423 intra_types[(
i & 3) + (
i>>2) *
r->intra_types_stride] = 0;
444 static const uint8_t part_sizes_w[
RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2 };
447 static const uint8_t part_sizes_h[
RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2 };
462 int mv_pos =
s->mb_x * 2 +
s->mb_y * 2 *
s->b8_stride;
463 int A[2] = {0},
B[2],
C[2];
469 mv_pos += (subblock_no & 1) + (subblock_no >> 1)*
s->b8_stride;
474 A[0] =
s->current_picture_ptr->motion_val[0][mv_pos-1][0];
475 A[1] =
s->current_picture_ptr->motion_val[0][mv_pos-1][1];
478 B[0] =
s->current_picture_ptr->motion_val[0][mv_pos-
s->b8_stride][0];
479 B[1] =
s->current_picture_ptr->motion_val[0][mv_pos-
s->b8_stride][1];
485 if(avail[-4] && (avail[-1] ||
r->rv30)){
486 C[0] =
s->current_picture_ptr->motion_val[0][mv_pos-
s->b8_stride-1][0];
487 C[1] =
s->current_picture_ptr->motion_val[0][mv_pos-
s->b8_stride-1][1];
493 C[0] =
s->current_picture_ptr->motion_val[0][mv_pos-
s->b8_stride+c_off][0];
494 C[1] =
s->current_picture_ptr->motion_val[0][mv_pos-
s->b8_stride+c_off][1];
498 mx +=
r->dmv[dmv_no][0];
499 my +=
r->dmv[dmv_no][1];
502 s->current_picture_ptr->motion_val[0][mv_pos +
i + j*
s->b8_stride][0] = mx;
503 s->current_picture_ptr->motion_val[0][mv_pos +
i + j*
s->b8_stride][1] = my;
508 #define GET_PTS_DIFF(a, b) (((a) - (b) + 8192) & 0x1FFF)
515 int mul = dir ? -
r->mv_weight2 :
r->mv_weight1;
524 int A_avail,
int B_avail,
int C_avail,
527 if(A_avail + B_avail + C_avail != 3){
528 *mx =
A[0] +
B[0] +
C[0];
529 *my =
A[1] +
B[1] +
C[1];
530 if(A_avail + B_avail + C_avail == 2){
546 int mb_pos =
s->mb_x +
s->mb_y *
s->mb_stride;
547 int mv_pos =
s->mb_x * 2 +
s->mb_y * 2 *
s->b8_stride;
548 int A[2] = { 0 },
B[2] = { 0 },
C[2] = { 0 };
549 int has_A = 0, has_B = 0, has_C = 0;
552 Picture *cur_pic =
s->current_picture_ptr;
562 B[0] = cur_pic->
motion_val[dir][mv_pos -
s->b8_stride][0];
563 B[1] = cur_pic->
motion_val[dir][mv_pos -
s->b8_stride][1];
566 if(
r->avail_cache[6-4] && (
r->avail_cache[6-2] &
type) &
mask){
567 C[0] = cur_pic->
motion_val[dir][mv_pos -
s->b8_stride + 2][0];
568 C[1] = cur_pic->
motion_val[dir][mv_pos -
s->b8_stride + 2][1];
570 }
else if((
s->mb_x+1) ==
s->mb_width && (
r->avail_cache[6-5] &
type) &
mask){
571 C[0] = cur_pic->
motion_val[dir][mv_pos -
s->b8_stride - 1][0];
572 C[1] = cur_pic->
motion_val[dir][mv_pos -
s->b8_stride - 1][1];
578 mx +=
r->dmv[dir][0];
579 my +=
r->dmv[dir][1];
581 for(j = 0; j < 2; j++){
582 for(
i = 0;
i < 2;
i++){
583 cur_pic->
motion_val[dir][mv_pos +
i + j*
s->b8_stride][0] = mx;
584 cur_pic->
motion_val[dir][mv_pos +
i + j*
s->b8_stride][1] = my;
598 int mv_pos =
s->mb_x * 2 +
s->mb_y * 2 *
s->b8_stride;
599 int A[2] = {0},
B[2],
C[2];
605 A[0] =
s->current_picture_ptr->motion_val[0][mv_pos - 1][0];
606 A[1] =
s->current_picture_ptr->motion_val[0][mv_pos - 1][1];
609 B[0] =
s->current_picture_ptr->motion_val[0][mv_pos -
s->b8_stride][0];
610 B[1] =
s->current_picture_ptr->motion_val[0][mv_pos -
s->b8_stride][1];
616 if(avail[-4] && (avail[-1])){
617 C[0] =
s->current_picture_ptr->motion_val[0][mv_pos -
s->b8_stride - 1][0];
618 C[1] =
s->current_picture_ptr->motion_val[0][mv_pos -
s->b8_stride - 1][1];
624 C[0] =
s->current_picture_ptr->motion_val[0][mv_pos -
s->b8_stride + 2][0];
625 C[1] =
s->current_picture_ptr->motion_val[0][mv_pos -
s->b8_stride + 2][1];
631 for(j = 0; j < 2; j++){
632 for(
i = 0;
i < 2;
i++){
633 for(k = 0; k < 2; k++){
634 s->current_picture_ptr->motion_val[k][mv_pos +
i + j*
s->b8_stride][0] = mx;
635 s->current_picture_ptr->motion_val[k][mv_pos +
i + j*
s->b8_stride][1] = my;
659 const int xoff,
const int yoff,
int mv_off,
661 const int thirdpel,
int weighted,
667 int dxy, mx, my, umx, umy, lx, ly, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
668 int mv_pos =
s->mb_x * 2 +
s->mb_y * 2 *
s->b8_stride + mv_off;
673 int chroma_mx, chroma_my;
674 mx = (
s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24);
675 my = (
s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24);
676 lx = (
s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) % 3;
677 ly = (
s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) % 3;
678 chroma_mx =
s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
679 chroma_my =
s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
680 umx = (chroma_mx + (3 << 24)) / 3 - (1 << 24);
681 umy = (chroma_my + (3 << 24)) / 3 - (1 << 24);
686 mx =
s->current_picture_ptr->motion_val[dir][mv_pos][0] >> 2;
687 my =
s->current_picture_ptr->motion_val[dir][mv_pos][1] >> 2;
688 lx =
s->current_picture_ptr->motion_val[dir][mv_pos][0] & 3;
689 ly =
s->current_picture_ptr->motion_val[dir][mv_pos][1] & 3;
690 cx =
s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
691 cy =
s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
694 uvmx = (cx & 3) << 1;
695 uvmy = (cy & 3) << 1;
697 if(uvmx == 6 && uvmy == 6)
703 int mb_row =
s->mb_y + ((yoff + my + 5 + 8 *
height) >> 4);
704 ThreadFrame *
f = dir ? &
s->next_picture_ptr->tf : &
s->last_picture_ptr->tf;
709 srcY = dir ?
s->next_picture_ptr->f->data[0] :
s->last_picture_ptr->f->data[0];
710 srcU = dir ?
s->next_picture_ptr->f->data[1] :
s->last_picture_ptr->f->data[1];
711 srcV = dir ?
s->next_picture_ptr->f->data[2] :
s->last_picture_ptr->f->data[2];
712 src_x =
s->mb_x * 16 + xoff + mx;
713 src_y =
s->mb_y * 16 + yoff + my;
714 uvsrc_x =
s->mb_x * 8 + (xoff >> 1) + umx;
715 uvsrc_y =
s->mb_y * 8 + (yoff >> 1) + umy;
716 srcY += src_y *
s->linesize + src_x;
717 srcU += uvsrc_y *
s->uvlinesize + uvsrc_x;
718 srcV += uvsrc_y *
s->uvlinesize + uvsrc_x;
719 if(
s->h_edge_pos - (
width << 3) < 6 ||
s->v_edge_pos - (
height << 3) < 6 ||
720 (
unsigned)(src_x - !!lx*2) >
s->h_edge_pos - !!lx*2 - (
width <<3) - 4 ||
721 (unsigned)(src_y - !!ly*2) >
s->v_edge_pos - !!ly*2 - (
height<<3) - 4) {
722 srcY -= 2 + 2*
s->linesize;
723 s->vdsp.emulated_edge_mc(
s->sc.edge_emu_buffer, srcY,
724 s->linesize,
s->linesize,
726 src_x - 2, src_y - 2,
727 s->h_edge_pos,
s->v_edge_pos);
728 srcY =
s->sc.edge_emu_buffer + 2 + 2*
s->linesize;
732 Y =
s->dest[0] + xoff + yoff *
s->linesize;
733 U =
s->dest[1] + (xoff>>1) + (yoff>>1)*
s->uvlinesize;
734 V =
s->dest[2] + (xoff>>1) + (yoff>>1)*
s->uvlinesize;
736 Y =
r->tmp_b_block_y [dir] + xoff + yoff *
s->linesize;
737 U =
r->tmp_b_block_uv[dir*2] + (xoff>>1) + (yoff>>1)*
s->uvlinesize;
738 V =
r->tmp_b_block_uv[dir*2+1] + (xoff>>1) + (yoff>>1)*
s->uvlinesize;
742 qpel_mc[1][dxy](
Y, srcY,
s->linesize);
746 qpel_mc[1][dxy](
Y, srcY,
s->linesize);
747 Y += 8 *
s->linesize;
748 srcY += 8 *
s->linesize;
751 qpel_mc[!is16x16][dxy](
Y, srcY,
s->linesize);
753 uint8_t *uvbuf =
s->sc.edge_emu_buffer;
755 s->vdsp.emulated_edge_mc(uvbuf, srcU,
756 s->uvlinesize,
s->uvlinesize,
759 s->h_edge_pos >> 1,
s->v_edge_pos >> 1);
761 uvbuf += 9*
s->uvlinesize;
763 s->vdsp.emulated_edge_mc(uvbuf, srcV,
764 s->uvlinesize,
s->uvlinesize,
767 s->h_edge_pos >> 1,
s->v_edge_pos >> 1);
775 const int xoff,
const int yoff,
int mv_off,
778 rv34_mc(
r, block_type, xoff, yoff, mv_off,
width,
height, dir,
r->rv30, 0,
779 r->rdsp.put_pixels_tab,
780 r->rdsp.put_chroma_pixels_tab);
785 r->rdsp.rv40_weight_pixels_tab[
r->scaled_weight][0](
r->s.dest[0],
791 r->rdsp.rv40_weight_pixels_tab[
r->scaled_weight][1](
r->s.dest[1],
792 r->tmp_b_block_uv[0],
793 r->tmp_b_block_uv[2],
797 r->rdsp.rv40_weight_pixels_tab[
r->scaled_weight][1](
r->s.dest[2],
798 r->tmp_b_block_uv[1],
799 r->tmp_b_block_uv[3],
809 rv34_mc(
r, block_type, 0, 0, 0, 2, 2, 0,
r->rv30, weighted,
810 r->rdsp.put_pixels_tab,
811 r->rdsp.put_chroma_pixels_tab);
813 rv34_mc(
r, block_type, 0, 0, 0, 2, 2, 1,
r->rv30, 0,
814 r->rdsp.avg_pixels_tab,
815 r->rdsp.avg_chroma_pixels_tab);
817 rv34_mc(
r, block_type, 0, 0, 0, 2, 2, 1,
r->rv30, 1,
818 r->rdsp.put_pixels_tab,
819 r->rdsp.put_chroma_pixels_tab);
827 int weighted = !
r->rv30 &&
r->weight1 != 8192;
829 for(j = 0; j < 2; j++)
830 for(
i = 0;
i < 2;
i++){
833 r->rdsp.put_pixels_tab,
834 r->rdsp.put_chroma_pixels_tab);
837 weighted ?
r->rdsp.put_pixels_tab :
r->rdsp.avg_pixels_tab,
838 weighted ?
r->rdsp.put_chroma_pixels_tab :
r->rdsp.avg_chroma_pixels_tab);
845 static const int num_mvs[
RV34_MB_TYPES] = { 0, 0, 1, 4, 1, 1, 0, 0, 2, 2, 2, 1 };
856 int mv_pos =
s->mb_x * 2 +
s->mb_y * 2 *
s->b8_stride;
859 memset(
r->dmv, 0,
sizeof(
r->dmv));
865 r->dmv[
i][0] =
r->dmv[
i][1] = 0;
872 ZERO8x2(
s->current_picture_ptr->motion_val[0][
s->mb_x * 2 +
s->mb_y * 2 *
s->b8_stride],
s->b8_stride);
876 ZERO8x2(
s->current_picture_ptr->motion_val[0][
s->mb_x * 2 +
s->mb_y * 2 *
s->b8_stride],
s->b8_stride);
886 next_bt =
s->next_picture_ptr->mb_type[
s->mb_x +
s->mb_y *
s->mb_stride];
888 ZERO8x2(
s->current_picture_ptr->motion_val[0][
s->mb_x * 2 +
s->mb_y * 2 *
s->b8_stride],
s->b8_stride);
889 ZERO8x2(
s->current_picture_ptr->motion_val[1][
s->mb_x * 2 +
s->mb_y * 2 *
s->b8_stride],
s->b8_stride);
891 for(j = 0; j < 2; j++)
892 for(
i = 0;
i < 2;
i++)
893 for(k = 0; k < 2; k++)
894 for(l = 0; l < 2; l++)
895 s->current_picture_ptr->motion_val[l][mv_pos +
i + j*
s->b8_stride][k] =
calc_add_mv(
r, l,
s->next_picture_ptr->motion_val[0][mv_pos +
i + j*
s->b8_stride][k]);
900 ZERO8x2(
s->current_picture_ptr->motion_val[0][
s->mb_x * 2 +
s->mb_y * 2 *
s->b8_stride],
s->b8_stride);
909 r->dmv[1][0] =
r->dmv[0][0];
910 r->dmv[1][1] =
r->dmv[0][1];
938 rv34_mc_1mv (
r, block_type, (
i&1)<<3, (
i&2)<<2, (
i&1)+(
i>>1)*
s->b8_stride, 1, 1, 0);
986 topleft = dst[-
stride + 3] * 0x01010101u;
989 r->h.pred4x4[itype](dst, prev,
stride);
1010 int fc,
int sc,
int q_dc,
int q_ac)
1013 int16_t *ptr =
s->block[0];
1015 fc, sc, q_dc, q_ac, q_ac);
1017 r->rdsp.rv34_idct_add(pdst,
stride, ptr);
1019 r->rdsp.rv34_idct_dc_add(pdst,
stride, ptr[0]);
1032 int16_t *ptr =
s->block[0];
1033 int i, j, itype, has_ac;
1035 memset(block16, 0, 16 *
sizeof(*block16));
1039 r->rdsp.rv34_inv_transform(block16);
1041 r->rdsp.rv34_inv_transform_dc(block16);
1044 itype =
adjust_pred16(itype,
r->avail_cache[6-4],
r->avail_cache[6-1]);
1045 r->h.pred16x16[itype](dst,
s->linesize);
1047 for(j = 0; j < 4; j++){
1048 for(
i = 0; i < 4; i++, cbp >>= 1){
1049 int dc = block16[
i + j*4];
1058 r->rdsp.rv34_idct_add(dst+4*
i,
s->linesize, ptr);
1060 r->rdsp.rv34_idct_dc_add(dst+4*
i,
s->linesize,
dc);
1063 dst += 4*
s->linesize;
1068 itype =
adjust_pred16(itype,
r->avail_cache[6-4],
r->avail_cache[6-1]);
1073 for(j = 1; j < 3; j++){
1075 r->h.pred8x8[itype](dst,
s->uvlinesize);
1076 for(
i = 0; i < 4; i++, cbp >>= 1){
1078 if(!(cbp & 1))
continue;
1079 pdst = dst + (
i&1)*4 + (
i&2)*2*
s->uvlinesize;
1082 r->chroma_vlc, 1, q_dc, q_ac);
1091 int avail[6*8] = {0};
1093 int idx, q_ac, q_dc;
1096 if(
r->avail_cache[1])
1098 if(
r->avail_cache[2])
1099 avail[1] = avail[2] = 1;
1100 if(
r->avail_cache[3])
1101 avail[3] = avail[4] = 1;
1102 if(
r->avail_cache[4])
1104 if(
r->avail_cache[5])
1105 avail[8] = avail[16] = 1;
1106 if(
r->avail_cache[9])
1107 avail[24] = avail[32] = 1;
1110 for(j = 0; j < 4; j++){
1112 for(
i = 0; i < 4; i++, cbp >>= 1, dst += 4, idx++){
1115 if(!(cbp & 1))
continue;
1118 r->luma_vlc, 0, q_ac, q_ac);
1120 dst +=
s->linesize * 4 - 4*4;
1121 intra_types +=
r->intra_types_stride;
1124 intra_types -=
r->intra_types_stride * 4;
1129 for(k = 0; k < 2; k++){
1133 for(j = 0; j < 2; j++){
1134 int* acache =
r->avail_cache + 6 + j*4;
1135 for(
i = 0; i < 2; i++, cbp >>= 1, acache++){
1136 int itype =
ittrans[intra_types[
i*2+j*2*
r->intra_types_stride]];
1140 if(!(cbp&1))
continue;
1143 r->chroma_vlc, 1, q_dc, q_ac);
1146 dst += 4*
s->uvlinesize;
1154 d = motion_val[0][0] - motion_val[-step][0];
1157 d = motion_val[0][1] - motion_val[-step][1];
1166 int hmvmask = 0, vmvmask = 0,
i, j;
1167 int midx =
s->mb_x * 2 +
s->mb_y * 2 *
s->b8_stride;
1168 int16_t (*motion_val)[2] = &
s->current_picture_ptr->motion_val[0][midx];
1169 for(j = 0; j < 16; j += 8){
1170 for(
i = 0;
i < 2;
i++){
1172 vmvmask |= 0x11 << (j +
i*2);
1174 hmvmask |= 0x03 << (j +
i*2);
1176 motion_val +=
s->b8_stride;
1178 if(
s->first_slice_line)
1183 vmvmask |= (vmvmask & 0x4444) >> 1;
1184 hmvmask |= (hmvmask & 0x0F00) >> 4;
1186 r->deblock_coefs[
s->mb_x - 1 +
s->mb_y*
s->mb_stride] |= (vmvmask & 0x1111) << 3;
1187 if(!
s->first_slice_line)
1188 r->deblock_coefs[
s->mb_x + (
s->mb_y - 1)*
s->mb_stride] |= (hmvmask & 0xF) << 12;
1190 return hmvmask | vmvmask;
1198 int16_t *ptr =
s->block[0];
1199 int mb_pos =
s->mb_x +
s->mb_y *
s->mb_stride;
1201 int q_dc, q_ac, has_ac;
1206 memset(
r->avail_cache, 0,
sizeof(
r->avail_cache));
1208 dist = (
s->mb_x -
s->resync_mb_x) + (
s->mb_y -
s->resync_mb_y) *
s->mb_width;
1211 r->avail_cache[9] =
s->current_picture_ptr->mb_type[mb_pos - 1];
1212 if(dist >=
s->mb_width)
1214 r->avail_cache[3] =
s->current_picture_ptr->mb_type[mb_pos -
s->mb_stride];
1215 if(((
s->mb_x+1) <
s->mb_width) && dist >=
s->mb_width - 1)
1216 r->avail_cache[4] =
s->current_picture_ptr->mb_type[mb_pos -
s->mb_stride + 1];
1217 if(
s->mb_x && dist >
s->mb_width)
1218 r->avail_cache[1] =
s->current_picture_ptr->mb_type[mb_pos -
s->mb_stride - 1];
1220 s->qscale =
r->si.quant;
1222 r->cbp_luma [mb_pos] = cbp;
1223 r->cbp_chroma[mb_pos] = cbp >> 16;
1225 s->current_picture_ptr->qscale_table[mb_pos] =
s->qscale;
1230 if (
IS_INTRA(
s->current_picture_ptr->mb_type[mb_pos])){
1239 memset(block16, 0, 16 *
sizeof(*block16));
1243 r->rdsp.rv34_inv_transform(block16);
1245 r->rdsp.rv34_inv_transform_dc(block16);
1249 for(j = 0; j < 4; j++){
1250 for(
i = 0; i < 4; i++, cbp >>= 1){
1251 int dc = block16[
i + j*4];
1260 r->rdsp.rv34_idct_add(dst+4*
i,
s->linesize, ptr);
1262 r->rdsp.rv34_idct_dc_add(dst+4*
i,
s->linesize,
dc);
1265 dst += 4*
s->linesize;
1272 for(j = 0; j < 4; j++){
1273 for(
i = 0; i < 4; i++, cbp >>= 1){
1274 if(!(cbp & 1))
continue;
1277 r->luma_vlc, 0, q_ac, q_ac);
1279 dst += 4*
s->linesize;
1286 for(j = 1; j < 3; j++){
1288 for(
i = 0; i < 4; i++, cbp >>= 1){
1290 if(!(cbp & 1))
continue;
1291 pdst = dst + (
i&1)*4 + (
i&2)*2*
s->uvlinesize;
1294 r->chroma_vlc, 1, q_dc, q_ac);
1305 int mb_pos =
s->mb_x +
s->mb_y *
s->mb_stride;
1308 memset(
r->avail_cache, 0,
sizeof(
r->avail_cache));
1310 dist = (
s->mb_x -
s->resync_mb_x) + (
s->mb_y -
s->resync_mb_y) *
s->mb_width;
1313 r->avail_cache[9] =
s->current_picture_ptr->mb_type[mb_pos - 1];
1314 if(dist >=
s->mb_width)
1316 r->avail_cache[3] =
s->current_picture_ptr->mb_type[mb_pos -
s->mb_stride];
1317 if(((
s->mb_x+1) <
s->mb_width) && dist >=
s->mb_width - 1)
1318 r->avail_cache[4] =
s->current_picture_ptr->mb_type[mb_pos -
s->mb_stride + 1];
1319 if(
s->mb_x && dist >
s->mb_width)
1320 r->avail_cache[1] =
s->current_picture_ptr->mb_type[mb_pos -
s->mb_stride - 1];
1322 s->qscale =
r->si.quant;
1324 r->cbp_luma [mb_pos] = cbp;
1325 r->cbp_chroma[mb_pos] = cbp >> 16;
1326 r->deblock_coefs[mb_pos] = 0xFFFF;
1327 s->current_picture_ptr->qscale_table[mb_pos] =
s->qscale;
1344 if(
s->mb_y >=
s->mb_height)
1348 if(
r->s.mb_skip_run > 1)
1360 r->intra_types =
NULL;
1371 r->intra_types_stride =
r->s.mb_width * 4 + 4;
1373 r->cbp_chroma =
av_mallocz(
r->s.mb_stride *
r->s.mb_height *
1374 sizeof(*
r->cbp_chroma));
1376 sizeof(*
r->cbp_luma));
1377 r->deblock_coefs =
av_mallocz(
r->s.mb_stride *
r->s.mb_height *
1378 sizeof(*
r->deblock_coefs));
1379 r->intra_types_hist =
av_malloc(
r->intra_types_stride * 4 * 2 *
1380 sizeof(*
r->intra_types_hist));
1382 sizeof(*
r->mb_type));
1384 if (!(
r->cbp_chroma &&
r->cbp_luma &&
r->deblock_coefs &&
1385 r->intra_types_hist &&
r->mb_type)) {
1386 r->s.context_reinit = 1;
1391 r->intra_types =
r->intra_types_hist +
r->intra_types_stride * 4;
1408 int mb_pos, slice_type;
1412 res =
r->parse_slice_header(
r, gb, &
r->si);
1419 if (slice_type !=
s->pict_type) {
1423 if (
s->width !=
r->si.width ||
s->height !=
r->si.height) {
1429 s->qscale =
r->si.quant;
1430 s->mb_num_left =
r->si.end -
r->si.start;
1431 r->s.mb_skip_run = 0;
1433 mb_pos =
s->mb_x +
s->mb_y *
s->mb_width;
1434 if(
r->si.start != mb_pos){
1436 s->mb_x =
r->si.start %
s->mb_width;
1437 s->mb_y =
r->si.start /
s->mb_width;
1439 memset(
r->intra_types_hist, -1,
r->intra_types_stride * 4 * 2 *
sizeof(*
r->intra_types_hist));
1440 s->first_slice_line = 1;
1441 s->resync_mb_x =
s->mb_x;
1442 s->resync_mb_y =
s->mb_y;
1456 if (++
s->mb_x ==
s->mb_width) {
1461 memmove(
r->intra_types_hist,
r->intra_types,
r->intra_types_stride * 4 *
sizeof(*
r->intra_types_hist));
1462 memset(
r->intra_types, -1,
r->intra_types_stride * 4 *
sizeof(*
r->intra_types_hist));
1464 if(
r->loop_filter &&
s->mb_y >= 2)
1465 r->loop_filter(
r,
s->mb_y - 2);
1472 if(
s->mb_x ==
s->resync_mb_x)
1473 s->first_slice_line=0;
1478 return s->mb_y ==
s->mb_height;
1506 #if CONFIG_RV30_DECODER
1510 #if CONFIG_RV40_DECODER
1531 if (dst ==
src || !
s1->context_initialized)
1534 if (
s->height !=
s1->height ||
s->width !=
s1->width ||
s->context_reinit) {
1535 s->height =
s1->height;
1536 s->width =
s1->width;
1543 r->cur_pts = r1->cur_pts;
1544 r->last_pts = r1->last_pts;
1545 r->next_pts = r1->next_pts;
1547 memset(&
r->si, 0,
sizeof(
r->si));
1551 if (!
s1->context_initialized)
1559 if (n < slice_count) {
1570 int got_picture = 0, ret;
1580 if ((ret =
av_frame_ref(pict,
s->current_picture_ptr->f)) < 0)
1585 }
else if (
s->last_picture_ptr) {
1607 void *
data,
int *got_picture_ptr,
1611 int buf_size = avpkt->
size;
1624 if (buf_size == 0) {
1626 if (
s->low_delay==0 &&
s->next_picture_ptr) {
1629 s->next_picture_ptr =
NULL;
1631 *got_picture_ptr = 1;
1637 slice_count = (*buf++) + 1;
1638 slices_hdr = buf + 4;
1639 buf += 8 * slice_count;
1640 buf_size -= 1 + 8 * slice_count;
1646 if(offset < 0 || offset > buf_size){
1651 if(
r->parse_slice_header(
r, &
r->s.gb, &si) < 0 || si.
start){
1655 if ((!
s->last_picture_ptr || !
s->last_picture_ptr->f->data[0]) &&
1658 "reference data.\n");
1667 if (si.
start == 0) {
1668 if (
s->mb_num_left > 0 &&
s->current_picture_ptr) {
1671 if (!
s->context_reinit)
1676 if (
s->width != si.
width ||
s->height != si.
height ||
s->context_reinit) {
1686 s->width,
s->height,
s->avctx->sample_aspect_ratio,
1705 if (!
r->tmp_b_block_base) {
1708 r->tmp_b_block_base =
av_malloc(
s->linesize * 48);
1709 for (
i = 0;
i < 2;
i++)
1710 r->tmp_b_block_y[
i] =
r->tmp_b_block_base
1711 +
i * 16 *
s->linesize;
1713 r->tmp_b_block_uv[
i] =
r->tmp_b_block_base + 32 *
s->linesize
1714 + (
i >> 1) * 8 *
s->uvlinesize
1717 r->cur_pts = si.
pts;
1719 r->last_pts =
r->next_pts;
1720 r->next_pts =
r->cur_pts;
1727 r->mv_weight1 =
r->mv_weight2 =
r->weight1 =
r->weight2 = 8192;
1728 r->scaled_weight = 0;
1730 if (
FFMAX(dist0, dist1) > refdist)
1733 r->mv_weight1 = (dist0 << 14) / refdist;
1734 r->mv_weight2 = (dist1 << 14) / refdist;
1735 if((
r->mv_weight1|
r->mv_weight2) & 511){
1736 r->weight1 =
r->mv_weight1;
1737 r->weight2 =
r->mv_weight2;
1738 r->scaled_weight = 0;
1740 r->weight1 =
r->mv_weight1 >> 9;
1741 r->weight2 =
r->mv_weight2 >> 9;
1742 r->scaled_weight = 1;
1746 s->mb_x =
s->mb_y = 0;
1748 }
else if (
s->context_reinit) {
1750 "reinitialize (start MB is %d).\n", si.
start);
1755 "multithreading mode (start MB is %d).\n", si.
start);
1759 for(
i = 0;
i < slice_count;
i++){
1764 if(offset < 0 || offset > offset1 || offset1 > buf_size){
1770 r->si.end =
s->mb_width *
s->mb_height;
1771 s->mb_num_left =
r->s.mb_x +
r->s.mb_y*
r->s.mb_width -
r->si.start;
1773 if(
i+1 < slice_count){
1775 if (offset2 < offset1 || offset2 > buf_size) {
1780 if(
r->parse_slice_header(
r, &
r->s.gb, &si) < 0){
1791 if (
s->current_picture_ptr) {
1794 r->loop_filter(
r,
s->mb_height - 1);
1799 *got_picture_ptr = ret;
static double val(void *priv, double ch)
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! 
*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> dc
simple assert() macros that are a bit more flexible than ISO C assert().
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Libavcodec external API header.
#define FF_THREAD_FRAME
Decode more than one frame at once.
int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
static const uint8_t shifts[2][12]
#define flags(name, subs,...)
#define fc(width, name, range_min, range_max)
static float mul(float src0, float src1)
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
void ff_er_frame_end(ERContext *s)
static void fill_rectangle(int x, int y, int w, int h)
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
static int get_bits_left(GetBitContext *gb)
static unsigned int get_bits1(GetBitContext *s)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
static int get_interleaved_se_golomb(GetBitContext *gb)
@ AVDISCARD_ALL
discard all
@ AVDISCARD_NONKEY
discard all frames except keyframes
@ AVDISCARD_NONREF
discard all non reference
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
#define AV_LOG_WARNING
Something somehow does not look correct.
#define AV_LOG_INFO
Standard information.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
@ AV_PICTURE_TYPE_I
Intra.
@ AV_PICTURE_TYPE_P
Predicted.
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
void(* h264_chroma_mc_func)(uint8_t *dst, uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
#define DIAG_DOWN_LEFT_PRED_RV40_NODOWN
#define HOR_UP_PRED_RV40_NODOWN
#define VERT_LEFT_PRED_RV40_NODOWN
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
#define FF_QSCALE_TYPE_MPEG1
common internal API header
static int ff_thread_once(char *control, void(*routine)(void))
static const uint16_t mask[17]
#define LOCAL_ALIGNED_16(t, v,...)
void ff_mpeg_er_frame_start(MpegEncContext *s)
#define MB_TYPE_INTRA16x16
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
void ff_mpv_common_end(MpegEncContext *s)
void ff_mpv_frame_end(MpegEncContext *s)
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
av_cold void ff_mpv_idct_init(MpegEncContext *s)
void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
void ff_init_block_index(MpegEncContext *s)
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
int ff_mpv_common_frame_size_change(MpegEncContext *s)
static void ff_update_block_index(MpegEncContext *s)
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
static const uint16_t table[]
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
useful rectangle filling function
av_cold void ff_rv30dsp_init(RV34DSPContext *c)
static int rv34_decode_cbp(GetBitContext *gb, RV34VLC *vlc, int table)
Decode coded block pattern.
static av_cold void rv34_init_tables(void)
Initialize all tables.
static const int rv34_mb_type_to_lavc[12]
translation of RV30/40 macroblock types to lavc ones
static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
Decode intra macroblock header and return CBP in case of success, -1 otherwise.
static int adjust_pred16(int itype, int up, int left)
static void rv34_decoder_free(RV34DecContext *r)
static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t *buf, int buf_size)
static AVRational update_sar(int old_w, int old_h, AVRational sar, int new_w, int new_h)
static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
static const int chroma_coeffs[3]
static void rv4_weight(RV34DecContext *r)
static void rv34_pred_b_vector(int A[2], int B[2], int C[2], int A_avail, int B_avail, int C_avail, int *mx, int *my)
Predict motion vector for B-frame macroblock.
static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
static int rv34_decoder_realloc(RV34DecContext *r)
static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n, int slice_count, int buf_size)
static void rv34_gen_vlc(const uint8_t *bits, int size, VLC *vlc, const uint8_t *syms, int *offset)
Generate VLC from codeword lengths.
static int rv34_decode_mv(RV34DecContext *r, int block_type)
Decode motion vector differences and perform motion vector reconstruction and motion compensation.
static RV34VLC * choose_vlc_set(int quant, int mod, int type)
Select VLC set for decoding from current quantizer, modifier and frame type.
static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int itype, int up, int left, int down, int right)
Perform 4x4 intra prediction.
static void decode_subblock3(int16_t *dst, int code, GetBitContext *gb, VLC *vlc, int q_dc, int q_ac1, int q_ac2)
static int rv34_set_deblock_coef(RV34DecContext *r)
static void decode_subblock(int16_t *dst, int code, const int is_block2, GetBitContext *gb, VLC *vlc, int q)
Decode 2x2 subblock of coefficients.
static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
static int calc_add_mv(RV34DecContext *r, int dir, int val)
Calculate motion vector component that should be added for direct blocks.
av_cold int ff_rv34_decode_end(AVCodecContext *avctx)
static void rv34_mc(RV34DecContext *r, const int block_type, const int xoff, const int yoff, int mv_off, const int width, const int height, int dir, const int thirdpel, int weighted, qpel_mc_func(*qpel_mc)[16], h264_chroma_mc_func(*chroma_mc))
generic motion compensation function
static void decode_subblock1(int16_t *dst, int code, GetBitContext *gb, VLC *vlc, int q)
Decode a single coefficient.
static RV34VLC inter_vlcs[NUM_INTER_TABLES]
int ff_rv34_get_start_offset(GetBitContext *gb, int mb_size)
Decode starting slice position.
static VLC_TYPE table_data[117592][2]
static void rv34_mc_1mv(RV34DecContext *r, const int block_type, const int xoff, const int yoff, int mv_off, const int width, const int height, int dir)
static void ZERO8x2(void *dst, int stride)
static void rv34_output_intra(RV34DecContext *r, int8_t *intra_types, int cbp)
static const uint8_t avail_indexes[4]
availability index for subblocks
int ff_rv34_decode_frame(AVCodecContext *avctx, void *data, int *got_picture_ptr, AVPacket *avpkt)
static const int ittrans16[4]
mapping of RV30/40 intra 16x16 prediction types to standard H.264 types
static int check_slice_end(RV34DecContext *r, MpegEncContext *s)
static int rv34_decode_block(int16_t *dst, GetBitContext *gb, RV34VLC *rvlc, int fc, int sc, int q_dc, int q_ac1, int q_ac2)
Decode coefficients for 4x4 block.
#define GET_PTS_DIFF(a, b)
static const uint8_t part_sizes_w[RV34_MB_TYPES]
macroblock partition width in 8x8 blocks
static void decode_coeff(int16_t *dst, int coef, int esc, GetBitContext *gb, VLC *vlc, int q)
Get one coefficient value from the bitstream and store it.
static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
static const uint8_t part_sizes_h[RV34_MB_TYPES]
macroblock partition height in 8x8 blocks
static const int num_mvs[RV34_MB_TYPES]
number of motion vectors in each macroblock type
static int rv34_decoder_alloc(RV34DecContext *r)
static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
Decode inter macroblock header and return CBP in case of success, -1 otherwise.
static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
motion vector prediction - RV3 version
static RV34VLC intra_vlcs[NUM_INTRA_TABLES]
static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
motion vector prediction for B-frames
static void rv34_mc_2mv_skip(RV34DecContext *r)
av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
Initialize decoder.
int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
static int is_mv_diff_gt_3(int16_t(*motion_val)[2], int step)
static void rv34_process_block(RV34DecContext *r, uint8_t *pdst, int stride, int fc, int sc, int q_dc, int q_ac)
static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no)
motion vector prediction
static const int ittrans[9]
mapping of RV30/40 intra prediction types to standard H.264 types
RV30 and RV40 decoder common data declarations.
@ RV34_MB_TYPE_INTRA
Intra macroblock.
@ RV34_MB_P_MIX16x16
P-frame macroblock with DCs in a separate 4x4 block, one motion vector.
@ RV34_MB_P_16x8
P-frame macroblock, 16x8 motion compensation partitions.
@ RV34_MB_B_BIDIR
Bidirectionally predicted B-frame macroblock, two motion vectors.
@ RV34_MB_B_BACKWARD
B-frame macroblock, backward prediction.
@ RV34_MB_B_DIRECT
Bidirectionally predicted B-frame macroblock, no motion vectors.
@ RV34_MB_P_16x16
P-frame macroblock, one motion vector.
@ RV34_MB_P_8x16
P-frame macroblock, 8x16 motion compensation partitions.
@ RV34_MB_P_8x8
P-frame macroblock, 8x8 motion compensation partitions.
@ RV34_MB_B_FORWARD
B-frame macroblock, forward prediction.
@ RV34_MB_TYPE_INTRA16x16
Intra macroblock with DCs in a separate 4x4 block.
@ RV34_MB_SKIP
Skipped block.
#define MB_TYPE_SEPARATE_DC
miscellaneous RV30/40 tables
static const uint8_t modulo_three_table[108]
precalculated results of division by three and modulo three for values 0-107
static const uint8_t rv34_chroma_quant[2][32]
quantizer values used for AC and DC coefficients in chroma blocks
static const uint8_t rv34_quant_to_vlc_set[2][32]
tables used to translate a quantizer value into a VLC set for decoding. The first table is used for intra blocks, the second one for inter blocks.
static const uint16_t rv34_qscale_tab[32]
This table is used for dequantizing.
static const uint8_t rv34_cbp_code[16]
values used to reconstruct coded block pattern
static const uint8_t rv34_count_ones[16]
number of ones in nibble minus one
static const uint8_t rv34_mb_bits_sizes[6]
bits needed to code the slice offset for the given size
static const uint16_t rv34_mb_max_sizes[6]
maximum number of macroblocks for each of the possible slice offset sizes
void ff_rv40dsp_init(RV34DSPContext *c)
static const uint8_t rv34_intra_coeff[NUM_INTRA_TABLES][COEFF_VLC_SIZE]
static const uint8_t rv34_inter_cbppat[NUM_INTER_TABLES][CBPPAT_VLC_SIZE]
static const uint8_t rv34_table_intra_thirdpat[NUM_INTRA_TABLES][2][OTHERBLK_VLC_SIZE]
#define OTHERBLK_VLC_SIZE
#define FIRSTBLK_VLC_SIZE
static const uint8_t rv34_inter_cbp[NUM_INTER_TABLES][4][CBP_VLC_SIZE]
static const uint8_t rv34_table_inter_secondpat[NUM_INTER_TABLES][2][OTHERBLK_VLC_SIZE]
static const uint8_t rv34_table_intra_cbp[NUM_INTRA_TABLES][8][CBP_VLC_SIZE]
static const uint8_t rv34_table_intra_secondpat[NUM_INTRA_TABLES][2][OTHERBLK_VLC_SIZE]
static const uint8_t rv34_table_intra_firstpat[NUM_INTRA_TABLES][4][FIRSTBLK_VLC_SIZE]
static const uint8_t rv34_table_inter_firstpat[NUM_INTER_TABLES][2][FIRSTBLK_VLC_SIZE]
static const uint8_t rv34_inter_coeff[NUM_INTER_TABLES][COEFF_VLC_SIZE]
static const uint8_t rv34_table_intra_cbppat[NUM_INTRA_TABLES][2][CBPPAT_VLC_SIZE]
static const uint8_t rv34_table_inter_thirdpat[NUM_INTER_TABLES][2][OTHERBLK_VLC_SIZE]
#define FF_ARRAY_ELEMS(a)
main external API structure.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
int slice_count
slice count
int has_b_frames
Size of the frame reordering buffer in the decoder.
int * slice_offset
slice offsets in the frame in bytes
enum AVDiscard skip_frame
Skip decoding for selected frames.
This structure describes decoded (raw) audio or video data.
This structure stores compressed data.
Rational number (pair of numerator and denominator).
int16_t (*motion_val[2])[2]
uint32_t * mb_type
types and macros are defined in mpegutils.h
VLC tables used by the decoder.
VLC cbppattern[2]
VLCs used for pattern of coded block patterns decoding.
VLC cbp[2][4]
VLCs used for coded block patterns decoding.
VLC second_pattern[2]
VLCs used for decoding coefficients in the subblocks 2 and 3.
VLC third_pattern[2]
VLCs used for decoding coefficients in the last subblock.
VLC first_pattern[4]
VLCs used for decoding coefficients in the first subblock.
VLC coefficient
VLCs used for decoding big coefficients.
essential slice information
int type
slice type (intra, inter)
VLC_TYPE (*table)[2]
code, bits
static const double coeff[2][5]
static const uint8_t offset[127][2]
static int mod(int a, int b)
Modulo operation with only positive remainders.
#define INIT_VLC_STATIC_OVERLONG