/* FFmpeg 4.4.5 — libavcodec/mpegvideo_enc.c (Doxygen source listing) */
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include <stdint.h>
35 
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/mem_internal.h"
40 #include "libavutil/pixdesc.h"
41 #include "libavutil/opt.h"
42 #include "libavutil/thread.h"
43 #include "avcodec.h"
44 #include "dct.h"
45 #include "idctdsp.h"
46 #include "mpeg12.h"
47 #include "mpegvideo.h"
48 #include "mpegvideodata.h"
49 #include "h261.h"
50 #include "h263.h"
51 #include "h263data.h"
52 #include "mjpegenc_common.h"
53 #include "mathops.h"
54 #include "mpegutils.h"
55 #include "mjpegenc.h"
56 #include "speedhqenc.h"
57 #include "msmpeg4.h"
58 #include "pixblockdsp.h"
59 #include "qpeldsp.h"
60 #include "faandct.h"
61 #include "thread.h"
62 #include "aandcttab.h"
63 #include "flv.h"
64 #include "mpeg4video.h"
65 #include "internal.h"
66 #include "bytestream.h"
67 #include "wmv2.h"
68 #include "rv10.h"
69 #include "packet_internal.h"
70 #include <limits.h>
71 #include "sp5x.h"
72 
73 #define QUANT_BIAS_SHIFT 8
74 
75 #define QMAT_SHIFT_MMX 16
76 #define QMAT_SHIFT 21
77 
78 static int encode_picture(MpegEncContext *s, int picture_number);
79 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
80 static int sse_mb(MpegEncContext *s);
81 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
82 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
83 
86 
89  { NULL },
90 };
91 
/**
 * Precompute quantization multiplier tables for every qscale in [qmin, qmax].
 *
 * For each qscale, qmat[qscale][i] holds a fixed-point reciprocal of
 * (quantizer_step * quant_matrix[i]) so the encoder can quantize with a
 * multiply+shift instead of a division.  The scaling depends on which FDCT
 * implementation is in use: the ifast FDCT leaves the AAN scale factors in
 * its output, so those factors are folded into the divisor here.
 *
 * @param s            encoder context; provides the FDCT function pointer,
 *                     the IDCT coefficient permutation and q_scale_type
 * @param qmat         output: per-qscale 32-bit multiplier tables
 *                     (indexed in permuted coefficient order)
 * @param qmat16       output: per-qscale {multiplier, bias} 16-bit tables for
 *                     the SIMD quantizer path; only filled in the generic
 *                     FDCT branch
 * @param quant_matrix source quantization matrix, 64 entries in natural order
 * @param bias         rounding bias in QUANT_BIAS_SHIFT fixed point
 * @param qmin         first qscale to fill
 * @param qmax         last qscale to fill (inclusive)
 * @param intra        if 1, skip coefficient 0 (DC) in the overflow check loop
 */
void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
                       uint16_t (*qmat16)[2][64],
                       const uint16_t *quant_matrix,
                       int bias, int qmin, int qmax, int intra)
{
    FDCTDSPContext *fdsp = &s->fdsp;
    int qscale;
    int shift = 0;

    for (qscale = qmin; qscale <= qmax; qscale++) {
        int i;
        int qscale2;

        /* Map qscale to the actual quantizer step: either the MPEG-2
         * non-linear table or the linear 2*qscale. */
        if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
        else                 qscale2 = qscale << 1;

        if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
#if CONFIG_FAANDCT
            fdsp->fdct == ff_faandct ||
#endif /* CONFIG_FAANDCT */
            fdsp->fdct == ff_jpeg_fdct_islow_10) {
            /* Unscaled FDCT output: plain reciprocal of step * matrix entry.
             * den = qscale2 * quant_matrix[j] stays well below 2^QMAT_SHIFT,
             * so (2 << QMAT_SHIFT) / den fits an int. */
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale2 * quant_matrix[j];

                qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
            }
        } else if (fdsp->fdct == ff_fdct_ifast) {
            /* ifast FDCT output carries the AAN scale factors (2^14 fixed
             * point), so fold ff_aanscales into the divisor and compensate
             * with an extra 14-bit shift in the numerator. */
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
                /* 16 <= qscale * quant_matrix[i] <= 7905
                 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
                 * 19952 <= x <= 249205026
                 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
                 * 3444240 >= (1 << 36) / (x) >= 275 */

                qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
            }
        } else {
            /* Generic FDCT: also build the 16-bit multiplier+bias tables
             * used by the SIMD quantizer. */
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale2 * quant_matrix[j];
                /* We can safely suppose that 16 <= quant_matrix[i] <= 255
                 * Assume x = qscale * quant_matrix[i]
                 * So 16 <= x <= 7905
                 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
                 * so 32768 >= (1 << 19) / (x) >= 67 */
                qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
                //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
                //                   (qscale * quant_matrix[i]);
                qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;

                /* Avoid 0 (divisor below) and 32768, which does not fit the
                 * signed 16-bit multiplier of the SIMD path. */
                if (qmat16[qscale][0][i] == 0 ||
                    qmat16[qscale][0][i] == 128 * 256)
                    qmat16[qscale][0][i] = 128 * 256 - 1;
                qmat16[qscale][1][i] =
                    ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
                                qmat16[qscale][0][i]);
            }
        }

        /* Check whether the largest representable coefficient multiplied by
         * the table entry can exceed INT_MAX; record the extra shift that
         * would be needed so a warning can be emitted below. */
        for (i = intra; i < 64; i++) {
            int64_t max = 8191;
            if (fdsp->fdct == ff_fdct_ifast) {
                max = (8191LL * ff_aanscales[i]) >> 14;
            }
            while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
                shift++;
            }
        }
    }
    if (shift) {
        av_log(s->avctx, AV_LOG_INFO,
               "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
               QMAT_SHIFT - shift);
    }
}
175 
176 static inline void update_qscale(MpegEncContext *s)
177 {
178  if (s->q_scale_type == 1 && 0) {
179  int i;
180  int bestdiff=INT_MAX;
181  int best = 1;
182 
183  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
184  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
185  if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
186  (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
187  continue;
188  if (diff < bestdiff) {
189  bestdiff = diff;
190  best = i;
191  }
192  }
193  s->qscale = best;
194  } else {
195  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
196  (FF_LAMBDA_SHIFT + 7);
197  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
198  }
199 
200  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
202 }
203 
204 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
205 {
206  int i;
207 
208  if (matrix) {
209  put_bits(pb, 1, 1);
210  for (i = 0; i < 64; i++) {
211  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
212  }
213  } else
214  put_bits(pb, 1, 0);
215 }
216 
217 /**
218  * init s->current_picture.qscale_table from s->lambda_table
219  */
221 {
222  int8_t * const qscale_table = s->current_picture.qscale_table;
223  int i;
224 
225  for (i = 0; i < s->mb_num; i++) {
226  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
227  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
228  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
229  s->avctx->qmax);
230  }
231 }
232 
235 {
236 #define COPY(a) dst->a= src->a
237  COPY(pict_type);
238  COPY(current_picture);
239  COPY(f_code);
240  COPY(b_code);
241  COPY(qscale);
242  COPY(lambda);
243  COPY(lambda2);
244  COPY(picture_in_gop_number);
245  COPY(gop_picture_number);
246  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
247  COPY(progressive_frame); // FIXME don't set in encode_header
248  COPY(partitioned_frame); // FIXME don't set in encode_header
249 #undef COPY
250 }
251 
252 static void mpv_encode_init_static(void)
253 {
254  for (int i = -16; i < 16; i++)
255  default_fcode_tab[i + MAX_MV] = 1;
256 }
257 
258 /**
259  * Set the given MpegEncContext to defaults for encoding.
260  * the changed fields will not depend upon the prior state of the MpegEncContext.
261  */
263 {
264  static AVOnce init_static_once = AV_ONCE_INIT;
265 
267 
268  ff_thread_once(&init_static_once, mpv_encode_init_static);
269 
270  s->me.mv_penalty = default_mv_penalty;
271  s->fcode_tab = default_fcode_tab;
272 
273  s->input_picture_number = 0;
274  s->picture_in_gop_number = 0;
275 }
276 
278 {
279  if (ARCH_X86)
281 
283  ff_h263dsp_init(&s->h263dsp);
284  if (!s->dct_quantize)
285  s->dct_quantize = ff_dct_quantize_c;
286  if (!s->denoise_dct)
287  s->denoise_dct = denoise_dct_c;
288  s->fast_dct_quantize = s->dct_quantize;
289  if (s->avctx->trellis)
290  s->dct_quantize = dct_quantize_trellis_c;
291 
292  return 0;
293 }
294 
295 /* init video encoder */
297 {
298  MpegEncContext *s = avctx->priv_data;
299  AVCPBProperties *cpb_props;
300  int i, ret, format_supported;
301 
303 
304  switch (avctx->codec_id) {
306  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
307  avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
308  av_log(avctx, AV_LOG_ERROR,
309  "only YUV420 and YUV422 are supported\n");
310  return AVERROR(EINVAL);
311  }
312  break;
313  case AV_CODEC_ID_MJPEG:
314  case AV_CODEC_ID_AMV:
315  format_supported = 0;
316  /* JPEG color space */
317  if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
318  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
319  avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
320  (avctx->color_range == AVCOL_RANGE_JPEG &&
321  (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
322  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
323  avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
324  format_supported = 1;
325  /* MPEG color space */
326  else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
327  (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
328  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
329  avctx->pix_fmt == AV_PIX_FMT_YUV444P))
330  format_supported = 1;
331 
332  if (!format_supported) {
333  av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
334  return AVERROR(EINVAL);
335  }
336  break;
337  case AV_CODEC_ID_SPEEDHQ:
338  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
339  avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
340  avctx->pix_fmt != AV_PIX_FMT_YUV444P) {
341  av_log(avctx, AV_LOG_ERROR,
342  "only YUV420/YUV422/YUV444 are supported (no alpha support yet)\n");
343  return AVERROR(EINVAL);
344  }
345  break;
346  default:
347  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
348  av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
349  return AVERROR(EINVAL);
350  }
351  }
352 
353  switch (avctx->pix_fmt) {
354  case AV_PIX_FMT_YUVJ444P:
355  case AV_PIX_FMT_YUV444P:
356  s->chroma_format = CHROMA_444;
357  break;
358  case AV_PIX_FMT_YUVJ422P:
359  case AV_PIX_FMT_YUV422P:
360  s->chroma_format = CHROMA_422;
361  break;
362  case AV_PIX_FMT_YUVJ420P:
363  case AV_PIX_FMT_YUV420P:
364  default:
365  s->chroma_format = CHROMA_420;
366  break;
367  }
368 
369  avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
370 
371 #if FF_API_PRIVATE_OPT
373  if (avctx->rtp_payload_size)
374  s->rtp_payload_size = avctx->rtp_payload_size;
375  if (avctx->me_penalty_compensation)
376  s->me_penalty_compensation = avctx->me_penalty_compensation;
377  if (avctx->pre_me)
378  s->me_pre = avctx->pre_me;
380 #endif
381 
382  s->bit_rate = avctx->bit_rate;
383  s->width = avctx->width;
384  s->height = avctx->height;
385  if (avctx->gop_size > 600 &&
387  av_log(avctx, AV_LOG_WARNING,
388  "keyframe interval too large!, reducing it from %d to %d\n",
389  avctx->gop_size, 600);
390  avctx->gop_size = 600;
391  }
392  s->gop_size = avctx->gop_size;
393  s->avctx = avctx;
394  if (avctx->max_b_frames > MAX_B_FRAMES) {
395  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
396  "is %d.\n", MAX_B_FRAMES);
397  avctx->max_b_frames = MAX_B_FRAMES;
398  }
399  s->max_b_frames = avctx->max_b_frames;
400  s->codec_id = avctx->codec->id;
401  s->strict_std_compliance = avctx->strict_std_compliance;
402  s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
403  s->rtp_mode = !!s->rtp_payload_size;
404  s->intra_dc_precision = avctx->intra_dc_precision;
405 
406  // workaround some differences between how applications specify dc precision
407  if (s->intra_dc_precision < 0) {
408  s->intra_dc_precision += 8;
409  } else if (s->intra_dc_precision >= 8)
410  s->intra_dc_precision -= 8;
411 
412  if (s->intra_dc_precision < 0) {
413  av_log(avctx, AV_LOG_ERROR,
414  "intra dc precision must be positive, note some applications use"
415  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
416  return AVERROR(EINVAL);
417  }
418 
419  if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
420  s->huffman = 0;
421 
422  if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
423  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
424  return AVERROR(EINVAL);
425  }
426  s->user_specified_pts = AV_NOPTS_VALUE;
427 
428  if (s->gop_size <= 1) {
429  s->intra_only = 1;
430  s->gop_size = 12;
431  } else {
432  s->intra_only = 0;
433  }
434 
435  /* Fixed QSCALE */
436  s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
437 
438  s->adaptive_quant = (avctx->lumi_masking ||
439  avctx->dark_masking ||
440  avctx->temporal_cplx_masking ||
441  avctx->spatial_cplx_masking ||
442  avctx->p_masking ||
443  s->border_masking ||
444  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
445  !s->fixed_qscale;
446 
447  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
448 
449  if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
450  switch(avctx->codec_id) {
453  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
454  break;
455  case AV_CODEC_ID_MPEG4:
459  if (avctx->rc_max_rate >= 15000000) {
460  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
461  } else if(avctx->rc_max_rate >= 2000000) {
462  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
463  } else if(avctx->rc_max_rate >= 384000) {
464  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
465  } else
466  avctx->rc_buffer_size = 40;
467  avctx->rc_buffer_size *= 16384;
468  break;
469  }
470  if (avctx->rc_buffer_size) {
471  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
472  }
473  }
474 
475  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
476  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
477  return AVERROR(EINVAL);
478  }
479 
480  if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
481  av_log(avctx, AV_LOG_INFO,
482  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
483  }
484 
485  if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
486  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
487  return AVERROR(EINVAL);
488  }
489 
490  if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
491  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
492  return AVERROR(EINVAL);
493  }
494 
495  if (avctx->rc_max_rate &&
496  avctx->rc_max_rate == avctx->bit_rate &&
497  avctx->rc_max_rate != avctx->rc_min_rate) {
498  av_log(avctx, AV_LOG_INFO,
499  "impossible bitrate constraints, this will fail\n");
500  }
501 
502  if (avctx->rc_buffer_size &&
503  avctx->bit_rate * (int64_t)avctx->time_base.num >
504  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
505  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
506  return AVERROR(EINVAL);
507  }
508 
509  if (!s->fixed_qscale &&
510  avctx->bit_rate * av_q2d(avctx->time_base) >
511  avctx->bit_rate_tolerance) {
512  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
513  av_log(avctx, AV_LOG_WARNING,
514  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
515  if (nbt <= INT_MAX) {
516  avctx->bit_rate_tolerance = nbt;
517  } else
518  avctx->bit_rate_tolerance = INT_MAX;
519  }
520 
521  if (avctx->rc_max_rate &&
522  avctx->rc_min_rate == avctx->rc_max_rate &&
523  (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
524  s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
525  90000LL * (avctx->rc_buffer_size - 1) >
526  avctx->rc_max_rate * 0xFFFFLL) {
527  av_log(avctx, AV_LOG_INFO,
528  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
529  "specified vbv buffer is too large for the given bitrate!\n");
530  }
531 
532  if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
533  s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
534  s->codec_id != AV_CODEC_ID_FLV1) {
535  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
536  return AVERROR(EINVAL);
537  }
538 
539  if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
540  av_log(avctx, AV_LOG_ERROR,
541  "OBMC is only supported with simple mb decision\n");
542  return AVERROR(EINVAL);
543  }
544 
545  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
546  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
547  return AVERROR(EINVAL);
548  }
549 
550  if (s->max_b_frames &&
551  s->codec_id != AV_CODEC_ID_MPEG4 &&
552  s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
553  s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
554  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
555  return AVERROR(EINVAL);
556  }
557  if (s->max_b_frames < 0) {
558  av_log(avctx, AV_LOG_ERROR,
559  "max b frames must be 0 or positive for mpegvideo based encoders\n");
560  return AVERROR(EINVAL);
561  }
562 
563  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
564  s->codec_id == AV_CODEC_ID_H263 ||
565  s->codec_id == AV_CODEC_ID_H263P) &&
566  (avctx->sample_aspect_ratio.num > 255 ||
567  avctx->sample_aspect_ratio.den > 255)) {
568  av_log(avctx, AV_LOG_WARNING,
569  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
572  avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
573  }
574 
575  if ((s->codec_id == AV_CODEC_ID_H263 ||
576  s->codec_id == AV_CODEC_ID_H263P) &&
577  (avctx->width > 2048 ||
578  avctx->height > 1152 )) {
579  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
580  return AVERROR(EINVAL);
581  }
582  if ((s->codec_id == AV_CODEC_ID_H263 ||
583  s->codec_id == AV_CODEC_ID_H263P) &&
584  ((avctx->width &3) ||
585  (avctx->height&3) )) {
586  av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
587  return AVERROR(EINVAL);
588  }
589 
590  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
591  (avctx->width > 4095 ||
592  avctx->height > 4095 )) {
593  av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
594  return AVERROR(EINVAL);
595  }
596 
597  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
598  (avctx->width > 16383 ||
599  avctx->height > 16383 )) {
600  av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
601  return AVERROR(EINVAL);
602  }
603 
604  if (s->codec_id == AV_CODEC_ID_RV10 &&
605  (avctx->width &15 ||
606  avctx->height&15 )) {
607  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
608  return AVERROR(EINVAL);
609  }
610 
611  if (s->codec_id == AV_CODEC_ID_RV20 &&
612  (avctx->width &3 ||
613  avctx->height&3 )) {
614  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
615  return AVERROR(EINVAL);
616  }
617 
618  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
619  s->codec_id == AV_CODEC_ID_WMV2) &&
620  avctx->width & 1) {
621  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
622  return AVERROR(EINVAL);
623  }
624 
626  s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
627  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
628  return AVERROR(EINVAL);
629  }
630 
631 #if FF_API_PRIVATE_OPT
633  if (avctx->mpeg_quant)
634  s->mpeg_quant = 1;
636 #endif
637 
638  // FIXME mpeg2 uses that too
639  if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
640  && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
641  av_log(avctx, AV_LOG_ERROR,
642  "mpeg2 style quantization not supported by codec\n");
643  return AVERROR(EINVAL);
644  }
645 
646  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
647  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
648  return AVERROR(EINVAL);
649  }
650 
651  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
652  avctx->mb_decision != FF_MB_DECISION_RD) {
653  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
654  return AVERROR(EINVAL);
655  }
656 
657  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
658  (s->codec_id == AV_CODEC_ID_AMV ||
659  s->codec_id == AV_CODEC_ID_MJPEG)) {
660  // Used to produce garbage with MJPEG.
661  av_log(avctx, AV_LOG_ERROR,
662  "QP RD is no longer compatible with MJPEG or AMV\n");
663  return AVERROR(EINVAL);
664  }
665 
666 #if FF_API_PRIVATE_OPT
668  if (avctx->scenechange_threshold)
669  s->scenechange_threshold = avctx->scenechange_threshold;
671 #endif
672 
673  if (s->scenechange_threshold < 1000000000 &&
674  (avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
675  av_log(avctx, AV_LOG_ERROR,
676  "closed gop with scene change detection are not supported yet, "
677  "set threshold to 1000000000\n");
678  return AVERROR_PATCHWELCOME;
679  }
680 
681  if (avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
682  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
683  s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
684  av_log(avctx, AV_LOG_ERROR,
685  "low delay forcing is only available for mpeg2, "
686  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
687  return AVERROR(EINVAL);
688  }
689  if (s->max_b_frames != 0) {
690  av_log(avctx, AV_LOG_ERROR,
691  "B-frames cannot be used with low delay\n");
692  return AVERROR(EINVAL);
693  }
694  }
695 
696  if (s->q_scale_type == 1) {
697  if (avctx->qmax > 28) {
698  av_log(avctx, AV_LOG_ERROR,
699  "non linear quant only supports qmax <= 28 currently\n");
700  return AVERROR_PATCHWELCOME;
701  }
702  }
703 
704  if (avctx->slices > 1 &&
705  (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
706  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
707  return AVERROR(EINVAL);
708  }
709 
710  if (avctx->thread_count > 1 &&
711  s->codec_id != AV_CODEC_ID_MPEG4 &&
712  s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
713  s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
714  s->codec_id != AV_CODEC_ID_MJPEG &&
715  (s->codec_id != AV_CODEC_ID_H263P)) {
716  av_log(avctx, AV_LOG_ERROR,
717  "multi threaded encoding not supported by codec\n");
718  return AVERROR_PATCHWELCOME;
719  }
720 
721  if (avctx->thread_count < 1) {
722  av_log(avctx, AV_LOG_ERROR,
723  "automatic thread number detection not supported by codec, "
724  "patch welcome\n");
725  return AVERROR_PATCHWELCOME;
726  }
727 
728  if (!avctx->time_base.den || !avctx->time_base.num) {
729  av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
730  return AVERROR(EINVAL);
731  }
732 
733 #if FF_API_PRIVATE_OPT
735  if (avctx->b_frame_strategy)
736  s->b_frame_strategy = avctx->b_frame_strategy;
737  if (avctx->b_sensitivity != 40)
738  s->b_sensitivity = avctx->b_sensitivity;
740 #endif
741 
742  if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
743  av_log(avctx, AV_LOG_INFO,
744  "notice: b_frame_strategy only affects the first pass\n");
745  s->b_frame_strategy = 0;
746  }
747 
748  i = av_gcd(avctx->time_base.den, avctx->time_base.num);
749  if (i > 1) {
750  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
751  avctx->time_base.den /= i;
752  avctx->time_base.num /= i;
753  //return -1;
754  }
755 
756  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
757  // (a + x * 3 / 8) / x
758  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
759  s->inter_quant_bias = 0;
760  } else {
761  s->intra_quant_bias = 0;
762  // (a - x / 4) / x
763  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
764  }
765 
766  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
767  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
768  return AVERROR(EINVAL);
769  }
770 
771  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
772 
773  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
774  avctx->time_base.den > (1 << 16) - 1) {
775  av_log(avctx, AV_LOG_ERROR,
776  "timebase %d/%d not supported by MPEG 4 standard, "
777  "the maximum admitted value for the timebase denominator "
778  "is %d\n", avctx->time_base.num, avctx->time_base.den,
779  (1 << 16) - 1);
780  return AVERROR(EINVAL);
781  }
782  s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
783 
784  switch (avctx->codec->id) {
786  s->out_format = FMT_MPEG1;
787  s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
788  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
789  break;
791  s->out_format = FMT_MPEG1;
792  s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
793  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
794  s->rtp_mode = 1;
795  break;
796  case AV_CODEC_ID_MJPEG:
797  case AV_CODEC_ID_AMV:
798  s->out_format = FMT_MJPEG;
799  s->intra_only = 1; /* force intra only for jpeg */
802  if ((ret = ff_mjpeg_encode_init(s)) < 0)
803  return ret;
804  avctx->delay = 0;
805  s->low_delay = 1;
806  break;
807  case AV_CODEC_ID_SPEEDHQ:
808  s->out_format = FMT_SPEEDHQ;
809  s->intra_only = 1; /* force intra only for SHQ */
812  if ((ret = ff_speedhq_encode_init(s)) < 0)
813  return ret;
814  avctx->delay = 0;
815  s->low_delay = 1;
816  break;
817  case AV_CODEC_ID_H261:
818  if (!CONFIG_H261_ENCODER)
820  if (ff_h261_get_picture_format(s->width, s->height) < 0) {
821  av_log(avctx, AV_LOG_ERROR,
822  "The specified picture size of %dx%d is not valid for the "
823  "H.261 codec.\nValid sizes are 176x144, 352x288\n",
824  s->width, s->height);
825  return AVERROR(EINVAL);
826  }
827  s->out_format = FMT_H261;
828  avctx->delay = 0;
829  s->low_delay = 1;
830  s->rtp_mode = 0; /* Sliced encoding not supported */
831  break;
832  case AV_CODEC_ID_H263:
833  if (!CONFIG_H263_ENCODER)
836  s->width, s->height) == 8) {
837  av_log(avctx, AV_LOG_ERROR,
838  "The specified picture size of %dx%d is not valid for "
839  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
840  "352x288, 704x576, and 1408x1152. "
841  "Try H.263+.\n", s->width, s->height);
842  return AVERROR(EINVAL);
843  }
844  s->out_format = FMT_H263;
845  avctx->delay = 0;
846  s->low_delay = 1;
847  break;
848  case AV_CODEC_ID_H263P:
849  s->out_format = FMT_H263;
850  s->h263_plus = 1;
851  /* Fx */
852  s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
853  s->modified_quant = s->h263_aic;
854  s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
855  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
856 
857  /* /Fx */
858  /* These are just to be sure */
859  avctx->delay = 0;
860  s->low_delay = 1;
861  break;
862  case AV_CODEC_ID_FLV1:
863  s->out_format = FMT_H263;
864  s->h263_flv = 2; /* format = 1; 11-bit codes */
865  s->unrestricted_mv = 1;
866  s->rtp_mode = 0; /* don't allow GOB */
867  avctx->delay = 0;
868  s->low_delay = 1;
869  break;
870  case AV_CODEC_ID_RV10:
871  s->out_format = FMT_H263;
872  avctx->delay = 0;
873  s->low_delay = 1;
874  break;
875  case AV_CODEC_ID_RV20:
876  s->out_format = FMT_H263;
877  avctx->delay = 0;
878  s->low_delay = 1;
879  s->modified_quant = 1;
880  s->h263_aic = 1;
881  s->h263_plus = 1;
882  s->loop_filter = 1;
883  s->unrestricted_mv = 0;
884  break;
885  case AV_CODEC_ID_MPEG4:
886  s->out_format = FMT_H263;
887  s->h263_pred = 1;
888  s->unrestricted_mv = 1;
889  s->low_delay = s->max_b_frames ? 0 : 1;
890  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
891  break;
893  s->out_format = FMT_H263;
894  s->h263_pred = 1;
895  s->unrestricted_mv = 1;
896  s->msmpeg4_version = 2;
897  avctx->delay = 0;
898  s->low_delay = 1;
899  break;
901  s->out_format = FMT_H263;
902  s->h263_pred = 1;
903  s->unrestricted_mv = 1;
904  s->msmpeg4_version = 3;
905  s->flipflop_rounding = 1;
906  avctx->delay = 0;
907  s->low_delay = 1;
908  break;
909  case AV_CODEC_ID_WMV1:
910  s->out_format = FMT_H263;
911  s->h263_pred = 1;
912  s->unrestricted_mv = 1;
913  s->msmpeg4_version = 4;
914  s->flipflop_rounding = 1;
915  avctx->delay = 0;
916  s->low_delay = 1;
917  break;
918  case AV_CODEC_ID_WMV2:
919  s->out_format = FMT_H263;
920  s->h263_pred = 1;
921  s->unrestricted_mv = 1;
922  s->msmpeg4_version = 5;
923  s->flipflop_rounding = 1;
924  avctx->delay = 0;
925  s->low_delay = 1;
926  break;
927  default:
928  return AVERROR(EINVAL);
929  }
930 
931 #if FF_API_PRIVATE_OPT
933  if (avctx->noise_reduction)
934  s->noise_reduction = avctx->noise_reduction;
936 #endif
937 
938  avctx->has_b_frames = !s->low_delay;
939 
940  s->encoding = 1;
941 
942  s->progressive_frame =
943  s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
945  s->alternate_scan);
946 
947  /* init */
949  if ((ret = ff_mpv_common_init(s)) < 0)
950  return ret;
951 
952  ff_fdctdsp_init(&s->fdsp, avctx);
953  ff_me_cmp_init(&s->mecc, avctx);
954  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
955  ff_pixblockdsp_init(&s->pdsp, avctx);
956  ff_qpeldsp_init(&s->qdsp);
957 
958  if (s->msmpeg4_version) {
959  int ac_stats_size = 2 * 2 * (MAX_LEVEL + 1) * (MAX_RUN + 1) * 2 * sizeof(int);
960  if (!(s->ac_stats = av_mallocz(ac_stats_size)))
961  return AVERROR(ENOMEM);
962  }
963 
964  if (!(avctx->stats_out = av_mallocz(256)) ||
965  !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
966  !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
967  !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
968  !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
969  !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
970  !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
971  !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_PICTURE_COUNT) ||
972  !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_PICTURE_COUNT))
973  return AVERROR(ENOMEM);
974 
975  if (s->noise_reduction) {
976  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
977  return AVERROR(ENOMEM);
978  }
979 
981 
982  if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
983  s->chroma_qscale_table = ff_h263_chroma_qscale_table;
984 
985  if (s->slice_context_count > 1) {
986  s->rtp_mode = 1;
987 
988  if (avctx->codec_id == AV_CODEC_ID_H263P)
989  s->h263_slice_structured = 1;
990  }
991 
992  s->quant_precision = 5;
993 
994 #if FF_API_PRIVATE_OPT
996  if (avctx->frame_skip_threshold)
997  s->frame_skip_threshold = avctx->frame_skip_threshold;
998  if (avctx->frame_skip_factor)
999  s->frame_skip_factor = avctx->frame_skip_factor;
1000  if (avctx->frame_skip_exp)
1001  s->frame_skip_exp = avctx->frame_skip_exp;
1002  if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
1003  s->frame_skip_cmp = avctx->frame_skip_cmp;
1005 #endif
1006 
1007  ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
1008  ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
1009 
1010  if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
1012  if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
1014  if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
1017  && s->out_format == FMT_MPEG1)
1019 
1020  /* init q matrix */
1021  for (i = 0; i < 64; i++) {
1022  int j = s->idsp.idct_permutation[i];
1023  if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
1024  s->mpeg_quant) {
1025  s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
1026  s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
1027  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1028  s->intra_matrix[j] =
1029  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1030  } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
1031  s->intra_matrix[j] =
1032  s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1033  } else {
1034  /* MPEG-1/2 */
1035  s->chroma_intra_matrix[j] =
1036  s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1037  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1038  }
1039  if (avctx->intra_matrix)
1040  s->intra_matrix[j] = avctx->intra_matrix[i];
1041  if (avctx->inter_matrix)
1042  s->inter_matrix[j] = avctx->inter_matrix[i];
1043  }
1044 
1045  /* precompute matrix */
1046  /* for mjpeg, we do include qscale in the matrix */
1047  if (s->out_format != FMT_MJPEG) {
1048  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
1049  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1050  31, 1);
1051  ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
1052  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1053  31, 0);
1054  }
1055 
1056  if ((ret = ff_rate_control_init(s)) < 0)
1057  return ret;
1058 
1059 #if FF_API_PRIVATE_OPT
1061  if (avctx->brd_scale)
1062  s->brd_scale = avctx->brd_scale;
1063 
1064  if (avctx->prediction_method)
1065  s->pred = avctx->prediction_method + 1;
1067 #endif
1068 
1069  if (s->b_frame_strategy == 2) {
1070  for (i = 0; i < s->max_b_frames + 2; i++) {
1071  s->tmp_frames[i] = av_frame_alloc();
1072  if (!s->tmp_frames[i])
1073  return AVERROR(ENOMEM);
1074 
1075  s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1076  s->tmp_frames[i]->width = s->width >> s->brd_scale;
1077  s->tmp_frames[i]->height = s->height >> s->brd_scale;
1078 
1079  ret = av_frame_get_buffer(s->tmp_frames[i], 0);
1080  if (ret < 0)
1081  return ret;
1082  }
1083  }
1084 
1085  cpb_props = ff_add_cpb_side_data(avctx);
1086  if (!cpb_props)
1087  return AVERROR(ENOMEM);
1088  cpb_props->max_bitrate = avctx->rc_max_rate;
1089  cpb_props->min_bitrate = avctx->rc_min_rate;
1090  cpb_props->avg_bitrate = avctx->bit_rate;
1091  cpb_props->buffer_size = avctx->rc_buffer_size;
1092 
1093  return 0;
1094 }
1095 
1097 {
1098  MpegEncContext *s = avctx->priv_data;
1099  int i;
1100 
1102 
1104  if (CONFIG_MJPEG_ENCODER &&
1105  s->out_format == FMT_MJPEG)
1107 
1108  av_freep(&avctx->extradata);
1109 
1110  for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1111  av_frame_free(&s->tmp_frames[i]);
1112 
1113  ff_free_picture_tables(&s->new_picture);
1114  ff_mpeg_unref_picture(avctx, &s->new_picture);
1115 
1116  av_freep(&avctx->stats_out);
1117  av_freep(&s->ac_stats);
1118 
1119  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1120  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1121  s->q_chroma_intra_matrix= NULL;
1122  s->q_chroma_intra_matrix16= NULL;
1123  av_freep(&s->q_intra_matrix);
1124  av_freep(&s->q_inter_matrix);
1125  av_freep(&s->q_intra_matrix16);
1126  av_freep(&s->q_inter_matrix16);
1127  av_freep(&s->input_picture);
1128  av_freep(&s->reordered_input_picture);
1129  av_freep(&s->dct_offset);
1130 
1131  return 0;
1132 }
1133 
/**
 * Sum of absolute errors of a 16x16 block against a constant value.
 *
 * @param src    top-left of the 16x16 block
 * @param ref    constant value every pixel is compared against
 * @param stride distance in bytes between the starts of consecutive rows
 * @return sum over all 256 pixels of |src[pixel] - ref|
 */
static int get_sae(uint8_t *src, int ref, int stride)
{
    int total = 0;

    for (int row = 0; row < 16; row++) {
        const uint8_t *line = src + row * stride;
        for (int col = 0; col < 16; col++) {
            int diff = line[col] - ref;
            total += diff >= 0 ? diff : -diff;
        }
    }

    return total;
}
1147 
1149  uint8_t *ref, int stride)
1150 {
1151  int x, y, w, h;
1152  int acc = 0;
1153 
1154  w = s->width & ~15;
1155  h = s->height & ~15;
1156 
1157  for (y = 0; y < h; y += 16) {
1158  for (x = 0; x < w; x += 16) {
1159  int offset = x + y * stride;
1160  int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
1161  stride, 16);
1162  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1163  int sae = get_sae(src + offset, mean, stride);
1164 
1165  acc += sae + 500 < sad;
1166  }
1167  }
1168  return acc;
1169 }
1170 
1171 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1172 {
1173  return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1174  s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1175  s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1176  &s->linesize, &s->uvlinesize);
1177 }
1178 
/**
 * Queue one user-supplied frame for encoding.
 *
 * Wraps (when "direct" is possible) or copies pic_arg into an internal
 * Picture, assigns it a pts — validating monotonicity or guessing when the
 * caller supplied none — and stores it at s->input_picture[encoding_delay],
 * shifting the queue. A NULL pic_arg signals flushing.
 *
 * Returns 0 on success, a negative error code on failure.
 */
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
{
    Picture *pic = NULL;
    int64_t pts;
    int i, display_picture_number = 0, ret;
    /* how many frames of reordering delay the B-frame setting implies */
    int encoding_delay = s->max_b_frames ? s->max_b_frames
                                         : (s->low_delay ? 0 : 1);
    int flush_offset = 1;
    int direct = 1;

    if (pic_arg) {
        pts = pic_arg->pts;
        display_picture_number = s->input_picture_number++;

        if (pts != AV_NOPTS_VALUE) {
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                int64_t last = s->user_specified_pts;

                /* input timestamps must be strictly increasing */
                if (pts <= last) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
                           pts, last);
                    return AVERROR(EINVAL);
                }

                if (!s->low_delay && display_picture_number == 1)
                    s->dts_delta = pts - last;
            }
            s->user_specified_pts = pts;
        } else {
            /* no pts given: extrapolate from the previous one, or fall
             * back to the display picture number */
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                s->user_specified_pts =
                pts = s->user_specified_pts + 1;
                av_log(s->avctx, AV_LOG_INFO,
                       "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
                       pts);
            } else {
                pts = display_picture_number;
            }
        }

        /* "direct" = reference the caller's buffer instead of copying;
         * requires matching strides, alignment and mb-aligned dimensions */
        if (!pic_arg->buf[0] ||
            pic_arg->linesize[0] != s->linesize ||
            pic_arg->linesize[1] != s->uvlinesize ||
            pic_arg->linesize[2] != s->uvlinesize)
            direct = 0;
        if ((s->width & 15) || (s->height & 15))
            direct = 0;
        if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
            direct = 0;
        if (s->linesize & (STRIDE_ALIGN-1))
            direct = 0;

        ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
                pic_arg->linesize[1], s->linesize, s->uvlinesize);

        i = ff_find_unused_picture(s->avctx, s->picture, direct);
        if (i < 0)
            return i;

        pic = &s->picture[i];
        pic->reference = 3;

        if (direct) {
            if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
                return ret;
        }
        ret = alloc_picture(s, pic, direct);
        if (ret < 0)
            return ret;

        if (!direct) {
            if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
                pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
                pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
                // empty
            } else {
                /* copy the user's frame plane by plane and pad the
                 * bottom/right edges for motion estimation */
                int h_chroma_shift, v_chroma_shift;
                av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                                 &h_chroma_shift,
                                                 &v_chroma_shift);

                for (i = 0; i < 3; i++) {
                    ptrdiff_t src_stride = pic_arg->linesize[i];
                    ptrdiff_t dst_stride = i ? s->uvlinesize : s->linesize;
                    int h_shift = i ? h_chroma_shift : 0;
                    int v_shift = i ? v_chroma_shift : 0;
                    int w = AV_CEIL_RSHIFT(s->width , h_shift);
                    int h = AV_CEIL_RSHIFT(s->height, v_shift);
                    uint8_t *src = pic_arg->data[i];
                    uint8_t *dst = pic->f->data[i];
                    int vpad = 16;

                    /* interlaced MPEG-2 may need taller bottom padding */
                    if (   s->codec_id == AV_CODEC_ID_MPEG2VIDEO
                        && !s->progressive_sequence
                        && FFALIGN(s->height, 32) - s->height > 16)
                        vpad = 32;

                    if (!s->avctx->rc_buffer_size)
                        dst += INPLACE_OFFSET;

                    if (src_stride == dst_stride)
                        /* single memcpy: all rows but only w bytes of the last */
                        memcpy(dst, src, src_stride * h - src_stride + w);
                    else {
                        int h2 = h;
                        uint8_t *dst2 = dst;
                        while (h2--) {
                            memcpy(dst2, src, w);
                            dst2 += dst_stride;
                            src += src_stride;
                        }
                    }
                    if ((s->width & 15) || (s->height & (vpad-1))) {
                        s->mpvencdsp.draw_edges(dst, dst_stride,
                                                w, h,
                                                16 >> h_shift,
                                                vpad >> v_shift,
                                                EDGE_BOTTOM);
                    }
                }
                emms_c();
            }
        }
        ret = av_frame_copy_props(pic->f, pic_arg);
        if (ret < 0)
            return ret;

        pic->f->display_picture_number = display_picture_number;
        pic->f->pts = pts; // we set this here to avoid modifying pic_arg
    } else {
        /* Flushing: When we have not received enough input frames,
         * ensure s->input_picture[0] contains the first picture */
        for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
            if (s->input_picture[flush_offset])
                break;

        if (flush_offset <= 1)
            flush_offset = 1;
        else
            encoding_delay = encoding_delay - flush_offset + 1;
    }

    /* shift buffer entries */
    for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
        s->input_picture[i - flush_offset] = s->input_picture[i];
    /* clear the entries vacated by the shift */
    for (int i = MAX_B_FRAMES + 1 - flush_offset; i <= MAX_B_FRAMES; i++)
        s->input_picture[i] = NULL;

    s->input_picture[encoding_delay] = (Picture*) pic;

    return 0;
}
1331 
1333 {
1334  int x, y, plane;
1335  int score = 0;
1336  int64_t score64 = 0;
1337 
1338  for (plane = 0; plane < 3; plane++) {
1339  const int stride = p->f->linesize[plane];
1340  const int bw = plane ? 1 : 2;
1341  for (y = 0; y < s->mb_height * bw; y++) {
1342  for (x = 0; x < s->mb_width * bw; x++) {
1343  int off = p->shared ? 0 : 16;
1344  uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1345  uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1346  int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1347 
1348  switch (FFABS(s->frame_skip_exp)) {
1349  case 0: score = FFMAX(score, v); break;
1350  case 1: score += FFABS(v); break;
1351  case 2: score64 += v * (int64_t)v; break;
1352  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1353  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1354  }
1355  }
1356  }
1357  }
1358  emms_c();
1359 
1360  if (score)
1361  score64 = score;
1362  if (s->frame_skip_exp < 0)
1363  score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1364  -1.0/s->frame_skip_exp);
1365 
1366  if (score64 < s->frame_skip_threshold)
1367  return 1;
1368  if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
1369  return 1;
1370  return 0;
1371 }
1372 
1374 {
1375  int ret;
1376  int size = 0;
1377 
1378  ret = avcodec_send_frame(c, frame);
1379  if (ret < 0)
1380  return ret;
1381 
1382  do {
1383  ret = avcodec_receive_packet(c, pkt);
1384  if (ret >= 0) {
1385  size += pkt->size;
1387  } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1388  return ret;
1389  } while (ret >= 0);
1390 
1391  return size;
1392 }
1393 
1395 {
1396  const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1397  AVPacket *pkt;
1398  const int scale = s->brd_scale;
1399  int width = s->width >> scale;
1400  int height = s->height >> scale;
1401  int i, j, out_size, p_lambda, b_lambda, lambda2;
1402  int64_t best_rd = INT64_MAX;
1403  int best_b_count = -1;
1404  int ret = 0;
1405 
1406  av_assert0(scale >= 0 && scale <= 3);
1407 
1408  pkt = av_packet_alloc();
1409  if (!pkt)
1410  return AVERROR(ENOMEM);
1411 
1412  //emms_c();
1413  //s->next_picture_ptr->quality;
1414  p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1415  //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1416  b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1417  if (!b_lambda) // FIXME we should do this somewhere else
1418  b_lambda = p_lambda;
1419  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1421 
1422  for (i = 0; i < s->max_b_frames + 2; i++) {
1423  Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1424  s->next_picture_ptr;
1425  uint8_t *data[4];
1426 
1427  if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1428  pre_input = *pre_input_ptr;
1429  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1430 
1431  if (!pre_input.shared && i) {
1432  data[0] += INPLACE_OFFSET;
1433  data[1] += INPLACE_OFFSET;
1434  data[2] += INPLACE_OFFSET;
1435  }
1436 
1437  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1438  s->tmp_frames[i]->linesize[0],
1439  data[0],
1440  pre_input.f->linesize[0],
1441  width, height);
1442  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1443  s->tmp_frames[i]->linesize[1],
1444  data[1],
1445  pre_input.f->linesize[1],
1446  width >> 1, height >> 1);
1447  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1448  s->tmp_frames[i]->linesize[2],
1449  data[2],
1450  pre_input.f->linesize[2],
1451  width >> 1, height >> 1);
1452  }
1453  }
1454 
1455  for (j = 0; j < s->max_b_frames + 1; j++) {
1456  AVCodecContext *c;
1457  int64_t rd = 0;
1458 
1459  if (!s->input_picture[j])
1460  break;
1461 
1463  if (!c) {
1464  ret = AVERROR(ENOMEM);
1465  goto fail;
1466  }
1467 
1468  c->width = width;
1469  c->height = height;
1471  c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1472  c->mb_decision = s->avctx->mb_decision;
1473  c->me_cmp = s->avctx->me_cmp;
1474  c->mb_cmp = s->avctx->mb_cmp;
1475  c->me_sub_cmp = s->avctx->me_sub_cmp;
1476  c->pix_fmt = AV_PIX_FMT_YUV420P;
1477  c->time_base = s->avctx->time_base;
1478  c->max_b_frames = s->max_b_frames;
1479 
1480  ret = avcodec_open2(c, codec, NULL);
1481  if (ret < 0)
1482  goto fail;
1483 
1484 
1485  s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1486  s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1487 
1488  out_size = encode_frame(c, s->tmp_frames[0], pkt);
1489  if (out_size < 0) {
1490  ret = out_size;
1491  goto fail;
1492  }
1493 
1494  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1495 
1496  for (i = 0; i < s->max_b_frames + 1; i++) {
1497  int is_p = i % (j + 1) == j || i == s->max_b_frames;
1498 
1499  s->tmp_frames[i + 1]->pict_type = is_p ?
1501  s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1502 
1503  out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
1504  if (out_size < 0) {
1505  ret = out_size;
1506  goto fail;
1507  }
1508 
1509  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1510  }
1511 
1512  /* get the delayed frames */
1514  if (out_size < 0) {
1515  ret = out_size;
1516  goto fail;
1517  }
1518  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1519 
1520  rd += c->error[0] + c->error[1] + c->error[2];
1521 
1522  if (rd < best_rd) {
1523  best_rd = rd;
1524  best_b_count = j;
1525  }
1526 
1527 fail:
1530  if (ret < 0) {
1531  best_b_count = ret;
1532  break;
1533  }
1534  }
1535 
1536  av_packet_free(&pkt);
1537 
1538  return best_b_count;
1539 }
1540 
1542 {
1543  int i, ret;
1544 
1545  for (i = 1; i < MAX_PICTURE_COUNT; i++)
1546  s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1547  s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1548 
1549  /* set next picture type & ordering */
1550  if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1551  if (s->frame_skip_threshold || s->frame_skip_factor) {
1552  if (s->picture_in_gop_number < s->gop_size &&
1553  s->next_picture_ptr &&
1554  skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1555  // FIXME check that the gop check above is +-1 correct
1556  av_frame_unref(s->input_picture[0]->f);
1557 
1558  ff_vbv_update(s, 0);
1559 
1560  goto no_output_pic;
1561  }
1562  }
1563 
1564  if (/*s->picture_in_gop_number >= s->gop_size ||*/
1565  !s->next_picture_ptr || s->intra_only) {
1566  s->reordered_input_picture[0] = s->input_picture[0];
1567  s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1568  s->reordered_input_picture[0]->f->coded_picture_number =
1569  s->coded_picture_number++;
1570  } else {
1571  int b_frames = 0;
1572 
1573  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1574  for (i = 0; i < s->max_b_frames + 1; i++) {
1575  int pict_num = s->input_picture[0]->f->display_picture_number + i;
1576 
1577  if (pict_num >= s->rc_context.num_entries)
1578  break;
1579  if (!s->input_picture[i]) {
1580  s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1581  break;
1582  }
1583 
1584  s->input_picture[i]->f->pict_type =
1585  s->rc_context.entry[pict_num].new_pict_type;
1586  }
1587  }
1588 
1589  if (s->b_frame_strategy == 0) {
1590  b_frames = s->max_b_frames;
1591  while (b_frames && !s->input_picture[b_frames])
1592  b_frames--;
1593  } else if (s->b_frame_strategy == 1) {
1594  for (i = 1; i < s->max_b_frames + 1; i++) {
1595  if (s->input_picture[i] &&
1596  s->input_picture[i]->b_frame_score == 0) {
1597  s->input_picture[i]->b_frame_score =
1599  s->input_picture[i ]->f->data[0],
1600  s->input_picture[i - 1]->f->data[0],
1601  s->linesize) + 1;
1602  }
1603  }
1604  for (i = 0; i < s->max_b_frames + 1; i++) {
1605  if (!s->input_picture[i] ||
1606  s->input_picture[i]->b_frame_score - 1 >
1607  s->mb_num / s->b_sensitivity)
1608  break;
1609  }
1610 
1611  b_frames = FFMAX(0, i - 1);
1612 
1613  /* reset scores */
1614  for (i = 0; i < b_frames + 1; i++) {
1615  s->input_picture[i]->b_frame_score = 0;
1616  }
1617  } else if (s->b_frame_strategy == 2) {
1618  b_frames = estimate_best_b_count(s);
1619  if (b_frames < 0)
1620  return b_frames;
1621  }
1622 
1623  emms_c();
1624 
1625  for (i = b_frames - 1; i >= 0; i--) {
1626  int type = s->input_picture[i]->f->pict_type;
1627  if (type && type != AV_PICTURE_TYPE_B)
1628  b_frames = i;
1629  }
1630  if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1631  b_frames == s->max_b_frames) {
1632  av_log(s->avctx, AV_LOG_ERROR,
1633  "warning, too many B-frames in a row\n");
1634  }
1635 
1636  if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1637  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1638  s->gop_size > s->picture_in_gop_number) {
1639  b_frames = s->gop_size - s->picture_in_gop_number - 1;
1640  } else {
1641  if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1642  b_frames = 0;
1643  s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1644  }
1645  }
1646 
1647  if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1648  s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1649  b_frames--;
1650 
1651  s->reordered_input_picture[0] = s->input_picture[b_frames];
1652  if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1653  s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1654  s->reordered_input_picture[0]->f->coded_picture_number =
1655  s->coded_picture_number++;
1656  for (i = 0; i < b_frames; i++) {
1657  s->reordered_input_picture[i + 1] = s->input_picture[i];
1658  s->reordered_input_picture[i + 1]->f->pict_type =
1660  s->reordered_input_picture[i + 1]->f->coded_picture_number =
1661  s->coded_picture_number++;
1662  }
1663  }
1664  }
1665 no_output_pic:
1666  ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1667 
1668  if (s->reordered_input_picture[0]) {
1669  s->reordered_input_picture[0]->reference =
1670  s->reordered_input_picture[0]->f->pict_type !=
1671  AV_PICTURE_TYPE_B ? 3 : 0;
1672 
1673  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1674  return ret;
1675 
1676  if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1677  // input is a shared pix, so we can't modify it -> allocate a new
1678  // one & ensure that the shared one is reuseable
1679 
1680  Picture *pic;
1681  int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1682  if (i < 0)
1683  return i;
1684  pic = &s->picture[i];
1685 
1686  pic->reference = s->reordered_input_picture[0]->reference;
1687  if (alloc_picture(s, pic, 0) < 0) {
1688  return -1;
1689  }
1690 
1691  ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1692  if (ret < 0)
1693  return ret;
1694 
1695  /* mark us unused / free shared pic */
1696  av_frame_unref(s->reordered_input_picture[0]->f);
1697  s->reordered_input_picture[0]->shared = 0;
1698 
1699  s->current_picture_ptr = pic;
1700  } else {
1701  // input is not a shared pix -> reuse buffer for current_pix
1702  s->current_picture_ptr = s->reordered_input_picture[0];
1703  for (i = 0; i < 4; i++) {
1704  if (s->new_picture.f->data[i])
1705  s->new_picture.f->data[i] += INPLACE_OFFSET;
1706  }
1707  }
1708  ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1709  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1710  s->current_picture_ptr)) < 0)
1711  return ret;
1712 
1713  s->picture_number = s->new_picture.f->display_picture_number;
1714  }
1715  return 0;
1716 }
1717 
1719 {
1720  if (s->unrestricted_mv &&
1721  s->current_picture.reference &&
1722  !s->intra_only) {
1723  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1724  int hshift = desc->log2_chroma_w;
1725  int vshift = desc->log2_chroma_h;
1726  s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1727  s->current_picture.f->linesize[0],
1728  s->h_edge_pos, s->v_edge_pos,
1730  EDGE_TOP | EDGE_BOTTOM);
1731  s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1732  s->current_picture.f->linesize[1],
1733  s->h_edge_pos >> hshift,
1734  s->v_edge_pos >> vshift,
1735  EDGE_WIDTH >> hshift,
1736  EDGE_WIDTH >> vshift,
1737  EDGE_TOP | EDGE_BOTTOM);
1738  s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1739  s->current_picture.f->linesize[2],
1740  s->h_edge_pos >> hshift,
1741  s->v_edge_pos >> vshift,
1742  EDGE_WIDTH >> hshift,
1743  EDGE_WIDTH >> vshift,
1744  EDGE_TOP | EDGE_BOTTOM);
1745  }
1746 
1747  emms_c();
1748 
1749  s->last_pict_type = s->pict_type;
1750  s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1751  if (s->pict_type!= AV_PICTURE_TYPE_B)
1752  s->last_non_b_pict_type = s->pict_type;
1753 
1754 #if FF_API_CODED_FRAME
1756  av_frame_unref(s->avctx->coded_frame);
1757  av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1759 #endif
1760 #if FF_API_ERROR_FRAME
1762  memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1763  sizeof(s->current_picture.encoding_error));
1765 #endif
1766 }
1767 
1769 {
1770  int intra, i;
1771 
1772  for (intra = 0; intra < 2; intra++) {
1773  if (s->dct_count[intra] > (1 << 16)) {
1774  for (i = 0; i < 64; i++) {
1775  s->dct_error_sum[intra][i] >>= 1;
1776  }
1777  s->dct_count[intra] >>= 1;
1778  }
1779 
1780  for (i = 0; i < 64; i++) {
1781  s->dct_offset[intra][i] = (s->noise_reduction *
1782  s->dct_count[intra] +
1783  s->dct_error_sum[intra][i] / 2) /
1784  (s->dct_error_sum[intra][i] + 1);
1785  }
1786  }
1787 }
1788 
1790 {
1791  int ret;
1792 
1793  /* mark & release old frames */
1794  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1795  s->last_picture_ptr != s->next_picture_ptr &&
1796  s->last_picture_ptr->f->buf[0]) {
1797  ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1798  }
1799 
1800  s->current_picture_ptr->f->pict_type = s->pict_type;
1801  s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1802 
1803  ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1804  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1805  s->current_picture_ptr)) < 0)
1806  return ret;
1807 
1808  if (s->pict_type != AV_PICTURE_TYPE_B) {
1809  s->last_picture_ptr = s->next_picture_ptr;
1810  if (!s->droppable)
1811  s->next_picture_ptr = s->current_picture_ptr;
1812  }
1813 
1814  if (s->last_picture_ptr) {
1815  ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1816  if (s->last_picture_ptr->f->buf[0] &&
1817  (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1818  s->last_picture_ptr)) < 0)
1819  return ret;
1820  }
1821  if (s->next_picture_ptr) {
1822  ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1823  if (s->next_picture_ptr->f->buf[0] &&
1824  (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1825  s->next_picture_ptr)) < 0)
1826  return ret;
1827  }
1828 
1829  if (s->picture_structure!= PICT_FRAME) {
1830  int i;
1831  for (i = 0; i < 4; i++) {
1832  if (s->picture_structure == PICT_BOTTOM_FIELD) {
1833  s->current_picture.f->data[i] +=
1834  s->current_picture.f->linesize[i];
1835  }
1836  s->current_picture.f->linesize[i] *= 2;
1837  s->last_picture.f->linesize[i] *= 2;
1838  s->next_picture.f->linesize[i] *= 2;
1839  }
1840  }
1841 
1842  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1843  s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1844  s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1845  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1846  s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1847  s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1848  } else {
1849  s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1850  s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1851  }
1852 
1853  if (s->dct_error_sum) {
1854  av_assert2(s->noise_reduction && s->encoding);
1856  }
1857 
1858  return 0;
1859 }
1860 
1862  const AVFrame *pic_arg, int *got_packet)
1863 {
1864  MpegEncContext *s = avctx->priv_data;
1865  int i, stuffing_count, ret;
1866  int context_count = s->slice_context_count;
1867 
1868  s->vbv_ignore_qmax = 0;
1869 
1870  s->picture_in_gop_number++;
1871 
1872  if (load_input_picture(s, pic_arg) < 0)
1873  return -1;
1874 
1875  if (select_input_picture(s) < 0) {
1876  return -1;
1877  }
1878 
1879  /* output? */
1880  if (s->new_picture.f->data[0]) {
1881  int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1882  int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1883  :
1884  s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1885  if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1886  return ret;
1887  if (s->mb_info) {
1888  s->mb_info_ptr = av_packet_new_side_data(pkt,
1890  s->mb_width*s->mb_height*12);
1891  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1892  }
1893 
1894  for (i = 0; i < context_count; i++) {
1895  int start_y = s->thread_context[i]->start_mb_y;
1896  int end_y = s->thread_context[i]-> end_mb_y;
1897  int h = s->mb_height;
1898  uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1899  uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1900 
1901  init_put_bits(&s->thread_context[i]->pb, start, end - start);
1902  }
1903 
1904  s->pict_type = s->new_picture.f->pict_type;
1905  //emms_c();
1906  ret = frame_start(s);
1907  if (ret < 0)
1908  return ret;
1909 vbv_retry:
1910  ret = encode_picture(s, s->picture_number);
1911  if (growing_buffer) {
1912  av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1913  pkt->data = s->pb.buf;
1914  pkt->size = avctx->internal->byte_buffer_size;
1915  }
1916  if (ret < 0)
1917  return -1;
1918 
1919 #if FF_API_STAT_BITS
1921  avctx->header_bits = s->header_bits;
1922  avctx->mv_bits = s->mv_bits;
1923  avctx->misc_bits = s->misc_bits;
1924  avctx->i_tex_bits = s->i_tex_bits;
1925  avctx->p_tex_bits = s->p_tex_bits;
1926  avctx->i_count = s->i_count;
1927  // FIXME f/b_count in avctx
1928  avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1929  avctx->skip_count = s->skip_count;
1931 #endif
1932 
1933  frame_end(s);
1934 
1935  if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1936  ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
1937 
1938  if (avctx->rc_buffer_size) {
1939  RateControlContext *rcc = &s->rc_context;
1940  int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1941  int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1942  int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1943 
1944  if (put_bits_count(&s->pb) > max_size &&
1945  s->lambda < s->lmax) {
1946  s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1947  (s->qscale + 1) / s->qscale);
1948  if (s->adaptive_quant) {
1949  int i;
1950  for (i = 0; i < s->mb_height * s->mb_stride; i++)
1951  s->lambda_table[i] =
1952  FFMAX(s->lambda_table[i] + min_step,
1953  s->lambda_table[i] * (s->qscale + 1) /
1954  s->qscale);
1955  }
1956  s->mb_skipped = 0; // done in frame_start()
1957  // done in encode_picture() so we must undo it
1958  if (s->pict_type == AV_PICTURE_TYPE_P) {
1959  if (s->flipflop_rounding ||
1960  s->codec_id == AV_CODEC_ID_H263P ||
1961  s->codec_id == AV_CODEC_ID_MPEG4)
1962  s->no_rounding ^= 1;
1963  }
1964  if (s->pict_type != AV_PICTURE_TYPE_B) {
1965  s->time_base = s->last_time_base;
1966  s->last_non_b_time = s->time - s->pp_time;
1967  }
1968  for (i = 0; i < context_count; i++) {
1969  PutBitContext *pb = &s->thread_context[i]->pb;
1970  init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1971  }
1972  s->vbv_ignore_qmax = 1;
1973  av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1974  goto vbv_retry;
1975  }
1976 
1977  av_assert0(avctx->rc_max_rate);
1978  }
1979 
1980  if (avctx->flags & AV_CODEC_FLAG_PASS1)
1982 
1983  for (i = 0; i < 4; i++) {
1984  s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1985  avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1986  }
1987  ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1988  s->current_picture_ptr->encoding_error,
1989  (avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1990  s->pict_type);
1991 
1992  if (avctx->flags & AV_CODEC_FLAG_PASS1)
1993  assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1994  s->misc_bits + s->i_tex_bits +
1995  s->p_tex_bits);
1996  flush_put_bits(&s->pb);
1997  s->frame_bits = put_bits_count(&s->pb);
1998 
1999  stuffing_count = ff_vbv_update(s, s->frame_bits);
2000  s->stuffing_bits = 8*stuffing_count;
2001  if (stuffing_count) {
2002  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
2003  stuffing_count + 50) {
2004  av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
2005  return -1;
2006  }
2007 
2008  switch (s->codec_id) {
2011  while (stuffing_count--) {
2012  put_bits(&s->pb, 8, 0);
2013  }
2014  break;
2015  case AV_CODEC_ID_MPEG4:
2016  put_bits(&s->pb, 16, 0);
2017  put_bits(&s->pb, 16, 0x1C3);
2018  stuffing_count -= 4;
2019  while (stuffing_count--) {
2020  put_bits(&s->pb, 8, 0xFF);
2021  }
2022  break;
2023  default:
2024  av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2025  s->stuffing_bits = 0;
2026  }
2027  flush_put_bits(&s->pb);
2028  s->frame_bits = put_bits_count(&s->pb);
2029  }
2030 
2031  /* update MPEG-1/2 vbv_delay for CBR */
2032  if (avctx->rc_max_rate &&
2033  avctx->rc_min_rate == avctx->rc_max_rate &&
2034  s->out_format == FMT_MPEG1 &&
2035  90000LL * (avctx->rc_buffer_size - 1) <=
2036  avctx->rc_max_rate * 0xFFFFLL) {
2037  AVCPBProperties *props;
2038  size_t props_size;
2039 
2040  int vbv_delay, min_delay;
2041  double inbits = avctx->rc_max_rate *
2042  av_q2d(avctx->time_base);
2043  int minbits = s->frame_bits - 8 *
2044  (s->vbv_delay_ptr - s->pb.buf - 1);
2045  double bits = s->rc_context.buffer_index + minbits - inbits;
2046 
2047  if (bits < 0)
2048  av_log(avctx, AV_LOG_ERROR,
2049  "Internal error, negative bits\n");
2050 
2051  av_assert1(s->repeat_first_field == 0);
2052 
2053  vbv_delay = bits * 90000 / avctx->rc_max_rate;
2054  min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
2055  avctx->rc_max_rate;
2056 
2057  vbv_delay = FFMAX(vbv_delay, min_delay);
2058 
2059  av_assert0(vbv_delay < 0xFFFF);
2060 
2061  s->vbv_delay_ptr[0] &= 0xF8;
2062  s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2063  s->vbv_delay_ptr[1] = vbv_delay >> 5;
2064  s->vbv_delay_ptr[2] &= 0x07;
2065  s->vbv_delay_ptr[2] |= vbv_delay << 3;
2066 
2067  props = av_cpb_properties_alloc(&props_size);
2068  if (!props)
2069  return AVERROR(ENOMEM);
2070  props->vbv_delay = vbv_delay * 300;
2071 
2073  (uint8_t*)props, props_size);
2074  if (ret < 0) {
2075  av_freep(&props);
2076  return ret;
2077  }
2078 
2079 #if FF_API_VBV_DELAY
2081  avctx->vbv_delay = vbv_delay * 300;
2083 #endif
2084  }
2085  s->total_bits += s->frame_bits;
2086 #if FF_API_STAT_BITS
2088  avctx->frame_bits = s->frame_bits;
2090 #endif
2091 
2092 
2093  pkt->pts = s->current_picture.f->pts;
2094  if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2095  if (!s->current_picture.f->coded_picture_number)
2096  pkt->dts = pkt->pts - s->dts_delta;
2097  else
2098  pkt->dts = s->reordered_pts;
2099  s->reordered_pts = pkt->pts;
2100  } else
2101  pkt->dts = pkt->pts;
2102  if (s->current_picture.f->key_frame)
2104  if (s->mb_info)
2106  } else {
2107  s->frame_bits = 0;
2108  }
2109 
2110  /* release non-reference frames */
2111  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2112  if (!s->picture[i].reference)
2113  ff_mpeg_unref_picture(avctx, &s->picture[i]);
2114  }
2115 
2116  av_assert1((s->frame_bits & 7) == 0);
2117 
2118  pkt->size = s->frame_bits / 8;
2119  *got_packet = !!pkt->size;
2120  return 0;
2121 }
2122 
                                            int n, int threshold)
{
    /* Eliminate blocks that contain only a few small (+-1) coefficients:
     * if the run-length-weighted score of the +-1 levels stays below
     * |threshold|, the whole block is zeroed out, as coding it would cost
     * more bits than the quality it adds.  A negative threshold means the
     * DC coefficient takes part in the scoring as well. */
    static const char tab[64] = {
        3, 2, 2, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0
    };
    int score = 0;   // accumulated cost of the +-1 coefficients seen so far
    int run = 0;     // zero-run length preceding the current coefficient
    int i;
    int16_t *block = s->block[n];
    const int last_index = s->block_last_index[n];
    int skip_dc;     // nonzero: DC (index 0) is exempt from elimination

    if (threshold < 0) {
        skip_dc = 0;
        threshold = -threshold;
    } else
        skip_dc = 1;

    /* Are all we could set to zero already zero? */
    if (last_index <= skip_dc - 1)
        return;

    for (i = 0; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        const int level = FFABS(block[j]);
        if (level == 1) {
            if (skip_dc && i == 0)
                continue;
            /* short runs before a +-1 are cheap to code, long runs are not */
            score += tab[run];
            run = 0;
        } else if (level > 1) {
            /* any coefficient with |level| > 1 makes the block worth keeping */
            return;
        } else {
            run++;
        }
    }
    if (score >= threshold)
        return;
    /* block deemed not worth coding: clear every (non-DC) coefficient */
    for (i = skip_dc; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        block[j] = 0;
    }
    if (block[0])
        s->block_last_index[n] = 0;
    else
        s->block_last_index[n] = -1;
}
2178 
2179 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2180  int last_index)
2181 {
2182  int i;
2183  const int maxlevel = s->max_qcoeff;
2184  const int minlevel = s->min_qcoeff;
2185  int overflow = 0;
2186 
2187  if (s->mb_intra) {
2188  i = 1; // skip clipping of intra dc
2189  } else
2190  i = 0;
2191 
2192  for (; i <= last_index; i++) {
2193  const int j = s->intra_scantable.permutated[i];
2194  int level = block[j];
2195 
2196  if (level > maxlevel) {
2197  level = maxlevel;
2198  overflow++;
2199  } else if (level < minlevel) {
2200  level = minlevel;
2201  overflow++;
2202  }
2203 
2204  block[j] = level;
2205  }
2206 
2207  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2208  av_log(s->avctx, AV_LOG_INFO,
2209  "warning, clipping %d dct coefficients to %d..%d\n",
2210  overflow, minlevel, maxlevel);
2211 }
2212 
/**
 * Compute a perceptual weight for each of the 64 pixels of an 8x8 block.
 * For every pixel, the local standard deviation over its (clipped) 3x3
 * neighbourhood is estimated via 36*sqrt(count*sqr - sum^2)/count.
 */
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
{
    int row, col, yy, xx;
    // FIXME optimize
    for (row = 0; row < 8; row++) {
        for (col = 0; col < 8; col++) {
            int sum = 0, sqr = 0, count = 0;
            /* neighbourhood bounds, clipped at the block edges */
            const int y_lo = FFMAX(row - 1, 0), y_hi = FFMIN(8, row + 2);
            const int x_lo = FFMAX(col - 1, 0), x_hi = FFMIN(8, col + 2);

            for (yy = y_lo; yy < y_hi; yy++) {
                for (xx = x_lo; xx < x_hi; xx++) {
                    const int v = ptr[xx + yy * stride];
                    sum += v;
                    sqr += v * v;
                    count++;
                }
            }
            weight[col + 8 * row] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
2236 
                                                int motion_x, int motion_y,
                                                int mb_block_height,
                                                int mb_block_width,
                                                int mb_block_count)
{
    /* Encode one macroblock: fetch (or motion-compensate) the source
     * pixels, run DCT + quantization on every block, optionally apply
     * noise shaping / coefficient elimination, then emit the bitstream
     * via the per-codec macroblock encoder.
     * mb_block_{height,width,count} depend on chroma subsampling
     * (420: 8x8x6, 422: 16x8x8, 444: 16x16x12 — see encode_mb()). */
    int16_t weight[12][64];
    int16_t orig[12][64];
    const int mb_x = s->mb_x;
    const int mb_y = s->mb_y;
    int i;
    int skip_dct[12];
    int dct_offset = s->linesize * 8; // default for progressive frames
    int uv_dct_offset = s->uvlinesize * 8;
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    ptrdiff_t wrap_y, wrap_c;

    for (i = 0; i < mb_block_count; i++)
        skip_dct[i] = s->skipdct;

    if (s->adaptive_quant) {
        const int last_qp = s->qscale;
        const int mb_xy = mb_x + mb_y * s->mb_stride;

        /* per-MB lambda from the rate control table drives the qscale */
        s->lambda = s->lambda_table[mb_xy];
        update_qscale(s);

        if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
            s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
            s->dquant = s->qscale - last_qp;

            if (s->out_format == FMT_H263) {
                /* H.263-family syntax only allows dquant in [-2, 2] */
                s->dquant = av_clip(s->dquant, -2, 2);

                if (s->codec_id == AV_CODEC_ID_MPEG4) {
                    if (!s->mb_intra) {
                        if (s->pict_type == AV_PICTURE_TYPE_B) {
                            /* MPEG-4 B-frames: odd dquant / direct mode
                             * cannot carry a quantizer change */
                            if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
                                s->dquant = 0;
                        }
                        if (s->mv_type == MV_TYPE_8X8)
                            s->dquant = 0;
                    }
                }
            }
        }
        ff_set_qscale(s, last_qp + s->dquant);
    } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
        ff_set_qscale(s, s->qscale + s->dquant);

    wrap_y = s->linesize;
    wrap_c = s->uvlinesize;
    ptr_y = s->new_picture.f->data[0] +
            (mb_y * 16 * wrap_y) + mb_x * 16;
    ptr_cb = s->new_picture.f->data[1] +
             (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
    ptr_cr = s->new_picture.f->data[2] +
             (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;

    /* MB sticks out of the picture: replicate edge pixels into the
     * edge emulation buffer so the DCT reads valid data */
    if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
        uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
        int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
        int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
        s->vdsp.emulated_edge_mc(ebuf, ptr_y,
                                 wrap_y, wrap_y,
                                 16, 16, mb_x * 16, mb_y * 16,
                                 s->width, s->height);
        ptr_y = ebuf;
        s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
                                 wrap_c, wrap_c,
                                 mb_block_width, mb_block_height,
                                 mb_x * mb_block_width, mb_y * mb_block_height,
                                 cw, ch);
        ptr_cb = ebuf + 16 * wrap_y;
        s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
                                 wrap_c, wrap_c,
                                 mb_block_width, mb_block_height,
                                 mb_x * mb_block_width, mb_y * mb_block_height,
                                 cw, ch);
        ptr_cr = ebuf + 16 * wrap_y + 16;
    }

    if (s->mb_intra) {
        if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
            /* choose frame vs field DCT by comparing the ildct metric
             * of both orderings (400 = bias towards progressive) */
            int progressive_score, interlaced_score;

            s->interlaced_dct = 0;
            progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
                                s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
                                                     NULL, wrap_y, 8) - 400;

            if (progressive_score > 0) {
                interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
                                                        NULL, wrap_y * 2, 8) +
                                   s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
                                                        NULL, wrap_y * 2, 8);
                if (progressive_score > interlaced_score) {
                    s->interlaced_dct = 1;

                    /* switch luma (and, for 422/444, chroma) addressing
                     * to field order */
                    dct_offset = wrap_y;
                    uv_dct_offset = wrap_c;
                    wrap_y <<= 1;
                    if (s->chroma_format == CHROMA_422 ||
                        s->chroma_format == CHROMA_444)
                        wrap_c <<= 1;
                }
            }
        }

        s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
        s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
        s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
        s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);

        if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
            /* gray-only encoding: chroma blocks are never coded */
            skip_dct[4] = 1;
            skip_dct[5] = 1;
        } else {
            s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
            s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
            if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
                s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
                s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
            } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
                s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
                s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
                s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
                s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
                s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
                s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
            }
        }
    } else {
        /* inter MB: build the prediction, then DCT the residual */
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        uint8_t *dest_y, *dest_cb, *dest_cr;

        dest_y = s->dest[0];
        dest_cb = s->dest[1];
        dest_cr = s->dest[2];

        if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
            op_pix = s->hdsp.put_pixels_tab;
            op_qpix = s->qdsp.put_qpel_pixels_tab;
        } else {
            op_pix = s->hdsp.put_no_rnd_pixels_tab;
            op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
        }

        if (s->mv_dir & MV_DIR_FORWARD) {
            ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
                          s->last_picture.f->data,
                          op_pix, op_qpix);
            /* bidirectional prediction averages the backward MC in */
            op_pix = s->hdsp.avg_pixels_tab;
            op_qpix = s->qdsp.avg_qpel_pixels_tab;
        }
        if (s->mv_dir & MV_DIR_BACKWARD) {
            ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
                          s->next_picture.f->data,
                          op_pix, op_qpix);
        }

        if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
            /* frame/field DCT decision on the residual, like intra above */
            int progressive_score, interlaced_score;

            s->interlaced_dct = 0;
            progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
                                s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
                                                     ptr_y + wrap_y * 8,
                                                     wrap_y, 8) - 400;

            if (s->avctx->ildct_cmp == FF_CMP_VSSE)
                progressive_score -= 400;

            if (progressive_score > 0) {
                interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
                                                        wrap_y * 2, 8) +
                                   s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
                                                        ptr_y + wrap_y,
                                                        wrap_y * 2, 8);

                if (progressive_score > interlaced_score) {
                    s->interlaced_dct = 1;

                    dct_offset = wrap_y;
                    uv_dct_offset = wrap_c;
                    wrap_y <<= 1;
                    if (s->chroma_format == CHROMA_422)
                        wrap_c <<= 1;
                }
            }
        }

        s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
        s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
        s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
                            dest_y + dct_offset, wrap_y);
        s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
                            dest_y + dct_offset + 8, wrap_y);

        if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
            skip_dct[4] = 1;
            skip_dct[5] = 1;
        } else {
            s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
            s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
            if (!s->chroma_y_shift) { /* 422 */
                s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
                                    dest_cb + uv_dct_offset, wrap_c);
                s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
                                    dest_cr + uv_dct_offset, wrap_c);
            }
        }
        /* pre quantization */
        /* if the MB already matches its prediction well (low mc variance),
         * skip the DCT of any block whose SAD is below 20*qscale */
        if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
            2 * s->qscale * s->qscale) {
            // FIXME optimize
            if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
                skip_dct[0] = 1;
            if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
                skip_dct[1] = 1;
            if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
                               wrap_y, 8) < 20 * s->qscale)
                skip_dct[2] = 1;
            if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
                               wrap_y, 8) < 20 * s->qscale)
                skip_dct[3] = 1;
            if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
                skip_dct[4] = 1;
            if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
                skip_dct[5] = 1;
            if (!s->chroma_y_shift) { /* 422 */
                if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
                                   dest_cb + uv_dct_offset,
                                   wrap_c, 8) < 20 * s->qscale)
                    skip_dct[6] = 1;
                if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
                                   dest_cr + uv_dct_offset,
                                   wrap_c, 8) < 20 * s->qscale)
                    skip_dct[7] = 1;
            }
        }
    }

    if (s->quantizer_noise_shaping) {
        /* gather perceptual weights and keep a copy of the unquantized
         * coefficients for the refinement pass below */
        if (!skip_dct[0])
            get_visual_weight(weight[0], ptr_y                 , wrap_y);
        if (!skip_dct[1])
            get_visual_weight(weight[1], ptr_y              + 8, wrap_y);
        if (!skip_dct[2])
            get_visual_weight(weight[2], ptr_y + dct_offset    , wrap_y);
        if (!skip_dct[3])
            get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
        if (!skip_dct[4])
            get_visual_weight(weight[4], ptr_cb                , wrap_c);
        if (!skip_dct[5])
            get_visual_weight(weight[5], ptr_cr                , wrap_c);
        if (!s->chroma_y_shift) { /* 422 */
            if (!skip_dct[6])
                get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
                                  wrap_c);
            if (!skip_dct[7])
                get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
                                  wrap_c);
        }
        memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
    }

    /* DCT & quantize */
    av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
    {
        for (i = 0; i < mb_block_count; i++) {
            if (!skip_dct[i]) {
                int overflow;
                s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
                // FIXME we could decide to change to quantizer instead of
                // clipping
                // JS: I don't think that would be a good idea it could lower
                //     quality instead of improve it. Just INTRADC clipping
                //     deserves changes in quantizer
                if (overflow)
                    clip_coeffs(s, s->block[i], s->block_last_index[i]);
            } else
                s->block_last_index[i] = -1;
        }
        if (s->quantizer_noise_shaping) {
            for (i = 0; i < mb_block_count; i++) {
                if (!skip_dct[i]) {
                    s->block_last_index[i] =
                        dct_quantize_refine(s, s->block[i], weight[i],
                                            orig[i], i, s->qscale);
                }
            }
        }

        if (s->luma_elim_threshold && !s->mb_intra)
            for (i = 0; i < 4; i++)
                dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
        if (s->chroma_elim_threshold && !s->mb_intra)
            for (i = 4; i < mb_block_count; i++)
                dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);

        if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
            for (i = 0; i < mb_block_count; i++) {
                if (s->block_last_index[i] == -1)
                    s->coded_score[i] = INT_MAX / 256;
            }
        }
    }

    if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
        /* gray mode: force flat chroma DC so decoders see neutral chroma */
        s->block_last_index[4] =
        s->block_last_index[5] = 0;
        s->block[4][0] =
        s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
        if (!s->chroma_y_shift) { /* 422 / 444 */
            for (i=6; i<12; i++) {
                s->block_last_index[i] = 0;
                s->block[i][0] = s->block[4][0];
            }
        }
    }

    // non c quantize code returns incorrect block_last_index FIXME
    if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
        for (i = 0; i < mb_block_count; i++) {
            int j;
            if (s->block_last_index[i] > 0) {
                for (j = 63; j > 0; j--) {
                    if (s->block[i][s->intra_scantable.permutated[j]])
                        break;
                }
                s->block_last_index[i] = j;
            }
        }
    }

    /* huffman encode */
    switch(s->codec_id){ //FIXME funct ptr could be slightly faster
        /* NOTE(review): the MPEG-1/2 case labels appear to have been lost
         * from this view of the source — confirm against upstream. */
        ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_MPEG4:
        ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_MSMPEG4V2:
    case AV_CODEC_ID_MSMPEG4V3:
    case AV_CODEC_ID_WMV1:
        ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_WMV2:
        if (CONFIG_WMV2_ENCODER)
            ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_H261:
        if (CONFIG_H261_ENCODER)
            ff_h261_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_H263:
    case AV_CODEC_ID_H263P:
    case AV_CODEC_ID_FLV1:
    case AV_CODEC_ID_RV10:
    case AV_CODEC_ID_RV20:
        if (CONFIG_H263_ENCODER)
            ff_h263_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_MJPEG:
    case AV_CODEC_ID_AMV:
        ff_mjpeg_encode_mb(s, s->block);
        break;
    case AV_CODEC_ID_SPEEDHQ:
        ff_speedhq_encode_mb(s, s->block);
        break;
    default:
        av_assert1(0);
    }
}
2620 
2621 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2622 {
2623  if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2624  else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2625  else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
2626 }
2627 
    /* Snapshot the encoder state that a macroblock encode may modify, so a
     * trial encode can later be rolled back (see encode_mb_hq()). */
    int i;

    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?

    /* MPEG-1 */
    d->mb_skip_run= s->mb_skip_run;
    for(i=0; i<3; i++)
        d->last_dc[i] = s->last_dc[i];

    /* statistics */
    d->mv_bits= s->mv_bits;
    d->i_tex_bits= s->i_tex_bits;
    d->p_tex_bits= s->p_tex_bits;
    d->i_count= s->i_count;
    d->f_count= s->f_count;
    d->b_count= s->b_count;
    d->skip_count= s->skip_count;
    d->misc_bits= s->misc_bits;
    d->last_bits= 0;           /* bit counter restarts for the trial encode */

    d->mb_skipped= 0;
    d->qscale= s->qscale;
    d->dquant= s->dquant;

    d->esc3_level_length= s->esc3_level_length;
}
2655 
    /* Commit the state of a trial macroblock encode back into the main
     * context — the counterpart of copy_context_before_encode(), copying
     * additionally the results produced by the encode (mv, pb, blocks). */
    int i;

    memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?

    /* MPEG-1 */
    d->mb_skip_run= s->mb_skip_run;
    for(i=0; i<3; i++)
        d->last_dc[i] = s->last_dc[i];

    /* statistics */
    d->mv_bits= s->mv_bits;
    d->i_tex_bits= s->i_tex_bits;
    d->p_tex_bits= s->p_tex_bits;
    d->i_count= s->i_count;
    d->f_count= s->f_count;
    d->b_count= s->b_count;
    d->skip_count= s->skip_count;
    d->misc_bits= s->misc_bits;

    d->mb_intra= s->mb_intra;
    d->mb_skipped= s->mb_skipped;
    d->mv_type= s->mv_type;
    d->mv_dir= s->mv_dir;
    d->pb= s->pb;              /* adopt the bitstream writer position */
    if(s->data_partitioning){
        d->pb2= s->pb2;
        d->tex_pb= s->tex_pb;
    }
    d->block= s->block;
    for(i=0; i<8; i++)
        d->block_last_index[i]= s->block_last_index[i];
    d->interlaced_dct= s->interlaced_dct;
    d->qscale= s->qscale;

    d->esc3_level_length= s->esc3_level_length;
}
2694 
/**
 * Trial-encode one macroblock with the given coding type and motion vector
 * into one of two ping-pong bit buffers, score the result (bit count, plus
 * lambda-weighted SSE under full RD decision), and keep it as the new best
 * if its score beats *dmin.
 */
static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
                                PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
                                int *dmin, int *next_block, int motion_x, int motion_y)
{
    int score;
    uint8_t *dest_backup[3];

    /* save rollback state so a losing trial leaves no trace */
    copy_context_before_encode(s, backup, type);

    s->block= s->blocks[*next_block];
    s->pb= pb[*next_block];
    if(s->data_partitioning){
        s->pb2   = pb2   [*next_block];
        s->tex_pb= tex_pb[*next_block];
    }

    if(*next_block){
        /* second candidate reconstructs into scratch so the current best
         * reconstruction in s->dest is preserved */
        memcpy(dest_backup, s->dest, sizeof(s->dest));
        s->dest[0] = s->sc.rd_scratchpad;
        s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
        s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
        av_assert0(s->linesize >= 32); //FIXME
    }

    encode_mb(s, motion_x, motion_y);

    /* base score: bits spent by this trial */
    score= put_bits_count(&s->pb);
    if(s->data_partitioning){
        score+= put_bits_count(&s->pb2);
        score+= put_bits_count(&s->tex_pb);
    }

    if(s->avctx->mb_decision == FF_MB_DECISION_RD){
        /* full rate-distortion: add lambda-weighted reconstruction error */
        ff_mpv_reconstruct_mb(s, s->block);

        score *= s->lambda2;
        score += sse_mb(s) << FF_LAMBDA_SHIFT;
    }

    if(*next_block){
        memcpy(s->dest, dest_backup, sizeof(s->dest));
    }

    if(score<*dmin){
        *dmin= score;
        *next_block^=1;
        /* NOTE(review): the best-context commit call appears to have been
         * dropped from this view of the source — confirm against upstream. */
    }
}
2745 
2746 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2747  const uint32_t *sq = ff_square_tab + 256;
2748  int acc=0;
2749  int x,y;
2750 
2751  if(w==16 && h==16)
2752  return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2753  else if(w==8 && h==8)
2754  return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2755 
2756  for(y=0; y<h; y++){
2757  for(x=0; x<w; x++){
2758  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2759  }
2760  }
2761 
2762  av_assert2(acc>=0);
2763 
2764  return acc;
2765 }
2766 
2767 static int sse_mb(MpegEncContext *s){
2768  int w= 16;
2769  int h= 16;
2770 
2771  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2772  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2773 
2774  if(w==16 && h==16)
2775  if(s->avctx->mb_cmp == FF_CMP_NSSE){
2776  return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2777  s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2778  s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2779  }else{
2780  return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2781  s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2782  s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2783  }
2784  else
2785  return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2786  +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2787  +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2788 }
2789 
    /* Slice worker: motion-estimation pre-pass, scanning macroblocks in
     * reverse order (bottom-right to top-left) with the pre_dia_size
     * diamond. Results seed the main estimation pass. */
    MpegEncContext *s= *(void**)arg;


    s->me.pre_pass=1;
    s->me.dia_size= s->avctx->pre_dia_size;
    s->first_slice_line=1;
    for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
        for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
            ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
        }
        s->first_slice_line=0;
    }

    s->me.pre_pass=0;

    return 0;
}
2808 
    /* Slice worker: main motion-estimation pass over this slice's rows,
     * picking B- or P-frame estimation per picture type. */
    MpegEncContext *s= *(void**)arg;

    s->me.dia_size= s->avctx->dia_size;
    s->first_slice_line=1;
    for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x=0; //for block init below
        for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
            /* advance the per-block luma indices across the row */
            s->block_index[0]+=2;
            s->block_index[1]+=2;
            s->block_index[2]+=2;
            s->block_index[3]+=2;

            /* compute motion vector & mb_type and store in context */
            if(s->pict_type==AV_PICTURE_TYPE_B)
                ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
            else
                ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
        }
        s->first_slice_line=0;
    }
    return 0;
}
2833 
2834 static int mb_var_thread(AVCodecContext *c, void *arg){
2835  MpegEncContext *s= *(void**)arg;
2836  int mb_x, mb_y;
2837 
2838  for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2839  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2840  int xx = mb_x * 16;
2841  int yy = mb_y * 16;
2842  uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2843  int varc;
2844  int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2845 
2846  varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2847  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2848 
2849  s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2850  s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2851  s->me.mb_var_sum_temp += varc;
2852  }
2853  }
2854  return 0;
2855 }
2856 
    /* Terminate the current slice: emit codec-specific stuffing/padding,
     * byte-align the bitstream, and update pass-1 statistics. */
    if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
        if(s->partitioned_frame){
            /* NOTE(review): the partition-merge call appears to have been
             * dropped from this view of the source — confirm upstream. */
        }

        ff_mpeg4_stuffing(&s->pb);
    }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
    } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
    }

    /* byte-align so the next slice starts on a byte boundary */
    flush_put_bits(&s->pb);

    if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
        s->misc_bits+= get_bits_diff(s);
}
2875 
{
    /* Fill the most recently reserved 12-byte mb_info record with the
     * current bitstream offset and resynchronization data (H.263
     * "Annex"-style side info: quantizer, GOB number, MB address, and the
     * motion-vector predictor). */
    uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
    int offset = put_bits_count(&s->pb);
    int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
    int gobn = s->mb_y / s->gob_index;
    int pred_x, pred_y;
    if (CONFIG_H263_ENCODER)
        ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
    bytestream_put_le32(&ptr, offset);
    bytestream_put_byte(&ptr, s->qscale);
    bytestream_put_byte(&ptr, gobn);
    bytestream_put_le16(&ptr, mba);
    bytestream_put_byte(&ptr, pred_x); /* hmv1 */
    bytestream_put_byte(&ptr, pred_y); /* vmv1 */
    /* 4MV not implemented */
    bytestream_put_byte(&ptr, 0); /* hmv2 */
    bytestream_put_byte(&ptr, 0); /* vmv2 */
}
2895 
2896 static void update_mb_info(MpegEncContext *s, int startcode)
2897 {
2898  if (!s->mb_info)
2899  return;
2900  if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2901  s->mb_info_size += 12;
2902  s->prev_mb_info = s->last_mb_info;
2903  }
2904  if (startcode) {
2905  s->prev_mb_info = put_bits_count(&s->pb)/8;
2906  /* This might have incremented mb_info_size above, and we return without
2907  * actually writing any info into that slot yet. But in that case,
2908  * this will be called again at the start of the after writing the
2909  * start code, actually writing the mb info. */
2910  return;
2911  }
2912 
2913  s->last_mb_info = put_bits_count(&s->pb)/8;
2914  if (!s->mb_info_size)
2915  s->mb_info_size += 12;
2916  write_mb_info(s);
2917 }
2918 
/**
 * Grow the shared bitstream buffer when fewer than @p threshold bytes
 * remain, preserving the written data and re-deriving the PutBitContext
 * and the ptr_lastgob / vbv_delay_ptr pointers against the new buffer.
 * Only possible with a single slice context writing into the codec's
 * internal byte buffer.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure,
 *         AVERROR(EINVAL) if the space is still below threshold afterwards.
 */
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
{
    if (   s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
        && s->slice_context_count == 1
        && s->pb.buf == s->avctx->internal->byte_buffer) {
        /* remember raw pointers as offsets — they must be rebased after
         * the buffer moves */
        int lastgob_pos = s->ptr_lastgob - s->pb.buf;
        int vbv_pos     = s->vbv_delay_ptr - s->pb.buf;

        uint8_t *new_buffer = NULL;
        int new_buffer_size = 0;

        if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
            av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
            return AVERROR(ENOMEM);
        }

        emms_c();

        av_fast_padded_malloc(&new_buffer, &new_buffer_size,
                              s->avctx->internal->byte_buffer_size + size_increase);
        if (!new_buffer)
            return AVERROR(ENOMEM);

        /* copy before freeing; then switch the context to the new buffer */
        memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
        av_free(s->avctx->internal->byte_buffer);
        s->avctx->internal->byte_buffer      = new_buffer;
        s->avctx->internal->byte_buffer_size = new_buffer_size;
        rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
        s->ptr_lastgob   = s->pb.buf + lastgob_pos;
        s->vbv_delay_ptr = s->pb.buf + vbv_pos;
    }
    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
        return AVERROR(EINVAL);
    return 0;
}
2954 
2955 static int encode_thread(AVCodecContext *c, void *arg){
2956  MpegEncContext *s= *(void**)arg;
2957  int mb_x, mb_y, mb_y_order;
2958  int chr_h= 16>>s->chroma_y_shift;
2959  int i, j;
2960  MpegEncContext best_s = { 0 }, backup_s;
2961  uint8_t bit_buf[2][MAX_MB_BYTES];
2962  uint8_t bit_buf2[2][MAX_MB_BYTES];
2963  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2964  PutBitContext pb[2], pb2[2], tex_pb[2];
2965 
2966  for(i=0; i<2; i++){
2967  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2968  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2969  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2970  }
2971 
2972  s->last_bits= put_bits_count(&s->pb);
2973  s->mv_bits=0;
2974  s->misc_bits=0;
2975  s->i_tex_bits=0;
2976  s->p_tex_bits=0;
2977  s->i_count=0;
2978  s->f_count=0;
2979  s->b_count=0;
2980  s->skip_count=0;
2981 
2982  for(i=0; i<3; i++){
2983  /* init last dc values */
2984  /* note: quant matrix value (8) is implied here */
2985  s->last_dc[i] = 128 << s->intra_dc_precision;
2986 
2987  s->current_picture.encoding_error[i] = 0;
2988  }
2989  if(s->codec_id==AV_CODEC_ID_AMV){
2990  s->last_dc[0] = 128*8/13;
2991  s->last_dc[1] = 128*8/14;
2992  s->last_dc[2] = 128*8/14;
2993  }
2994  s->mb_skip_run = 0;
2995  memset(s->last_mv, 0, sizeof(s->last_mv));
2996 
2997  s->last_mv_dir = 0;
2998 
2999  switch(s->codec_id){
3000  case AV_CODEC_ID_H263:
3001  case AV_CODEC_ID_H263P:
3002  case AV_CODEC_ID_FLV1:
3003  if (CONFIG_H263_ENCODER)
3004  s->gob_index = H263_GOB_HEIGHT(s->height);
3005  break;
3006  case AV_CODEC_ID_MPEG4:
3007  if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
3009  break;
3010  }
3011 
3012  s->resync_mb_x=0;
3013  s->resync_mb_y=0;
3014  s->first_slice_line = 1;
3015  s->ptr_lastgob = s->pb.buf;
3016  for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
3017  if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
3018  int first_in_slice;
3019  mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
3020  if (first_in_slice && mb_y_order != s->start_mb_y)
3022  s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
3023  } else {
3024  mb_y = mb_y_order;
3025  }
3026  s->mb_x=0;
3027  s->mb_y= mb_y;
3028 
3029  ff_set_qscale(s, s->qscale);
3031 
3032  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3033  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
3034  int mb_type= s->mb_type[xy];
3035 // int d;
3036  int dmin= INT_MAX;
3037  int dir;
3038  int size_increase = s->avctx->internal->byte_buffer_size/4
3039  + s->mb_width*MAX_MB_BYTES;
3040 
3042  if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
3043  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
3044  return -1;
3045  }
3046  if(s->data_partitioning){
3047  if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
3048  || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
3049  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3050  return -1;
3051  }
3052  }
3053 
3054  s->mb_x = mb_x;
3055  s->mb_y = mb_y; // moved into loop, can get changed by H.261
3057 
3058  if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3060  xy= s->mb_y*s->mb_stride + s->mb_x;
3061  mb_type= s->mb_type[xy];
3062  }
3063 
3064  /* write gob / video packet header */
3065  if(s->rtp_mode){
3066  int current_packet_size, is_gob_start;
3067 
3068  current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3069 
3070  is_gob_start = s->rtp_payload_size &&
3071  current_packet_size >= s->rtp_payload_size &&
3072  mb_y + mb_x > 0;
3073 
3074  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3075 
3076  switch(s->codec_id){
3077  case AV_CODEC_ID_H263:
3078  case AV_CODEC_ID_H263P:
3079  if(!s->h263_slice_structured)
3080  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3081  break;
3083  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3085  if(s->mb_skip_run) is_gob_start=0;
3086  break;
3087  case AV_CODEC_ID_MJPEG:
3088  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3089  break;
3090  }
3091 
3092  if(is_gob_start){
3093  if(s->start_mb_y != mb_y || mb_x!=0){
3094  write_slice_end(s);
3095 
3096  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3098  }
3099  }
3100 
3101  av_assert2((put_bits_count(&s->pb)&7) == 0);
3102  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3103 
3104  if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3105  int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3106  int d = 100 / s->error_rate;
3107  if(r % d == 0){
3108  current_packet_size=0;
3109  s->pb.buf_ptr= s->ptr_lastgob;
3110  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3111  }
3112  }
3113 
3114 #if FF_API_RTP_CALLBACK
3116  if (s->avctx->rtp_callback){
3117  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3118  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3119  }
3121 #endif
3122  update_mb_info(s, 1);
3123 
3124  switch(s->codec_id){
3125  case AV_CODEC_ID_MPEG4:
3126  if (CONFIG_MPEG4_ENCODER) {
3129  }
3130  break;
3136  }
3137  break;
3138  case AV_CODEC_ID_H263:
3139  case AV_CODEC_ID_H263P:
3140  if (CONFIG_H263_ENCODER)
3142  break;
3143  }
3144 
3145  if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3146  int bits= put_bits_count(&s->pb);
3147  s->misc_bits+= bits - s->last_bits;
3148  s->last_bits= bits;
3149  }
3150 
3151  s->ptr_lastgob += current_packet_size;
3152  s->first_slice_line=1;
3153  s->resync_mb_x=mb_x;
3154  s->resync_mb_y=mb_y;
3155  }
3156  }
3157 
3158  if( (s->resync_mb_x == s->mb_x)
3159  && s->resync_mb_y+1 == s->mb_y){
3160  s->first_slice_line=0;
3161  }
3162 
3163  s->mb_skipped=0;
3164  s->dquant=0; //only for QP_RD
3165 
3166  update_mb_info(s, 0);
3167 
3168  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3169  int next_block=0;
3170  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3171 
3172  copy_context_before_encode(&backup_s, s, -1);
3173  backup_s.pb= s->pb;
3174  best_s.data_partitioning= s->data_partitioning;
3175  best_s.partitioned_frame= s->partitioned_frame;
3176  if(s->data_partitioning){
3177  backup_s.pb2= s->pb2;
3178  backup_s.tex_pb= s->tex_pb;
3179  }
3180 
3181  if(mb_type&CANDIDATE_MB_TYPE_INTER){
3182  s->mv_dir = MV_DIR_FORWARD;
3183  s->mv_type = MV_TYPE_16X16;
3184  s->mb_intra= 0;
3185  s->mv[0][0][0] = s->p_mv_table[xy][0];
3186  s->mv[0][0][1] = s->p_mv_table[xy][1];
3187  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3188  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3189  }
3190  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3191  s->mv_dir = MV_DIR_FORWARD;
3192  s->mv_type = MV_TYPE_FIELD;
3193  s->mb_intra= 0;
3194  for(i=0; i<2; i++){
3195  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3196  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3197  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3198  }
3199  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3200  &dmin, &next_block, 0, 0);
3201  }
3202  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3203  s->mv_dir = MV_DIR_FORWARD;
3204  s->mv_type = MV_TYPE_16X16;
3205  s->mb_intra= 0;
3206  s->mv[0][0][0] = 0;
3207  s->mv[0][0][1] = 0;
3208  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3209  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3210  }
3211  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3212  s->mv_dir = MV_DIR_FORWARD;
3213  s->mv_type = MV_TYPE_8X8;
3214  s->mb_intra= 0;
3215  for(i=0; i<4; i++){
3216  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3217  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3218  }
3219  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3220  &dmin, &next_block, 0, 0);
3221  }
3222  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3223  s->mv_dir = MV_DIR_FORWARD;
3224  s->mv_type = MV_TYPE_16X16;
3225  s->mb_intra= 0;
3226  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3227  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3228  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3229  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3230  }
3231  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3232  s->mv_dir = MV_DIR_BACKWARD;
3233  s->mv_type = MV_TYPE_16X16;
3234  s->mb_intra= 0;
3235  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3236  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3237  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3238  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3239  }
3240  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3241  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3242  s->mv_type = MV_TYPE_16X16;
3243  s->mb_intra= 0;
3244  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3245  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3246  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3247  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3248  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3249  &dmin, &next_block, 0, 0);
3250  }
3251  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3252  s->mv_dir = MV_DIR_FORWARD;
3253  s->mv_type = MV_TYPE_FIELD;
3254  s->mb_intra= 0;
3255  for(i=0; i<2; i++){
3256  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3257  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3258  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3259  }
3260  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3261  &dmin, &next_block, 0, 0);
3262  }
3263  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3264  s->mv_dir = MV_DIR_BACKWARD;
3265  s->mv_type = MV_TYPE_FIELD;
3266  s->mb_intra= 0;
3267  for(i=0; i<2; i++){
3268  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3269  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3270  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3271  }
3272  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3273  &dmin, &next_block, 0, 0);
3274  }
3275  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3276  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3277  s->mv_type = MV_TYPE_FIELD;
3278  s->mb_intra= 0;
3279  for(dir=0; dir<2; dir++){
3280  for(i=0; i<2; i++){
3281  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3282  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3283  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3284  }
3285  }
3286  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3287  &dmin, &next_block, 0, 0);
3288  }
3289  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3290  s->mv_dir = 0;
3291  s->mv_type = MV_TYPE_16X16;
3292  s->mb_intra= 1;
3293  s->mv[0][0][0] = 0;
3294  s->mv[0][0][1] = 0;
3295  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3296  &dmin, &next_block, 0, 0);
3297  if(s->h263_pred || s->h263_aic){
3298  if(best_s.mb_intra)
3299  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3300  else
3301  ff_clean_intra_table_entries(s); //old mode?
3302  }
3303  }
3304 
3305  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3306  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3307  const int last_qp= backup_s.qscale;
3308  int qpi, qp, dc[6];
3309  int16_t ac[6][16];
3310  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3311  static const int dquant_tab[4]={-1,1,-2,2};
3312  int storecoefs = s->mb_intra && s->dc_val[0];
3313 
3314  av_assert2(backup_s.dquant == 0);
3315 
3316  //FIXME intra
3317  s->mv_dir= best_s.mv_dir;
3318  s->mv_type = MV_TYPE_16X16;
3319  s->mb_intra= best_s.mb_intra;
3320  s->mv[0][0][0] = best_s.mv[0][0][0];
3321  s->mv[0][0][1] = best_s.mv[0][0][1];
3322  s->mv[1][0][0] = best_s.mv[1][0][0];
3323  s->mv[1][0][1] = best_s.mv[1][0][1];
3324 
3325  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3326  for(; qpi<4; qpi++){
3327  int dquant= dquant_tab[qpi];
3328  qp= last_qp + dquant;
3329  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3330  continue;
3331  backup_s.dquant= dquant;
3332  if(storecoefs){
3333  for(i=0; i<6; i++){
3334  dc[i]= s->dc_val[0][ s->block_index[i] ];
3335  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3336  }
3337  }
3338 
3339  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3340  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3341  if(best_s.qscale != qp){
3342  if(storecoefs){
3343  for(i=0; i<6; i++){
3344  s->dc_val[0][ s->block_index[i] ]= dc[i];
3345  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3346  }
3347  }
3348  }
3349  }
3350  }
3351  }
3353  int mx= s->b_direct_mv_table[xy][0];
3354  int my= s->b_direct_mv_table[xy][1];
3355 
3356  backup_s.dquant = 0;
3357  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3358  s->mb_intra= 0;
3359  ff_mpeg4_set_direct_mv(s, mx, my);
3360  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3361  &dmin, &next_block, mx, my);
3362  }
3364  backup_s.dquant = 0;
3365  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3366  s->mb_intra= 0;
3367  ff_mpeg4_set_direct_mv(s, 0, 0);
3368  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3369  &dmin, &next_block, 0, 0);
3370  }
3371  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3372  int coded=0;
3373  for(i=0; i<6; i++)
3374  coded |= s->block_last_index[i];
3375  if(coded){
3376  int mx,my;
3377  memcpy(s->mv, best_s.mv, sizeof(s->mv));
3378  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3379  mx=my=0; //FIXME find the one we actually used
3380  ff_mpeg4_set_direct_mv(s, mx, my);
3381  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3382  mx= s->mv[1][0][0];
3383  my= s->mv[1][0][1];
3384  }else{
3385  mx= s->mv[0][0][0];
3386  my= s->mv[0][0][1];
3387  }
3388 
3389  s->mv_dir= best_s.mv_dir;
3390  s->mv_type = best_s.mv_type;
3391  s->mb_intra= 0;
3392 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3393  s->mv[0][0][1] = best_s.mv[0][0][1];
3394  s->mv[1][0][0] = best_s.mv[1][0][0];
3395  s->mv[1][0][1] = best_s.mv[1][0][1];*/
3396  backup_s.dquant= 0;
3397  s->skipdct=1;
3398  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3399  &dmin, &next_block, mx, my);
3400  s->skipdct=0;
3401  }
3402  }
3403 
3404  s->current_picture.qscale_table[xy] = best_s.qscale;
3405 
3406  copy_context_after_encode(s, &best_s, -1);
3407 
3408  pb_bits_count= put_bits_count(&s->pb);
3409  flush_put_bits(&s->pb);
3410  ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3411  s->pb= backup_s.pb;
3412 
3413  if(s->data_partitioning){
3414  pb2_bits_count= put_bits_count(&s->pb2);
3415  flush_put_bits(&s->pb2);
3416  ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3417  s->pb2= backup_s.pb2;
3418 
3419  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3420  flush_put_bits(&s->tex_pb);
3421  ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3422  s->tex_pb= backup_s.tex_pb;
3423  }
3424  s->last_bits= put_bits_count(&s->pb);
3425 
3426  if (CONFIG_H263_ENCODER &&
3427  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3429 
3430  if(next_block==0){ //FIXME 16 vs linesize16
3431  s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3432  s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3433  s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3434  }
3435 
3436  if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3437  ff_mpv_reconstruct_mb(s, s->block);
3438  } else {
3439  int motion_x = 0, motion_y = 0;
3440  s->mv_type=MV_TYPE_16X16;
3441  // only one MB-Type possible
3442 
3443  switch(mb_type){
3445  s->mv_dir = 0;
3446  s->mb_intra= 1;
3447  motion_x= s->mv[0][0][0] = 0;
3448  motion_y= s->mv[0][0][1] = 0;
3449  break;
3451  s->mv_dir = MV_DIR_FORWARD;
3452  s->mb_intra= 0;
3453  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3454  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3455  break;
3457  s->mv_dir = MV_DIR_FORWARD;
3458  s->mv_type = MV_TYPE_FIELD;
3459  s->mb_intra= 0;
3460  for(i=0; i<2; i++){
3461  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3462  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3463  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3464  }
3465  break;
3467  s->mv_dir = MV_DIR_FORWARD;
3468  s->mv_type = MV_TYPE_8X8;
3469  s->mb_intra= 0;
3470  for(i=0; i<4; i++){
3471  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3472  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3473  }
3474  break;
3476  if (CONFIG_MPEG4_ENCODER) {
3478  s->mb_intra= 0;
3479  motion_x=s->b_direct_mv_table[xy][0];
3480  motion_y=s->b_direct_mv_table[xy][1];
3481  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3482  }
3483  break;
3485  if (CONFIG_MPEG4_ENCODER) {
3487  s->mb_intra= 0;
3488  ff_mpeg4_set_direct_mv(s, 0, 0);
3489  }
3490  break;
3492  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3493  s->mb_intra= 0;
3494  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3495  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3496  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3497  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3498  break;
3500  s->mv_dir = MV_DIR_BACKWARD;
3501  s->mb_intra= 0;
3502  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3503  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3504  break;
3506  s->mv_dir = MV_DIR_FORWARD;
3507  s->mb_intra= 0;
3508  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3509  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3510  break;
3512  s->mv_dir = MV_DIR_FORWARD;
3513  s->mv_type = MV_TYPE_FIELD;
3514  s->mb_intra= 0;
3515  for(i=0; i<2; i++){
3516  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3517  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3518  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3519  }
3520  break;
3522  s->mv_dir = MV_DIR_BACKWARD;
3523  s->mv_type = MV_TYPE_FIELD;
3524  s->mb_intra= 0;
3525  for(i=0; i<2; i++){
3526  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3527  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3528  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3529  }
3530  break;
3532  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3533  s->mv_type = MV_TYPE_FIELD;
3534  s->mb_intra= 0;
3535  for(dir=0; dir<2; dir++){
3536  for(i=0; i<2; i++){
3537  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3538  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3539  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3540  }
3541  }
3542  break;
3543  default:
3544  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3545  }
3546 
3547  encode_mb(s, motion_x, motion_y);
3548 
3549  // RAL: Update last macroblock type
3550  s->last_mv_dir = s->mv_dir;
3551 
3552  if (CONFIG_H263_ENCODER &&
3553  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3555 
3556  ff_mpv_reconstruct_mb(s, s->block);
3557  }
3558 
3559  /* clean the MV table in IPS frames for direct mode in B-frames */
3560  if(s->mb_intra /* && I,P,S_TYPE */){
3561  s->p_mv_table[xy][0]=0;
3562  s->p_mv_table[xy][1]=0;
3563  }
3564 
3565  if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3566  int w= 16;
3567  int h= 16;
3568 
3569  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3570  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3571 
3572  s->current_picture.encoding_error[0] += sse(
3573  s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3574  s->dest[0], w, h, s->linesize);
3575  s->current_picture.encoding_error[1] += sse(
3576  s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3577  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3578  s->current_picture.encoding_error[2] += sse(
3579  s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3580  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3581  }
3582  if(s->loop_filter){
3583  if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3585  }
3586  ff_dlog(s->avctx, "MB %d %d bits\n",
3587  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3588  }
3589  }
3590 
3591  //not beautiful here but we must write it before flushing so it has to be here
3592  if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3594 
3595  write_slice_end(s);
3596 
3597 #if FF_API_RTP_CALLBACK
3599  /* Send the last GOB if RTP */
3600  if (s->avctx->rtp_callback) {
3601  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3602  int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3603  /* Call the RTP callback to send the last GOB */
3604  emms_c();
3605  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3606  }
3608 #endif
3609 
3610  return 0;
3611 }
3612 
/* Merge a rate-control/statistics field from a slice context (src) into the
 * main context (dst) and clear it in src, so repeated merges don't double
 * count.  Relies on variables named `dst` and `src` being in scope at the
 * call site.  Wrapped in do/while(0) so the two statements behave as one
 * statement under unbraced if/else (CERT PRE10-C); previously the second
 * statement (`src->field = 0`) would have escaped such a context. */
#define MERGE(field) do { dst->field += src->field; src->field = 0; } while (0)
3615  MERGE(me.scene_change_score);
3616  MERGE(me.mc_mb_var_sum_temp);
3617  MERGE(me.mb_var_sum_temp);
3618 }
3619 
3621  int i;
3622 
3623  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3624  MERGE(dct_count[1]);
3625  MERGE(mv_bits);
3626  MERGE(i_tex_bits);
3627  MERGE(p_tex_bits);
3628  MERGE(i_count);
3629  MERGE(f_count);
3630  MERGE(b_count);
3631  MERGE(skip_count);
3632  MERGE(misc_bits);
3633  MERGE(er.error_count);
3634  MERGE(padding_bug_score);
3635  MERGE(current_picture.encoding_error[0]);
3636  MERGE(current_picture.encoding_error[1]);
3637  MERGE(current_picture.encoding_error[2]);
3638 
3639  if (dst->noise_reduction){
3640  for(i=0; i<64; i++){
3641  MERGE(dct_error_sum[0][i]);
3642  MERGE(dct_error_sum[1][i]);
3643  }
3644  }
3645 
3646  av_assert1(put_bits_count(&src->pb) % 8 ==0);
3647  av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3648  ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3649  flush_put_bits(&dst->pb);
3650 }
3651 
/**
 * Choose the quality/lambda for the current picture before encoding.
 *
 * Priority: an explicitly queued lambda (s->next_lambda) wins; otherwise,
 * unless qscale is fixed, ask the rate controller.  With adaptive
 * quantization a per-MB lambda table is used instead of a single lambda.
 *
 * @param dry_run  if nonzero, estimate only — do not consume s->next_lambda
 * @return 0 on success, -1 if the rate estimator produced a negative quality
 *
 * NOTE(review): this listing comes from a doc-generator extraction; the
 * original lines 3668-3669, 3675 and 3678 (codec-specific qscale-table
 * cleanup/init calls inside the switch below) are missing here — verify
 * against upstream FFmpeg before relying on the switch bodies.
 */
3652 static int estimate_qp(MpegEncContext *s, int dry_run){
 // A pre-queued lambda (e.g. from 2-pass rate control) overrides estimation.
3653  if (s->next_lambda){
3654  s->current_picture_ptr->f->quality =
3655  s->current_picture.f->quality = s->next_lambda;
 // Only a real run consumes the queued value; a dry run must leave it set.
3656  if(!dry_run) s->next_lambda= 0;
3657  } else if (!s->fixed_qscale) {
3658  int quality = ff_rate_estimate_qscale(s, dry_run);
3659  s->current_picture_ptr->f->quality =
3660  s->current_picture.f->quality = quality;
3661  if (s->current_picture.f->quality < 0)
3662  return -1;
3663  }
3664 
3665  if(s->adaptive_quant){
 // NOTE(review): the per-codec calls inside this switch were elided by the
 // extraction (see header note); the cases look empty but are not upstream.
3666  switch(s->codec_id){
3667  case AV_CODEC_ID_MPEG4:
3670  break;
3671  case AV_CODEC_ID_H263:
3672  case AV_CODEC_ID_H263P:
3673  case AV_CODEC_ID_FLV1:
3674  if (CONFIG_H263_ENCODER)
3676  break;
3677  default:
3679  }
3680 
 // With adaptive quant the first table entry seeds the global lambda.
3681  s->lambda= s->lambda_table[0];
3682  //FIXME broken
3683  }else
3684  s->lambda = s->current_picture.f->quality;
 // Derive the integer qscale from the (possibly new) lambda.
3685  update_qscale(s);
3686  return 0;
3687 }
3688 
3689 /* must be called before writing the header */
3691  av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3692  s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3693 
3694  if(s->pict_type==AV_PICTURE_TYPE_B){
3695  s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3696  av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3697  }else{
3698  s->pp_time= s->time - s->last_non_b_time;
3699  s->last_non_b_time= s->time;
3700  av_assert1(s->picture_number==0 || s->pp_time > 0);
3701  }
3702 }
3703 
/**
 * Encode one complete picture: rate control, motion estimation across all
 * slice contexts, scene-change handling, f_code/b_code selection, quant
 * matrix setup, picture header emission, and finally the threaded
 * macroblock encoding pass.
 *
 * @param s               main encoder context
 * @param picture_number  sequential picture index, stored into s and passed
 *                        to the per-codec header writers
 * @return 0 on success, a negative error code on failure
 *
 * NOTE(review): this listing is a doc-generator extraction; several call
 * lines (original lines 3719, 3721, 3738, 3815, 3847-3848, 3937-3938,
 * 3966) are missing below.  Gaps are flagged inline — verify against
 * upstream FFmpeg before treating those spots as intentionally empty.
 */
3704 static int encode_picture(MpegEncContext *s, int picture_number)
3705 {
3706  int i, ret;
3707  int bits;
3708  int context_count = s->slice_context_count;
3709 
3710  s->picture_number = picture_number;
3711 
3712  /* Reset the average MB variance */
3713  s->me.mb_var_sum_temp =
3714  s->me.mc_mb_var_sum_temp = 0;
3715 
3716  /* we need to initialize some time vars before we can encode B-frames */
3717  // RAL: Condition added for MPEG1VIDEO
 // NOTE(review): the bodies of the two conditionals below (frame-distance /
 // MPEG-4 time init calls, orig. lines 3719 and 3721) were elided by the
 // extraction; as printed, the second `if` would wrongly become the body of
 // the first.
3718  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3720  if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3722 
3723  s->me.scene_change_score=0;
3724 
3725 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3726 
 // Rounding control: I-frames reset it; P/S frames toggle it for codecs that
 // alternate rounding per frame (H.263+/MPEG-4 style flipflop rounding).
3727  if(s->pict_type==AV_PICTURE_TYPE_I){
3728  if(s->msmpeg4_version >= 3) s->no_rounding=1;
3729  else s->no_rounding=0;
3730  }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3731  if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3732  s->no_rounding ^= 1;
3733  }
3734 
 // First qscale estimate: pass-2 uses the stats file (dry run here); without
 // a fixed qscale, reuse the last lambda of the same picture-type class.
3735  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3736  if (estimate_qp(s,1) < 0)
3737  return -1;
3739  } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3740  if(s->pict_type==AV_PICTURE_TYPE_B)
3741  s->lambda= s->last_lambda_for[s->pict_type];
3742  else
3743  s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3744  update_qscale(s);
3745  }
3746 
 // Outside (A)MJPEG, chroma shares the luma intra quant matrices; free any
 // separately allocated chroma matrices first to avoid a leak.
3747  if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3748  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3749  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3750  s->q_chroma_intra_matrix = s->q_intra_matrix;
3751  s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3752  }
3753 
3754  s->mb_intra=0; //for the rate distortion & bit compare functions
 // Propagate the updated main context into every slice-thread context.
3755  for(i=1; i<context_count; i++){
3756  ret = ff_update_duplicate_context(s->thread_context[i], s);
3757  if (ret < 0)
3758  return ret;
3759  }
3760 
3761  if(ff_init_me(s)<0)
3762  return -1;
3763 
3764  /* Estimate motion for every MB */
3765  if(s->pict_type != AV_PICTURE_TYPE_I){
 // Scale lambda by the ME penalty compensation (fixed-point, /256).
3766  s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3767  s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3768  if (s->pict_type != AV_PICTURE_TYPE_B) {
3769  if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3770  s->me_pre == 2) {
3771  s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3772  }
3773  }
3774 
3775  s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3776  }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3777  /* I-Frame */
3778  for(i=0; i<s->mb_stride*s->mb_height; i++)
3779  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3780 
3781  if(!s->fixed_qscale){
3782  /* finding spatial complexity for I-frame rate control */
3783  s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3784  }
3785  }
 // Fold per-thread ME statistics back into the main context.
3786  for(i=1; i<context_count; i++){
3787  merge_context_after_me(s, s->thread_context[i]);
3788  }
3789  s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3790  s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
3791  emms_c();
3792 
 // Scene change on a P frame: promote the whole picture to intra.
3793  if (s->me.scene_change_score > s->scenechange_threshold &&
3794  s->pict_type == AV_PICTURE_TYPE_P) {
3795  s->pict_type= AV_PICTURE_TYPE_I;
3796  for(i=0; i<s->mb_stride*s->mb_height; i++)
3797  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3798  if(s->msmpeg4_version >= 3)
3799  s->no_rounding=1;
3800  ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3801  s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
3802  }
3803 
 // Pick f_code/b_code from the MV statistics and clamp out-of-range MVs
 // (skipped in H.263+ unrestricted-MV mode, which has no fcode limit).
3804  if(!s->umvplus){
3805  if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3806  s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3807 
3808  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3809  int a,b;
3810  a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3811  b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3812  s->f_code= FFMAX3(s->f_code, a, b);
3813  }
3814 
 // NOTE(review): original line 3815 (a comment or call before this
 // ff_fix_long_mvs) was elided by the extraction — verify upstream.
3816  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3817  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3818  int j;
3819  for(i=0; i<2; i++){
3820  for(j=0; j<2; j++)
3821  ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3822  s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3823  }
3824  }
3825  }
3826 
3827  if(s->pict_type==AV_PICTURE_TYPE_B){
3828  int a, b;
3829 
 // f_code covers forward MVs (pure forward and bidir-forward tables).
3830  a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3831  b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3832  s->f_code = FFMAX(a, b);
3833 
 // b_code covers backward MVs likewise.
3834  a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3835  b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3836  s->b_code = FFMAX(a, b);
3837 
3838  ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3839  ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3840  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3841  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3842  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3843  int dir, j;
3844  for(dir=0; dir<2; dir++){
3845  for(i=0; i<2; i++){
3846  for(j=0; j<2; j++){
 // NOTE(review): original lines 3847-3848 (the declaration/computation of
 // `type` used below) were elided by the extraction — verify upstream.
3849  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3850  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3851  }
3852  }
3853  }
3854  }
3855  }
3856  }
3857 
 // Second, real qscale estimation now that ME statistics exist.
3858  if (estimate_qp(s, 0) < 0)
3859  return -1;
3860 
3861  if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3862  s->pict_type == AV_PICTURE_TYPE_I &&
3863  !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3864  s->qscale= 3; //reduce clipping problems
3865 
 // MJPEG bakes qscale directly into the quant matrices and then encodes
 // with a fixed nominal qscale of 8.
3866  if (s->out_format == FMT_MJPEG) {
3867  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3868  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3869 
3870  if (s->avctx->intra_matrix) {
3871  chroma_matrix =
3872  luma_matrix = s->avctx->intra_matrix;
3873  }
3874  if (s->avctx->chroma_intra_matrix)
3875  chroma_matrix = s->avctx->chroma_intra_matrix;
3876 
3877  /* for mjpeg, we do include qscale in the matrix */
3878  for(i=1;i<64;i++){
3879  int j = s->idsp.idct_permutation[i];
3880 
3881  s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3882  s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3883  }
3884  s->y_dc_scale_table=
3885  s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3886  s->chroma_intra_matrix[0] =
3887  s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3888  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3889  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3890  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3891  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3892  s->qscale= 8;
3893  }
 // AMV uses hard-coded SP5X quant tables and fixed DC scales (13/14).
3894  if(s->codec_id == AV_CODEC_ID_AMV){
3895  static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3896  static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3897  for(i=1;i<64;i++){
3898  int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3899 
3900  s->intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3901  s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3902  }
3903  s->y_dc_scale_table= y;
3904  s->c_dc_scale_table= c;
3905  s->intra_matrix[0] = 13;
3906  s->chroma_intra_matrix[0] = 14;
3907  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3908  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3909  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3910  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3911  s->qscale= 8;
3912  }
3913 
3914  if (s->out_format == FMT_SPEEDHQ) {
3915  s->y_dc_scale_table=
3916  s->c_dc_scale_table= ff_mpeg2_dc_scale_table[3];
3917  }
3918 
3919  //FIXME var duplication
3920  s->current_picture_ptr->f->key_frame =
3921  s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3922  s->current_picture_ptr->f->pict_type =
3923  s->current_picture.f->pict_type = s->pict_type;
3924 
3925  if (s->current_picture.f->key_frame)
3926  s->picture_in_gop_number=0;
3927 
 // Emit the per-format picture header; header_bits is measured around it.
3928  s->mb_x = s->mb_y = 0;
3929  s->last_bits= put_bits_count(&s->pb);
3930  switch(s->out_format) {
3931  case FMT_MJPEG:
3932  if (CONFIG_MJPEG_ENCODER && s->huffman != HUFFMAN_TABLE_OPTIMAL)
3933  ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3934  s->pred, s->intra_matrix, s->chroma_intra_matrix);
3935  break;
 // NOTE(review): original lines 3937-3938 (the SpeedHQ header call) were
 // elided by the extraction — this case is not empty upstream.
3936  case FMT_SPEEDHQ:
3939  break;
3940  case FMT_H261:
3941  if (CONFIG_H261_ENCODER)
3942  ff_h261_encode_picture_header(s, picture_number);
3943  break;
3944  case FMT_H263:
3945  if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3946  ff_wmv2_encode_picture_header(s, picture_number);
3947  else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3948  ff_msmpeg4_encode_picture_header(s, picture_number);
3949  else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3950  ret = ff_mpeg4_encode_picture_header(s, picture_number);
3951  if (ret < 0)
3952  return ret;
3953  } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3954  ret = ff_rv10_encode_picture_header(s, picture_number);
3955  if (ret < 0)
3956  return ret;
3957  }
3958  else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3959  ff_rv20_encode_picture_header(s, picture_number);
3960  else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3961  ff_flv_encode_picture_header(s, picture_number);
3962  else if (CONFIG_H263_ENCODER)
3963  ff_h263_encode_picture_header(s, picture_number);
3964  break;
3965  case FMT_MPEG1:
 // NOTE(review): original line 3966 (the CONFIG guard for this call) was
 // elided by the extraction — verify upstream.
3967  ff_mpeg1_encode_picture_header(s, picture_number);
3968  break;
3969  default:
3970  av_assert0(0);
3971  }
3972  bits= put_bits_count(&s->pb);
3973  s->header_bits= bits - s->last_bits;
3974 
 // Re-sync the slice contexts after ME, run the threaded MB encoding pass,
 // then merge each slice's bitstream and statistics back into s.
3975  for(i=1; i<context_count; i++){
3976  update_duplicate_context_after_me(s->thread_context[i], s);
3977  }
3978  s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3979  for(i=1; i<context_count; i++){
3980  if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3981  set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3982  merge_context_after_encode(s, s->thread_context[i]);
3983  }
3984  emms_c();
3985  return 0;
3986 }
3987 
3988 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3989  const int intra= s->mb_intra;
3990  int i;
3991 
3992  s->dct_count[intra]++;
3993 
3994  for(i=0; i<64; i++){
3995  int level= block[i];
3996 
3997  if(level){
3998  if(level>0){
3999  s->dct_error_sum[intra][i] += level;
4000  level -= s->dct_offset[intra][i];
4001  if(level<0) level=0;
4002  }else{
4003  s->dct_error_sum[intra][i] -= level;
4004  level += s->dct_offset[intra][i];
4005  if(level>0) level=0;
4006  }
4007  block[i]= level;
4008  }
4009  }
4010 }
4011 
4013  int16_t *block, int n,
4014  int qscale, int *overflow){
4015  const int *qmat;
4016  const uint16_t *matrix;
4017  const uint8_t *scantable;
4018  const uint8_t *perm_scantable;
4019  int max=0;
4020  unsigned int threshold1, threshold2;
4021  int bias=0;
4022  int run_tab[65];
4023  int level_tab[65];
4024  int score_tab[65];
4025  int survivor[65];
4026  int survivor_count;
4027  int last_run=0;
4028  int last_level=0;
4029  int last_score= 0;
4030  int last_i;
4031  int coeff[2][64];
4032  int coeff_count[64];
4033  int qmul, qadd, start_i, last_non_zero, i, dc;
4034  const int esc_length= s->ac_esc_length;
4035  uint8_t * length;
4036  uint8_t * last_length;
4037  const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
4038  int mpeg2_qscale;
4039 
4040  s->fdsp.fdct(block);
4041 
4042  if(s->dct_error_sum)
4043  s->denoise_dct(s, block);
4044  qmul= qscale*16;
4045  qadd= ((qscale-1)|1)*8;
4046 
4047  if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
4048  else mpeg2_qscale = qscale << 1;
4049 
4050  if (s->mb_intra) {
4051  int q;
4052  scantable= s->intra_scantable.scantable;
4053  perm_scantable= s->intra_scantable.permutated;
4054  if (!s->h263_aic) {
4055  if (n < 4)
4056  q = s->y_dc_scale;
4057  else
4058  q = s->c_dc_scale;
4059  q = q << 3;
4060  } else{
4061  /* For AIC we skip quant/dequant of INTRADC */
4062  q = 1 << 3;
4063  qadd=0;
4064  }
4065 
4066  /* note: block[0] is assumed to be positive */
4067  block[0] = (block[0] + (q >> 1)) / q;
4068  start_i = 1;
4069  last_non_zero = 0;
4070  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4071  matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4072  if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4073  bias= 1<<(QMAT_SHIFT-1);
4074 
4075  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4076  length = s->intra_chroma_ac_vlc_length;
4077  last_length= s->intra_chroma_ac_vlc_last_length;
4078  } else {
4079  length = s->intra_ac_vlc_length;
4080  last_length= s->intra_ac_vlc_last_length;
4081  }
4082  } else {
4083  scantable= s->inter_scantable.scantable;
4084  perm_scantable= s->inter_scantable.permutated;
4085  start_i = 0;
4086  last_non_zero = -1;
4087  qmat = s->q_inter_matrix[qscale];
4088  matrix = s->inter_matrix;
4089  length = s->inter_ac_vlc_length;
4090  last_length= s->inter_ac_vlc_last_length;
4091  }
4092  last_i= start_i;
4093 
4094  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4095  threshold2= (threshold1<<1);
4096 
4097  for(i=63; i>=start_i; i--) {
4098  const int j = scantable[i];
4099  int level = block[j] * qmat[j];
4100 
4101  if(((unsigned)(level+threshold1))>threshold2){
4102  last_non_zero = i;
4103  break;
4104  }
4105  }
4106 
4107  for(i=start_i; i<=last_non_zero; i++) {
4108  const int j = scantable[i];
4109  int level = block[j] * qmat[j];
4110 
4111 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4112 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4113  if(((unsigned)(level+threshold1))>threshold2){
4114  if(level>0){
4115  level= (bias + level)>>QMAT_SHIFT;
4116  coeff[0][i]= level;
4117  coeff[1][i]= level-1;
4118 // coeff[2][k]= level-2;
4119  }else{
4120  level= (bias - level)>>QMAT_SHIFT;
4121  coeff[0][i]= -level;
4122  coeff[1][i]= -level+1;
4123 // coeff[2][k]= -level+2;
4124  }
4125  coeff_count[i]= FFMIN(level, 2);
4126  av_assert2(coeff_count[i]);
4127  max |=level;
4128  }else{
4129  coeff[0][i]= (level>>31)|1;
4130  coeff_count[i]= 1;
4131  }
4132  }
4133 
4134  *overflow= s->max_qcoeff < max; //overflow might have happened
4135 
4136  if(last_non_zero < start_i){
4137  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4138  return last_non_zero;
4139  }
4140 
4141  score_tab[start_i]= 0;
4142  survivor[0]= start_i;
4143  survivor_count= 1;
4144 
4145  for(i=start_i; i<=last_non_zero; i++){
4146  int level_index, j, zero_distortion;
4147  int dct_coeff= FFABS(block[ scantable[i] ]);
4148  int best_score=256*256*256*120;
4149 
4150  if (s->fdsp.fdct == ff_fdct_ifast)
4151  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4152  zero_distortion= dct_coeff*dct_coeff;
4153 
4154  for(level_index=0; level_index < coeff_count[i]; level_index++){
4155  int distortion;
4156  int level= coeff[level_index][i];
4157  const int alevel= FFABS(level);
4158  int unquant_coeff;
4159 
4160  av_assert2(level);
4161 
4162  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4163  unquant_coeff= alevel*qmul + qadd;
4164  } else if(s->out_format == FMT_MJPEG) {
4165  j = s->idsp.idct_permutation[scantable[i]];
4166  unquant_coeff = alevel * matrix[j] * 8;
4167  }else{ // MPEG-1
4168  j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4169  if(s->mb_intra){
4170  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4171  unquant_coeff = (unquant_coeff - 1) | 1;
4172  }else{
4173  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4174  unquant_coeff = (unquant_coeff - 1) | 1;
4175  }
4176  unquant_coeff<<= 3;
4177  }
4178 
4179  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4180  level+=64;
4181  if((level&(~127)) == 0){
4182  for(j=survivor_count-1; j>=0; j--){
4183  int run= i - survivor[j];
4184  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4185  score += score_tab[i-run];
4186 
4187  if(score < best_score){
4188  best_score= score;
4189  run_tab[i+1]= run;
4190  level_tab[i+1]= level-64;
4191  }
4192  }
4193 
4194  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4195  for(j=survivor_count-1; j>=0; j--){
4196  int run= i - survivor[j];
4197  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4198  score += score_tab[i-run];
4199  if(score < last_score){
4200  last_score= score;
4201  last_run= run;
4202  last_level= level-64;
4203  last_i= i+1;
4204  }
4205  }
4206  }
4207  }else{
4208  distortion += esc_length*lambda;
4209  for(j=survivor_count-1; j>=0; j--){
4210  int run= i - survivor[j];
4211  int score= distortion + score_tab[i-run];
4212 
4213  if(score < best_score){
4214  best_score= score;
4215  run_tab[i+1]= run;
4216  level_tab[i+1]= level-64;
4217  }
4218  }
4219 
4220  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4221  for(j=survivor_count-1; j>=0; j--){
4222  int run= i - survivor[j];
4223  int score= distortion + score_tab[i-run];
4224  if(score < last_score){
4225  last_score= score;
4226  last_run= run;
4227  last_level= level-64;
4228  last_i= i+1;
4229  }
4230  }
4231  }
4232  }
4233  }
4234 
4235  score_tab[i+1]= best_score;
4236 
4237  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4238  if(last_non_zero <= 27){
4239  for(; survivor_count; survivor_count--){
4240  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4241  break;
4242  }
4243  }else{
4244  for(; survivor_count; survivor_count--){
4245  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4246  break;
4247  }
4248  }
4249 
4250  survivor[ survivor_count++ ]= i+1;
4251  }
4252 
4253  if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4254  last_score= 256*256*256*120;
4255  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4256  int score= score_tab[i];
4257  if (i)
4258  score += lambda * 2; // FIXME more exact?
4259 
4260  if(score < last_score){
4261  last_score= score;
4262  last_i= i;
4263  last_level= level_tab[i];
4264  last_run= run_tab[i];
4265  }
4266  }
4267  }
4268 
4269  s->coded_score[n] = last_score;
4270 
4271  dc= FFABS(block[0]);
4272  last_non_zero= last_i - 1;
4273  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4274 
4275  if(last_non_zero < start_i)
4276  return last_non_zero;
4277 
4278  if(last_non_zero == 0 && start_i == 0){
4279  int best_level= 0;
4280  int best_score= dc * dc;
4281 
4282  for(i=0; i<coeff_count[0]; i++){
4283  int level= coeff[i][0];
4284  int alevel= FFABS(level);
4285  int unquant_coeff, score, distortion;
4286 
4287  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4288  unquant_coeff= (alevel*qmul + qadd)>>3;
4289  } else{ // MPEG-1
4290  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4291  unquant_coeff = (unquant_coeff - 1) | 1;
4292  }
4293  unquant_coeff = (unquant_coeff + 4) >> 3;
4294  unquant_coeff<<= 3 + 3;
4295 
4296  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4297  level+=64;
4298  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4299  else score= distortion + esc_length*lambda;
4300 
4301  if(score < best_score){
4302  best_score= score;
4303  best_level= level - 64;
4304  }
4305  }
4306  block[0]= best_level;
4307  s->coded_score[n] = best_score - dc*dc;
4308  if(best_level == 0) return -1;
4309  else return last_non_zero;
4310  }
4311 
4312  i= last_i;
4313  av_assert2(last_level);
4314 
4315  block[ perm_scantable[last_non_zero] ]= last_level;
4316  i -= last_run + 1;
4317 
4318  for(; i>start_i; i -= run_tab[i] + 1){
4319  block[ perm_scantable[i-1] ]= level_tab[i];
4320  }
4321 
4322  return last_non_zero;
4323 }
4324 
4325 static int16_t basis[64][64];
4326 
4327 static void build_basis(uint8_t *perm){
4328  int i, j, x, y;
4329  emms_c();
4330  for(i=0; i<8; i++){
4331  for(j=0; j<8; j++){
4332  for(y=0; y<8; y++){
4333  for(x=0; x<8; x++){
4334  double s= 0.25*(1<<BASIS_SHIFT);
4335  int index= 8*i + j;
4336  int perm_index= perm[index];
4337  if(i==0) s*= sqrt(0.5);
4338  if(j==0) s*= sqrt(0.5);
4339  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4340  }
4341  }
4342  }
4343  }
4344 }
4345 
/**
 * Iteratively refine an already-quantized 8x8 block for noise shaping.
 *
 * Starting from the existing quantized coefficients, repeatedly tries
 * +-1 changes to individual levels and keeps the change that most reduces
 * a weighted reconstruction-error + VLC-bits score, until no change helps.
 *
 * @param s      encoder context
 * @param block  quantized coefficients (IDCT permutation order), updated in place
 * @param weight per-pixel error weights; rescaled in place to the 16..63 range
 * @param orig   original (unquantized) spatial-domain pixels
 * @param n      block index; n < 4 selects luma DC scale
 * @param qscale quantizer scale
 * @return index of the last nonzero coefficient after refinement
 */
static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
                        int16_t *block, int16_t *weight, int16_t *orig,
                        int n, int qscale){
    int16_t rem[64];
    LOCAL_ALIGNED_16(int16_t, d1, [64]);
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
//    unsigned int threshold1, threshold2;
//    int bias=0;
    int run_tab[65];
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    uint8_t * length;
    uint8_t * last_length;
    int lambda;
    int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true

    /* Lazily build the DCT basis table on first use. */
    if(basis[0][0] == 0)
        build_basis(s->idsp.idct_permutation);

    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->mb_intra) {
        scantable= s->intra_scantable.scantable;
        perm_scantable= s->intra_scantable.permutated;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
        dc= block[0]*q;
//        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
//        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable= s->inter_scantable.scantable;
        perm_scantable= s->inter_scantable.permutated;
        dc= 0;
        start_i = 0;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->block_last_index[n];

    /* rem[] holds the (scaled) residual: reconstruction minus original. */
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
    }

    /* Normalize the weights into 16..63 and accumulate their energy,
     * which scales lambda below. */
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;
//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);

    /* Build the initial run-length table and add the dequantized
     * coefficients' basis contributions into rem[]. */
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }

    /* Greedy refinement: each pass finds the single +-1 level change with
     * the best score and applies it; stop when nothing improves. */
    for(;;){
        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
            /* d1 = DCT of the weighted residual; its sign tells whether a
             * new +-1 coefficient could reduce the error at all. */
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
            s->fdsp.fdct(d1);
        }

        if(start_i){
            /* Intra: also consider nudging the DC coefficient. */
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            /* Below noise-shaping level 3, do not grow the block beyond
             * one position past the current last nonzero coefficient. */
            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--;
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                /* Below noise-shaping level 2, only allow shrinking levels. */
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                   continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        /* Changing an existing level: bit-cost delta of the
                         * (run, level) VLC. */
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=  length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=  last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        /* Creating a new +-1 coefficient: it splits one run
                         * into two; account for both VLCs. */
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            int g= d1[ scantable[i] ];
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            if(next_level&(~127))
                                next_level= 0;

                            if(next_i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=  length[UNI_AC_ENC_INDEX(run, 65)]
                                        + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                        - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                        - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    /* Removing a +-1 coefficient: two runs merge into one. */
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                    - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                                                   unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }

        if(best_change){
            /* Apply the winning change and update last_non_zero, the
             * run-length table and the residual. */
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
            }else{
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }

            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break;
        }
    }

    return last_non_zero;
}
4661 
/**
 * Permute an 8x8 block according to permutation.
 * @param block the block which will be permuted according to
 *              the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to
 *             speed the permutation up
 * @param scantable the used scantable, this is only used to speed the
 *                  permutation up, the block is not (inverse) permutated
 *                  to scantable order!
 */
void ff_block_permute(int16_t *block, uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t temp[64];

    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    // return;

    /* Lift the coefficients touched by the scan out of the block ... */
    for (int i = 0; i <= last; i++) {
        const int pos = scantable[i];
        temp[pos]  = block[pos];
        block[pos] = 0;
    }

    /* ... and drop each one back in at its permuted position. */
    for (int i = 0; i <= last; i++) {
        const int pos = scantable[i];
        block[permutation[pos]] = temp[pos];
    }
}
4697 
4699  int16_t *block, int n,
4700  int qscale, int *overflow)
4701 {
4702  int i, j, level, last_non_zero, q, start_i;
4703  const int *qmat;
4704  const uint8_t *scantable;
4705  int bias;
4706  int max=0;
4707  unsigned int threshold1, threshold2;
4708 
4709  s->fdsp.fdct(block);
4710 
4711  if(s->dct_error_sum)
4712  s->denoise_dct(s, block);
4713 
4714  if (s->mb_intra) {
4715  scantable= s->intra_scantable.scantable;
4716  if (!s->h263_aic) {
4717  if (n < 4)
4718  q = s->y_dc_scale;
4719  else
4720  q = s->c_dc_scale;
4721  q = q << 3;
4722  } else
4723  /* For AIC we skip quant/dequant of INTRADC */
4724  q = 1 << 3;
4725 
4726  /* note: block[0] is assumed to be positive */
4727  block[0] = (block[0] + (q >> 1)) / q;
4728  start_i = 1;
4729  last_non_zero = 0;
4730  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4731  bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4732  } else {
4733  scantable= s->inter_scantable.scantable;
4734  start_i = 0;
4735  last_non_zero = -1;
4736  qmat = s->q_inter_matrix[qscale];
4737  bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4738  }
4739  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4740  threshold2= (threshold1<<1);
4741  for(i=63;i>=start_i;i--) {
4742  j = scantable[i];
4743  level = block[j] * qmat[j];
4744 
4745  if(((unsigned)(level+threshold1))>threshold2){
4746  last_non_zero = i;
4747  break;
4748  }else{
4749  block[j]=0;
4750  }
4751  }
4752  for(i=start_i; i<=last_non_zero; i++) {
4753  j = scantable[i];
4754  level = block[j] * qmat[j];
4755 
4756 // if( bias+level >= (1<<QMAT_SHIFT)
4757 // || bias-level >= (1<<QMAT_SHIFT)){
4758  if(((unsigned)(level+threshold1))>threshold2){
4759  if(level>0){
4760  level= (bias + level)>>QMAT_SHIFT;
4761  block[j]= level;
4762  }else{
4763  level= (bias - level)>>QMAT_SHIFT;
4764  block[j]= -level;
4765  }
4766  max |=level;
4767  }else{
4768  block[j]=0;
4769  }
4770  }
4771  *overflow= s->max_qcoeff < max; //overflow might have happened
4772 
4773  /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4774  if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4775  ff_block_permute(block, s->idsp.idct_permutation,
4776  scantable, last_non_zero);
4777 
4778  return last_non_zero;
4779 }
4780 
4781 #define OFFSET(x) offsetof(MpegEncContext, x)
4782 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4783 static const AVOption h263_options[] = {
4784  { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4785  { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4787  { NULL },
4788 };
4789 
/* AVClass exposing h263_options for option introspection. */
static const AVClass h263_class = {
    .class_name = "H.263 encoder",
    .item_name = av_default_item_name,
    .option = h263_options,
    .version = LIBAVUTIL_VERSION_INT,
};
4796 
4798  .name = "h263",
4799  .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4800  .type = AVMEDIA_TYPE_VIDEO,
4801  .id = AV_CODEC_ID_H263,
4802  .priv_data_size = sizeof(MpegEncContext),
4804  .encode2 = ff_mpv_encode_picture,
4805  .close = ff_mpv_encode_end,
4806  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4808  .priv_class = &h263_class,
4809 };
4810 
4811 static const AVOption h263p_options[] = {
4812  { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4813  { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4814  { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4815  { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
4817  { NULL },
4818 };
/* AVClass exposing h263p_options for option introspection. */
static const AVClass h263p_class = {
    .class_name = "H.263p encoder",
    .item_name = av_default_item_name,
    .option = h263p_options,
    .version = LIBAVUTIL_VERSION_INT,
};
4825 
4827  .name = "h263p",
4828  .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4829  .type = AVMEDIA_TYPE_VIDEO,
4830  .id = AV_CODEC_ID_H263P,
4831  .priv_data_size = sizeof(MpegEncContext),
4833  .encode2 = ff_mpv_encode_picture,
4834  .close = ff_mpv_encode_end,
4835  .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4836  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4838  .priv_class = &h263p_class,
4839 };
4840 
/* AVClass using the generic mpegvideo option table. */
static const AVClass msmpeg4v2_class = {
    .class_name = "msmpeg4v2 encoder",
    .item_name = av_default_item_name,
    .option = ff_mpv_generic_options,
    .version = LIBAVUTIL_VERSION_INT,
};
4847 
4849  .name = "msmpeg4v2",
4850  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4851  .type = AVMEDIA_TYPE_VIDEO,
4852  .id = AV_CODEC_ID_MSMPEG4V2,
4853  .priv_data_size = sizeof(MpegEncContext),
4855  .encode2 = ff_mpv_encode_picture,
4856  .close = ff_mpv_encode_end,
4857  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4859  .priv_class = &msmpeg4v2_class,
4860 };
4861 
/* AVClass using the generic mpegvideo option table. */
static const AVClass msmpeg4v3_class = {
    .class_name = "msmpeg4v3 encoder",
    .item_name = av_default_item_name,
    .option = ff_mpv_generic_options,
    .version = LIBAVUTIL_VERSION_INT,
};
4868 
4870  .name = "msmpeg4",
4871  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4872  .type = AVMEDIA_TYPE_VIDEO,
4873  .id = AV_CODEC_ID_MSMPEG4V3,
4874  .priv_data_size = sizeof(MpegEncContext),
4876  .encode2 = ff_mpv_encode_picture,
4877  .close = ff_mpv_encode_end,
4878  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4880  .priv_class = &msmpeg4v3_class,
4881 };
4882 
/* AVClass using the generic mpegvideo option table. */
static const AVClass wmv1_class = {
    .class_name = "wmv1 encoder",
    .item_name = av_default_item_name,
    .option = ff_mpv_generic_options,
    .version = LIBAVUTIL_VERSION_INT,
};
4889 
4891  .name = "wmv1",
4892  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4893  .type = AVMEDIA_TYPE_VIDEO,
4894  .id = AV_CODEC_ID_WMV1,
4895  .priv_data_size = sizeof(MpegEncContext),
4897  .encode2 = ff_mpv_encode_picture,
4898  .close = ff_mpv_encode_end,
4899  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4901  .priv_class = &wmv1_class,
4902 };
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
AAN (Arai, Agui and Nakajima) (I)DCT tables.
#define av_always_inline
Definition: attributes.h:45
#define av_cold
Definition: attributes.h:88
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! 
*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> dc
uint8_t
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
Libavcodec external API header.
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
Definition: avcodec.h:1605
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: avcodec.h:1606
#define FF_CMP_DCTMAX
Definition: avcodec.h:954
#define FF_CMP_VSSE
Definition: avcodec.h:950
#define FF_CMP_NSSE
Definition: avcodec.h:951
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:1027
#define FF_COMPLIANCE_NORMAL
Definition: avcodec.h:1604
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:1026
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1785
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:1025
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, buffer_size_t size)
Definition: avpacket.c:584
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
Definition: avpacket.c:820
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, buffer_size_t size)
Definition: avpacket.c:343
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:31
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:69
#define s(width, name)
Definition: cbs_vp9.c:257
#define fail()
Definition: checkasm.h:133
#define FFMAX3(a, b, c)
Definition: common.h:104
#define FFMIN(a, b)
Definition: common.h:105
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
#define av_clip
Definition: common.h:122
#define ROUNDED_DIV(a, b)
Definition: common.h:56
#define FFMAX(a, b)
Definition: common.h:103
#define av_clip_uint8
Definition: common.h:128
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
#define CONFIG_H261_ENCODER
Definition: config.h:1319
#define CONFIG_WMV2_ENCODER
Definition: config.h:1370
#define CONFIG_MPEG2VIDEO_ENCODER
Definition: config.h:1330
#define CONFIG_RV20_ENCODER
Definition: config.h:1353
#define CONFIG_MPEG1VIDEO_ENCODER
Definition: config.h:1329
#define CONFIG_SPEEDHQ_ENCODER
Definition: config.h:1357
#define ARCH_X86
Definition: config.h:39
#define CONFIG_FLV_ENCODER
Definition: config.h:1317
#define CONFIG_FAANDCT
Definition: config.h:634
#define CONFIG_RV10_ENCODER
Definition: config.h:1352
#define CONFIG_H263P_ENCODER
Definition: config.h:1321
#define CONFIG_H263_ENCODER
Definition: config.h:1320
#define CONFIG_MJPEG_ENCODER
Definition: config.h:1328
#define CONFIG_MPEG4_ENCODER
Definition: config.h:1331
#define NULL
Definition: coverity.c:32
long long int64_t
Definition: coverity.c:34
#define max(a, b)
Definition: cuda_runtime.h:33
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:208
void ff_jpeg_fdct_islow_10(int16_t *data)
void ff_jpeg_fdct_islow_8(int16_t *data)
static AVFrame * frame
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
Definition: encode.c:33
perm
Definition: f_perms.c:74
void ff_faandct(int16_t *data)
Definition: faandct.c:114
Floating point AAN DCT.
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:26
int
@ AV_OPT_TYPE_INT
Definition: opt.h:225
@ AV_OPT_TYPE_BOOL
Definition: opt.h:242
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:287
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:144
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:300
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:343
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:321
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:275
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:308
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:338
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:112
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:296
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:304
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:342
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
Definition: allcodecs.c:941
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:325
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:312
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:279
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:173
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:188
@ AV_CODEC_ID_MSMPEG4V1
Definition: codec_id.h:63
@ AV_CODEC_ID_H261
Definition: codec_id.h:52
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:70
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:67
@ AV_CODEC_ID_MSMPEG4V2
Definition: codec_id.h:64
@ AV_CODEC_ID_WMV1
Definition: codec_id.h:66
@ AV_CODEC_ID_RV10
Definition: codec_id.h:54
@ AV_CODEC_ID_SPEEDHQ
Definition: codec_id.h:274
@ AV_CODEC_ID_RV20
Definition: codec_id.h:55
@ AV_CODEC_ID_H263
Definition: codec_id.h:53
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:61
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:56
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:50
@ AV_CODEC_ID_H263P
Definition: codec_id.h:68
@ AV_CODEC_ID_MSMPEG4V3
Definition: codec_id.h:65
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:51
@ AV_CODEC_ID_AMV
Definition: codec_id.h:156
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:395
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding.
Definition: avcodec.h:215
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:364
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end which will always be 0.
Definition: utils.c:50
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:1050
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:75
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:634
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:410
FF_ENABLE_DEPRECATION_WARNINGS int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: avpacket.c:309
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: avpacket.c:64
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblocks relevant to splitting the packet into smaller packets on macroblock edges (e.g. as for RFC 2190).
Definition: packet.h:93
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:145
#define FF_LAMBDA_SCALE
Definition: avutil.h:226
#define FF_LAMBDA_SHIFT
Definition: avutil.h:225
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:54
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AVERROR_EOF
End of file.
Definition: error.h:55
#define AVERROR(e)
Definition: error.h:43
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:337
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:658
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:210
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
Definition: mem.c:237
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
Definition: avutil.h:277
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
int index
Definition: gxfenc.c:89
H.261 codec.
void ff_h261_encode_init(MpegEncContext *s)
Definition: h261enc.c:373
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: h261enc.c:238
void ff_h261_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: h261enc.c:54
void ff_h261_reorder_mb_index(MpegEncContext *s)
Definition: h261enc.c:109
int ff_h261_get_picture_format(int width, int height)
Definition: h261enc.c:41
void ff_h263_update_motion_val(MpegEncContext *s)
Definition: h263.c:54
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:147
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:319
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in H.263 (limit difference to -2..2)
Definition: ituh263enc.c:266
void ff_h263_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: ituh263enc.c:103
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: ituh263enc.c:447
void ff_h263_encode_init(MpegEncContext *s)
Definition: ituh263enc.c:757
#define H263_GOB_HEIGHT(h)
Definition: h263.h:43
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
Definition: ituh263enc.c:240
const uint8_t ff_h263_chroma_qscale_table[32]
Definition: h263data.c:260
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:236
H.263 tables.
av_cold void ff_h263dsp_init(H263DSPContext *ctx)
Definition: h263dsp.c:117
for(j=16;j >0;--j)
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
static const int32_t qmat16[MAT_SIZE]
Definition: hq_hqadata.c:342
cl_device_type type
@ FF_IDCT_PERM_NONE
Definition: idctdsp.h:38
int i
Definition: input.c:407
#define av_log2
Definition: intmath.h:83
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:218
static int weight(int i, int blen, int offset)
Definition: diracdec.c:1561
void ff_flv_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: flvenc.c:27
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: utils.c:1064
#define STRIDE_ALIGN
Definition: internal.h:118
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
Definition: internal.h:49
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:903
const char * arg
Definition: jacosubdec.c:66
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
Definition: pixblockdsp.c:81
static void direct(const float *in, const FFTComplex *ir, int len, float *out)
Definition: af_afir.c:60
FLV common header.
common internal API header
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:83
#define PTRDIFF_SPECIFIER
Definition: internal.h:192
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
#define emms_c()
Definition: internal.h:54
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:103
#define AVOnce
Definition: thread.h:172
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:175
#define AV_ONCE_INIT
Definition: thread.h:173
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:309
#define lrintf(x)
Definition: libm_mips.h:70
const char * desc
Definition: libsvtav1.c:79
uint8_t w
Definition: llviddspenc.c:39
int stride
Definition: mace.c:144
#define FFALIGN(x, a)
Definition: macros.h:48
#define M_PI
Definition: mathematics.h:52
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
#define ff_sqrt
Definition: mathops.h:206
const uint32_t ff_square_tab[512]
Definition: me_cmp.c:34
void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
Definition: me_cmp.c:475
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:1015
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:130
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mjpegenc.c:472
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
Definition: mjpegenc.c:258
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
Definition: mjpegenc.c:184
av_cold void ff_mjpeg_encode_close(MpegEncContext *s)
Definition: mjpegenc.c:313
MJPEG encoder.
@ HUFFMAN_TABLE_OPTIMAL
Compute and use optimal Huffman tables.
Definition: mjpegenc.h:97
void ff_mjpeg_encode_picture_header(AVCodecContext *avctx, PutBitContext *pb, ScanTable *intra_scantable, int pred, uint16_t luma_intra_matrix[64], uint16_t chroma_intra_matrix[64])
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
static const uint8_t mv_bits[2][16][10]
Definition: mobiclip.c:162
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:885
void ff_fix_long_p_mvs(MpegEncContext *s, int type)
Definition: motion_est.c:1650
int ff_init_me(MpegEncContext *s)
Definition: motion_est.c:306
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1699
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1061
int ff_get_best_fcode(MpegEncContext *s, int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1598
void ff_estimate_b_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1490
#define MAX_MV
Definition: motion_est.h:35
#define MAX_DMV
Definition: motion_est.h:37
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:114
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64], int motion_x, int motion_y)
void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
void ff_mpeg1_encode_init(MpegEncContext *s)
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:30
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:41
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:335
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:346
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:45
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:117
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
void ff_mpeg4_merge_partitions(MpegEncContext *s)
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
void ff_set_mpeg4_time(MpegEncContext *s)
void ff_mpeg4_init_partitions(MpegEncContext *s)
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
Definition: mpegpicture.c:232
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture.
Definition: mpegpicture.c:295
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
Definition: mpegpicture.c:440
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
Definition: mpegpicture.c:355
void ff_free_picture_tables(Picture *pic)
Definition: mpegpicture.c:454
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:32
#define EDGE_WIDTH
Definition: mpegpicture.h:33
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegutils.h:104
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegutils.h:111
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegutils.h:115
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegutils.h:114
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegutils.h:117
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegutils.h:116
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegutils.h:107
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegutils.h:105
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegutils.h:109
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegutils.h:112
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegutils.h:110
#define MAX_FCODE
Definition: mpegutils.h:48
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegutils.h:106
#define MAX_MB_BYTES
Definition: mpegutils.h:47
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:38
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegutils.h:119
#define INPLACE_OFFSET
Definition: mpegutils.h:121
@ FMT_H261
Definition: mpegutils.h:125
@ FMT_MPEG1
Definition: mpegutils.h:124
@ FMT_SPEEDHQ
Definition: mpegutils.h:128
@ FMT_H263
Definition: mpegutils.h:126
@ FMT_MJPEG
Definition: mpegutils.h:127
#define PICT_FRAME
Definition: mpegutils.h:39
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:676
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1111
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:913
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo.c:499
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:331
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:2248
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:2331
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:2267
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:1904
mpegvideo header.
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideo.h:596
void ff_dct_encode_init_x86(MpegEncContext *s)
Definition: mpegvideoenc.c:214
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:263
static int get_bits_diff(MpegEncContext *s)
Definition: mpegvideo.h:765
#define MAX_B_FRAMES
Definition: mpegvideo.h:64
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
#define MV_DIR_FORWARD
Definition: mpegvideo.h:262
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:269
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:267
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideo.h:595
#define CHROMA_420
Definition: mpegvideo.h:488
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideo.h:318
#define CHROMA_444
Definition: mpegvideo.h:490
#define FF_MPV_COMMON_OPTS
Definition: mpegvideo.h:629
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:264
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:266
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideo.h:593
#define CHROMA_422
Definition: mpegvideo.h:489
static void ff_update_block_index(MpegEncContext *s)
Definition: mpegvideo.h:750
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideo.h:594
static int estimate_qp(MpegEncContext *s, int dry_run)
static void update_noise_reduction(MpegEncContext *s)
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:75
AVCodec ff_wmv1_encoder
static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride)
AVCodec ff_msmpeg4v2_encoder
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:92
static void set_frame_distances(MpegEncContext *s)
const AVOption ff_mpv_generic_options[]
Definition: mpegvideo_enc.c:87
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
#define MERGE(field)
AVCodec ff_h263_encoder
static const AVClass h263p_class
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_DMV *2+1]
Definition: mpegvideo_enc.c:84
static int estimate_best_b_count(MpegEncContext *s)
static int mb_var_thread(AVCodecContext *c, void *arg)
static int estimate_motion_thread(AVCodecContext *c, void *arg)
#define VE
static int select_input_picture(MpegEncContext *s)
static const AVOption h263p_options[]
static const AVClass wmv1_class
static const AVClass msmpeg4v3_class
static int encode_frame(AVCodecContext *c, AVFrame *frame, AVPacket *pkt)
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
static int get_sae(uint8_t *src, int ref, int stride)
AVCodec ff_msmpeg4v3_encoder
static const AVClass h263_class
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:85
AVCodec ff_h263p_encoder
static void build_basis(uint8_t *perm)
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count)
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:73
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
static void frame_end(MpegEncContext *s)
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
static void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type)
static void mpv_encode_init_static(void)
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
static int sse_mb(MpegEncContext *s)
static int frame_start(MpegEncContext *s)
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
static int16_t basis[64][64]
static void mpv_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
static int encode_picture(MpegEncContext *s, int picture_number)
static const AVOption h263_options[]
void ff_init_qscale_tab(MpegEncContext *s)
init s->current_picture.qscale_table from s->lambda_table
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride)
static void write_mb_info(MpegEncContext *s)
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
static const AVClass msmpeg4v2_class
#define OFFSET(x)
static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src)
static void update_qscale(MpegEncContext *s)
#define COPY(a)
static int encode_thread(AVCodecContext *c, void *arg)
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
static void write_slice_end(MpegEncContext *s)
static void update_mb_info(MpegEncContext *s, int startcode)
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
av_cold int ff_dct_encode_init(MpegEncContext *s)
static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
static void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type)
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:76
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
const uint8_t *const ff_mpeg2_dc_scale_table[4]
Definition: mpegvideodata.c:77
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
#define BASIS_SHIFT
#define EDGE_BOTTOM
#define RECON_SHIFT
#define EDGE_TOP
void ff_msmpeg4_encode_init(MpegEncContext *s)
Definition: msmpeg4enc.c:116
#define CONFIG_MSMPEG4_ENCODER
Definition: msmpeg4.h:75
void ff_msmpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: msmpeg4enc.c:217
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
Definition: msmpeg4enc.c:277
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: msmpeg4enc.c:369
const char data[16]
Definition: mxf.c:142
AVOptions.
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2601
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2573
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:586
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting color_range
Definition: pixfmt.h:79
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting color_range
Definition: pixfmt.h:80
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting color_range
Definition: pixfmt.h:78
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:57
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:88
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:376
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:342
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:76
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:110
static const int BUF_BITS
Definition: put_bits.h:42
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
Definition: qpeldsp.c:783
quarterpel DSP functions
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
av_cold int ff_rate_control_init(MpegEncContext *s)
Definition: ratecontrol.c:472
av_cold void ff_rate_control_uninit(MpegEncContext *s)
Definition: ratecontrol.c:672
void ff_get_2pass_fcode(MpegEncContext *s)
Definition: ratecontrol.c:857
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
Definition: ratecontrol.c:868
int ff_vbv_update(MpegEncContext *s, int frame_size)
Definition: ratecontrol.c:681
void ff_write_pass1_stats(MpegEncContext *s)
Definition: ratecontrol.c:38
#define MAX_LEVEL
Definition: rl.h:36
#define MAX_RUN
Definition: rl.h:35
int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv10enc.c:32
void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv20enc.c:35
#define FF_ARRAY_ELEMS(a)
static int shift(int a, int b)
Definition: sonic.c:82
static const uint8_t sp5x_qscale_five_quant_table[][64]
Definition: sp5x.h:135
void ff_speedhq_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: speedhqenc.c:245
void ff_speedhq_end_slice(MpegEncContext *s)
Definition: speedhqenc.c:150
av_cold int ff_speedhq_encode_init(MpegEncContext *s)
Definition: speedhqenc.c:102
int ff_speedhq_mb_y_order_to_mb(int mb_y_order, int mb_height, int *first_in_slice)
Definition: speedhqenc.c:273
void ff_speedhq_encode_picture_header(MpegEncContext *s)
Definition: speedhqenc.c:140
SpeedHQ encoder.
This structure describes the bitrate properties of an encoded bitstream.
Definition: avcodec.h:453
int avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: avcodec.h:477
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when it should be decoded, in periods of a 27MHz clock.
Definition: avcodec.h:495
int min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: avcodec.h:468
int buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: avcodec.h:486
int max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: avcodec.h:459
Describe the class of an AVClass context structure.
Definition: log.h:67
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
Definition: log.h:72
main external API structure.
Definition: avcodec.h:536
attribute_deprecated int brd_scale
Definition: avcodec.h:1109
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow.
Definition: avcodec.h:1430
int trellis
trellis RD quantization
Definition: avcodec.h:1487
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:746
attribute_deprecated int pre_me
Definition: avcodec.h:976
int width
picture width / height.
Definition: avcodec.h:709
attribute_deprecated int i_count
Definition: avcodec.h:1539
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1557
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1401
attribute_deprecated int header_bits
Definition: avcodec.h:1533
attribute_deprecated int scenechange_threshold
Definition: avcodec.h:1050
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1171
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1601
attribute_deprecated int frame_bits
Definition: avcodec.h:1549
attribute_deprecated int mv_bits
Definition: avcodec.h:1531
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 relative to the input.
Definition: avcodec.h:796
int qmin
minimum quantizer
Definition: avcodec.h:1380
attribute_deprecated int b_sensitivity
Definition: avcodec.h:1142
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:915
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions, and will be freed in avcodec_free_context().
Definition: avcodec.h:1045
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1792
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:594
int mb_decision
macroblock decision mode
Definition: avcodec.h:1024
attribute_deprecated int misc_bits
Definition: avcodec.h:1545
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:826
attribute_deprecated int frame_skip_threshold
Definition: avcodec.h:1467
int64_t bit_rate
the average bitrate
Definition: avcodec.h:586
const struct AVCodec * codec
Definition: avcodec.h:545
attribute_deprecated int frame_skip_cmp
Definition: avcodec.h:1479
attribute_deprecated int p_count
Definition: avcodec.h:1541
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1747
attribute_deprecated int mpeg_quant
Definition: avcodec.h:831
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:862
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:876
int delay
Codec delay.
Definition: avcodec.h:692
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:883
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:731
attribute_deprecated int rtp_payload_size
Definition: avcodec.h:1520
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:940
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1416
attribute_deprecated int me_penalty_compensation
Definition: avcodec.h:1097
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:1773
int qmax
maximum quantizer
Definition: avcodec.h:1387
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:1036
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:659
attribute_deprecated int i_tex_bits
Definition: avcodec.h:1535
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:616
attribute_deprecated int frame_skip_exp
Definition: avcodec.h:1475
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:637
attribute_deprecated int prediction_method
Definition: avcodec.h:895
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1423
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:1062
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1699
attribute_deprecated int p_tex_bits
Definition: avcodec.h:1537
attribute_deprecated int noise_reduction
Definition: avcodec.h:1054
attribute_deprecated int skip_count
Definition: avcodec.h:1543
enum AVCodecID codec_id
Definition: avcodec.h:546
attribute_deprecated int b_frame_strategy
Definition: avcodec.h:810
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:855
attribute_deprecated int frame_skip_factor
Definition: avcodec.h:1471
attribute_deprecated uint64_t vbv_delay
VBV delay coded in the last frame (in periods of a 27 MHz clock).
Definition: avcodec.h:2029
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:571
void * priv_data
Definition: avcodec.h:563
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:869
int slices
Number of slices.
Definition: avcodec.h:1187
unsigned int byte_buffer_size
Definition: internal.h:166
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:165
AVCodec.
Definition: codec.h:197
enum AVCodecID id
Definition: codec.h:211
const char * name
Name of the codec implementation.
Definition: codec.h:204
This structure describes decoded (raw) audio or video data.
Definition: frame.h:318
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:411
int display_picture_number
picture number in display order
Definition: frame.h:436
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:332
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:509
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:349
AVOption.
Definition: opt.h:248
This structure stores compressed data.
Definition: packet.h:346
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:375
int size
Definition: packet.h:370
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:362
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:368
uint8_t * data
Definition: packet.h:369
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
int num
Numerator.
Definition: rational.h:59
int den
Denominator.
Definition: rational.h:60
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:27
MpegEncContext.
Definition: mpegvideo.h:81
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:407
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:513
PutBitContext tex_pb
used for data partitioned VOPs
Definition: mpegvideo.h:410
int data_partitioning
data partitioning flag from header
Definition: mpegvideo.h:406
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:276
int interlaced_dct
Definition: mpegvideo.h:496
int noise_reduction
Definition: mpegvideo.h:587
int last_bits
temp var used for calculating the above vars
Definition: mpegvideo.h:353
PutBitContext pb2
used for data partitioned VOPs
Definition: mpegvideo.h:411
int qscale
QP.
Definition: mpegvideo.h:204
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:86
int misc_bits
cbp, mb_type
Definition: mpegvideo.h:352
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:185
int esc3_level_length
Definition: mpegvideo.h:442
PutBitContext pb
bit output
Definition: mpegvideo.h:151
int last_mv[2][2][2]
last MV, used for MV prediction in MPEG-1 & B-frame MPEG-4
Definition: mpegvideo.h:278
int mb_skipped
MUST BE SET only during DECODING.
Definition: mpegvideo.h:195
int dquant
qscale difference to prev qscale
Definition: mpegvideo.h:210
Picture.
Definition: mpegpicture.h:45
int reference
Definition: mpegpicture.h:88
int shared
Definition: mpegpicture.h:89
struct AVFrame * f
Definition: mpegpicture.h:46
uint8_t * buf
Definition: put_bits.h:47
uint8_t * buf_end
Definition: put_bits.h:47
rate control context.
Definition: ratecontrol.h:63
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:66
uint8_t run
Definition: svq3.c:205
uint8_t level
Definition: svq3.c:206
#define av_free(p)
#define ff_dlog(a,...)
#define av_freep(p)
#define av_log(a,...)
#define src1
Definition: h264pred.c:140
#define src
Definition: vp8dsp.c:255
static int16_t block[64]
Definition: dct.c:116
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
AVPacket * pkt
Definition: movenc.c:59
int out_size
Definition: movenc.c:55
#define height
#define width
static int64_t pts
int size
static const struct twinvq_data tab
#define me
const char * b
Definition: vf_curves.c:118
const char * g
Definition: vf_curves.c:117
const char * r
Definition: vf_curves.c:116
else temp
Definition: vf_mcdeint.c:259
if(ret< 0)
Definition: vf_mcdeint.c:282
static float mean(const float *input, int size)
Definition: vf_nnedi.c:864
static const double coeff[2][5]
Definition: vf_owdenoise.c:73
static av_always_inline int diff(const uint32_t a, const uint32_t b)
static const uint8_t offset[127][2]
Definition: vf_spp.c:107
uint8_t bits
Definition: vp3data.h:141
static double c[64]
int ff_wmv2_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: wmv2enc.c:74
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: wmv2enc.c:147
int acc
Definition: yuv2rgb.c:555