FFmpeg 4.4.4
mpegvideo_motion.c
1 /*
2  * Copyright (c) 2000,2001 Fabrice Bellard
3  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "libavutil/avassert.h"
25 #include "libavutil/internal.h"
26 #include "libavutil/mem_internal.h"
27 
28 #include "avcodec.h"
29 #include "h261.h"
30 #include "mpegutils.h"
31 #include "mpegvideo.h"
32 #include "qpeldsp.h"
33 #include "wmv2.h"
34 
35 static void gmc1_motion(MpegEncContext *s,
36  uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
37  uint8_t **ref_picture)
38 {
39  uint8_t *ptr;
40  int src_x, src_y, motion_x, motion_y;
41  ptrdiff_t offset, linesize, uvlinesize;
42  int emu = 0;
43 
44  motion_x = s->sprite_offset[0][0];
45  motion_y = s->sprite_offset[0][1];
46  src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy + 1));
47  src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy + 1));
48  motion_x *= 1 << (3 - s->sprite_warping_accuracy);
49  motion_y *= 1 << (3 - s->sprite_warping_accuracy);
50  src_x = av_clip(src_x, -16, s->width);
51  if (src_x == s->width)
52  motion_x = 0;
53  src_y = av_clip(src_y, -16, s->height);
54  if (src_y == s->height)
55  motion_y = 0;
56 
57  linesize = s->linesize;
58  uvlinesize = s->uvlinesize;
59 
60  ptr = ref_picture[0] + src_y * linesize + src_x;
61 
62  if ((unsigned)src_x >= FFMAX(s->h_edge_pos - 17, 0) ||
63  (unsigned)src_y >= FFMAX(s->v_edge_pos - 17, 0)) {
64  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
65  linesize, linesize,
66  17, 17,
67  src_x, src_y,
68  s->h_edge_pos, s->v_edge_pos);
69  ptr = s->sc.edge_emu_buffer;
70  }
71 
72  if ((motion_x | motion_y) & 7) {
73  s->mdsp.gmc1(dest_y, ptr, linesize, 16,
74  motion_x & 15, motion_y & 15, 128 - s->no_rounding);
75  s->mdsp.gmc1(dest_y + 8, ptr + 8, linesize, 16,
76  motion_x & 15, motion_y & 15, 128 - s->no_rounding);
77  } else {
78  int dxy;
79 
80  dxy = ((motion_x >> 3) & 1) | ((motion_y >> 2) & 2);
81  if (s->no_rounding) {
82  s->hdsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
83  } else {
84  s->hdsp.put_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
85  }
86  }
87 
88  if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
89  return;
90 
91  motion_x = s->sprite_offset[1][0];
92  motion_y = s->sprite_offset[1][1];
93  src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy + 1));
94  src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy + 1));
95  motion_x *= 1 << (3 - s->sprite_warping_accuracy);
96  motion_y *= 1 << (3 - s->sprite_warping_accuracy);
97  src_x = av_clip(src_x, -8, s->width >> 1);
98  if (src_x == s->width >> 1)
99  motion_x = 0;
100  src_y = av_clip(src_y, -8, s->height >> 1);
101  if (src_y == s->height >> 1)
102  motion_y = 0;
103 
104  offset = (src_y * uvlinesize) + src_x;
105  ptr = ref_picture[1] + offset;
106  if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - 9, 0) ||
107  (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - 9, 0)) {
108  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
109  uvlinesize, uvlinesize,
110  9, 9,
111  src_x, src_y,
112  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
113  ptr = s->sc.edge_emu_buffer;
114  emu = 1;
115  }
116  s->mdsp.gmc1(dest_cb, ptr, uvlinesize, 8,
117  motion_x & 15, motion_y & 15, 128 - s->no_rounding);
118 
119  ptr = ref_picture[2] + offset;
120  if (emu) {
121  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
122  uvlinesize, uvlinesize,
123  9, 9,
124  src_x, src_y,
125  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
126  ptr = s->sc.edge_emu_buffer;
127  }
128  s->mdsp.gmc1(dest_cr, ptr, uvlinesize, 8,
129  motion_x & 15, motion_y & 15, 128 - s->no_rounding);
130 }
131 
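The offset arithmetic at the top of gmc1_motion() is easy to check in isolation. The standalone sketch below (example values only; none of these names are FFmpeg API) mirrors it: the one-point sprite offset is stored in 1/(1 << (accuracy + 1))-pel units, the high bits pick the integer source column, and the remainder, rescaled to 1/16 pel, is the phase handed to the gmc1() DSP routine.

/* Standalone sketch (not part of FFmpeg): how gmc1_motion() splits one
 * sprite offset into an integer source column and a 1/16-pel phase.
 * Example values only. */
#include <stdio.h>

int main(void)
{
    const int a        = 3;   /* sprite_warping_accuracy (0..3) */
    const int mb_x     = 5;   /* macroblock column */
    const int motion_x = 93;  /* sprite_offset[0][0], in 1/(1 << (a + 1)) pel */

    int src_x = mb_x * 16 + (motion_x >> (a + 1)); /* integer luma column */
    int phase = (motion_x * (1 << (3 - a))) & 15;  /* 1/16-pel phase for gmc1() */

    printf("src_x = %d, phase = %d/16 pel\n", src_x, phase);
    return 0;
}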
132 static void gmc_motion(MpegEncContext *s,
133  uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
134  uint8_t **ref_picture)
135 {
136  uint8_t *ptr;
137  int linesize, uvlinesize;
138  const int a = s->sprite_warping_accuracy;
139  int ox, oy;
140 
141  linesize = s->linesize;
142  uvlinesize = s->uvlinesize;
143 
144  ptr = ref_picture[0];
145 
146  ox = s->sprite_offset[0][0] + s->sprite_delta[0][0] * s->mb_x * 16 +
147  s->sprite_delta[0][1] * s->mb_y * 16;
148  oy = s->sprite_offset[0][1] + s->sprite_delta[1][0] * s->mb_x * 16 +
149  s->sprite_delta[1][1] * s->mb_y * 16;
150 
151  s->mdsp.gmc(dest_y, ptr, linesize, 16,
152  ox, oy,
153  s->sprite_delta[0][0], s->sprite_delta[0][1],
154  s->sprite_delta[1][0], s->sprite_delta[1][1],
155  a + 1, (1 << (2 * a + 1)) - s->no_rounding,
156  s->h_edge_pos, s->v_edge_pos);
157  s->mdsp.gmc(dest_y + 8, ptr, linesize, 16,
158  ox + s->sprite_delta[0][0] * 8,
159  oy + s->sprite_delta[1][0] * 8,
160  s->sprite_delta[0][0], s->sprite_delta[0][1],
161  s->sprite_delta[1][0], s->sprite_delta[1][1],
162  a + 1, (1 << (2 * a + 1)) - s->no_rounding,
163  s->h_edge_pos, s->v_edge_pos);
164 
165  if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
166  return;
167 
168  ox = s->sprite_offset[1][0] + s->sprite_delta[0][0] * s->mb_x * 8 +
169  s->sprite_delta[0][1] * s->mb_y * 8;
170  oy = s->sprite_offset[1][1] + s->sprite_delta[1][0] * s->mb_x * 8 +
171  s->sprite_delta[1][1] * s->mb_y * 8;
172 
173  ptr = ref_picture[1];
174  s->mdsp.gmc(dest_cb, ptr, uvlinesize, 8,
175  ox, oy,
176  s->sprite_delta[0][0], s->sprite_delta[0][1],
177  s->sprite_delta[1][0], s->sprite_delta[1][1],
178  a + 1, (1 << (2 * a + 1)) - s->no_rounding,
179  (s->h_edge_pos + 1) >> 1, (s->v_edge_pos + 1) >> 1);
180 
181  ptr = ref_picture[2];
182  s->mdsp.gmc(dest_cr, ptr, uvlinesize, 8,
183  ox, oy,
184  s->sprite_delta[0][0], s->sprite_delta[0][1],
185  s->sprite_delta[1][0], s->sprite_delta[1][1],
186  a + 1, (1 << (2 * a + 1)) - s->no_rounding,
187  (s->h_edge_pos + 1) >> 1, (s->v_edge_pos + 1) >> 1);
188 }
189 
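gmc_motion() handles the general case, where the source position is an affine function of the macroblock position. A minimal standalone sketch, with invented offsets and deltas, of how the ox/oy start positions above and the rounding constant passed to gmc() are formed:

/* Standalone sketch (not FFmpeg API): evaluating the affine GMC start
 * position for one macroblock, mirroring the ox/oy expressions above. */
#include <stdio.h>

int main(void)
{
    const int a = 2;                          /* sprite_warping_accuracy */
    const int no_rounding = 0;
    const int sprite_offset[2] = { 40, -12 }; /* example luma offsets */
    const int delta[2][2] = { { 9, 1 },       /* example sprite_delta */
                              { -1, 9 } };
    const int mb_x = 3, mb_y = 2;

    int ox = sprite_offset[0] + delta[0][0] * mb_x * 16 + delta[0][1] * mb_y * 16;
    int oy = sprite_offset[1] + delta[1][0] * mb_x * 16 + delta[1][1] * mb_y * 16;
    int rounder = (1 << (2 * a + 1)) - no_rounding; /* passed to gmc() */

    printf("ox = %d, oy = %d, rounder = %d\n", ox, oy, rounder);
    return 0;
}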
190 static inline int hpel_motion(MpegEncContext *s,
191  uint8_t *dest, uint8_t *src,
192  int src_x, int src_y,
193  op_pixels_func *pix_op,
194  int motion_x, int motion_y)
195 {
196  int dxy = 0;
197  int emu = 0;
198 
199  src_x += motion_x >> 1;
200  src_y += motion_y >> 1;
201 
202  /* WARNING: do not forget half pels */
203  src_x = av_clip(src_x, -16, s->width); // FIXME unneeded for emu?
204  if (src_x != s->width)
205  dxy |= motion_x & 1;
206  src_y = av_clip(src_y, -16, s->height);
207  if (src_y != s->height)
208  dxy |= (motion_y & 1) << 1;
209  src += src_y * s->linesize + src_x;
210 
211  if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 1) - 7, 0) ||
212  (unsigned)src_y >= FFMAX(s->v_edge_pos - (motion_y & 1) - 7, 0)) {
213  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
214  s->linesize, s->linesize,
215  9, 9,
216  src_x, src_y,
217  s->h_edge_pos, s->v_edge_pos);
218  src = s->sc.edge_emu_buffer;
219  emu = 1;
220  }
221  pix_op[dxy](dest, src, s->linesize, 8);
222  return emu;
223 }
224 
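hpel_motion() folds the two half-pel flags of the vector into a 2-bit index, dxy (bit 0 horizontal, bit 1 vertical), which selects one of the four functions in an op_pixels_func table; the edge clipping above can clear those bits. A standalone sketch of the encoding with a few invented vectors:

/* Standalone sketch (not FFmpeg API): the 2-bit half-pel index used to pick
 * pix_op[dxy] in hpel_motion(): bit 0 = horizontal half pel, bit 1 = vertical. */
#include <stdio.h>

int main(void)
{
    const int motion[4][2] = { { 6, 4 }, { 7, 4 }, { 6, 5 }, { -3, -3 } };

    for (int i = 0; i < 4; i++) {
        int mx  = motion[i][0], my = motion[i][1];
        int dxy = (mx & 1) | ((my & 1) << 1);
        printf("mv=(%2d,%2d) -> full-pel step (%2d,%2d), dxy=%d\n",
               mx, my, mx >> 1, my >> 1, dxy);
    }
    return 0;
}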
225 static av_always_inline
226 void mpeg_motion_internal(MpegEncContext *s,
227  uint8_t *dest_y,
228  uint8_t *dest_cb,
229  uint8_t *dest_cr,
230  int field_based,
231  int bottom_field,
232  int field_select,
233  uint8_t **ref_picture,
234  op_pixels_func (*pix_op)[4],
235  int motion_x,
236  int motion_y,
237  int h,
238  int is_mpeg12,
239  int is_16x8,
240  int mb_y)
241 {
242  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
243  int dxy, uvdxy, mx, my, src_x, src_y,
244  uvsrc_x, uvsrc_y, v_edge_pos, block_y_half;
245  ptrdiff_t uvlinesize, linesize;
246 
247  v_edge_pos = s->v_edge_pos >> field_based;
248  linesize = s->current_picture.f->linesize[0] << field_based;
249  uvlinesize = s->current_picture.f->linesize[1] << field_based;
250  block_y_half = (field_based | is_16x8);
251 
252  dxy = ((motion_y & 1) << 1) | (motion_x & 1);
253  src_x = s->mb_x * 16 + (motion_x >> 1);
254  src_y = (mb_y << (4 - block_y_half)) + (motion_y >> 1);
255 
256  if (!is_mpeg12 && s->out_format == FMT_H263) {
257  if ((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based) {
258  mx = (motion_x >> 1) | (motion_x & 1);
259  my = motion_y >> 1;
260  uvdxy = ((my & 1) << 1) | (mx & 1);
261  uvsrc_x = s->mb_x * 8 + (mx >> 1);
262  uvsrc_y = (mb_y << (3 - block_y_half)) + (my >> 1);
263  } else {
264  uvdxy = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
265  uvsrc_x = src_x >> 1;
266  uvsrc_y = src_y >> 1;
267  }
268  // Even chroma mv's are full pel in H261
269  } else if (!is_mpeg12 && s->out_format == FMT_H261) {
270  mx = motion_x / 4;
271  my = motion_y / 4;
272  uvdxy = 0;
273  uvsrc_x = s->mb_x * 8 + mx;
274  uvsrc_y = mb_y * 8 + my;
275  } else {
276  if (s->chroma_y_shift) {
277  mx = motion_x / 2;
278  my = motion_y / 2;
279  uvdxy = ((my & 1) << 1) | (mx & 1);
280  uvsrc_x = s->mb_x * 8 + (mx >> 1);
281  uvsrc_y = (mb_y << (3 - block_y_half)) + (my >> 1);
282  } else {
283  if (s->chroma_x_shift) {
284  // Chroma422
285  mx = motion_x / 2;
286  uvdxy = ((motion_y & 1) << 1) | (mx & 1);
287  uvsrc_x = s->mb_x * 8 + (mx >> 1);
288  uvsrc_y = src_y;
289  } else {
290  // Chroma444
291  uvdxy = dxy;
292  uvsrc_x = src_x;
293  uvsrc_y = src_y;
294  }
295  }
296  }
297 
298  ptr_y = ref_picture[0] + src_y * linesize + src_x;
299  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
300  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
301 
302  if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 1) - 15 , 0) ||
303  (unsigned)src_y >= FFMAX( v_edge_pos - (motion_y & 1) - h + 1, 0)) {
304  if (is_mpeg12 || (CONFIG_SMALL &&
305  (s->codec_id == AV_CODEC_ID_MPEG2VIDEO ||
306  s->codec_id == AV_CODEC_ID_MPEG1VIDEO))) {
307  av_log(s->avctx, AV_LOG_DEBUG,
308  "MPEG motion vector out of boundary (%d %d)\n", src_x,
309  src_y);
310  return;
311  }
312  src_y = (unsigned)src_y << field_based;
313  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
314  s->linesize, s->linesize,
315  17, 17 + field_based,
316  src_x, src_y,
317  s->h_edge_pos, s->v_edge_pos);
318  ptr_y = s->sc.edge_emu_buffer;
319  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
320  uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
321  uint8_t *vbuf = ubuf + 10 * s->uvlinesize;
322  if (s->workaround_bugs & FF_BUG_IEDGE)
323  vbuf -= s->uvlinesize;
324  uvsrc_y = (unsigned)uvsrc_y << field_based;
325  s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
326  s->uvlinesize, s->uvlinesize,
327  9, 9 + field_based,
328  uvsrc_x, uvsrc_y,
329  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
330  s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
331  s->uvlinesize, s->uvlinesize,
332  9, 9 + field_based,
333  uvsrc_x, uvsrc_y,
334  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
335  ptr_cb = ubuf;
336  ptr_cr = vbuf;
337  }
338  }
339 
340  /* FIXME use this for field pix too instead of the obnoxious hack which
341  * changes picture.data */
342  if (bottom_field) {
343  dest_y += s->linesize;
344  dest_cb += s->uvlinesize;
345  dest_cr += s->uvlinesize;
346  }
347 
348  if (field_select) {
349  ptr_y += s->linesize;
350  ptr_cb += s->uvlinesize;
351  ptr_cr += s->uvlinesize;
352  }
353 
354  pix_op[0][dxy](dest_y, ptr_y, linesize, h);
355 
356  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
357  pix_op[s->chroma_x_shift][uvdxy]
358  (dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift);
359  pix_op[s->chroma_x_shift][uvdxy]
360  (dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift);
361  }
362  if (!is_mpeg12 && (CONFIG_H261_ENCODER || CONFIG_H261_DECODER) &&
363  s->out_format == FMT_H261) {
364  ff_h261_loop_filter(s);
365  }
366 }
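For the common 4:2:0 case in mpeg_motion_internal() (s->chroma_y_shift set), the chroma vector is the half-pel luma vector divided by two with truncation toward zero, and its own low bits form uvdxy. A worked standalone sketch of that branch, using an invented vector and assuming frame motion compensation (block_y_half == 0):

/* Standalone sketch (not FFmpeg API): 4:2:0 chroma derivation as in the
 * s->chroma_y_shift branch of mpeg_motion_internal(). Example values only. */
#include <stdio.h>

int main(void)
{
    const int mb_x = 4, mb_y = 7;
    const int motion_x = 13, motion_y = -5;   /* luma vector, half-pel units */

    int mx      = motion_x / 2;               /* truncate toward zero */
    int my      = motion_y / 2;
    int uvdxy   = ((my & 1) << 1) | (mx & 1); /* chroma half-pel flags */
    int uvsrc_x = mb_x * 8 + (mx >> 1);
    int uvsrc_y = mb_y * 8 + (my >> 1);       /* frame MC: block_y_half == 0 */

    printf("mx=%d my=%d uvdxy=%d uvsrc=(%d,%d)\n", mx, my, uvdxy, uvsrc_x, uvsrc_y);
    return 0;
}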
367 /* apply one mpeg motion vector to the three components */
368 static void mpeg_motion(MpegEncContext *s,
369  uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
370  int field_select, uint8_t **ref_picture,
371  op_pixels_func (*pix_op)[4],
372  int motion_x, int motion_y, int h, int is_16x8, int mb_y)
373 {
374 #if !CONFIG_SMALL
375  if (s->out_format == FMT_MPEG1)
376  mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
377  field_select, ref_picture, pix_op,
378  motion_x, motion_y, h, 1, is_16x8, mb_y);
379  else
380 #endif
381  mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
382  field_select, ref_picture, pix_op,
383  motion_x, motion_y, h, 0, is_16x8, mb_y);
384 }
385 
386 static void mpeg_motion_field(MpegEncContext *s, uint8_t *dest_y,
387  uint8_t *dest_cb, uint8_t *dest_cr,
388  int bottom_field, int field_select,
389  uint8_t **ref_picture,
390  op_pixels_func (*pix_op)[4],
391  int motion_x, int motion_y, int h, int mb_y)
392 {
393 #if !CONFIG_SMALL
394  if (s->out_format == FMT_MPEG1)
395  mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
396  bottom_field, field_select, ref_picture, pix_op,
397  motion_x, motion_y, h, 1, 0, mb_y);
398  else
399 #endif
400  mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
401  bottom_field, field_select, ref_picture, pix_op,
402  motion_x, motion_y, h, 0, 0, mb_y);
403 }
404 
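mpeg_motion() and mpeg_motion_field() are thin dispatchers: unless CONFIG_SMALL is set they call the av_always_inline worker with is_mpeg12 fixed to a constant, so each wrapper gets a specialized body with the dead branches folded away, while a CONFIG_SMALL build keeps a single generic body and decides at run time. A self-contained sketch of the same pattern (all names invented):

/* Standalone sketch (all names invented) of the specialization pattern used
 * by the mpeg_motion*() wrappers: a compile-time flag on an inlined worker. */
#include <stdio.h>

static inline int motion_worker(int x, int is_mpeg12) /* FFmpeg uses av_always_inline */
{
    if (is_mpeg12)      /* constant in each caller, so this branch folds away */
        return x + 100;
    return x + 200;
}

static int motion_mpeg12(int x)  { return motion_worker(x, 1); }
static int motion_generic(int x) { return motion_worker(x, 0); }

int main(void)
{
    printf("%d %d\n", motion_mpeg12(7), motion_generic(7));
    return 0;
}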
405 // FIXME: SIMDify, avg variant, 16x16 version
406 static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride)
407 {
408  int x;
409  uint8_t *const top = src[1];
410  uint8_t *const left = src[2];
411  uint8_t *const mid = src[0];
412  uint8_t *const right = src[3];
413  uint8_t *const bottom = src[4];
414 #define OBMC_FILTER(x, t, l, m, r, b)\
415  dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3
416 #define OBMC_FILTER4(x, t, l, m, r, b)\
417  OBMC_FILTER(x , t, l, m, r, b);\
418  OBMC_FILTER(x+1 , t, l, m, r, b);\
419  OBMC_FILTER(x +stride, t, l, m, r, b);\
420  OBMC_FILTER(x+1+stride, t, l, m, r, b);
421 
422  x = 0;
423  OBMC_FILTER (x , 2, 2, 4, 0, 0);
424  OBMC_FILTER (x + 1, 2, 1, 5, 0, 0);
425  OBMC_FILTER4(x + 2, 2, 1, 5, 0, 0);
426  OBMC_FILTER4(x + 4, 2, 0, 5, 1, 0);
427  OBMC_FILTER (x + 6, 2, 0, 5, 1, 0);
428  OBMC_FILTER (x + 7, 2, 0, 4, 2, 0);
429  x += stride;
430  OBMC_FILTER (x , 1, 2, 5, 0, 0);
431  OBMC_FILTER (x + 1, 1, 2, 5, 0, 0);
432  OBMC_FILTER (x + 6, 1, 0, 5, 2, 0);
433  OBMC_FILTER (x + 7, 1, 0, 5, 2, 0);
434  x += stride;
435  OBMC_FILTER4(x , 1, 2, 5, 0, 0);
436  OBMC_FILTER4(x + 2, 1, 1, 6, 0, 0);
437  OBMC_FILTER4(x + 4, 1, 0, 6, 1, 0);
438  OBMC_FILTER4(x + 6, 1, 0, 5, 2, 0);
439  x += 2 * stride;
440  OBMC_FILTER4(x , 0, 2, 5, 0, 1);
441  OBMC_FILTER4(x + 2, 0, 1, 6, 0, 1);
442  OBMC_FILTER4(x + 4, 0, 0, 6, 1, 1);
443  OBMC_FILTER4(x + 6, 0, 0, 5, 2, 1);
444  x += 2*stride;
445  OBMC_FILTER (x , 0, 2, 5, 0, 1);
446  OBMC_FILTER (x + 1, 0, 2, 5, 0, 1);
447  OBMC_FILTER4(x + 2, 0, 1, 5, 0, 2);
448  OBMC_FILTER4(x + 4, 0, 0, 5, 1, 2);
449  OBMC_FILTER (x + 6, 0, 0, 5, 2, 1);
450  OBMC_FILTER (x + 7, 0, 0, 5, 2, 1);
451  x += stride;
452  OBMC_FILTER (x , 0, 2, 4, 0, 2);
453  OBMC_FILTER (x + 1, 0, 1, 5, 0, 2);
454  OBMC_FILTER (x + 6, 0, 0, 5, 1, 2);
455  OBMC_FILTER (x + 7, 0, 0, 4, 2, 2);
456 }
457 
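Every OBMC_FILTER invocation above blends one destination pixel from the co-located pixels of up to five overlapping 8x8 predictions, and the five weights always sum to 8, so the "+ 4) >> 3" is a rounded division by 8. A one-pixel standalone sketch with invented sample values:

/* Standalone sketch (not FFmpeg API): one OBMC_FILTER blend. The five weights
 * (top, left, mid, right, bottom) always sum to 8, so +4 >> 3 rounds a /8. */
#include <stdio.h>

int main(void)
{
    /* invented co-located samples from the five 8x8 predictions */
    int top = 100, left = 90, mid = 120, right = 80, bottom = 60;
    /* weights for the top-left pixel of the block: OBMC_FILTER(x, 2, 2, 4, 0, 0) */
    int t = 2, l = 2, m = 4, r = 0, b = 0;

    int dst = (t * top + l * left + m * mid + r * right + b * bottom + 4) >> 3;

    printf("weight sum = %d, blended pixel = %d\n", t + l + m + r + b, dst);
    return 0;
}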
458 /* obmc for 1 8x8 luma block */
459 static inline void obmc_motion(MpegEncContext *s,
460  uint8_t *dest, uint8_t *src,
461  int src_x, int src_y,
462  op_pixels_func *pix_op,
463  int16_t mv[5][2] /* mid top left right bottom */)
464 #define MID 0
465 {
466  int i;
467  uint8_t *ptr[5];
468 
469  av_assert2(s->quarter_sample == 0);
470 
471  for (i = 0; i < 5; i++) {
472  if (i && mv[i][0] == mv[MID][0] && mv[i][1] == mv[MID][1]) {
473  ptr[i] = ptr[MID];
474  } else {
475  ptr[i] = s->sc.obmc_scratchpad + 8 * (i & 1) +
476  s->linesize * 8 * (i >> 1);
477  hpel_motion(s, ptr[i], src, src_x, src_y, pix_op,
478  mv[i][0], mv[i][1]);
479  }
480  }
481 
482  put_obmc(dest, ptr, s->linesize);
483 }
484 
485 static inline void qpel_motion(MpegEncContext *s,
486  uint8_t *dest_y,
487  uint8_t *dest_cb,
488  uint8_t *dest_cr,
489  int field_based, int bottom_field,
490  int field_select, uint8_t **ref_picture,
491  op_pixels_func (*pix_op)[4],
492  qpel_mc_func (*qpix_op)[16],
493  int motion_x, int motion_y, int h)
494 {
495  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
496  int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos;
497  ptrdiff_t linesize, uvlinesize;
498 
499  dxy = ((motion_y & 3) << 2) | (motion_x & 3);
500 
501  src_x = s->mb_x * 16 + (motion_x >> 2);
502  src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);
503 
504  v_edge_pos = s->v_edge_pos >> field_based;
505  linesize = s->linesize << field_based;
506  uvlinesize = s->uvlinesize << field_based;
507 
508  if (field_based) {
509  mx = motion_x / 2;
510  my = motion_y >> 1;
511  } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA2) {
512  static const int rtab[8] = { 0, 0, 1, 1, 0, 0, 0, 1 };
513  mx = (motion_x >> 1) + rtab[motion_x & 7];
514  my = (motion_y >> 1) + rtab[motion_y & 7];
515  } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA) {
516  mx = (motion_x >> 1) | (motion_x & 1);
517  my = (motion_y >> 1) | (motion_y & 1);
518  } else {
519  mx = motion_x / 2;
520  my = motion_y / 2;
521  }
522  mx = (mx >> 1) | (mx & 1);
523  my = (my >> 1) | (my & 1);
524 
525  uvdxy = (mx & 1) | ((my & 1) << 1);
526  mx >>= 1;
527  my >>= 1;
528 
529  uvsrc_x = s->mb_x * 8 + mx;
530  uvsrc_y = s->mb_y * (8 >> field_based) + my;
531 
532  ptr_y = ref_picture[0] + src_y * linesize + src_x;
533  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
534  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
535 
536  if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 3) - 15 , 0) ||
537  (unsigned)src_y >= FFMAX( v_edge_pos - (motion_y & 3) - h + 1, 0)) {
538  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
539  s->linesize, s->linesize,
540  17, 17 + field_based,
541  src_x, src_y * (1 << field_based),
542  s->h_edge_pos, s->v_edge_pos);
543  ptr_y = s->sc.edge_emu_buffer;
544  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
545  uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
546  uint8_t *vbuf = ubuf + 10 * s->uvlinesize;
547  if (s->workaround_bugs & FF_BUG_IEDGE)
548  vbuf -= s->uvlinesize;
549  s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
550  s->uvlinesize, s->uvlinesize,
551  9, 9 + field_based,
552  uvsrc_x, uvsrc_y * (1 << field_based),
553  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
554  s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
555  s->uvlinesize, s->uvlinesize,
556  9, 9 + field_based,
557  uvsrc_x, uvsrc_y * (1 << field_based),
558  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
559  ptr_cb = ubuf;
560  ptr_cr = vbuf;
561  }
562  }
563 
564  if (!field_based)
565  qpix_op[0][dxy](dest_y, ptr_y, linesize);
566  else {
567  if (bottom_field) {
568  dest_y += s->linesize;
569  dest_cb += s->uvlinesize;
570  dest_cr += s->uvlinesize;
571  }
572 
573  if (field_select) {
574  ptr_y += s->linesize;
575  ptr_cb += s->uvlinesize;
576  ptr_cr += s->uvlinesize;
577  }
578  // damn interlaced mode
579  // FIXME boundary mirroring is not exactly correct here
580  qpix_op[1][dxy](dest_y, ptr_y, linesize);
581  qpix_op[1][dxy](dest_y + 8, ptr_y + 8, linesize);
582  }
583  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
584  pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1);
585  pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1);
586  }
587 }
588 
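qpel_motion() picks one of 16 quarter-pel interpolators with dxy = ((my & 3) << 2) | (mx & 3), then reduces the vector to half-pel precision for chroma: it is halved, and the two remaining low bits are merged so that any leftover fraction becomes a single half-pel flag. A standalone sketch of the default (no bug workaround, frame-based) path with an invented vector:

/* Standalone sketch (not FFmpeg API): quarter-pel luma index and the default
 * chroma reduction in qpel_motion(). Example values only. */
#include <stdio.h>

int main(void)
{
    const int mb_x = 2, mb_y = 1;
    const int motion_x = 11, motion_y = 6;   /* luma vector, quarter-pel units */

    int dxy = ((motion_y & 3) << 2) | (motion_x & 3);  /* 0..15: qpix_op index */

    int mx = motion_x / 2;                   /* quarter -> half pel */
    int my = motion_y / 2;
    mx = (mx >> 1) | (mx & 1);               /* keep any fraction as one flag */
    my = (my >> 1) | (my & 1);
    int uvdxy   = (mx & 1) | ((my & 1) << 1);
    int uvsrc_x = mb_x * 8 + (mx >> 1);
    int uvsrc_y = mb_y * 8 + (my >> 1);

    printf("dxy=%d uvdxy=%d uvsrc=(%d,%d)\n", dxy, uvdxy, uvsrc_x, uvsrc_y);
    return 0;
}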
589 /**
590  * H.263 chroma 4mv motion compensation.
591  */
592 static void chroma_4mv_motion(MpegEncContext *s,
593  uint8_t *dest_cb, uint8_t *dest_cr,
594  uint8_t **ref_picture,
595  op_pixels_func *pix_op,
596  int mx, int my)
597 {
598  uint8_t *ptr;
599  int src_x, src_y, dxy, emu = 0;
600  ptrdiff_t offset;
601 
602  /* In case of 8X8, we construct a single chroma motion vector
603  * with a special rounding */
604  mx = ff_h263_round_chroma(mx);
605  my = ff_h263_round_chroma(my);
606 
607  dxy = ((my & 1) << 1) | (mx & 1);
608  mx >>= 1;
609  my >>= 1;
610 
611  src_x = s->mb_x * 8 + mx;
612  src_y = s->mb_y * 8 + my;
613  src_x = av_clip(src_x, -8, (s->width >> 1));
614  if (src_x == (s->width >> 1))
615  dxy &= ~1;
616  src_y = av_clip(src_y, -8, (s->height >> 1));
617  if (src_y == (s->height >> 1))
618  dxy &= ~2;
619 
620  offset = src_y * s->uvlinesize + src_x;
621  ptr = ref_picture[1] + offset;
622  if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - (dxy & 1) - 7, 0) ||
623  (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - (dxy >> 1) - 7, 0)) {
624  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
625  s->uvlinesize, s->uvlinesize,
626  9, 9, src_x, src_y,
627  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
628  ptr = s->sc.edge_emu_buffer;
629  emu = 1;
630  }
631  pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);
632 
633  ptr = ref_picture[2] + offset;
634  if (emu) {
635  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
636  s->uvlinesize, s->uvlinesize,
637  9, 9, src_x, src_y,
638  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
639  ptr = s->sc.edge_emu_buffer;
640  }
641  pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
642 }
643 
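The "special rounding" in chroma_4mv_motion() maps the sum of the four half-pel luma vectors to one half-pel chroma vector, biasing small fractions to full-pel and everything in between to half-pel positions. The standalone sketch below reimplements that idea with a local helper; it illustrates the rule and is not a copy of ff_h263_round_chroma() from motion_est.h:

/* Standalone sketch (not FFmpeg's implementation): map the sum of four
 * half-pel luma vectors to a single half-pel chroma vector. */
#include <stdio.h>

static int round_chroma_sum(int sum)   /* hypothetical helper, not FFmpeg API */
{
    static const int roundtab[16] = { 0, 0, 0, 1, 1, 1, 1, 1,
                                      1, 1, 1, 1, 1, 1, 2, 2 };
    /* sum/16 full chroma pels -> *2 half pels, plus a rounded fractional part */
    return 2 * (sum >> 4) + roundtab[sum & 15];
}

int main(void)
{
    const int sums[5] = { 16, 24, 4, -6, -24 };
    for (int i = 0; i < 5; i++)
        printf("sum=%3d -> chroma mv=%d (half-pel)\n",
               sums[i], round_chroma_sum(sums[i]));
    return 0;
}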
644 static inline void prefetch_motion(MpegEncContext *s, uint8_t **pix, int dir)
645 {
646  /* fetch pixels for estimated mv 4 macroblocks ahead
647  * optimized for 64-byte cache lines */
648  const int shift = s->quarter_sample ? 2 : 1;
649  const int mx = (s->mv[dir][0][0] >> shift) + 16 * s->mb_x + 8;
650  const int my = (s->mv[dir][0][1] >> shift) + 16 * s->mb_y;
651  int off = mx + (my + (s->mb_x & 3) * 4) * s->linesize + 64;
652 
653  s->vdsp.prefetch(pix[0] + off, s->linesize, 4);
654  off = (mx >> 1) + ((my >> 1) + (s->mb_x & 7)) * s->uvlinesize + 64;
655  s->vdsp.prefetch(pix[1] + off, pix[2] - pix[1], 2);
656 }
657 
658 static inline void apply_obmc(MpegEncContext *s,
659  uint8_t *dest_y,
660  uint8_t *dest_cb,
661  uint8_t *dest_cr,
662  uint8_t **ref_picture,
663  op_pixels_func (*pix_op)[4])
664 {
665  LOCAL_ALIGNED_8(int16_t, mv_cache, [4], [4][2]);
666  Picture *cur_frame = &s->current_picture;
667  int mb_x = s->mb_x;
668  int mb_y = s->mb_y;
669  const int xy = mb_x + mb_y * s->mb_stride;
670  const int mot_stride = s->b8_stride;
671  const int mot_xy = mb_x * 2 + mb_y * 2 * mot_stride;
672  int mx, my, i;
673 
674  av_assert2(!s->mb_skipped);
675 
676  AV_COPY32(mv_cache[1][1], cur_frame->motion_val[0][mot_xy]);
677  AV_COPY32(mv_cache[1][2], cur_frame->motion_val[0][mot_xy + 1]);
678 
679  AV_COPY32(mv_cache[2][1],
680  cur_frame->motion_val[0][mot_xy + mot_stride]);
681  AV_COPY32(mv_cache[2][2],
682  cur_frame->motion_val[0][mot_xy + mot_stride + 1]);
683 
684  AV_COPY32(mv_cache[3][1],
685  cur_frame->motion_val[0][mot_xy + mot_stride]);
686  AV_COPY32(mv_cache[3][2],
687  cur_frame->motion_val[0][mot_xy + mot_stride + 1]);
688 
689  if (mb_y == 0 || IS_INTRA(cur_frame->mb_type[xy - s->mb_stride])) {
690  AV_COPY32(mv_cache[0][1], mv_cache[1][1]);
691  AV_COPY32(mv_cache[0][2], mv_cache[1][2]);
692  } else {
693  AV_COPY32(mv_cache[0][1],
694  cur_frame->motion_val[0][mot_xy - mot_stride]);
695  AV_COPY32(mv_cache[0][2],
696  cur_frame->motion_val[0][mot_xy - mot_stride + 1]);
697  }
698 
699  if (mb_x == 0 || IS_INTRA(cur_frame->mb_type[xy - 1])) {
700  AV_COPY32(mv_cache[1][0], mv_cache[1][1]);
701  AV_COPY32(mv_cache[2][0], mv_cache[2][1]);
702  } else {
703  AV_COPY32(mv_cache[1][0], cur_frame->motion_val[0][mot_xy - 1]);
704  AV_COPY32(mv_cache[2][0],
705  cur_frame->motion_val[0][mot_xy - 1 + mot_stride]);
706  }
707 
708  if (mb_x + 1 >= s->mb_width || IS_INTRA(cur_frame->mb_type[xy + 1])) {
709  AV_COPY32(mv_cache[1][3], mv_cache[1][2]);
710  AV_COPY32(mv_cache[2][3], mv_cache[2][2]);
711  } else {
712  AV_COPY32(mv_cache[1][3], cur_frame->motion_val[0][mot_xy + 2]);
713  AV_COPY32(mv_cache[2][3],
714  cur_frame->motion_val[0][mot_xy + 2 + mot_stride]);
715  }
716 
717  mx = 0;
718  my = 0;
719  for (i = 0; i < 4; i++) {
720  const int x = (i & 1) + 1;
721  const int y = (i >> 1) + 1;
722  int16_t mv[5][2] = {
723  { mv_cache[y][x][0], mv_cache[y][x][1] },
724  { mv_cache[y - 1][x][0], mv_cache[y - 1][x][1] },
725  { mv_cache[y][x - 1][0], mv_cache[y][x - 1][1] },
726  { mv_cache[y][x + 1][0], mv_cache[y][x + 1][1] },
727  { mv_cache[y + 1][x][0], mv_cache[y + 1][x][1] }
728  };
729  // FIXME cleanup
730  obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
731  ref_picture[0],
732  mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >> 1) * 8,
733  pix_op[1],
734  mv);
735 
736  mx += mv[0][0];
737  my += mv[0][1];
738  }
739  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
740  chroma_4mv_motion(s, dest_cb, dest_cr,
741  ref_picture, pix_op[1],
742  mx, my);
743 }
744 
745 static inline void apply_8x8(MpegEncContext *s,
746  uint8_t *dest_y,
747  uint8_t *dest_cb,
748  uint8_t *dest_cr,
749  int dir,
750  uint8_t **ref_picture,
751  qpel_mc_func (*qpix_op)[16],
752  op_pixels_func (*pix_op)[4])
753 {
754  int dxy, mx, my, src_x, src_y;
755  int i;
756  int mb_x = s->mb_x;
757  int mb_y = s->mb_y;
758  uint8_t *ptr, *dest;
759 
760  mx = 0;
761  my = 0;
762  if (s->quarter_sample) {
763  for (i = 0; i < 4; i++) {
764  int motion_x = s->mv[dir][i][0];
765  int motion_y = s->mv[dir][i][1];
766 
767  dxy = ((motion_y & 3) << 2) | (motion_x & 3);
768  src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
769  src_y = mb_y * 16 + (motion_y >> 2) + (i >> 1) * 8;
770 
771  /* WARNING: do not forget half pels */
772  src_x = av_clip(src_x, -16, s->width);
773  if (src_x == s->width)
774  dxy &= ~3;
775  src_y = av_clip(src_y, -16, s->height);
776  if (src_y == s->height)
777  dxy &= ~12;
778 
779  ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
780  if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 3) - 7, 0) ||
781  (unsigned)src_y >= FFMAX(s->v_edge_pos - (motion_y & 3) - 7, 0)) {
782  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
783  s->linesize, s->linesize,
784  9, 9,
785  src_x, src_y,
786  s->h_edge_pos,
787  s->v_edge_pos);
788  ptr = s->sc.edge_emu_buffer;
789  }
790  dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
791  qpix_op[1][dxy](dest, ptr, s->linesize);
792 
793  mx += s->mv[dir][i][0] / 2;
794  my += s->mv[dir][i][1] / 2;
795  }
796  } else {
797  for (i = 0; i < 4; i++) {
798  hpel_motion(s,
799  dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
800  ref_picture[0],
801  mb_x * 16 + (i & 1) * 8,
802  mb_y * 16 + (i >> 1) * 8,
803  pix_op[1],
804  s->mv[dir][i][0],
805  s->mv[dir][i][1]);
806 
807  mx += s->mv[dir][i][0];
808  my += s->mv[dir][i][1];
809  }
810  }
811 
812  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
813  chroma_4mv_motion(s, dest_cb, dest_cr,
814  ref_picture, pix_op[1], mx, my);
815 }
816 
817 /**
818  * motion compensation of a single macroblock
819  * @param s context
820  * @param dest_y luma destination pointer
821  * @param dest_cb chroma cb/u destination pointer
822  * @param dest_cr chroma cr/v destination pointer
823  * @param dir direction (0->forward, 1->backward)
824  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
825  * @param pix_op halfpel motion compensation function (average or put normally)
826  * @param qpix_op qpel motion compensation function (average or put normally)
827  * the motion vectors are taken from s->mv and the MV type from s->mv_type
828  */
829 static av_always_inline void mpv_motion_internal(MpegEncContext *s,
830  uint8_t *dest_y,
831  uint8_t *dest_cb,
832  uint8_t *dest_cr,
833  int dir,
834  uint8_t **ref_picture,
835  op_pixels_func (*pix_op)[4],
836  qpel_mc_func (*qpix_op)[16],
837  int is_mpeg12)
838 {
839  int i;
840  int mb_y = s->mb_y;
841 
842  prefetch_motion(s, ref_picture, dir);
843 
844  if (!is_mpeg12 && s->obmc && s->pict_type != AV_PICTURE_TYPE_B) {
845  apply_obmc(s, dest_y, dest_cb, dest_cr, ref_picture, pix_op);
846  return;
847  }
848 
849  switch (s->mv_type) {
850  case MV_TYPE_16X16:
851  if (!is_mpeg12 && s->mcsel) {
852  if (s->real_sprite_warping_points == 1) {
853  gmc1_motion(s, dest_y, dest_cb, dest_cr,
854  ref_picture);
855  } else {
856  gmc_motion(s, dest_y, dest_cb, dest_cr,
857  ref_picture);
858  }
859  } else if (!is_mpeg12 && s->quarter_sample) {
860  qpel_motion(s, dest_y, dest_cb, dest_cr,
861  0, 0, 0,
862  ref_picture, pix_op, qpix_op,
863  s->mv[dir][0][0], s->mv[dir][0][1], 16);
864  } else if (!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) &&
865  s->mspel && s->codec_id == AV_CODEC_ID_WMV2) {
866  ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
867  ref_picture, pix_op,
868  s->mv[dir][0][0], s->mv[dir][0][1], 16);
869  } else {
870  mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
871  ref_picture, pix_op,
872  s->mv[dir][0][0], s->mv[dir][0][1], 16, 0, mb_y);
873  }
874  break;
875  case MV_TYPE_8X8:
876  if (!is_mpeg12)
877  apply_8x8(s, dest_y, dest_cb, dest_cr,
878  dir, ref_picture, qpix_op, pix_op);
879  break;
880  case MV_TYPE_FIELD:
881  if (s->picture_structure == PICT_FRAME) {
882  if (!is_mpeg12 && s->quarter_sample) {
883  for (i = 0; i < 2; i++)
884  qpel_motion(s, dest_y, dest_cb, dest_cr,
885  1, i, s->field_select[dir][i],
886  ref_picture, pix_op, qpix_op,
887  s->mv[dir][i][0], s->mv[dir][i][1], 8);
888  } else {
889  /* top field */
890  mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
891  0, s->field_select[dir][0],
892  ref_picture, pix_op,
893  s->mv[dir][0][0], s->mv[dir][0][1], 8, mb_y);
894  /* bottom field */
895  mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
896  1, s->field_select[dir][1],
897  ref_picture, pix_op,
898  s->mv[dir][1][0], s->mv[dir][1][1], 8, mb_y);
899  }
900  } else {
901  if ( s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field
902  || !ref_picture[0]) {
903  ref_picture = s->current_picture_ptr->f->data;
904  }
905 
906  mpeg_motion(s, dest_y, dest_cb, dest_cr,
907  s->field_select[dir][0],
908  ref_picture, pix_op,
909  s->mv[dir][0][0], s->mv[dir][0][1], 16, 0, mb_y >> 1);
910  }
911  break;
912  case MV_TYPE_16X8:
913  if (CONFIG_SMALL || is_mpeg12) {
914  for (i = 0; i < 2; i++) {
915  uint8_t **ref2picture;
916 
917  if ((s->picture_structure == s->field_select[dir][i] + 1 ||
918  s->pict_type == AV_PICTURE_TYPE_B || s->first_field) &&
919  ref_picture[0]) {
920  ref2picture = ref_picture;
921  } else {
922  ref2picture = s->current_picture_ptr->f->data;
923  }
924 
925  mpeg_motion(s, dest_y, dest_cb, dest_cr,
926  s->field_select[dir][i],
927  ref2picture, pix_op,
928  s->mv[dir][i][0], s->mv[dir][i][1],
929  8, 1, (mb_y & ~1) + i);
930 
931  dest_y += 16 * s->linesize;
932  dest_cb += (16 >> s->chroma_y_shift) * s->uvlinesize;
933  dest_cr += (16 >> s->chroma_y_shift) * s->uvlinesize;
934  }
935  break;
936  }
937  case MV_TYPE_DMV:
938  if (CONFIG_SMALL || is_mpeg12) {
939  if (s->picture_structure == PICT_FRAME) {
940  for (i = 0; i < 2; i++) {
941  for (int j = 0; j < 2; j++)
942  mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
943  j, j ^ i, ref_picture, pix_op,
944  s->mv[dir][2 * i + j][0],
945  s->mv[dir][2 * i + j][1], 8, mb_y);
946  pix_op = s->hdsp.avg_pixels_tab;
947  }
948  } else {
949  if (!ref_picture[0]) {
950  ref_picture = s->current_picture_ptr->f->data;
951  }
952  for (i = 0; i < 2; i++) {
953  mpeg_motion(s, dest_y, dest_cb, dest_cr,
954  s->picture_structure != i + 1,
955  ref_picture, pix_op,
956  s->mv[dir][2 * i][0], s->mv[dir][2 * i][1],
957  16, 0, mb_y >> 1);
958 
959  // after put we make avg of the same block
960  pix_op = s->hdsp.avg_pixels_tab;
961 
962  /* opposite parity is always in the same frame if this is
963  * second field */
964  if (!s->first_field)
965  ref_picture = s->current_picture_ptr->f->data;
966  }
967  }
968  break;
969  }
970  default: av_assert2(0);
971  }
972 }
973 
974 void ff_mpv_motion(MpegEncContext *s,
975  uint8_t *dest_y, uint8_t *dest_cb,
976  uint8_t *dest_cr, int dir,
977  uint8_t **ref_picture,
978  op_pixels_func (*pix_op)[4],
979  qpel_mc_func (*qpix_op)[16])
980 {
981 #if !CONFIG_SMALL
982  if (s->out_format == FMT_MPEG1)
983  mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
984  ref_picture, pix_op, qpix_op, 1);
985  else
986 #endif
987  mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
988  ref_picture, pix_op, qpix_op, 0);
989 }