FFmpeg  4.4.4
vp8dsp_init.c
/*
 * VP8 DSP functions x86-optimized
 * Copyright (c) 2010 Ronald S. Bultje <rsbultje@gmail.com>
 * Copyright (c) 2010 Fiona Glaser <fiona@x264.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/mem_internal.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/vp8dsp.h"

#if HAVE_X86ASM

/*
 * MC functions
 */
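/* Naming convention: ff_put_vp8_epel<W>_h<N>[v<M>]_<opt> is a W-pixel-wide
 * subpel MC kernel applying an N-tap horizontal and/or M-tap vertical filter
 * (4 or 6 taps), with mx/my selecting the subpel filter phase; the _bilinear
 * and _pixels variants are the bilinear and full-pel copy cases. */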
void ff_put_vp8_epel4_h4_mmxext(uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel4_h6_mmxext(uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel4_v4_mmxext(uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel4_v6_mmxext(uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);

void ff_put_vp8_epel8_h4_sse2  (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_h6_sse2  (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_v4_sse2  (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_v6_sse2  (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);

void ff_put_vp8_epel4_h4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel4_h6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel4_v4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel4_v6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_h4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_h6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_v4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_v6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);

void ff_put_vp8_bilinear4_h_mmxext(uint8_t *dst, ptrdiff_t dststride,
                                   uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);
void ff_put_vp8_bilinear8_h_sse2  (uint8_t *dst, ptrdiff_t dststride,
                                   uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);
void ff_put_vp8_bilinear4_h_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                   uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);
void ff_put_vp8_bilinear8_h_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                   uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);

void ff_put_vp8_bilinear4_v_mmxext(uint8_t *dst, ptrdiff_t dststride,
                                   uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);
void ff_put_vp8_bilinear8_v_sse2  (uint8_t *dst, ptrdiff_t dststride,
                                   uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);
void ff_put_vp8_bilinear4_v_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                   uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);
void ff_put_vp8_bilinear8_v_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                   uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);


void ff_put_vp8_pixels8_mmx (uint8_t *dst, ptrdiff_t dststride,
                             uint8_t *src, ptrdiff_t srcstride,
                             int height, int mx, int my);
void ff_put_vp8_pixels16_mmx(uint8_t *dst, ptrdiff_t dststride,
                             uint8_t *src, ptrdiff_t srcstride,
                             int height, int mx, int my);
void ff_put_vp8_pixels16_sse(uint8_t *dst, ptrdiff_t dststride,
                             uint8_t *src, ptrdiff_t srcstride,
                             int height, int mx, int my);

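/* The wider put functions are not written in assembly; the TAP_W16/TAP_W8
 * macros below synthesize them from two calls to the next narrower kernel,
 * one for the left half of the block and one for the right half. */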
#define TAP_W16(OPT, FILTERTYPE, TAPTYPE) \
static void ff_put_vp8_ ## FILTERTYPE ## 16_ ## TAPTYPE ## _ ## OPT( \
    uint8_t *dst, ptrdiff_t dststride, uint8_t *src, \
    ptrdiff_t srcstride, int height, int mx, int my) \
{ \
    ff_put_vp8_ ## FILTERTYPE ## 8_ ## TAPTYPE ## _ ## OPT( \
        dst, dststride, src, srcstride, height, mx, my); \
    ff_put_vp8_ ## FILTERTYPE ## 8_ ## TAPTYPE ## _ ## OPT( \
        dst + 8, dststride, src + 8, srcstride, height, mx, my); \
}
#define TAP_W8(OPT, FILTERTYPE, TAPTYPE) \
static void ff_put_vp8_ ## FILTERTYPE ## 8_ ## TAPTYPE ## _ ## OPT( \
    uint8_t *dst, ptrdiff_t dststride, uint8_t *src, \
    ptrdiff_t srcstride, int height, int mx, int my) \
{ \
    ff_put_vp8_ ## FILTERTYPE ## 4_ ## TAPTYPE ## _ ## OPT( \
        dst, dststride, src, srcstride, height, mx, my); \
    ff_put_vp8_ ## FILTERTYPE ## 4_ ## TAPTYPE ## _ ## OPT( \
        dst + 4, dststride, src + 4, srcstride, height, mx, my); \
}
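/* e.g. TAP_W16(sse2, epel, h6) defines ff_put_vp8_epel16_h6_sse2(), which
 * calls ff_put_vp8_epel8_h6_sse2() once for the left 8 columns and once at
 * dst + 8 / src + 8 for the right 8 columns. */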

#if ARCH_X86_32
TAP_W8 (mmxext, epel, h4)
TAP_W8 (mmxext, epel, h6)
TAP_W16(mmxext, epel, h6)
TAP_W8 (mmxext, epel, v4)
TAP_W8 (mmxext, epel, v6)
TAP_W16(mmxext, epel, v6)
TAP_W8 (mmxext, bilinear, h)
TAP_W16(mmxext, bilinear, h)
TAP_W8 (mmxext, bilinear, v)
TAP_W16(mmxext, bilinear, v)
#endif

TAP_W16(sse2, epel, h6)
TAP_W16(sse2, epel, v6)
TAP_W16(sse2, bilinear, h)
TAP_W16(sse2, bilinear, v)

TAP_W16(ssse3, epel, h6)
TAP_W16(ssse3, epel, v6)
TAP_W16(ssse3, bilinear, h)
TAP_W16(ssse3, bilinear, v)

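/* HVTAP builds the two-dimensional (hNvM) subpel functions: the horizontal
 * kernel filters into an aligned on-stack temporary buffer, and the vertical
 * kernel then filters that buffer into dst. */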
#define HVTAP(OPT, ALIGN, TAPNUMX, TAPNUMY, SIZE, MAXHEIGHT) \
static void ff_put_vp8_epel ## SIZE ## _h ## TAPNUMX ## v ## TAPNUMY ## _ ## OPT( \
    uint8_t *dst, ptrdiff_t dststride, uint8_t *src, \
    ptrdiff_t srcstride, int height, int mx, int my) \
{ \
    LOCAL_ALIGNED(ALIGN, uint8_t, tmp, [SIZE * (MAXHEIGHT + TAPNUMY - 1)]); \
    uint8_t *tmpptr = tmp + SIZE * (TAPNUMY / 2 - 1); \
    src -= srcstride * (TAPNUMY / 2 - 1); \
    ff_put_vp8_epel ## SIZE ## _h ## TAPNUMX ## _ ## OPT( \
        tmp, SIZE, src, srcstride, height + TAPNUMY - 1, mx, my); \
    ff_put_vp8_epel ## SIZE ## _v ## TAPNUMY ## _ ## OPT( \
        dst, dststride, tmpptr, SIZE, height, mx, my); \
}
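/* The source pointer is rewound by TAPNUMY / 2 - 1 rows so the horizontal
 * pass also produces the context rows above the block that the vertical
 * filter needs; hence height + TAPNUMY - 1 rows land in tmp, and tmpptr
 * skips those leading rows again for the vertical pass. */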

#if ARCH_X86_32
#define HVTAPMMX(x, y) \
HVTAP(mmxext, 8, x, y, 4, 8) \
HVTAP(mmxext, 8, x, y, 8, 16)

HVTAP(mmxext, 8, 6, 6, 16, 16)
#else
#define HVTAPMMX(x, y) \
HVTAP(mmxext, 8, x, y, 4, 8)
#endif

HVTAPMMX(4, 4)
HVTAPMMX(4, 6)
HVTAPMMX(6, 4)
HVTAPMMX(6, 6)

#define HVTAPSSE2(x, y, w) \
HVTAP(sse2, 16, x, y, w, 16) \
HVTAP(ssse3, 16, x, y, w, 16)

HVTAPSSE2(4, 4, 8)
HVTAPSSE2(4, 6, 8)
HVTAPSSE2(6, 4, 8)
HVTAPSSE2(6, 6, 8)
HVTAPSSE2(6, 6, 16)

HVTAP(ssse3, 16, 4, 4, 4, 8)
HVTAP(ssse3, 16, 4, 6, 4, 8)
HVTAP(ssse3, 16, 6, 4, 4, 8)
HVTAP(ssse3, 16, 6, 6, 4, 8)

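/* Same two-pass trick for the bilinear hv case: the horizontal pass
 * produces one extra row (height + 1) because the vertical bilinear
 * filter reads rows y and y + 1. */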
#define HVBILIN(OPT, ALIGN, SIZE, MAXHEIGHT) \
static void ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT( \
    uint8_t *dst, ptrdiff_t dststride, uint8_t *src, \
    ptrdiff_t srcstride, int height, int mx, int my) \
{ \
    LOCAL_ALIGNED(ALIGN, uint8_t, tmp, [SIZE * (MAXHEIGHT + 2)]); \
    ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT( \
        tmp, SIZE, src, srcstride, height + 1, mx, my); \
    ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT( \
        dst, dststride, tmp, SIZE, height, mx, my); \
}

HVBILIN(mmxext, 8, 4, 8)
#if ARCH_X86_32
HVBILIN(mmxext, 8, 8, 16)
HVBILIN(mmxext, 8, 16, 16)
#endif
HVBILIN(sse2, 8, 8, 16)
HVBILIN(sse2, 8, 16, 16)
HVBILIN(ssse3, 8, 4, 8)
HVBILIN(ssse3, 8, 8, 16)
HVBILIN(ssse3, 8, 16, 16)

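/* IDCT/WHT kernels: the dc_add variants add a DC-only inverse transform to
 * dst (4y/4uv batch several adjacent luma/chroma blocks per call), idct_add
 * is the full 4x4 inverse transform plus add, and luma_dc_wht is the inverse
 * Walsh-Hadamard transform that distributes the dc[16] coefficients back
 * into the 16 luma blocks. */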
void ff_vp8_idct_dc_add_mmx(uint8_t *dst, int16_t block[16],
                            ptrdiff_t stride);
void ff_vp8_idct_dc_add_sse2(uint8_t *dst, int16_t block[16],
                             ptrdiff_t stride);
void ff_vp8_idct_dc_add_sse4(uint8_t *dst, int16_t block[16],
                             ptrdiff_t stride);
void ff_vp8_idct_dc_add4y_mmx(uint8_t *dst, int16_t block[4][16],
                              ptrdiff_t stride);
void ff_vp8_idct_dc_add4y_sse2(uint8_t *dst, int16_t block[4][16],
                               ptrdiff_t stride);
void ff_vp8_idct_dc_add4uv_mmx(uint8_t *dst, int16_t block[2][16],
                               ptrdiff_t stride);
void ff_vp8_luma_dc_wht_mmx(int16_t block[4][4][16], int16_t dc[16]);
void ff_vp8_luma_dc_wht_sse(int16_t block[4][4][16], int16_t dc[16]);
void ff_vp8_idct_add_mmx(uint8_t *dst, int16_t block[16], ptrdiff_t stride);
void ff_vp8_idct_add_sse(uint8_t *dst, int16_t block[16], ptrdiff_t stride);

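/* Loop-filter kernels: flim is the filter limit of the simple filter; e, i
 * and hvt are the VP8 edge limit, interior limit and high-edge-variance
 * threshold; the 8uv variants filter the U and V planes together with a
 * shared stride s. */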
#define DECLARE_LOOP_FILTER(NAME) \
void ff_vp8_v_loop_filter_simple_ ## NAME(uint8_t *dst, \
                                          ptrdiff_t stride, \
                                          int flim); \
void ff_vp8_h_loop_filter_simple_ ## NAME(uint8_t *dst, \
                                          ptrdiff_t stride, \
                                          int flim); \
void ff_vp8_v_loop_filter16y_inner_ ## NAME (uint8_t *dst, \
                                             ptrdiff_t stride, \
                                             int e, int i, int hvt); \
void ff_vp8_h_loop_filter16y_inner_ ## NAME (uint8_t *dst, \
                                             ptrdiff_t stride, \
                                             int e, int i, int hvt); \
void ff_vp8_v_loop_filter8uv_inner_ ## NAME (uint8_t *dstU, \
                                             uint8_t *dstV, \
                                             ptrdiff_t s, \
                                             int e, int i, int hvt); \
void ff_vp8_h_loop_filter8uv_inner_ ## NAME (uint8_t *dstU, \
                                             uint8_t *dstV, \
                                             ptrdiff_t s, \
                                             int e, int i, int hvt); \
void ff_vp8_v_loop_filter16y_mbedge_ ## NAME(uint8_t *dst, \
                                             ptrdiff_t stride, \
                                             int e, int i, int hvt); \
void ff_vp8_h_loop_filter16y_mbedge_ ## NAME(uint8_t *dst, \
                                             ptrdiff_t stride, \
                                             int e, int i, int hvt); \
void ff_vp8_v_loop_filter8uv_mbedge_ ## NAME(uint8_t *dstU, \
                                             uint8_t *dstV, \
                                             ptrdiff_t s, \
                                             int e, int i, int hvt); \
void ff_vp8_h_loop_filter8uv_mbedge_ ## NAME(uint8_t *dstU, \
                                             uint8_t *dstV, \
                                             ptrdiff_t s, \
                                             int e, int i, int hvt);

DECLARE_LOOP_FILTER(mmx)
DECLARE_LOOP_FILTER(mmxext)
DECLARE_LOOP_FILTER(sse2)
DECLARE_LOOP_FILTER(ssse3)
DECLARE_LOOP_FILTER(sse4)

#endif /* HAVE_X86ASM */

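/* Table layout (see VP8DSPContext): put_vp8_epel_pixels_tab[IDX][v][h],
 * where IDX selects the block width (0 = 16, 1 = 8, 2 = 4) and v/h select
 * the vertical/horizontal filter: 0 = none (copy), 1 = 4-tap, 2 = 6-tap.
 * The bilinear table uses the same indexing, with 1 and 2 both mapping to
 * bilinear interpolation in that direction. */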
#define VP8_LUMA_MC_FUNC(IDX, SIZE, OPT) \
    c->put_vp8_epel_pixels_tab[IDX][0][2] = ff_put_vp8_epel ## SIZE ## _h6_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][2][0] = ff_put_vp8_epel ## SIZE ## _v6_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][2][2] = ff_put_vp8_epel ## SIZE ## _h6v6_ ## OPT

#define VP8_MC_FUNC(IDX, SIZE, OPT) \
    c->put_vp8_epel_pixels_tab[IDX][0][1] = ff_put_vp8_epel ## SIZE ## _h4_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][1][0] = ff_put_vp8_epel ## SIZE ## _v4_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][1][1] = ff_put_vp8_epel ## SIZE ## _h4v4_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][1][2] = ff_put_vp8_epel ## SIZE ## _h6v4_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][2][1] = ff_put_vp8_epel ## SIZE ## _h4v6_ ## OPT; \
    VP8_LUMA_MC_FUNC(IDX, SIZE, OPT)

#define VP8_BILINEAR_MC_FUNC(IDX, SIZE, OPT) \
    c->put_vp8_bilinear_pixels_tab[IDX][0][1] = ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][0][2] = ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][1][0] = ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][1][1] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][1][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][2][0] = ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][2][1] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][2][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT


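/* Runtime init.  ff_vp78dsp_init_x86() fills the MC tables shared by the
 * VP7 and VP8 decoders; ff_vp8dsp_init_x86() below installs the VP8-only
 * IDCT and loop-filter functions.  Both query av_get_cpu_flags() and only
 * override the C fallbacks when the matching instruction set is available. */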
av_cold void ff_vp78dsp_init_x86(VP8DSPContext *c)
{
#if HAVE_X86ASM
    int cpu_flags = av_get_cpu_flags();

    if (EXTERNAL_MMX(cpu_flags)) {
#if ARCH_X86_32
        c->put_vp8_epel_pixels_tab[0][0][0]     =
        c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_mmx;
#endif
        c->put_vp8_epel_pixels_tab[1][0][0]     =
        c->put_vp8_bilinear_pixels_tab[1][0][0] = ff_put_vp8_pixels8_mmx;
    }

    /* note that 4-tap width=16 functions are missing because w=16
     * is only used for luma, and luma is always a copy or sixtap. */
    if (EXTERNAL_MMXEXT(cpu_flags)) {
        VP8_MC_FUNC(2, 4, mmxext);
        VP8_BILINEAR_MC_FUNC(2, 4, mmxext);
#if ARCH_X86_32
        VP8_LUMA_MC_FUNC(0, 16, mmxext);
        VP8_MC_FUNC(1, 8, mmxext);
        VP8_BILINEAR_MC_FUNC(0, 16, mmxext);
        VP8_BILINEAR_MC_FUNC(1, 8, mmxext);
#endif
    }

    if (EXTERNAL_SSE(cpu_flags)) {
        c->put_vp8_epel_pixels_tab[0][0][0]     =
        c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_sse;
    }

    if (EXTERNAL_SSE2_SLOW(cpu_flags)) {
        VP8_LUMA_MC_FUNC(0, 16, sse2);
        VP8_MC_FUNC(1, 8, sse2);
        VP8_BILINEAR_MC_FUNC(0, 16, sse2);
        VP8_BILINEAR_MC_FUNC(1, 8, sse2);
    }

    if (EXTERNAL_SSSE3(cpu_flags)) {
        VP8_LUMA_MC_FUNC(0, 16, ssse3);
        VP8_MC_FUNC(1, 8, ssse3);
        VP8_MC_FUNC(2, 4, ssse3);
        VP8_BILINEAR_MC_FUNC(0, 16, ssse3);
        VP8_BILINEAR_MC_FUNC(1, 8, ssse3);
        VP8_BILINEAR_MC_FUNC(2, 4, ssse3);
    }
#endif /* HAVE_X86ASM */
}

av_cold void ff_vp8dsp_init_x86(VP8DSPContext *c)
{
#if HAVE_X86ASM
    int cpu_flags = av_get_cpu_flags();

    if (EXTERNAL_MMX(cpu_flags)) {
        c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_mmx;
#if ARCH_X86_32
        c->vp8_idct_dc_add    = ff_vp8_idct_dc_add_mmx;
        c->vp8_idct_dc_add4y  = ff_vp8_idct_dc_add4y_mmx;
        c->vp8_idct_add       = ff_vp8_idct_add_mmx;
        c->vp8_luma_dc_wht    = ff_vp8_luma_dc_wht_mmx;

        c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmx;
        c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmx;

        c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_mmx;
        c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_mmx;
        c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_mmx;
        c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_mmx;

        c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_mmx;
        c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmx;
        c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmx;
        c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmx;
#endif
    }

    /* note that 4-tap width=16 functions are missing because w=16
     * is only used for luma, and luma is always a copy or sixtap. */
    if (EXTERNAL_MMXEXT(cpu_flags)) {
#if ARCH_X86_32
        c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmxext;
        c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmxext;

        c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_mmxext;
        c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_mmxext;
        c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_mmxext;
        c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_mmxext;

        c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_mmxext;
        c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmxext;
        c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmxext;
        c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmxext;
#endif
    }

    if (EXTERNAL_SSE(cpu_flags)) {
        c->vp8_idct_add    = ff_vp8_idct_add_sse;
        c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_sse;
    }

    if (EXTERNAL_SSE2_SLOW(cpu_flags)) {
        c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_sse2;

        c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_sse2;
        c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_sse2;

        c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_sse2;
        c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_sse2;
    }

    if (EXTERNAL_SSE2(cpu_flags)) {
        c->vp8_idct_dc_add   = ff_vp8_idct_dc_add_sse2;
        c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_sse2;

        c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse2;

        c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_sse2;
        c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_sse2;

        c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_sse2;
        c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_sse2;
    }

    if (EXTERNAL_SSSE3(cpu_flags)) {
        c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_ssse3;
        c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_ssse3;

        c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_ssse3;
        c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_ssse3;
        c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_ssse3;
        c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_ssse3;

        c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_ssse3;
        c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_ssse3;
        c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_ssse3;
        c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_ssse3;
    }

    if (EXTERNAL_SSE4(cpu_flags)) {
        c->vp8_idct_dc_add = ff_vp8_idct_dc_add_sse4;

        c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse4;
        c->vp8_h_loop_filter16y     = ff_vp8_h_loop_filter16y_mbedge_sse4;
        c->vp8_h_loop_filter8uv     = ff_vp8_h_loop_filter8uv_mbedge_sse4;
    }
#endif /* HAVE_X86ASM */
}