/* FFmpeg 4.4.7 — libavcodec/adpcm.c (extracted source listing) */
1 /*
2  * Copyright (c) 2001-2003 The FFmpeg project
3  *
4  * first version by Francois Revol (revol@free.fr)
5  * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
6  * by Mike Melanson (melanson@pcisys.net)
7  * CD-ROM XA ADPCM codec by BERO
8  * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
9  * EA ADPCM R1/R2/R3 decoder by Peter Ross (pross@xvid.org)
10  * EA IMA EACS decoder by Peter Ross (pross@xvid.org)
11  * EA IMA SEAD decoder by Peter Ross (pross@xvid.org)
12  * EA ADPCM XAS decoder by Peter Ross (pross@xvid.org)
13  * MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com)
14  * THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl)
15  * Argonaut Games ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
16  * Simon & Schuster Interactive ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
17  * Ubisoft ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
18  * High Voltage Software ALP decoder by Zane van Iperen (zane@zanevaniperen.com)
19  * Cunning Developments decoder by Zane van Iperen (zane@zanevaniperen.com)
20  *
21  * This file is part of FFmpeg.
22  *
23  * FFmpeg is free software; you can redistribute it and/or
24  * modify it under the terms of the GNU Lesser General Public
25  * License as published by the Free Software Foundation; either
26  * version 2.1 of the License, or (at your option) any later version.
27  *
28  * FFmpeg is distributed in the hope that it will be useful,
29  * but WITHOUT ANY WARRANTY; without even the implied warranty of
30  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
31  * Lesser General Public License for more details.
32  *
33  * You should have received a copy of the GNU Lesser General Public
34  * License along with FFmpeg; if not, write to the Free Software
35  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
36  */
37 #include "avcodec.h"
38 #include "get_bits.h"
39 #include "bytestream.h"
40 #include "adpcm.h"
41 #include "adpcm_data.h"
42 #include "internal.h"
43 
44 /**
45  * @file
46  * ADPCM decoders
47  * Features and limitations:
48  *
49  * Reference documents:
50  * http://wiki.multimedia.cx/index.php?title=Category:ADPCM_Audio_Codecs
51  * http://www.pcisys.net/~melanson/codecs/simpleaudio.html [dead]
52  * http://www.geocities.com/SiliconValley/8682/aud3.txt [dead]
53  * http://openquicktime.sourceforge.net/
54  * XAnim sources (xa_codec.c) http://xanim.polter.net/
55  * http://www.cs.ucla.edu/~leec/mediabench/applications.html [dead]
56  * SoX source code http://sox.sourceforge.net/
57  *
58  * CD-ROM XA:
59  * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html [dead]
60  * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html [dead]
61  * readstr http://www.geocities.co.jp/Playtown/2004/
62  */
63 
/* These are for CD-ROM XA ADPCM */
/* Prediction filter coefficients (f0, f1) pairs in 6-bit fixed point,
 * selected by the 4-bit filter index of each sound unit; row 0 disables
 * prediction. Used by xa_decode() below. */
static const int8_t xa_adpcm_table[5][2] = {
    {   0,   0 },
    {  60,   0 },
    { 115, -52 },
    {  98, -55 },
    { 122, -60 }
};
72 
/* Coefficient/shift table used by the EA ADPCM decoders.
 * NOTE(review): the grouping into rows of four appears to follow the
 * per-block coefficient index used by the EA decode paths (not visible
 * in this chunk) — confirm against the callers. */
static const int16_t ea_adpcm_table[] = {
    0,  240,  460,  392,
    0,    0, -208, -220,
    0,    1,    3,    4,
    7,    8,   10,   11,
    0,   -1,   -3,   -4
};
80 
/* Step-index adjustment tables for 2- to 5-bit SWF ADPCM codes
 * (row = code width - 2, indexed by the code's magnitude bits);
 * padded to zero where table size is less than 16. */
static const int8_t swf_index_tables[4][16] = {
    /*2*/ { -1, 2 },
    /*3*/ { -1, -1, 2, 4 },
    /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
    /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
};
88 
/* Step-index adjustment for Zork ADPCM, indexed by the top three
 * magnitude bits of the byte code (see adpcm_zork_expand_nibble()). */
static const int8_t zork_index_table[8] = {
    -1, -1, -1, 1, 4, 7, 10, 12,
};
92 
/* Step-index adjustment for IMA MTF, indexed by the full 4-bit code;
 * symmetric, so codes near either extreme widen the step. */
static const int8_t mtf_index_table[16] = {
     8,  6,  4,  2, -1, -1, -1, -1,
    -1, -1, -1, -1,  2,  4,  6,  8,
};
97 
98 /* end of tables */
99 
100 typedef struct ADPCMDecodeContext {
102  int vqa_version; /**< VQA version. Used for ADPCM_IMA_WS */
103  int has_status; /**< Status flag. Reset to 0 after a flush. */
105 
107 {
108  ADPCMDecodeContext *c = avctx->priv_data;
109  unsigned int min_channels = 1;
110  unsigned int max_channels = 2;
111 
112  switch(avctx->codec->id) {
114  max_channels = 1;
115  break;
118  min_channels = 2;
119  break;
126  max_channels = 6;
127  break;
129  min_channels = 2;
130  max_channels = 8;
131  if (avctx->channels & 1) {
132  avpriv_request_sample(avctx, "channel count %d", avctx->channels);
133  return AVERROR_PATCHWELCOME;
134  }
135  break;
137  max_channels = 8;
138  if (avctx->channels <= 0 || avctx->block_align % (16 * avctx->channels))
139  return AVERROR_INVALIDDATA;
140  break;
144  max_channels = 14;
145  break;
146  }
147  if (avctx->channels < min_channels || avctx->channels > max_channels) {
148  av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
149  return AVERROR(EINVAL);
150  }
151 
152  switch(avctx->codec->id) {
154  c->status[0].step = c->status[1].step = 511;
155  break;
157  if (avctx->bits_per_coded_sample < 2 || avctx->bits_per_coded_sample > 5)
158  return AVERROR_INVALIDDATA;
159  break;
161  if (avctx->extradata && avctx->extradata_size >= 8) {
162  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata ), 18);
163  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
164  }
165  break;
167  if (avctx->extradata) {
168  if (avctx->extradata_size >= 28) {
169  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 16), 18);
170  c->status[0].step_index = av_clip(AV_RL32(avctx->extradata + 20), 0, 88);
171  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
172  c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 8), 0, 88);
173  } else if (avctx->extradata_size >= 16) {
174  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 0), 18);
175  c->status[0].step_index = av_clip(AV_RL32(avctx->extradata + 4), 0, 88);
176  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 8), 18);
177  c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 12), 0, 88);
178  }
179  }
180  break;
182  if (avctx->extradata && avctx->extradata_size >= 2)
183  c->vqa_version = AV_RL16(avctx->extradata);
184  break;
186  if (avctx->bits_per_coded_sample != 4 || avctx->block_align != 17 * avctx->channels)
187  return AVERROR_INVALIDDATA;
188  break;
190  if (avctx->bits_per_coded_sample != 8)
191  return AVERROR_INVALIDDATA;
192  break;
193  default:
194  break;
195  }
196 
197  switch (avctx->codec->id) {
218  break;
220  avctx->sample_fmt = c->vqa_version == 3 ? AV_SAMPLE_FMT_S16P :
222  break;
224  avctx->sample_fmt = avctx->channels > 2 ? AV_SAMPLE_FMT_S16P :
226  break;
227  default:
228  avctx->sample_fmt = AV_SAMPLE_FMT_S16;
229  }
230 
231  return 0;
232 }
233 
234 static inline int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
235 {
236  int delta, pred, step, add;
237 
238  pred = c->predictor;
239  delta = nibble & 7;
240  step = c->step;
241  add = (delta * 2 + 1) * step;
242  if (add < 0)
243  add = add + 7;
244 
245  if ((nibble & 8) == 0)
246  pred = av_clip(pred + (add >> 3), -32767, 32767);
247  else
248  pred = av_clip(pred - (add >> 3), -32767, 32767);
249 
250  switch (delta) {
251  case 7:
252  step *= 0x99;
253  break;
254  case 6:
255  c->step = av_clip(c->step * 2, 127, 24576);
256  c->predictor = pred;
257  return pred;
258  case 5:
259  step *= 0x66;
260  break;
261  case 4:
262  step *= 0x4d;
263  break;
264  default:
265  step *= 0x39;
266  break;
267  }
268 
269  if (step < 0)
270  step += 0x3f;
271 
272  c->step = step >> 6;
273  c->step = av_clip(c->step, 127, 24576);
274  c->predictor = pred;
275  return pred;
276 }
277 
278 static inline int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
279 {
280  int step_index;
281  int predictor;
282  int sign, delta, diff, step;
283 
284  step = ff_adpcm_step_table[c->step_index];
285  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
286  step_index = av_clip(step_index, 0, 88);
287 
288  sign = nibble & 8;
289  delta = nibble & 7;
290  /* perform direct multiplication instead of series of jumps proposed by
291  * the reference ADPCM implementation since modern CPUs can do the mults
292  * quickly enough */
293  diff = ((2 * delta + 1) * step) >> shift;
294  predictor = c->predictor;
295  if (sign) predictor -= diff;
296  else predictor += diff;
297 
298  c->predictor = av_clip_int16(predictor);
299  c->step_index = step_index;
300 
301  return (int16_t)c->predictor;
302 }
303 
304 static inline int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
305 {
306  int step_index;
307  int predictor;
308  int sign, delta, diff, step;
309 
310  step = ff_adpcm_step_table[c->step_index];
311  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
312  step_index = av_clip(step_index, 0, 88);
313 
314  sign = nibble & 8;
315  delta = nibble & 7;
316  diff = (delta * step) >> shift;
317  predictor = c->predictor;
318  if (sign) predictor -= diff;
319  else predictor += diff;
320 
321  c->predictor = av_clip_int16(predictor);
322  c->step_index = step_index;
323 
324  return (int16_t)c->predictor;
325 }
326 
327 static inline int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
328 {
329  int step_index, step, delta, predictor;
330 
331  step = ff_adpcm_step_table[c->step_index];
332 
333  delta = step * (2 * nibble - 15);
334  predictor = c->predictor + delta;
335 
336  step_index = c->step_index + mtf_index_table[(unsigned)nibble];
337  c->predictor = av_clip_int16(predictor >> 4);
338  c->step_index = av_clip(step_index, 0, 88);
339 
340  return (int16_t)c->predictor;
341 }
342 
343 static inline int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
344 {
345  int step_index;
346  int predictor;
347  int step;
348 
349  nibble = sign_extend(nibble & 0xF, 4);
350 
351  step = ff_adpcm_ima_cunning_step_table[c->step_index];
352  step_index = c->step_index + ff_adpcm_ima_cunning_index_table[abs(nibble)];
353  step_index = av_clip(step_index, 0, 60);
354 
355  predictor = c->predictor + step * nibble;
356 
357  c->predictor = av_clip_int16(predictor);
358  c->step_index = step_index;
359 
360  return c->predictor;
361 }
362 
364 {
365  int nibble, step_index, predictor, sign, delta, diff, step, shift;
366 
367  shift = bps - 1;
368  nibble = get_bits_le(gb, bps),
369  step = ff_adpcm_step_table[c->step_index];
370  step_index = c->step_index + ff_adpcm_index_tables[bps - 2][nibble];
371  step_index = av_clip(step_index, 0, 88);
372 
373  sign = nibble & (1 << shift);
374  delta = av_mod_uintp2(nibble, shift);
375  diff = ((2 * delta + 1) * step) >> shift;
376  predictor = c->predictor;
377  if (sign) predictor -= diff;
378  else predictor += diff;
379 
380  c->predictor = av_clip_int16(predictor);
381  c->step_index = step_index;
382 
383  return (int16_t)c->predictor;
384 }
385 
386 static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble)
387 {
388  int step_index;
389  int predictor;
390  int diff, step;
391 
392  step = ff_adpcm_step_table[c->step_index];
393  step_index = c->step_index + ff_adpcm_index_table[nibble];
394  step_index = av_clip(step_index, 0, 88);
395 
396  diff = step >> 3;
397  if (nibble & 4) diff += step;
398  if (nibble & 2) diff += step >> 1;
399  if (nibble & 1) diff += step >> 2;
400 
401  if (nibble & 8)
402  predictor = c->predictor - diff;
403  else
404  predictor = c->predictor + diff;
405 
406  c->predictor = av_clip_int16(predictor);
407  c->step_index = step_index;
408 
409  return c->predictor;
410 }
411 
412 static inline int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
413 {
414  int predictor;
415 
416  predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
417  predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
418 
419  c->sample2 = c->sample1;
420  c->sample1 = av_clip_int16(predictor);
421  c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
422  if (c->idelta < 16) c->idelta = 16;
423  if (c->idelta > INT_MAX/768) {
424  av_log(NULL, AV_LOG_WARNING, "idelta overflow\n");
425  c->idelta = INT_MAX/768;
426  }
427 
428  return c->sample1;
429 }
430 
431 static inline int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
432 {
433  int step_index, predictor, sign, delta, diff, step;
434 
435  step = ff_adpcm_oki_step_table[c->step_index];
436  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
437  step_index = av_clip(step_index, 0, 48);
438 
439  sign = nibble & 8;
440  delta = nibble & 7;
441  diff = ((2 * delta + 1) * step) >> 3;
442  predictor = c->predictor;
443  if (sign) predictor -= diff;
444  else predictor += diff;
445 
446  c->predictor = av_clip_intp2(predictor, 11);
447  c->step_index = step_index;
448 
449  return c->predictor * 16;
450 }
451 
452 static inline int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
453 {
454  int sign, delta, diff;
455  int new_step;
456 
457  sign = nibble & 8;
458  delta = nibble & 7;
459  /* perform direct multiplication instead of series of jumps proposed by
460  * the reference ADPCM implementation since modern CPUs can do the mults
461  * quickly enough */
462  diff = ((2 * delta + 1) * c->step) >> 3;
463  /* predictor update is not so trivial: predictor is multiplied on 254/256 before updating */
464  c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
465  c->predictor = av_clip_int16(c->predictor);
466  /* calculate new step and clamp it to range 511..32767 */
467  new_step = (ff_adpcm_AdaptationTable[nibble & 7] * c->step) >> 8;
468  c->step = av_clip(new_step, 511, 32767);
469 
470  return (int16_t)c->predictor;
471 }
472 
473 static inline int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
474 {
475  int sign, delta, diff;
476 
477  sign = nibble & (1<<(size-1));
478  delta = nibble & ((1<<(size-1))-1);
479  diff = delta << (7 + c->step + shift);
480 
481  /* clamp result */
482  c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);
483 
484  /* calculate new step */
485  if (delta >= (2*size - 3) && c->step < 3)
486  c->step++;
487  else if (delta == 0 && c->step > 0)
488  c->step--;
489 
490  return (int16_t) c->predictor;
491 }
492 
494 {
495  if(!c->step) {
496  c->predictor = 0;
497  c->step = 127;
498  }
499 
500  c->predictor += (c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8;
501  c->predictor = av_clip_int16(c->predictor);
502  c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
503  c->step = av_clip(c->step, 127, 24576);
504  return c->predictor;
505 }
506 
507 static inline int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
508 {
509  c->predictor += ff_adpcm_mtaf_stepsize[c->step][nibble];
510  c->predictor = av_clip_int16(c->predictor);
511  c->step += ff_adpcm_index_table[nibble];
512  c->step = av_clip_uintp2(c->step, 5);
513  return c->predictor;
514 }
515 
516 static inline int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
517 {
518  int16_t index = c->step_index;
519  uint32_t lookup_sample = ff_adpcm_step_table[index];
520  int32_t sample = 0;
521 
522  if (nibble & 0x40)
523  sample += lookup_sample;
524  if (nibble & 0x20)
525  sample += lookup_sample >> 1;
526  if (nibble & 0x10)
527  sample += lookup_sample >> 2;
528  if (nibble & 0x08)
529  sample += lookup_sample >> 3;
530  if (nibble & 0x04)
531  sample += lookup_sample >> 4;
532  if (nibble & 0x02)
533  sample += lookup_sample >> 5;
534  if (nibble & 0x01)
535  sample += lookup_sample >> 6;
536  if (nibble & 0x80)
537  sample = -sample;
538 
539  sample += c->predictor;
541 
542  index += zork_index_table[(nibble >> 4) & 7];
543  index = av_clip(index, 0, 88);
544 
545  c->predictor = sample;
546  c->step_index = index;
547 
548  return sample;
549 }
550 
551 static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
552  const uint8_t *in, ADPCMChannelStatus *left,
553  ADPCMChannelStatus *right, int channels, int sample_offset)
554 {
555  int i, j;
556  int shift,filter,f0,f1;
557  int s_1,s_2;
558  int d,s,t;
559 
560  out0 += sample_offset;
561  if (channels == 1)
562  out1 = out0 + 28;
563  else
564  out1 += sample_offset;
565 
566  for(i=0;i<4;i++) {
567  shift = 12 - (in[4+i*2] & 15);
568  filter = in[4+i*2] >> 4;
570  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
571  filter=0;
572  }
573  if (shift < 0) {
574  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
575  shift = 0;
576  }
577  f0 = xa_adpcm_table[filter][0];
578  f1 = xa_adpcm_table[filter][1];
579 
580  s_1 = left->sample1;
581  s_2 = left->sample2;
582 
583  for(j=0;j<28;j++) {
584  d = in[16+i+j*4];
585 
586  t = sign_extend(d, 4);
587  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
588  s_2 = s_1;
589  s_1 = av_clip_int16(s);
590  out0[j] = s_1;
591  }
592 
593  if (channels == 2) {
594  left->sample1 = s_1;
595  left->sample2 = s_2;
596  s_1 = right->sample1;
597  s_2 = right->sample2;
598  }
599 
600  shift = 12 - (in[5+i*2] & 15);
601  filter = in[5+i*2] >> 4;
602  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table) || shift < 0) {
603  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
604  filter=0;
605  }
606  if (shift < 0) {
607  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
608  shift = 0;
609  }
610 
611  f0 = xa_adpcm_table[filter][0];
612  f1 = xa_adpcm_table[filter][1];
613 
614  for(j=0;j<28;j++) {
615  d = in[16+i+j*4];
616 
617  t = sign_extend(d >> 4, 4);
618  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
619  s_2 = s_1;
620  s_1 = av_clip_int16(s);
621  out1[j] = s_1;
622  }
623 
624  if (channels == 2) {
625  right->sample1 = s_1;
626  right->sample2 = s_2;
627  } else {
628  left->sample1 = s_1;
629  left->sample2 = s_2;
630  }
631 
632  out0 += 28 * (3 - channels);
633  out1 += 28 * (3 - channels);
634  }
635 
636  return 0;
637 }
638 
/**
 * Decode a packet of SWF (Flash) ADPCM into interleaved 16-bit samples.
 *
 * The stream starts with a 2-bit field giving the code width (2..5 bits)
 * and is organized in blocks of up to 4096 samples per channel; each
 * block begins with a raw 16-bit predictor and 6-bit step index per
 * channel, followed by the coded deltas.
 *
 * @param buf      packet payload
 * @param buf_size payload size in bytes
 * @param samples  output, interleaved across channels
 */
static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
{
    ADPCMDecodeContext *c = avctx->priv_data;
    GetBitContext gb;
    const int8_t *table;
    int k0, signmask, nb_bits, count;
    int size = buf_size * 8;
    int i;

    init_get_bits(&gb, buf, size);

    // read bits & initial values
    nb_bits = get_bits(&gb, 2) + 2;          // code width: 2..5 bits
    table = swf_index_tables[nb_bits - 2];   // matching step-index table
    k0 = 1 << (nb_bits - 2);                 // highest magnitude bit of a code
    signmask = 1 << (nb_bits - 1);           // sign bit of a code

    /* 22 bits per channel = 16-bit predictor + 6-bit step index header */
    while (get_bits_count(&gb) <= size - 22 * avctx->channels) {
        for (i = 0; i < avctx->channels; i++) {
            *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
            c->status[i].step_index = get_bits(&gb, 6);
        }

        /* up to 4095 coded samples per block (plus the header sample) */
        for (count = 0; get_bits_count(&gb) <= size - nb_bits * avctx->channels && count < 4095; count++) {
            int i;

            for (i = 0; i < avctx->channels; i++) {
                // similar to IMA adpcm
                int delta = get_bits(&gb, nb_bits);
                int step = ff_adpcm_step_table[c->status[i].step_index];
                int vpdiff = 0; // vpdiff = (delta+0.5)*step/4
                int k = k0;

                /* shift-and-add: each magnitude bit adds a halved share
                 * of the step */
                do {
                    if (delta & k)
                        vpdiff += step;
                    step >>= 1;
                    k >>= 1;
                } while (k);
                vpdiff += step; // the +0.5 rounding term

                if (delta & signmask)
                    c->status[i].predictor -= vpdiff;
                else
                    c->status[i].predictor += vpdiff;

                /* adapt step index on the magnitude bits only */
                c->status[i].step_index += table[delta & (~signmask)];

                c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
                c->status[i].predictor = av_clip_int16(c->status[i].predictor);

                *samples++ = c->status[i].predictor;
            }
        }
    }
}
695 
696 int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
697 {
698  int sample = sign_extend(nibble, 4) * (1 << shift);
699 
700  if (flag)
701  sample += (8 * cs->sample1) - (4 * cs->sample2);
702  else
703  sample += 4 * cs->sample1;
704 
705  sample = av_clip_int16(sample >> 2);
706 
707  cs->sample2 = cs->sample1;
708  cs->sample1 = sample;
709 
710  return sample;
711 }
712 
713 /**
714  * Get the number of samples (per channel) that will be decoded from the packet.
715  * In one case, this is actually the maximum number of samples possible to
716  * decode with the given buf_size.
717  *
718  * @param[out] coded_samples set to the number of samples as coded in the
719  * packet, or 0 if the codec does not encode the
720  * number of samples in each frame.
721  * @param[out] approx_nb_samples set to non-zero if the number of samples
722  * returned is an approximation.
723  */
725  int buf_size, int *coded_samples, int *approx_nb_samples)
726 {
727  ADPCMDecodeContext *s = avctx->priv_data;
728  int nb_samples = 0;
729  int ch = avctx->channels;
730  int has_coded_samples = 0;
731  int header_size;
732 
733  *coded_samples = 0;
734  *approx_nb_samples = 0;
735 
736  if(ch <= 0)
737  return 0;
738  if (buf_size > INT_MAX / 2)
739  return 0;
740 
741  switch (avctx->codec->id) {
742  /* constant, only check buf_size */
744  if (buf_size < 76 * ch)
745  return 0;
746  nb_samples = 128;
747  break;
749  if (buf_size < 34 * ch)
750  return 0;
751  nb_samples = 64;
752  break;
753  /* simple 4-bit adpcm */
766  nb_samples = buf_size * 2 / ch;
767  break;
768  }
769  if (nb_samples)
770  return nb_samples;
771 
772  /* simple 4-bit adpcm, with header */
773  header_size = 0;
774  switch (avctx->codec->id) {
779  case AV_CODEC_ID_ADPCM_IMA_ISS: header_size = 4 * ch; break;
780  case AV_CODEC_ID_ADPCM_IMA_SMJPEG: header_size = 4 * ch; break;
781  }
782  if (header_size > 0)
783  return (buf_size - header_size) * 2 / ch;
784 
785  /* more complex formats */
786  switch (avctx->codec->id) {
788  bytestream2_skip(gb, 4);
789  has_coded_samples = 1;
790  *coded_samples = bytestream2_get_le32u(gb);
791  nb_samples = FFMIN((buf_size - 8) * 2, *coded_samples);
792  bytestream2_seek(gb, -8, SEEK_CUR);
793  break;
795  has_coded_samples = 1;
796  *coded_samples = bytestream2_get_le32(gb);
797  *coded_samples -= *coded_samples % 28;
798  nb_samples = (buf_size - 12) / 30 * 28;
799  break;
801  has_coded_samples = 1;
802  *coded_samples = bytestream2_get_le32(gb);
803  nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch;
804  break;
806  nb_samples = (buf_size - ch) / ch * 2;
807  break;
811  /* maximum number of samples */
812  /* has internal offsets and a per-frame switch to signal raw 16-bit */
813  has_coded_samples = 1;
814  switch (avctx->codec->id) {
816  header_size = 4 + 9 * ch;
817  *coded_samples = bytestream2_get_le32(gb);
818  break;
820  header_size = 4 + 5 * ch;
821  *coded_samples = bytestream2_get_le32(gb);
822  break;
824  header_size = 4 + 5 * ch;
825  *coded_samples = bytestream2_get_be32(gb);
826  break;
827  }
828  *coded_samples -= *coded_samples % 28;
829  nb_samples = (buf_size - header_size) * 2 / ch;
830  nb_samples -= nb_samples % 28;
831  *approx_nb_samples = 1;
832  break;
834  if (avctx->block_align > 0)
835  buf_size = FFMIN(buf_size, avctx->block_align);
836  nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
837  break;
839  if (avctx->block_align > 0)
840  buf_size = FFMIN(buf_size, avctx->block_align);
841  if (buf_size < 4 * ch)
842  return AVERROR_INVALIDDATA;
843  nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
844  break;
846  if (avctx->block_align > 0)
847  buf_size = FFMIN(buf_size, avctx->block_align);
848  nb_samples = (buf_size - 4 * ch) * 2 / ch;
849  break;
851  {
852  int bsize = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
853  int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
854  if (avctx->block_align > 0)
855  buf_size = FFMIN(buf_size, avctx->block_align);
856  if (buf_size < 4 * ch)
857  return AVERROR_INVALIDDATA;
858  nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
859  break;
860  }
862  if (avctx->block_align > 0)
863  buf_size = FFMIN(buf_size, avctx->block_align);
864  nb_samples = (buf_size - 6 * ch) * 2 / ch;
865  break;
867  if (avctx->block_align > 0)
868  buf_size = FFMIN(buf_size, avctx->block_align);
869  nb_samples = (buf_size - 16 * (ch / 2)) * 2 / ch;
870  break;
874  {
875  int samples_per_byte;
876  switch (avctx->codec->id) {
877  case AV_CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break;
878  case AV_CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break;
879  case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
880  }
881  if (!s->status[0].step_index) {
882  if (buf_size < ch)
883  return AVERROR_INVALIDDATA;
884  nb_samples++;
885  buf_size -= ch;
886  }
887  nb_samples += buf_size * samples_per_byte / ch;
888  break;
889  }
891  {
892  int buf_bits = buf_size * 8 - 2;
893  int nbits = (bytestream2_get_byte(gb) >> 6) + 2;
894  int block_hdr_size = 22 * ch;
895  int block_size = block_hdr_size + nbits * ch * 4095;
896  int nblocks = buf_bits / block_size;
897  int bits_left = buf_bits - nblocks * block_size;
898  nb_samples = nblocks * 4096;
899  if (bits_left >= block_hdr_size)
900  nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
901  break;
902  }
905  if (avctx->extradata) {
906  nb_samples = buf_size * 14 / (8 * ch);
907  break;
908  }
909  has_coded_samples = 1;
910  bytestream2_skip(gb, 4); // channel size
911  *coded_samples = (avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE) ?
912  bytestream2_get_le32(gb) :
913  bytestream2_get_be32(gb);
914  buf_size -= 8 + 36 * ch;
915  buf_size /= ch;
916  nb_samples = buf_size / 8 * 14;
917  if (buf_size % 8 > 1)
918  nb_samples += (buf_size % 8 - 1) * 2;
919  *approx_nb_samples = 1;
920  break;
922  nb_samples = buf_size / (9 * ch) * 16;
923  break;
925  nb_samples = (buf_size / 128) * 224 / ch;
926  break;
929  nb_samples = buf_size / (16 * ch) * 28;
930  break;
932  nb_samples = buf_size / avctx->block_align * 32;
933  break;
935  nb_samples = buf_size / ch;
936  break;
937  }
938 
939  /* validate coded sample count */
940  if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
941  return AVERROR_INVALIDDATA;
942 
943  return nb_samples;
944 }
945 
946 static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
947  int *got_frame_ptr, AVPacket *avpkt)
948 {
949  AVFrame *frame = data;
950  const uint8_t *buf = avpkt->data;
951  int buf_size = avpkt->size;
952  ADPCMDecodeContext *c = avctx->priv_data;
953  ADPCMChannelStatus *cs;
954  int n, m, channel, i;
955  int16_t *samples;
956  int16_t **samples_p;
957  int st; /* stereo */
958  int count1, count2;
959  int nb_samples, coded_samples, approx_nb_samples, ret;
960  GetByteContext gb;
961 
962  bytestream2_init(&gb, buf, buf_size);
963  nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
964  if (nb_samples <= 0) {
965  av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
966  return AVERROR_INVALIDDATA;
967  }
968 
969  /* get output buffer */
970  frame->nb_samples = nb_samples;
971  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
972  return ret;
973  samples = (int16_t *)frame->data[0];
974  samples_p = (int16_t **)frame->extended_data;
975 
976  /* use coded_samples when applicable */
977  /* it is always <= nb_samples, so the output buffer will be large enough */
978  if (coded_samples) {
979  if (!approx_nb_samples && coded_samples != nb_samples)
980  av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
981  frame->nb_samples = nb_samples = coded_samples;
982  }
983 
984  st = avctx->channels == 2 ? 1 : 0;
985 
986  switch(avctx->codec->id) {
988  /* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
989  Channel data is interleaved per-chunk. */
990  for (channel = 0; channel < avctx->channels; channel++) {
991  int predictor;
992  int step_index;
993  cs = &(c->status[channel]);
994  /* (pppppp) (piiiiiii) */
995 
996  /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
997  predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
998  step_index = predictor & 0x7F;
999  predictor &= ~0x7F;
1000 
1001  if (cs->step_index == step_index) {
1002  int diff = predictor - cs->predictor;
1003  if (diff < 0)
1004  diff = - diff;
1005  if (diff > 0x7f)
1006  goto update;
1007  } else {
1008  update:
1009  cs->step_index = step_index;
1010  cs->predictor = predictor;
1011  }
1012 
1013  if (cs->step_index > 88u){
1014  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1015  channel, cs->step_index);
1016  return AVERROR_INVALIDDATA;
1017  }
1018 
1019  samples = samples_p[channel];
1020 
1021  for (m = 0; m < 64; m += 2) {
1022  int byte = bytestream2_get_byteu(&gb);
1023  samples[m ] = adpcm_ima_qt_expand_nibble(cs, byte & 0x0F);
1024  samples[m + 1] = adpcm_ima_qt_expand_nibble(cs, byte >> 4 );
1025  }
1026  }
1027  break;
1029  for(i=0; i<avctx->channels; i++){
1030  cs = &(c->status[i]);
1031  cs->predictor = samples_p[i][0] = sign_extend(bytestream2_get_le16u(&gb), 16);
1032 
1033  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1034  if (cs->step_index > 88u){
1035  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1036  i, cs->step_index);
1037  return AVERROR_INVALIDDATA;
1038  }
1039  }
1040 
1041  if (avctx->bits_per_coded_sample != 4) {
1042  int samples_per_block = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
1043  int block_size = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
1045  GetBitContext g;
1046 
1047  for (n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
1048  for (i = 0; i < avctx->channels; i++) {
1049  int j;
1050 
1051  cs = &c->status[i];
1052  samples = &samples_p[i][1 + n * samples_per_block];
1053  for (j = 0; j < block_size; j++) {
1054  temp[j] = buf[4 * avctx->channels + block_size * n * avctx->channels +
1055  (j % 4) + (j / 4) * (avctx->channels * 4) + i * 4];
1056  }
1057  ret = init_get_bits8(&g, (const uint8_t *)&temp, block_size);
1058  if (ret < 0)
1059  return ret;
1060  for (m = 0; m < samples_per_block; m++) {
1061  samples[m] = adpcm_ima_wav_expand_nibble(cs, &g,
1062  avctx->bits_per_coded_sample);
1063  }
1064  }
1065  }
1066  bytestream2_skip(&gb, avctx->block_align - avctx->channels * 4);
1067  } else {
1068  for (n = 0; n < (nb_samples - 1) / 8; n++) {
1069  for (i = 0; i < avctx->channels; i++) {
1070  cs = &c->status[i];
1071  samples = &samples_p[i][1 + n * 8];
1072  for (m = 0; m < 8; m += 2) {
1073  int v = bytestream2_get_byteu(&gb);
1074  samples[m ] = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1075  samples[m + 1] = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1076  }
1077  }
1078  }
1079  }
1080  break;
1081  case AV_CODEC_ID_ADPCM_4XM:
1082  for (i = 0; i < avctx->channels; i++)
1083  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1084 
1085  for (i = 0; i < avctx->channels; i++) {
1086  c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1087  if (c->status[i].step_index > 88u) {
1088  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1089  i, c->status[i].step_index);
1090  return AVERROR_INVALIDDATA;
1091  }
1092  }
1093 
1094  for (i = 0; i < avctx->channels; i++) {
1095  samples = (int16_t *)frame->data[i];
1096  cs = &c->status[i];
1097  for (n = nb_samples >> 1; n > 0; n--) {
1098  int v = bytestream2_get_byteu(&gb);
1099  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 4);
1100  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 4);
1101  }
1102  }
1103  break;
1104  case AV_CODEC_ID_ADPCM_AGM:
1105  for (i = 0; i < avctx->channels; i++)
1106  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1107  for (i = 0; i < avctx->channels; i++)
1108  c->status[i].step = sign_extend(bytestream2_get_le16u(&gb), 16);
1109 
1110  for (n = 0; n < nb_samples >> (1 - st); n++) {
1111  int v = bytestream2_get_byteu(&gb);
1112  *samples++ = adpcm_agm_expand_nibble(&c->status[0], v & 0xF);
1113  *samples++ = adpcm_agm_expand_nibble(&c->status[st], v >> 4 );
1114  }
1115  break;
1116  case AV_CODEC_ID_ADPCM_MS:
1117  {
1118  int block_predictor;
1119 
1120  if (avctx->channels > 2) {
1121  for (channel = 0; channel < avctx->channels; channel++) {
1122  samples = samples_p[channel];
1123  block_predictor = bytestream2_get_byteu(&gb);
1124  if (block_predictor > 6) {
1125  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[%d] = %d\n",
1126  channel, block_predictor);
1127  return AVERROR_INVALIDDATA;
1128  }
1129  c->status[channel].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1130  c->status[channel].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1131  c->status[channel].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1132  c->status[channel].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1133  c->status[channel].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1134  *samples++ = c->status[channel].sample2;
1135  *samples++ = c->status[channel].sample1;
1136  for(n = (nb_samples - 2) >> 1; n > 0; n--) {
1137  int byte = bytestream2_get_byteu(&gb);
1138  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte >> 4 );
1139  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte & 0x0F);
1140  }
1141  }
1142  } else {
1143  block_predictor = bytestream2_get_byteu(&gb);
1144  if (block_predictor > 6) {
1145  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n",
1146  block_predictor);
1147  return AVERROR_INVALIDDATA;
1148  }
1149  c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1150  c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1151  if (st) {
1152  block_predictor = bytestream2_get_byteu(&gb);
1153  if (block_predictor > 6) {
1154  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n",
1155  block_predictor);
1156  return AVERROR_INVALIDDATA;
1157  }
1158  c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1159  c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1160  }
1161  c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1162  if (st){
1163  c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1164  }
1165 
1166  c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1167  if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1168  c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1169  if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1170 
1171  *samples++ = c->status[0].sample2;
1172  if (st) *samples++ = c->status[1].sample2;
1173  *samples++ = c->status[0].sample1;
1174  if (st) *samples++ = c->status[1].sample1;
1175  for(n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
1176  int byte = bytestream2_get_byteu(&gb);
1177  *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], byte >> 4 );
1178  *samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F);
1179  }
1180  }
1181  break;
1182  }
1184  for (channel = 0; channel < avctx->channels; channel+=2) {
1185  bytestream2_skipu(&gb, 4);
1186  c->status[channel ].step = bytestream2_get_le16u(&gb) & 0x1f;
1187  c->status[channel + 1].step = bytestream2_get_le16u(&gb) & 0x1f;
1188  c->status[channel ].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1189  bytestream2_skipu(&gb, 2);
1190  c->status[channel + 1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1191  bytestream2_skipu(&gb, 2);
1192  for (n = 0; n < nb_samples; n+=2) {
1193  int v = bytestream2_get_byteu(&gb);
1194  samples_p[channel][n ] = adpcm_mtaf_expand_nibble(&c->status[channel], v & 0x0F);
1195  samples_p[channel][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel], v >> 4 );
1196  }
1197  for (n = 0; n < nb_samples; n+=2) {
1198  int v = bytestream2_get_byteu(&gb);
1199  samples_p[channel + 1][n ] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v & 0x0F);
1200  samples_p[channel + 1][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v >> 4 );
1201  }
1202  }
1203  break;
1205  for (channel = 0; channel < avctx->channels; channel++) {
1206  cs = &c->status[channel];
1207  cs->predictor = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16);
1208  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1209  if (cs->step_index > 88u){
1210  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1211  channel, cs->step_index);
1212  return AVERROR_INVALIDDATA;
1213  }
1214  }
1215  for (n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
1216  int v = bytestream2_get_byteu(&gb);
1217  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4 , 3);
1218  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1219  }
1220  break;
1222  {
1223  int last_byte = 0;
1224  int nibble;
1225  int decode_top_nibble_next = 0;
1226  int diff_channel;
1227  const int16_t *samples_end = samples + avctx->channels * nb_samples;
1228 
1229  bytestream2_skipu(&gb, 10);
1230  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1231  c->status[1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1232  c->status[0].step_index = bytestream2_get_byteu(&gb);
1233  c->status[1].step_index = bytestream2_get_byteu(&gb);
1234  if (c->status[0].step_index > 88u || c->status[1].step_index > 88u){
1235  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i/%i\n",
1236  c->status[0].step_index, c->status[1].step_index);
1237  return AVERROR_INVALIDDATA;
1238  }
1239  /* sign extend the predictors */
1240  diff_channel = c->status[1].predictor;
1241 
1242  /* DK3 ADPCM support macro */
1243 #define DK3_GET_NEXT_NIBBLE() \
1244  if (decode_top_nibble_next) { \
1245  nibble = last_byte >> 4; \
1246  decode_top_nibble_next = 0; \
1247  } else { \
1248  last_byte = bytestream2_get_byteu(&gb); \
1249  nibble = last_byte & 0x0F; \
1250  decode_top_nibble_next = 1; \
1251  }
1252 
1253  while (samples < samples_end) {
1254 
1255  /* for this algorithm, c->status[0] is the sum channel and
1256  * c->status[1] is the diff channel */
1257 
1258  /* process the first predictor of the sum channel */
1260  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1261 
1262  /* process the diff channel predictor */
1264  adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
1265 
1266  /* process the first pair of stereo PCM samples */
1267  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1268  *samples++ = c->status[0].predictor + c->status[1].predictor;
1269  *samples++ = c->status[0].predictor - c->status[1].predictor;
1270 
1271  /* process the second predictor of the sum channel */
1273  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1274 
1275  /* process the second pair of stereo PCM samples */
1276  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1277  *samples++ = c->status[0].predictor + c->status[1].predictor;
1278  *samples++ = c->status[0].predictor - c->status[1].predictor;
1279  }
1280 
1281  if ((bytestream2_tell(&gb) & 1))
1282  bytestream2_skip(&gb, 1);
1283  break;
1284  }
1286  for (channel = 0; channel < avctx->channels; channel++) {
1287  cs = &c->status[channel];
1288  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1289  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1290  if (cs->step_index > 88u){
1291  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1292  channel, cs->step_index);
1293  return AVERROR_INVALIDDATA;
1294  }
1295  }
1296 
1297  for (n = nb_samples >> (1 - st); n > 0; n--) {
1298  int v1, v2;
1299  int v = bytestream2_get_byteu(&gb);
1300  /* nibbles are swapped for mono */
1301  if (st) {
1302  v1 = v >> 4;
1303  v2 = v & 0x0F;
1304  } else {
1305  v2 = v >> 4;
1306  v1 = v & 0x0F;
1307  }
1308  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3);
1309  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
1310  }
1311  break;
1313  for (channel = 0; channel < avctx->channels; channel++) {
1314  cs = &c->status[channel];
1315  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1316  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1317  if (cs->step_index > 88u){
1318  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1319  channel, cs->step_index);
1320  return AVERROR_INVALIDDATA;
1321  }
1322  }
1323 
1324  for (int subframe = 0; subframe < nb_samples / 256; subframe++) {
1325  for (channel = 0; channel < avctx->channels; channel++) {
1326  samples = samples_p[channel] + 256 * subframe;
1327  for (n = 0; n < 256; n += 2) {
1328  int v = bytestream2_get_byteu(&gb);
1329  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1330  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1331  }
1332  }
1333  }
1334  break;
1336  for (channel = 0; channel < avctx->channels; channel++) {
1337  cs = &c->status[channel];
1338  samples = samples_p[channel];
1339  bytestream2_skip(&gb, 4);
1340  for (n = 0; n < nb_samples; n += 2) {
1341  int v = bytestream2_get_byteu(&gb);
1342  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1343  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1344  }
1345  }
1346  break;
1348  for (n = nb_samples >> (1 - st); n > 0; n--) {
1349  int v = bytestream2_get_byteu(&gb);
1350  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3);
1351  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1352  }
1353  break;
1355  for (n = nb_samples >> (1 - st); n > 0; n--) {
1356  int v = bytestream2_get_byteu(&gb);
1357  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0], v >> 4 );
1358  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0x0F);
1359  }
1360  break;
1362  for (n = nb_samples / 2; n > 0; n--) {
1363  for (channel = 0; channel < avctx->channels; channel++) {
1364  int v = bytestream2_get_byteu(&gb);
1365  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[channel], v >> 4 );
1366  samples[st] = adpcm_ima_qt_expand_nibble(&c->status[channel], v & 0x0F);
1367  }
1368  samples += avctx->channels;
1369  }
1370  break;
1372  for (n = nb_samples / 2; n > 0; n--) {
1373  for (channel = 0; channel < avctx->channels; channel++) {
1374  int v = bytestream2_get_byteu(&gb);
1375  *samples++ = adpcm_ima_alp_expand_nibble(&c->status[channel], v >> 4 , 2);
1376  samples[st] = adpcm_ima_alp_expand_nibble(&c->status[channel], v & 0x0F, 2);
1377  }
1378  samples += avctx->channels;
1379  }
1380  break;
1382  for (channel = 0; channel < avctx->channels; channel++) {
1383  int16_t *smp = samples_p[channel];
1384  for (n = 0; n < nb_samples / 2; n++) {
1385  int v = bytestream2_get_byteu(&gb);
1386  *smp++ = adpcm_ima_cunning_expand_nibble(&c->status[channel], v & 0x0F);
1387  *smp++ = adpcm_ima_cunning_expand_nibble(&c->status[channel], v >> 4);
1388  }
1389  }
1390  break;
1392  for (n = nb_samples >> (1 - st); n > 0; n--) {
1393  int v = bytestream2_get_byteu(&gb);
1394  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[0], v >> 4 );
1395  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[st], v & 0x0F);
1396  }
1397  break;
1399  for (channel = 0; channel < avctx->channels; channel++) {
1400  cs = &c->status[channel];
1401  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1402  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1403  if (cs->step_index > 88u){
1404  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1405  channel, cs->step_index);
1406  return AVERROR_INVALIDDATA;
1407  }
1408  }
1409  for (n = 0; n < nb_samples / 2; n++) {
1410  int byte[2];
1411 
1412  byte[0] = bytestream2_get_byteu(&gb);
1413  if (st)
1414  byte[1] = bytestream2_get_byteu(&gb);
1415  for(channel = 0; channel < avctx->channels; channel++) {
1416  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] & 0x0F, 3);
1417  }
1418  for(channel = 0; channel < avctx->channels; channel++) {
1419  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] >> 4 , 3);
1420  }
1421  }
1422  break;
1424  if (c->vqa_version == 3) {
1425  for (channel = 0; channel < avctx->channels; channel++) {
1426  int16_t *smp = samples_p[channel];
1427 
1428  for (n = nb_samples / 2; n > 0; n--) {
1429  int v = bytestream2_get_byteu(&gb);
1430  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1431  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1432  }
1433  }
1434  } else {
1435  for (n = nb_samples / 2; n > 0; n--) {
1436  for (channel = 0; channel < avctx->channels; channel++) {
1437  int v = bytestream2_get_byteu(&gb);
1438  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1439  samples[st] = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1440  }
1441  samples += avctx->channels;
1442  }
1443  }
1444  bytestream2_seek(&gb, 0, SEEK_END);
1445  break;
1446  case AV_CODEC_ID_ADPCM_XA:
1447  {
1448  int16_t *out0 = samples_p[0];
1449  int16_t *out1 = samples_p[1];
1450  int samples_per_block = 28 * (3 - avctx->channels) * 4;
1451  int sample_offset = 0;
1452  int bytes_remaining;
1453  while (bytestream2_get_bytes_left(&gb) >= 128) {
1454  if ((ret = xa_decode(avctx, out0, out1, buf + bytestream2_tell(&gb),
1455  &c->status[0], &c->status[1],
1456  avctx->channels, sample_offset)) < 0)
1457  return ret;
1458  bytestream2_skipu(&gb, 128);
1459  sample_offset += samples_per_block;
1460  }
1461  /* Less than a full block of data left, e.g. when reading from
1462  * 2324 byte per sector XA; the remainder is padding */
1463  bytes_remaining = bytestream2_get_bytes_left(&gb);
1464  if (bytes_remaining > 0) {
1465  bytestream2_skip(&gb, bytes_remaining);
1466  }
1467  break;
1468  }
1470  for (i=0; i<=st; i++) {
1471  c->status[i].step_index = bytestream2_get_le32u(&gb);
1472  if (c->status[i].step_index > 88u) {
1473  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1474  i, c->status[i].step_index);
1475  return AVERROR_INVALIDDATA;
1476  }
1477  }
1478  for (i=0; i<=st; i++) {
1479  c->status[i].predictor = bytestream2_get_le32u(&gb);
1480  if (FFABS((int64_t)c->status[i].predictor) > (1<<16))
1481  return AVERROR_INVALIDDATA;
1482  }
1483 
1484  for (n = nb_samples >> (1 - st); n > 0; n--) {
1485  int byte = bytestream2_get_byteu(&gb);
1486  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 3);
1487  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 3);
1488  }
1489  break;
1491  for (n = nb_samples >> (1 - st); n > 0; n--) {
1492  int byte = bytestream2_get_byteu(&gb);
1493  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 6);
1494  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 6);
1495  }
1496  break;
1497  case AV_CODEC_ID_ADPCM_EA:
1498  {
1499  int previous_left_sample, previous_right_sample;
1500  int current_left_sample, current_right_sample;
1501  int next_left_sample, next_right_sample;
1502  int coeff1l, coeff2l, coeff1r, coeff2r;
1503  int shift_left, shift_right;
1504 
1505  /* Each EA ADPCM frame has a 12-byte header followed by 30-byte pieces,
1506  each coding 28 stereo samples. */
1507 
1508  if(avctx->channels != 2)
1509  return AVERROR_INVALIDDATA;
1510 
1511  current_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1512  previous_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1513  current_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1514  previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1515 
1516  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1517  int byte = bytestream2_get_byteu(&gb);
1518  coeff1l = ea_adpcm_table[ byte >> 4 ];
1519  coeff2l = ea_adpcm_table[(byte >> 4 ) + 4];
1520  coeff1r = ea_adpcm_table[ byte & 0x0F];
1521  coeff2r = ea_adpcm_table[(byte & 0x0F) + 4];
1522 
1523  byte = bytestream2_get_byteu(&gb);
1524  shift_left = 20 - (byte >> 4);
1525  shift_right = 20 - (byte & 0x0F);
1526 
1527  for (count2 = 0; count2 < 28; count2++) {
1528  byte = bytestream2_get_byteu(&gb);
1529  next_left_sample = sign_extend(byte >> 4, 4) * (1 << shift_left);
1530  next_right_sample = sign_extend(byte, 4) * (1 << shift_right);
1531 
1532  next_left_sample = (next_left_sample +
1533  (current_left_sample * coeff1l) +
1534  (previous_left_sample * coeff2l) + 0x80) >> 8;
1535  next_right_sample = (next_right_sample +
1536  (current_right_sample * coeff1r) +
1537  (previous_right_sample * coeff2r) + 0x80) >> 8;
1538 
1539  previous_left_sample = current_left_sample;
1540  current_left_sample = av_clip_int16(next_left_sample);
1541  previous_right_sample = current_right_sample;
1542  current_right_sample = av_clip_int16(next_right_sample);
1543  *samples++ = current_left_sample;
1544  *samples++ = current_right_sample;
1545  }
1546  }
1547 
1548  bytestream2_skip(&gb, 2); // Skip terminating 0x0000
1549 
1550  break;
1551  }
1553  {
1554  int coeff[2][2], shift[2];
1555 
1556  for(channel = 0; channel < avctx->channels; channel++) {
1557  int byte = bytestream2_get_byteu(&gb);
1558  for (i=0; i<2; i++)
1559  coeff[channel][i] = ea_adpcm_table[(byte >> 4) + 4*i];
1560  shift[channel] = 20 - (byte & 0x0F);
1561  }
1562  for (count1 = 0; count1 < nb_samples / 2; count1++) {
1563  int byte[2];
1564 
1565  byte[0] = bytestream2_get_byteu(&gb);
1566  if (st) byte[1] = bytestream2_get_byteu(&gb);
1567  for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
1568  for(channel = 0; channel < avctx->channels; channel++) {
1569  int sample = sign_extend(byte[channel] >> i, 4) * (1 << shift[channel]);
1570  sample = (sample +
1571  c->status[channel].sample1 * coeff[channel][0] +
1572  c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
1573  c->status[channel].sample2 = c->status[channel].sample1;
1574  c->status[channel].sample1 = av_clip_int16(sample);
1575  *samples++ = c->status[channel].sample1;
1576  }
1577  }
1578  }
1579  bytestream2_seek(&gb, 0, SEEK_END);
1580  break;
1581  }
1584  case AV_CODEC_ID_ADPCM_EA_R3: {
1585  /* channel numbering
1586  2chan: 0=fl, 1=fr
1587  4chan: 0=fl, 1=rl, 2=fr, 3=rr
1588  6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
1589  const int big_endian = avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R3;
1590  int previous_sample, current_sample, next_sample;
1591  int coeff1, coeff2;
1592  int shift;
1593  unsigned int channel;
1594  uint16_t *samplesC;
1595  int count = 0;
1596  int offsets[6];
1597 
1598  for (channel=0; channel<avctx->channels; channel++)
1599  offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) :
1600  bytestream2_get_le32(&gb)) +
1601  (avctx->channels + 1) * 4;
1602 
1603  for (channel=0; channel<avctx->channels; channel++) {
1604  bytestream2_seek(&gb, offsets[channel], SEEK_SET);
1605  samplesC = samples_p[channel];
1606 
1607  if (avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R1) {
1608  current_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1609  previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1610  } else {
1611  current_sample = c->status[channel].predictor;
1612  previous_sample = c->status[channel].prev_sample;
1613  }
1614 
1615  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1616  int byte = bytestream2_get_byte(&gb);
1617  if (byte == 0xEE) { /* only seen in R2 and R3 */
1618  current_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1619  previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1620 
1621  for (count2=0; count2<28; count2++)
1622  *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);
1623  } else {
1624  coeff1 = ea_adpcm_table[ byte >> 4 ];
1625  coeff2 = ea_adpcm_table[(byte >> 4) + 4];
1626  shift = 20 - (byte & 0x0F);
1627 
1628  for (count2=0; count2<28; count2++) {
1629  if (count2 & 1)
1630  next_sample = (unsigned)sign_extend(byte, 4) << shift;
1631  else {
1632  byte = bytestream2_get_byte(&gb);
1633  next_sample = (unsigned)sign_extend(byte >> 4, 4) << shift;
1634  }
1635 
1636  next_sample += (current_sample * coeff1) +
1637  (previous_sample * coeff2);
1638  next_sample = av_clip_int16(next_sample >> 8);
1639 
1640  previous_sample = current_sample;
1641  current_sample = next_sample;
1642  *samplesC++ = current_sample;
1643  }
1644  }
1645  }
1646  if (!count) {
1647  count = count1;
1648  } else if (count != count1) {
1649  av_log(avctx, AV_LOG_WARNING, "per-channel sample count mismatch\n");
1650  count = FFMAX(count, count1);
1651  }
1652 
1653  if (avctx->codec->id != AV_CODEC_ID_ADPCM_EA_R1) {
1654  c->status[channel].predictor = current_sample;
1655  c->status[channel].prev_sample = previous_sample;
1656  }
1657  }
1658 
1659  frame->nb_samples = count * 28;
1660  bytestream2_seek(&gb, 0, SEEK_END);
1661  break;
1662  }
1664  for (channel=0; channel<avctx->channels; channel++) {
1665  int coeff[2][4], shift[4];
1666  int16_t *s = samples_p[channel];
1667  for (n = 0; n < 4; n++, s += 32) {
1668  int val = sign_extend(bytestream2_get_le16u(&gb), 16);
1669  for (i=0; i<2; i++)
1670  coeff[i][n] = ea_adpcm_table[(val&0x0F)+4*i];
1671  s[0] = val & ~0x0F;
1672 
1673  val = sign_extend(bytestream2_get_le16u(&gb), 16);
1674  shift[n] = 20 - (val & 0x0F);
1675  s[1] = val & ~0x0F;
1676  }
1677 
1678  for (m=2; m<32; m+=2) {
1679  s = &samples_p[channel][m];
1680  for (n = 0; n < 4; n++, s += 32) {
1681  int level, pred;
1682  int byte = bytestream2_get_byteu(&gb);
1683 
1684  level = sign_extend(byte >> 4, 4) * (1 << shift[n]);
1685  pred = s[-1] * coeff[0][n] + s[-2] * coeff[1][n];
1686  s[0] = av_clip_int16((level + pred + 0x80) >> 8);
1687 
1688  level = sign_extend(byte, 4) * (1 << shift[n]);
1689  pred = s[0] * coeff[0][n] + s[-1] * coeff[1][n];
1690  s[1] = av_clip_int16((level + pred + 0x80) >> 8);
1691  }
1692  }
1693  }
1694  break;
1696  av_assert0(avctx->channels == 1);
1697 
1698  /*
1699  * Header format:
1700  * int16_t predictor;
1701  * uint8_t step_index;
1702  * uint8_t reserved;
1703  * uint32_t frame_size;
1704  *
1705  * Some implementations have step_index as 16-bits, but others
1706  * only use the lower 8 and store garbage in the upper 8.
1707  */
1708  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1709  c->status[0].step_index = bytestream2_get_byteu(&gb);
1710  bytestream2_skipu(&gb, 5);
1711  if (c->status[0].step_index > 88u) {
1712  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1713  c->status[0].step_index);
1714  return AVERROR_INVALIDDATA;
1715  }
1716 
1717  for (n = nb_samples >> 1; n > 0; n--) {
1718  int v = bytestream2_get_byteu(&gb);
1719 
1720  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1721  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v & 0xf, 3);
1722  }
1723 
1724  if (nb_samples & 1) {
1725  int v = bytestream2_get_byteu(&gb);
1726  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1727 
1728  if (v & 0x0F) {
1729  /* Holds true on all the http://samples.mplayerhq.hu/amv samples. */
1730  av_log(avctx, AV_LOG_WARNING, "Last nibble set on packet with odd sample count.\n");
1731  av_log(avctx, AV_LOG_WARNING, "Sample will be skipped.\n");
1732  }
1733  }
1734  break;
1736  for (i = 0; i < avctx->channels; i++) {
1737  c->status[i].predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1738  c->status[i].step_index = bytestream2_get_byteu(&gb);
1739  bytestream2_skipu(&gb, 1);
1740  if (c->status[i].step_index > 88u) {
1741  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1742  c->status[i].step_index);
1743  return AVERROR_INVALIDDATA;
1744  }
1745  }
1746 
1747  for (n = nb_samples >> (1 - st); n > 0; n--) {
1748  int v = bytestream2_get_byteu(&gb);
1749 
1750  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0 ], v >> 4 );
1751  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf);
1752  }
1753  break;
1754  case AV_CODEC_ID_ADPCM_CT:
1755  for (n = nb_samples >> (1 - st); n > 0; n--) {
1756  int v = bytestream2_get_byteu(&gb);
1757  *samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 );
1758  *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
1759  }
1760  break;
1764  if (!c->status[0].step_index) {
1765  /* the first byte is a raw sample */
1766  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1767  if (st)
1768  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1769  c->status[0].step_index = 1;
1770  nb_samples--;
1771  }
1772  if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_4) {
1773  for (n = nb_samples >> (1 - st); n > 0; n--) {
1774  int byte = bytestream2_get_byteu(&gb);
1775  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1776  byte >> 4, 4, 0);
1777  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1778  byte & 0x0F, 4, 0);
1779  }
1780  } else if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_3) {
1781  for (n = (nb_samples<<st) / 3; n > 0; n--) {
1782  int byte = bytestream2_get_byteu(&gb);
1783  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1784  byte >> 5 , 3, 0);
1785  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1786  (byte >> 2) & 0x07, 3, 0);
1787  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1788  byte & 0x03, 2, 0);
1789  }
1790  } else {
1791  for (n = nb_samples >> (2 - st); n > 0; n--) {
1792  int byte = bytestream2_get_byteu(&gb);
1793  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1794  byte >> 6 , 2, 2);
1795  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1796  (byte >> 4) & 0x03, 2, 2);
1797  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1798  (byte >> 2) & 0x03, 2, 2);
1799  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1800  byte & 0x03, 2, 2);
1801  }
1802  }
1803  break;
1804  case AV_CODEC_ID_ADPCM_SWF:
1805  adpcm_swf_decode(avctx, buf, buf_size, samples);
1806  bytestream2_seek(&gb, 0, SEEK_END);
1807  break;
1809  for (n = nb_samples >> (1 - st); n > 0; n--) {
1810  int v = bytestream2_get_byteu(&gb);
1811  *samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F);
1812  *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 );
1813  }
1814  break;
1816  for (channel = 0; channel < avctx->channels; channel++) {
1817  samples = samples_p[channel];
1818  for (n = nb_samples >> 1; n > 0; n--) {
1819  int v = bytestream2_get_byteu(&gb);
1820  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v & 0x0F);
1821  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v >> 4 );
1822  }
1823  }
1824  break;
1825  case AV_CODEC_ID_ADPCM_AFC:
1826  {
1827  int samples_per_block;
1828  int blocks;
1829 
1830  if (avctx->extradata && avctx->extradata_size == 1 && avctx->extradata[0]) {
1831  samples_per_block = avctx->extradata[0] / 16;
1832  blocks = nb_samples / avctx->extradata[0];
1833  } else {
1834  samples_per_block = nb_samples / 16;
1835  blocks = 1;
1836  }
1837 
1838  for (m = 0; m < blocks; m++) {
1839  for (channel = 0; channel < avctx->channels; channel++) {
1840  int prev1 = c->status[channel].sample1;
1841  int prev2 = c->status[channel].sample2;
1842 
1843  samples = samples_p[channel] + m * 16;
1844  /* Read in every sample for this channel. */
1845  for (i = 0; i < samples_per_block; i++) {
1846  int byte = bytestream2_get_byteu(&gb);
1847  int scale = 1 << (byte >> 4);
1848  int index = byte & 0xf;
1849  int factor1 = ff_adpcm_afc_coeffs[0][index];
1850  int factor2 = ff_adpcm_afc_coeffs[1][index];
1851 
1852  /* Decode 16 samples. */
1853  for (n = 0; n < 16; n++) {
1854  int32_t sampledat;
1855 
1856  if (n & 1) {
1857  sampledat = sign_extend(byte, 4);
1858  } else {
1859  byte = bytestream2_get_byteu(&gb);
1860  sampledat = sign_extend(byte >> 4, 4);
1861  }
1862 
1863  sampledat = ((prev1 * factor1 + prev2 * factor2) >> 11) +
1864  sampledat * scale;
1865  *samples = av_clip_int16(sampledat);
1866  prev2 = prev1;
1867  prev1 = *samples++;
1868  }
1869  }
1870 
1871  c->status[channel].sample1 = prev1;
1872  c->status[channel].sample2 = prev2;
1873  }
1874  }
1875  bytestream2_seek(&gb, 0, SEEK_END);
1876  break;
1877  }
1878  case AV_CODEC_ID_ADPCM_THP:
1880  {
1881  int table[14][16];
1882  int ch;
1883 
1884 #define THP_GET16(g) \
1885  sign_extend( \
1886  avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE ? \
1887  bytestream2_get_le16u(&(g)) : \
1888  bytestream2_get_be16u(&(g)), 16)
1889 
1890  if (avctx->extradata) {
1892  if (avctx->extradata_size < 32 * avctx->channels) {
1893  av_log(avctx, AV_LOG_ERROR, "Missing coeff table\n");
1894  return AVERROR_INVALIDDATA;
1895  }
1896 
1897  bytestream2_init(&tb, avctx->extradata, avctx->extradata_size);
1898  for (i = 0; i < avctx->channels; i++)
1899  for (n = 0; n < 16; n++)
1900  table[i][n] = THP_GET16(tb);
1901  } else {
1902  for (i = 0; i < avctx->channels; i++)
1903  for (n = 0; n < 16; n++)
1904  table[i][n] = THP_GET16(gb);
1905 
1906  if (!c->has_status) {
1907  /* Initialize the previous sample. */
1908  for (i = 0; i < avctx->channels; i++) {
1909  c->status[i].sample1 = THP_GET16(gb);
1910  c->status[i].sample2 = THP_GET16(gb);
1911  }
1912  c->has_status = 1;
1913  } else {
1914  bytestream2_skip(&gb, avctx->channels * 4);
1915  }
1916  }
1917 
1918  for (ch = 0; ch < avctx->channels; ch++) {
1919  samples = samples_p[ch];
1920 
1921  /* Read in every sample for this channel. */
1922  for (i = 0; i < (nb_samples + 13) / 14; i++) {
1923  int byte = bytestream2_get_byteu(&gb);
1924  int index = (byte >> 4) & 7;
1925  unsigned int exp = byte & 0x0F;
1926  int64_t factor1 = table[ch][index * 2];
1927  int64_t factor2 = table[ch][index * 2 + 1];
1928 
1929  /* Decode 14 samples. */
1930  for (n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
1931  int32_t sampledat;
1932 
1933  if (n & 1) {
1934  sampledat = sign_extend(byte, 4);
1935  } else {
1936  byte = bytestream2_get_byteu(&gb);
1937  sampledat = sign_extend(byte >> 4, 4);
1938  }
1939 
1940  sampledat = ((c->status[ch].sample1 * factor1
1941  + c->status[ch].sample2 * factor2) >> 11) + sampledat * (1 << exp);
1942  *samples = av_clip_int16(sampledat);
1943  c->status[ch].sample2 = c->status[ch].sample1;
1944  c->status[ch].sample1 = *samples++;
1945  }
1946  }
1947  }
1948  break;
1949  }
1950  case AV_CODEC_ID_ADPCM_DTK:
1951  for (channel = 0; channel < avctx->channels; channel++) {
1952  samples = samples_p[channel];
1953 
1954  /* Read in every sample for this channel. */
1955  for (i = 0; i < nb_samples / 28; i++) {
1956  int byte, header;
1957  if (channel)
1958  bytestream2_skipu(&gb, 1);
1959  header = bytestream2_get_byteu(&gb);
1960  bytestream2_skipu(&gb, 3 - channel);
1961 
1962  /* Decode 28 samples. */
1963  for (n = 0; n < 28; n++) {
1964  int32_t sampledat, prev;
1965 
1966  switch (header >> 4) {
1967  case 1:
1968  prev = (c->status[channel].sample1 * 0x3c);
1969  break;
1970  case 2:
1971  prev = (c->status[channel].sample1 * 0x73) - (c->status[channel].sample2 * 0x34);
1972  break;
1973  case 3:
1974  prev = (c->status[channel].sample1 * 0x62) - (c->status[channel].sample2 * 0x37);
1975  break;
1976  default:
1977  prev = 0;
1978  }
1979 
1980  prev = av_clip_intp2((prev + 0x20) >> 6, 21);
1981 
1982  byte = bytestream2_get_byteu(&gb);
1983  if (!channel)
1984  sampledat = sign_extend(byte, 4);
1985  else
1986  sampledat = sign_extend(byte >> 4, 4);
1987 
1988  sampledat = ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
1989  *samples++ = av_clip_int16(sampledat >> 6);
1990  c->status[channel].sample2 = c->status[channel].sample1;
1991  c->status[channel].sample1 = sampledat;
1992  }
1993  }
1994  if (!channel)
1995  bytestream2_seek(&gb, 0, SEEK_SET);
1996  }
1997  break;
1998  case AV_CODEC_ID_ADPCM_PSX:
1999  for (int block = 0; block < avpkt->size / FFMAX(avctx->block_align, 16 * avctx->channels); block++) {
2000  int nb_samples_per_block = 28 * FFMAX(avctx->block_align, 16 * avctx->channels) / (16 * avctx->channels);
2001  for (channel = 0; channel < avctx->channels; channel++) {
2002  samples = samples_p[channel] + block * nb_samples_per_block;
2003  av_assert0((block + 1) * nb_samples_per_block <= nb_samples);
2004 
2005  /* Read in every sample for this channel. */
2006  for (i = 0; i < nb_samples_per_block / 28; i++) {
2007  int filter, shift, flag, byte;
2008 
2009  filter = bytestream2_get_byteu(&gb);
2010  shift = filter & 0xf;
2011  filter = filter >> 4;
2013  return AVERROR_INVALIDDATA;
2014  flag = bytestream2_get_byteu(&gb);
2015 
2016  /* Decode 28 samples. */
2017  for (n = 0; n < 28; n++) {
2018  int sample = 0, scale;
2019 
2020  if (flag < 0x07) {
2021  if (n & 1) {
2022  scale = sign_extend(byte >> 4, 4);
2023  } else {
2024  byte = bytestream2_get_byteu(&gb);
2025  scale = sign_extend(byte, 4);
2026  }
2027 
2028  scale = scale * (1 << 12);
2029  sample = (int)((scale >> shift) + (c->status[channel].sample1 * xa_adpcm_table[filter][0] + c->status[channel].sample2 * xa_adpcm_table[filter][1]) / 64);
2030  }
2031  *samples++ = av_clip_int16(sample);
2032  c->status[channel].sample2 = c->status[channel].sample1;
2033  c->status[channel].sample1 = sample;
2034  }
2035  }
2036  }
2037  }
2038  break;
2040  /*
2041  * The format of each block:
2042  * uint8_t left_control;
2043  * uint4_t left_samples[nb_samples];
2044  * ---- and if stereo ----
2045  * uint8_t right_control;
2046  * uint4_t right_samples[nb_samples];
2047  *
2048  * Format of the control byte:
2049  * MSB [SSSSRDRR] LSB
2050  * S = (Shift Amount - 2)
2051  * D = Decoder flag.
2052  * R = Reserved
2053  *
2054  * Each block relies on the previous two samples of each channel.
2055  * They should be 0 initially.
2056  */
2057  for (int block = 0; block < avpkt->size / avctx->block_align; block++) {
2058  for (channel = 0; channel < avctx->channels; channel++) {
2059  int control, shift;
2060 
2061  samples = samples_p[channel] + block * 32;
2062  cs = c->status + channel;
2063 
2064  /* Get the control byte and decode the samples, 2 at a time. */
2065  control = bytestream2_get_byteu(&gb);
2066  shift = (control >> 4) + 2;
2067 
2068  for (n = 0; n < 16; n++) {
2069  int sample = bytestream2_get_byteu(&gb);
2070  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 4, shift, control & 0x04);
2071  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 0, shift, control & 0x04);
2072  }
2073  }
2074  }
2075  break;
2077  for (n = 0; n < nb_samples * avctx->channels; n++) {
2078  int v = bytestream2_get_byteu(&gb);
2079  *samples++ = adpcm_zork_expand_nibble(&c->status[n % avctx->channels], v);
2080  }
2081  break;
2083  for (n = nb_samples / 2; n > 0; n--) {
2084  for (channel = 0; channel < avctx->channels; channel++) {
2085  int v = bytestream2_get_byteu(&gb);
2086  *samples++ = adpcm_ima_mtf_expand_nibble(&c->status[channel], v >> 4);
2087  samples[st] = adpcm_ima_mtf_expand_nibble(&c->status[channel], v & 0x0F);
2088  }
2089  samples += avctx->channels;
2090  }
2091  break;
2092  default:
2093  av_assert0(0); // unsupported codec_id should not happen
2094  }
2095 
2096  if (avpkt->size && bytestream2_tell(&gb) == 0) {
2097  av_log(avctx, AV_LOG_ERROR, "Nothing consumed\n");
2098  return AVERROR_INVALIDDATA;
2099  }
2100 
2101  *got_frame_ptr = 1;
2102 
2103  if (avpkt->size < bytestream2_tell(&gb)) {
2104  av_log(avctx, AV_LOG_ERROR, "Overread of %d < %d\n", avpkt->size, bytestream2_tell(&gb));
2105  return avpkt->size;
2106  }
2107 
2108  return bytestream2_tell(&gb);
2109 }
2110 
/**
 * Reset per-channel predictor state so decoding can restart cleanly
 * after a seek (AVCodec.flush callback).
 *
 * NOTE(review): this dump lost the `case AV_CODEC_ID_...:` labels of the
 * switch below (the original file had labels on the now-missing lines).
 * As written, control always reaches `default:` and the two reset loops
 * are unreachable dead code.  Restore the labels from the upstream
 * FFmpeg 4.4 sources before building.
 */
static void adpcm_flush(AVCodecContext *avctx)
{
    ADPCMDecodeContext *c = avctx->priv_data;

    switch(avctx->codec_id) {
    /* NOTE(review): missing case label here — a codec whose only inter-frame
     * state is ADPCMChannelStatus.step; confirm against upstream. */
        for (int channel = 0; channel < avctx->channels; channel++)
            c->status[channel].step = 0;
        break;

    /* NOTE(review): missing case label here — a codec that predicts from the
     * last two decoded samples (sample1/sample2); confirm against upstream. */
        for (int channel = 0; channel < avctx->channels; channel++) {
            c->status[channel].sample1 = 0;
            c->status[channel].sample2 = 0;
        }
        break;

    /* NOTE(review): missing case labels here — presumably the IMA-family
     * codecs that keep predictor/step_index state; confirm against upstream. */
        for (int channel = 0; channel < avctx->channels; channel++) {
            c->status[channel].predictor = 0;
            c->status[channel].step_index = 0;
        }
        break;

    default:
        /* Other codecs may want to handle this during decoding. */
        c->has_status = 0;
        return;
    }

    /* State was explicitly reset above; the decoder may rely on it. */
    c->has_status = 1;
}
2146 
2147 
2148 static const enum AVSampleFormat sample_fmts_s16[] = { AV_SAMPLE_FMT_S16,
2150 static const enum AVSampleFormat sample_fmts_s16p[] = { AV_SAMPLE_FMT_S16P,
2152 static const enum AVSampleFormat sample_fmts_both[] = { AV_SAMPLE_FMT_S16,
2155 
/*
 * Expand to the AVCodec registration for one ADPCM decoder variant.
 *
 * id_:          the AV_CODEC_ID_* value this decoder handles
 * sample_fmts_: AV_SAMPLE_FMT_NONE-terminated list of output sample formats
 * name_:        short codec name; also pasted into the exported symbol
 *               ff_<name_>_decoder
 * long_name_:   human-readable description (dropped by NULL_IF_CONFIG_SMALL
 *               in size-constrained builds)
 *
 * All variants share the same init/decode/flush entry points and context;
 * codec_id selects the per-format path inside adpcm_decode_frame().
 */
#define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_) \
AVCodec ff_ ## name_ ## _decoder = { \
    .name           = #name_, \
    .long_name      = NULL_IF_CONFIG_SMALL(long_name_), \
    .type           = AVMEDIA_TYPE_AUDIO, \
    .id             = id_, \
    .priv_data_size = sizeof(ADPCMDecodeContext), \
    .init           = adpcm_decode_init, \
    .decode         = adpcm_decode_frame, \
    .flush          = adpcm_flush, \
    .capabilities   = AV_CODEC_CAP_DR1, \
    .sample_fmts    = sample_fmts_, \
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE, \
}
2170 
/* Note: Do not forget to add new entries to the Makefile as well. */
/* NOTE(review): this dump dropped a few registration lines that exist in
 * upstream FFmpeg 4.4 (e.g. the 4XM, IMA_WAV and MTAF entries) — cross-check
 * against the upstream file before treating this list as complete. */
ADPCM_DECODER(AV_CODEC_ID_ADPCM_AFC, sample_fmts_s16p, adpcm_afc, "ADPCM Nintendo Gamecube AFC");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_AGM, sample_fmts_s16, adpcm_agm, "ADPCM AmuseGraphics Movie");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_AICA, sample_fmts_s16p, adpcm_aica, "ADPCM Yamaha AICA");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_ARGO, sample_fmts_s16p, adpcm_argo, "ADPCM Argonaut Games");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_CT, sample_fmts_s16, adpcm_ct, "ADPCM Creative Technology");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_DTK, sample_fmts_s16p, adpcm_dtk, "ADPCM Nintendo Gamecube DTK");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA, sample_fmts_s16, adpcm_ea, "ADPCM Electronic Arts");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_MAXIS_XA, sample_fmts_s16, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R1, sample_fmts_s16p, adpcm_ea_r1, "ADPCM Electronic Arts R1");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R2, sample_fmts_s16p, adpcm_ea_r2, "ADPCM Electronic Arts R2");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R3, sample_fmts_s16p, adpcm_ea_r3, "ADPCM Electronic Arts R3");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_XAS, sample_fmts_s16p, adpcm_ea_xas, "ADPCM Electronic Arts XAS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_AMV, sample_fmts_s16, adpcm_ima_amv, "ADPCM IMA AMV");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APC, sample_fmts_s16, adpcm_ima_apc, "ADPCM IMA CRYO APC");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APM, sample_fmts_s16, adpcm_ima_apm, "ADPCM IMA Ubisoft APM");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_CUNNING, sample_fmts_s16p, adpcm_ima_cunning, "ADPCM IMA Cunning Developments");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DAT4, sample_fmts_s16, adpcm_ima_dat4, "ADPCM IMA Eurocom DAT4");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK3, sample_fmts_s16, adpcm_ima_dk3, "ADPCM IMA Duck DK3");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK4, sample_fmts_s16, adpcm_ima_dk4, "ADPCM IMA Duck DK4");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_EACS, sample_fmts_s16, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_SEAD, sample_fmts_s16, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ISS, sample_fmts_s16, adpcm_ima_iss, "ADPCM IMA Funcom ISS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_MOFLEX, sample_fmts_s16p, adpcm_ima_moflex, "ADPCM IMA MobiClip MOFLEX");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_MTF, sample_fmts_s16, adpcm_ima_mtf, "ADPCM IMA Capcom's MT Framework");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_OKI, sample_fmts_s16, adpcm_ima_oki, "ADPCM IMA Dialogic OKI");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_QT, sample_fmts_s16p, adpcm_ima_qt, "ADPCM IMA QuickTime");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_RAD, sample_fmts_s16, adpcm_ima_rad, "ADPCM IMA Radical");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SSI, sample_fmts_s16, adpcm_ima_ssi, "ADPCM IMA Simon & Schuster Interactive");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SMJPEG, sample_fmts_s16, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ALP, sample_fmts_s16, adpcm_ima_alp, "ADPCM IMA High Voltage Software ALP");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WS, sample_fmts_both, adpcm_ima_ws, "ADPCM IMA Westwood");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_MS, sample_fmts_both, adpcm_ms, "ADPCM Microsoft");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_PSX, sample_fmts_s16p, adpcm_psx, "ADPCM Playstation");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_2, sample_fmts_s16, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_3, sample_fmts_s16, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_4, sample_fmts_s16, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SWF, sample_fmts_s16, adpcm_swf, "ADPCM Shockwave Flash");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP_LE, sample_fmts_s16p, adpcm_thp_le, "ADPCM Nintendo THP (little-endian)");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP, sample_fmts_s16p, adpcm_thp, "ADPCM Nintendo THP");
int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
Definition: adpcm.c:696
static const int8_t swf_index_tables[4][16]
Definition: adpcm.c:82
static enum AVSampleFormat sample_fmts_s16[]
Definition: adpcm.c:2148
static int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
Definition: adpcm.c:278
static int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:507
static int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:431
static int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:327
static int adpcm_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
Definition: adpcm.c:946
#define THP_GET16(g)
#define DK3_GET_NEXT_NIBBLE()
static int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb, int bps)
Definition: adpcm.c:363
static int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
Definition: adpcm.c:473
static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
Definition: adpcm.c:639
static const int8_t xa_adpcm_table[5][2]
Definition: adpcm.c:65
static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb, int buf_size, int *coded_samples, int *approx_nb_samples)
Get the number of samples (per channel) that will be decoded from the packet.
Definition: adpcm.c:724
static int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:234
static int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
Definition: adpcm.c:304
static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1, const uint8_t *in, ADPCMChannelStatus *left, ADPCMChannelStatus *right, int channels, int sample_offset)
Definition: adpcm.c:551
static const int16_t ea_adpcm_table[]
Definition: adpcm.c:73
static int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:516
static enum AVSampleFormat sample_fmts_s16p[]
Definition: adpcm.c:2150
static int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:493
static const int8_t zork_index_table[8]
Definition: adpcm.c:89
static int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:386
static int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:343
static int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:412
static const int8_t mtf_index_table[16]
Definition: adpcm.c:93
static int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:452
static av_cold int adpcm_decode_init(AVCodecContext *avctx)
Definition: adpcm.c:106
static void adpcm_flush(AVCodecContext *avctx)
Definition: adpcm.c:2111
static enum AVSampleFormat sample_fmts_both[]
Definition: adpcm.c:2152
#define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_)
Definition: adpcm.c:2156
ADPCM encoder/decoder common header.
const int8_t ff_adpcm_AdaptCoeff2[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:95
const int8_t ff_adpcm_index_table[16]
Definition: adpcm_data.c:40
const int16_t ff_adpcm_mtaf_stepsize[32][16]
Definition: adpcm_data.c:114
const int8_t ff_adpcm_ima_cunning_index_table[9]
Definition: adpcm_data.c:187
const int8_t ff_adpcm_yamaha_difflookup[]
Definition: adpcm_data.c:104
const int16_t ff_adpcm_step_table[89]
This is the step table.
Definition: adpcm_data.c:61
const int16_t ff_adpcm_ima_cunning_step_table[61]
Definition: adpcm_data.c:197
const uint8_t ff_adpcm_AdaptCoeff1[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:90
const int16_t ff_adpcm_yamaha_indexscale[]
Definition: adpcm_data.c:99
const uint16_t ff_adpcm_afc_coeffs[2][16]
Definition: adpcm_data.c:109
const int16_t ff_adpcm_AdaptationTable[]
Definition: adpcm_data.c:84
const int8_t *const ff_adpcm_index_tables[4]
Definition: adpcm_data.c:50
const int16_t ff_adpcm_oki_step_table[49]
Definition: adpcm_data.c:73
ADPCM tables.
static const uint8_t ff_adpcm_ima_block_sizes[4]
Definition: adpcm_data.h:31
static const uint8_t ff_adpcm_ima_block_samples[4]
Definition: adpcm_data.h:32
static double val(void *priv, double ch)
Definition: aeval.c:76
static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples, int is_silence, int current_sample, int64_t nb_samples_notify, AVRational time_base)
channels
Definition: aptx.h:33
#define av_cold
Definition: attributes.h:88
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! 
*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
uint8_t
int32_t
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
Libavcodec external API header.
#define AV_RL16
Definition: intreadwrite.h:42
#define AV_RL32
Definition: intreadwrite.h:146
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:174
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
#define flag(name)
Definition: cbs_av1.c:564
#define s(width, name)
Definition: cbs_vp9.c:257
static av_always_inline void filter(int16_t *output, ptrdiff_t out_stride, const int16_t *low, ptrdiff_t low_stride, const int16_t *high, ptrdiff_t high_stride, int len, int clip)
Definition: cfhddsp.c:27
#define av_clip_intp2
Definition: common.h:143
#define FFMIN(a, b)
Definition: common.h:105
#define av_mod_uintp2
Definition: common.h:149
#define av_clip
Definition: common.h:122
#define av_clip_int16
Definition: common.h:137
#define FFMAX(a, b)
Definition: common.h:103
#define av_clip_uintp2
Definition: common.h:146
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
#define NULL
Definition: coverity.c:32
long long int64_t
Definition: coverity.c:34
#define abs(x)
Definition: cuda_runtime.h:35
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1900
static AVFrame * frame
static float add(float src0, float src1)
channel
Use these values when setting the channel map with ebur128_set_channel().
Definition: ebur128.h:39
int8_t exp
Definition: eval.c:74
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:163
int
#define sample
bitstream reader API header.
static unsigned int get_bits_le(GetBitContext *s, int n)
Definition: get_bits.h:420
static int get_sbits(GetBitContext *s, int n)
Definition: get_bits.h:359
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
@ AV_CODEC_ID_ADPCM_SWF
Definition: codec_id.h:366
@ AV_CODEC_ID_ADPCM_CT
Definition: codec_id.h:365
@ AV_CODEC_ID_ADPCM_IMA_WS
Definition: codec_id.h:357
@ AV_CODEC_ID_ADPCM_EA_R1
Definition: codec_id.h:373
@ AV_CODEC_ID_ADPCM_4XM
Definition: codec_id.h:360
@ AV_CODEC_ID_ADPCM_IMA_OKI
Definition: codec_id.h:386
@ AV_CODEC_ID_ADPCM_IMA_EA_EACS
Definition: codec_id.h:377
@ AV_CODEC_ID_ADPCM_SBPRO_2
Definition: codec_id.h:370
@ AV_CODEC_ID_ADPCM_DTK
Definition: codec_id.h:387
@ AV_CODEC_ID_ADPCM_IMA_EA_SEAD
Definition: codec_id.h:376
@ AV_CODEC_ID_ADPCM_PSX
Definition: codec_id.h:391
@ AV_CODEC_ID_ADPCM_XA
Definition: codec_id.h:361
@ AV_CODEC_ID_ADPCM_YAMAHA
Definition: codec_id.h:367
@ AV_CODEC_ID_ADPCM_SBPRO_3
Definition: codec_id.h:369
@ AV_CODEC_ID_ADPCM_EA_R2
Definition: codec_id.h:375
@ AV_CODEC_ID_ADPCM_IMA_ISS
Definition: codec_id.h:380
@ AV_CODEC_ID_ADPCM_MS
Definition: codec_id.h:359
@ AV_CODEC_ID_ADPCM_ZORK
Definition: codec_id.h:398
@ AV_CODEC_ID_ADPCM_SBPRO_4
Definition: codec_id.h:368
@ AV_CODEC_ID_ADPCM_EA_MAXIS_XA
Definition: codec_id.h:379
@ AV_CODEC_ID_ADPCM_ARGO
Definition: codec_id.h:396
@ AV_CODEC_ID_ADPCM_IMA_APC
Definition: codec_id.h:382
@ AV_CODEC_ID_ADPCM_IMA_AMV
Definition: codec_id.h:372
@ AV_CODEC_ID_ADPCM_EA_XAS
Definition: codec_id.h:378
@ AV_CODEC_ID_ADPCM_AGM
Definition: codec_id.h:395
@ AV_CODEC_ID_ADPCM_IMA_CUNNING
Definition: codec_id.h:402
@ AV_CODEC_ID_ADPCM_IMA_DK4
Definition: codec_id.h:356
@ AV_CODEC_ID_ADPCM_IMA_DK3
Definition: codec_id.h:355
@ AV_CODEC_ID_ADPCM_IMA_DAT4
Definition: codec_id.h:393
@ AV_CODEC_ID_ADPCM_IMA_QT
Definition: codec_id.h:353
@ AV_CODEC_ID_ADPCM_EA
Definition: codec_id.h:363
@ AV_CODEC_ID_ADPCM_IMA_SMJPEG
Definition: codec_id.h:358
@ AV_CODEC_ID_ADPCM_MTAF
Definition: codec_id.h:394
@ AV_CODEC_ID_ADPCM_AICA
Definition: codec_id.h:392
@ AV_CODEC_ID_ADPCM_IMA_MTF
Definition: codec_id.h:401
@ AV_CODEC_ID_ADPCM_IMA_APM
Definition: codec_id.h:399
@ AV_CODEC_ID_ADPCM_THP
Definition: codec_id.h:371
@ AV_CODEC_ID_ADPCM_AFC
Definition: codec_id.h:385
@ AV_CODEC_ID_ADPCM_IMA_WAV
Definition: codec_id.h:354
@ AV_CODEC_ID_ADPCM_THP_LE
Definition: codec_id.h:390
@ AV_CODEC_ID_ADPCM_IMA_ALP
Definition: codec_id.h:400
@ AV_CODEC_ID_ADPCM_EA_R3
Definition: codec_id.h:374
@ AV_CODEC_ID_ADPCM_IMA_RAD
Definition: codec_id.h:388
@ AV_CODEC_ID_ADPCM_IMA_SSI
Definition: codec_id.h:397
@ AV_CODEC_ID_ADPCM_IMA_MOFLEX
Definition: codec_id.h:403
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding.
Definition: avcodec.h:215
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
#define AVERROR(e)
Definition: error.h:43
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
@ AV_SAMPLE_FMT_S16P
signed 16 bits, planar
Definition: samplefmt.h:67
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:59
@ AV_SAMPLE_FMT_S16
signed 16 bits
Definition: samplefmt.h:61
int index
Definition: gxfenc.c:89
for(j=16;j >0;--j)
static const int offsets[]
Definition: hevc_pel.c:34
int i
Definition: input.c:407
common internal API header
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:130
unsigned bps
Definition: movenc.c:1612
const char data[16]
Definition: mxf.c:142
static const uint16_t table[]
Definition: prosumer.c:206
#define tb
Definition: regdef.h:68
static const uint8_t header[24]
Definition: sdr2.c:67
#define FF_ARRAY_ELEMS(a)
static const float pred[4]
Definition: siprdata.h:259
static int shift(int a, int b)
Definition: sonic.c:82
int16_t step_index
Definition: adpcm.h:33
int vqa_version
VQA version.
Definition: adpcm.c:102
ADPCMChannelStatus status[14]
Definition: adpcm.c:101
int has_status
Status flag.
Definition: adpcm.c:103
main external API structure.
Definition: avcodec.h:536
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1204
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:1744
const struct AVCodec * codec
Definition: avcodec.h:545
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:637
int channels
number of audio channels
Definition: avcodec.h:1197
enum AVCodecID codec_id
Definition: avcodec.h:546
int extradata_size
Definition: avcodec.h:638
int block_align
number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs.
Definition: avcodec.h:1233
void * priv_data
Definition: avcodec.h:563
enum AVCodecID id
Definition: codec.h:211
This structure describes decoded (raw) audio or video data.
Definition: frame.h:318
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:384
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:332
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:365
This structure stores compressed data.
Definition: packet.h:346
int size
Definition: packet.h:370
uint8_t * data
Definition: packet.h:369
uint8_t level
Definition: svq3.c:206
#define avpriv_request_sample(...)
#define av_log(a,...)
static int16_t block[64]
Definition: dct.c:116
int size
const char * g
Definition: vf_curves.c:117
else temp
Definition: vf_mcdeint.c:259
static const double coeff[2][5]
Definition: vf_owdenoise.c:73
static av_always_inline int diff(const uint32_t a, const uint32_t b)
float delta
static double c[64]