85 { -1, -1, -1, -1, 2, 4, 6, 8 },
86 { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
90 -1, -1, -1, 1, 4, 7, 10, 12,
94 8, 6, 4, 2, -1, -1, -1, -1,
95 -1, -1, -1, -1, 2, 4, 6, 8,
109 unsigned int min_channels = 1;
110 unsigned int max_channels = 2;
154 c->status[0].step = c->status[1].step = 511;
245 if ((nibble & 8) == 0)
255 c->step = av_clip(c->step * 2, 127, 24576);
286 step_index = av_clip(step_index, 0, 88);
299 c->step_index = step_index;
301 return (int16_t)c->predictor;
312 step_index = av_clip(step_index, 0, 88);
322 c->step_index = step_index;
324 return (int16_t)c->predictor;
333 delta = step * (2 * nibble - 15);
338 c->step_index = av_clip(step_index, 0, 88);
340 return (int16_t)c->predictor;
353 step_index = av_clip(step_index, 0, 60);
358 c->step_index = step_index;
371 step_index = av_clip(step_index, 0, 88);
373 sign = nibble & (1 << shift);
381 c->step_index = step_index;
383 return (int16_t)c->predictor;
394 step_index = av_clip(step_index, 0, 88);
397 if (nibble & 4) diff += step;
398 if (nibble & 2) diff += step >> 1;
399 if (nibble & 1) diff += step >> 2;
407 c->step_index = step_index;
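The expansion fragments above all follow the stock IMA recipe: bits 0-2 of the nibble accumulate fractions of the current step, bit 3 carries the sign, and the nibble also indexes the shared index table to move step_index, clamped to 0..88. A minimal sketch of that common pattern (assuming only the shared tables declared in adpcm_data.h, and ignoring the per-variant rounding and clipping differences found in this file):

/* Sketch only: generic IMA ADPCM nibble expansion, not any specific
 * variant from this file; uses the shared tables from adpcm_data.h. */
#include "libavutil/common.h"   /* av_clip(), av_clip_int16() */
#include "adpcm_data.h"         /* ff_adpcm_step_table, ff_adpcm_index_table */

static int16_t ima_expand_sketch(int *predictor, int *step_index, int nibble)
{
    int step = ff_adpcm_step_table[*step_index];
    int diff = step >> 3;                       /* rounding term */

    if (nibble & 4) diff += step;
    if (nibble & 2) diff += step >> 1;
    if (nibble & 1) diff += step >> 2;

    if (nibble & 8)                             /* bit 3 is the sign */
        *predictor = av_clip_int16(*predictor - diff);
    else
        *predictor = av_clip_int16(*predictor + diff);

    *step_index = av_clip(*step_index + ff_adpcm_index_table[nibble], 0, 88);
    return *predictor;
}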
416 predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
417 predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
419 c->sample2 = c->sample1;
422 if (c->idelta < 16) c->idelta = 16;
423 if (c->idelta > INT_MAX/768) {
425 c->idelta = INT_MAX/768;
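The / 64 in the predictor above works because ff_adpcm_AdaptCoeff1/2 are stored pre-divided by 4; with the full-range coefficients from the MS ADPCM specification the same step divides by 256. A hedged, self-contained sketch of one Microsoft ADPCM expansion step (tables are the canonical spec values, not this file's scaled copies):

/* Sketch: one canonical MS ADPCM step with full-range coefficients.
 * FFmpeg keeps the coefficients divided by 4, hence the "/ 64" in the
 * listing where this sketch uses "/ 256". */
#include <stdint.h>

static const int ms_coeff1[7] = { 256, 512, 0, 192, 240, 460, 392 };
static const int ms_coeff2[7] = { 0, -256, 0, 64, 0, -208, -232 };
static const int ms_adapt[16] = {
    230, 230, 230, 230, 307, 409, 512, 614,
    768, 614, 512, 409, 307, 230, 230, 230
};

static int16_t ms_expand_sketch(int *sample1, int *sample2, int *idelta,
                                int block_predictor, int nibble)
{
    int predictor = (*sample1 * ms_coeff1[block_predictor] +
                     *sample2 * ms_coeff2[block_predictor]) / 256;

    /* the nibble is a signed 4-bit value scaled by the current delta */
    predictor += ((nibble & 8) ? nibble - 16 : nibble) * *idelta;
    if (predictor >  32767) predictor =  32767;
    if (predictor < -32768) predictor = -32768;

    *sample2 = *sample1;
    *sample1 = predictor;

    *idelta = (ms_adapt[nibble] * *idelta) / 256;
    if (*idelta < 16)
        *idelta = 16;

    return (int16_t)predictor;
}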
437 step_index = av_clip(step_index, 0, 48);
447 c->step_index = step_index;
449 return c->predictor * 16;
464 c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
468 c->step = av_clip(new_step, 511, 32767);
470 return (int16_t)c->predictor;
477 sign = nibble & (1<<(size-1));
487 else if (delta == 0 && c->step > 0)
490 return (int16_t)c->predictor;
518 int16_t index = c->step_index;
525 sample += lookup_sample >> 1;
527 sample += lookup_sample >> 2;
529 sample += lookup_sample >> 3;
531 sample += lookup_sample >> 4;
533 sample += lookup_sample >> 5;
535 sample += lookup_sample >> 6;
560 out0 += sample_offset;
564 out1 += sample_offset;
587 s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
618 s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
644 int k0, signmask, nb_bits, count;
645 int size = buf_size*8;
653 k0 = 1 << (nb_bits-2);
654 signmask = 1 << (nb_bits-1);
658 *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
680 if (delta & signmask)
681 c->status[i].predictor -= vpdiff;
683 c->status[i].predictor += vpdiff;
687 c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
690 *samples++ = c->status[i].predictor;
725 int buf_size, int *coded_samples, int *approx_nb_samples)
730 int has_coded_samples = 0;
734 *approx_nb_samples = 0;
742 if (buf_size < 76 * ch)
747 if (buf_size < 34 * ch)
764 nb_samples = buf_size * 2 / ch;
781 return (buf_size - header_size) * 2 / ch;
787 has_coded_samples = 1;
788 *coded_samples = bytestream2_get_le32u(gb);
789 nb_samples = FFMIN((buf_size - 8) * 2, *coded_samples);
793 has_coded_samples = 1;
794 *coded_samples = bytestream2_get_le32(gb);
795 *coded_samples -= *coded_samples % 28;
796 nb_samples = (buf_size - 12) / 30 * 28;
799 has_coded_samples = 1;
800 *coded_samples = bytestream2_get_le32(gb);
801 nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch;
804 nb_samples = (buf_size - ch) / ch * 2;
811 has_coded_samples = 1;
814 header_size = 4 + 9 * ch;
815 *coded_samples = bytestream2_get_le32(gb);
818 header_size = 4 + 5 * ch;
819 *coded_samples = bytestream2_get_le32(gb);
822 header_size = 4 + 5 * ch;
823 *coded_samples = bytestream2_get_be32(gb);
826 *coded_samples -= *coded_samples % 28;
827 nb_samples = (buf_size - header_size) * 2 / ch;
828 nb_samples -= nb_samples % 28;
829 *approx_nb_samples = 1;
834 nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
839 if (buf_size < 4 * ch)
841 nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
846 nb_samples = (buf_size - 4 * ch) * 2 / ch;
854 if (buf_size < 4 * ch)
856 nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
862 nb_samples = (buf_size - 6 * ch) * 2 / ch;
867 nb_samples = (buf_size - 16 * (ch / 2)) * 2 / ch;
873 int samples_per_byte;
879 if (!s->status[0].step_index) {
885 nb_samples += buf_size * samples_per_byte / ch;
890 int buf_bits = buf_size * 8 - 2;
891 int nbits = (bytestream2_get_byte(gb) >> 6) + 2;
892 int block_hdr_size = 22 * ch;
893 int block_size = block_hdr_size + nbits * ch * 4095;
894 int nblocks = buf_bits / block_size;
895 int bits_left = buf_bits - nblocks * block_size;
896 nb_samples = nblocks * 4096;
897 if (bits_left >= block_hdr_size)
898 nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
904 nb_samples = buf_size * 14 / (8 * ch);
907 has_coded_samples = 1;
910 bytestream2_get_le32(gb) :
911 bytestream2_get_be32(gb);
912 buf_size -= 8 + 36 * ch;
914 nb_samples = buf_size / 8 * 14;
915 if (buf_size % 8 > 1)
916 nb_samples += (buf_size % 8 - 1) * 2;
917 *approx_nb_samples = 1;
920 nb_samples = buf_size / (9 * ch) * 16;
923 nb_samples = (buf_size / 128) * 224 / ch;
927 nb_samples = buf_size / (16 * ch) * 28;
933 nb_samples = buf_size / ch;
938 if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
945 int *got_frame_ptr, AVPacket *avpkt)
949 int buf_size = avpkt->size;
957 int nb_samples, coded_samples, approx_nb_samples, ret;
961 nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
962 if (nb_samples <= 0) {
977 if (!approx_nb_samples && coded_samples != nb_samples)
1019 for (m = 0; m < 64; m += 2) {
1020 int byte = bytestream2_get_byteu(&gb);
1028 cs = &(c->status[i]);
1045 for (n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
1050 samples = &samples_p[i][1 + n * samples_per_block];
1051 for (j = 0; j < block_size; j++) {
1053 (j % 4) + (j / 4) * (avctx->channels * 4) + i * 4];
1058 for (m = 0; m < samples_per_block; m++) {
1066 for (n = 0; n < (nb_samples - 1) / 8; n++) {
1069 samples = &samples_p[i][1 + n * 8];
1070 for (m = 0; m < 8; m += 2) {
1071 int v = bytestream2_get_byteu(&gb);
1081 c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1084 c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1085 if (c->status[i].step_index > 88u) {
1087 i, c->status[i].step_index);
1095 for (n = nb_samples >> 1; n > 0; n--) {
1096 int v = bytestream2_get_byteu(&gb);
1104 c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1106 c->status[i].step = sign_extend(bytestream2_get_le16u(&gb), 16);
1108 for (n = 0; n < nb_samples >> (1 - st); n++) {
1109 int v = bytestream2_get_byteu(&gb);
1116 int block_predictor;
1121 block_predictor = bytestream2_get_byteu(&gb);
1122 if (block_predictor > 6) {
1132 *samples++ = c->status[channel].sample2;
1133 *samples++ = c->status[channel].sample1;
1134 for(n = (nb_samples - 2) >> 1; n > 0; n--) {
1135 int byte = bytestream2_get_byteu(&gb);
1141 block_predictor = bytestream2_get_byteu(&gb);
1142 if (block_predictor > 6) {
1150 block_predictor = bytestream2_get_byteu(&gb);
1151 if (block_predictor > 6) {
1159 c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1161 c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1164 c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1165 if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1166 c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1167 if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1169 *samples++ = c->status[0].sample2;
1170 if (st) *samples++ = c->status[1].sample2;
1171 *samples++ = c->status[0].sample1;
1172 if (st) *samples++ = c->status[1].sample1;
1173 for(n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
1174 int byte = bytestream2_get_byteu(&gb);
1184 c->status[channel ].step = bytestream2_get_le16u(&gb) & 0x1f;
1185 c->status[channel + 1].step = bytestream2_get_le16u(&gb) & 0x1f;
1190 for (n = 0; n < nb_samples; n+=2) {
1191 int v = bytestream2_get_byteu(&gb);
1195 for (n = 0; n < nb_samples; n+=2) {
1196 int v = bytestream2_get_byteu(&gb);
1213 for (n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
1214 int v = bytestream2_get_byteu(&gb);
1223 int decode_top_nibble_next = 0;
1225 const int16_t *samples_end = samples + avctx->channels * nb_samples;
1228 c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1229 c->status[1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1230 c->status[0].step_index = bytestream2_get_byteu(&gb);
1231 c->status[1].step_index = bytestream2_get_byteu(&gb);
1232 if (c->status[0].step_index > 88u || c->status[1].step_index > 88u){
1234 c->status[0].step_index, c->status[1].step_index);
1238 diff_channel = c->status[1].predictor;
1241 #define DK3_GET_NEXT_NIBBLE() \
1242 if (decode_top_nibble_next) { \
1243 nibble = last_byte >> 4; \
1244 decode_top_nibble_next = 0; \
1246 last_byte = bytestream2_get_byteu(&gb); \
1247 nibble = last_byte & 0x0F; \
1248 decode_top_nibble_next = 1; \
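DK3_GET_NEXT_NIBBLE() alternates between the two halves of the most recently read byte, consuming the low nibble first and fetching a new byte once both halves have been used. A stand-alone equivalent (the reader struct here is hypothetical, for illustration only; the real macro works on the packet's GetByteContext and also checks for end of buffer):

/* Sketch of the alternating nibble fetch performed by DK3_GET_NEXT_NIBBLE():
 * low nibble first, then the high nibble of the same byte. */
#include <stdint.h>

struct nibble_reader {
    const uint8_t *buf;       /* packet payload */
    int            pos;       /* index of the next byte to read */
    int            last_byte;
    int            top_next;  /* nonzero: high nibble of last_byte is pending */
};

static int get_next_nibble(struct nibble_reader *r)
{
    if (r->top_next) {
        r->top_next = 0;
        return r->last_byte >> 4;
    }
    r->last_byte = r->buf[r->pos++];
    r->top_next  = 1;
    return r->last_byte & 0x0F;
}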
1251 while (samples < samples_end) {
1265 diff_channel = (diff_channel + c->status[1].predictor) / 2;
1266 *samples++ = c->status[0].predictor + c->status[1].predictor;
1267 *samples++ = c->status[0].predictor - c->status[1].predictor;
1274 diff_channel = (diff_channel + c->status[1].predictor) / 2;
1275 *samples++ = c->status[0].predictor + c->status[1].predictor;
1276 *samples++ = c->status[0].predictor - c->status[1].predictor;
1295 for (n = nb_samples >> (1 - st); n > 0; n--) {
1297 int v = bytestream2_get_byteu(&gb);
1322 for (int subframe = 0; subframe < nb_samples / 256; subframe++) {
1324 samples = samples_p[channel] + 256 * subframe;
1325 for (n = 0; n < 256; n += 2) {
1326 int v = bytestream2_get_byteu(&gb);
1338 for (n = 0; n < nb_samples; n += 2) {
1339 int v = bytestream2_get_byteu(&gb);
1346 for (n = nb_samples >> (1 - st); n > 0; n--) {
1347 int v = bytestream2_get_byteu(&gb);
1353 for (n = nb_samples >> (1 - st); n > 0; n--) {
1354 int v = bytestream2_get_byteu(&gb);
1360 for (n = nb_samples / 2; n > 0; n--) {
1362 int v = bytestream2_get_byteu(&gb);
1370 for (n = nb_samples / 2; n > 0; n--) {
1372 int v = bytestream2_get_byteu(&gb);
1381 int16_t *smp = samples_p[channel];
1382 for (n = 0; n < nb_samples / 2; n++) {
1383 int v = bytestream2_get_byteu(&gb);
1390 for (n = nb_samples >> (1 - st); n > 0; n--) {
1391 int v = bytestream2_get_byteu(&gb);
1407 for (n = 0; n < nb_samples / 2; n++) {
1410 byte[0] = bytestream2_get_byteu(&gb);
1412 byte[1] = bytestream2_get_byteu(&gb);
1422 if (c->vqa_version == 3) {
1424 int16_t *smp = samples_p[channel];
1426 for (n = nb_samples / 2; n > 0; n--) {
1427 int v = bytestream2_get_byteu(&gb);
1433 for (n = nb_samples / 2; n > 0; n--) {
1435 int v = bytestream2_get_byteu(&gb);
1446 int16_t *out0 = samples_p[0];
1447 int16_t *out1 = samples_p[1];
1448 int samples_per_block = 28 * (3 - avctx->channels) * 4;
1449 int sample_offset = 0;
1450 int bytes_remaining;
1453 &c->status[0], &c->status[1],
1454 avctx->channels, sample_offset)) < 0)
1457 sample_offset += samples_per_block;
1462 if (bytes_remaining > 0) {
1468 for (i=0; i<=st; i++) {
1469 c->status[i].step_index = bytestream2_get_le32u(&gb);
1470 if (c->status[i].step_index > 88u) {
1472 i, c->status[i].step_index);
1476 for (i=0; i<=st; i++) {
1477 c->status[i].predictor = bytestream2_get_le32u(&gb);
1478 if (FFABS((int64_t)c->status[i].predictor) > (1<<16))
1482 for (n = nb_samples >> (1 - st); n > 0; n--) {
1483 int byte = bytestream2_get_byteu(&gb);
1489 for (n = nb_samples >> (1 - st); n > 0; n--) {
1490 int byte = bytestream2_get_byteu(&gb);
1497 int previous_left_sample, previous_right_sample;
1498 int current_left_sample, current_right_sample;
1499 int next_left_sample, next_right_sample;
1500 int coeff1l, coeff2l, coeff1r, coeff2r;
1501 int shift_left, shift_right;
1509 current_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1510 previous_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1511 current_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1512 previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1514 for (count1 = 0; count1 < nb_samples / 28; count1++) {
1515 int byte = bytestream2_get_byteu(&gb);
1521 byte = bytestream2_get_byteu(&gb);
1522 shift_left = 20 - (byte >> 4);
1523 shift_right = 20 - (byte & 0x0F);
1525 for (count2 = 0; count2 < 28; count2++) {
1526 byte = bytestream2_get_byteu(&gb);
1527 next_left_sample = sign_extend(byte >> 4, 4) * (1 << shift_left);
1528 next_right_sample = sign_extend(byte, 4) * (1 << shift_right);
1530 next_left_sample = (next_left_sample +
1531 (current_left_sample * coeff1l) +
1532 (previous_left_sample * coeff2l) + 0x80) >> 8;
1533 next_right_sample = (next_right_sample +
1534 (current_right_sample * coeff1r) +
1535 (previous_right_sample * coeff2r) + 0x80) >> 8;
1537 previous_left_sample = current_left_sample;
1539 previous_right_sample = current_right_sample;
1541 *samples++ = current_left_sample;
1542 *samples++ = current_right_sample;
1555 int byte = bytestream2_get_byteu(&gb);
1560 for (count1 = 0; count1 < nb_samples / 2; count1++) {
1563 byte[0] = bytestream2_get_byteu(&gb);
1564 if (st) byte[1] = bytestream2_get_byteu(&gb);
1565 for(i = 4; i >= 0; i-=4) {
1573 *samples++ = c->status[channel].sample1;
1588 int previous_sample, current_sample, next_sample;
1598 bytestream2_get_le32(&gb)) +
1603 samplesC = samples_p[channel];
1606 current_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1607 previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1609 current_sample = c->status[channel].predictor;
1610 previous_sample = c->status[channel].prev_sample;
1613 for (count1 = 0; count1 < nb_samples / 28; count1++) {
1614 int byte = bytestream2_get_byte(&gb);
1616 current_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1617 previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1619 for (count2=0; count2<28; count2++)
1620 *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);
1624 shift = 20 - (byte & 0x0F);
1626 for (count2=0; count2<28; count2++) {
1630 byte = bytestream2_get_byte(&gb);
1634 next_sample += (current_sample * coeff1) +
1635 (previous_sample * coeff2);
1638 previous_sample = current_sample;
1639 current_sample = next_sample;
1640 *samplesC++ = current_sample;
1646 } else if (count != count1) {
1648 count = FFMAX(count, count1);
1652 c->status[channel].predictor = current_sample;
1653 c->status[channel].prev_sample = previous_sample;
1665 for (n = 0; n < 4; n++, s += 32) {
1676 for (m=2; m<32; m+=2) {
1678 for (n = 0; n < 4; n++, s += 32) {
1680 int byte = bytestream2_get_byteu(&gb);
1706 c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1707 c->status[0].step_index = bytestream2_get_byteu(&gb);
1709 if (c->status[0].step_index > 88u) {
1711 c->status[0].step_index);
1715 for (n = nb_samples >> 1; n > 0; n--) {
1716 int v = bytestream2_get_byteu(&gb);
1722 if (nb_samples & 1) {
1723 int v = bytestream2_get_byteu(&gb);
1735 c->status[i].predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1736 c->status[i].step_index = bytestream2_get_byteu(&gb);
1738 if (c->status[i].step_index > 88u) {
1740 c->status[i].step_index);
1745 for (n = nb_samples >> (1 - st); n > 0; n--) {
1746 int v = bytestream2_get_byteu(&gb);
1753 for (n = nb_samples >> (1 - st); n > 0; n--) {
1754 int v = bytestream2_get_byteu(&gb);
1762 if (!c->status[0].step_index) {
1764 *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1766 *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1767 c->status[0].step_index = 1;
1771 for (n = nb_samples >> (1 - st); n > 0; n--) {
1772 int byte = bytestream2_get_byteu(&gb);
1779 for (n = (nb_samples<<st) / 3; n > 0; n--) {
1780 int byte = bytestream2_get_byteu(&gb);
1784 (byte >> 2) & 0x07, 3, 0);
1789 for (n = nb_samples >> (2 - st); n > 0; n--) {
1790 int byte = bytestream2_get_byteu(&gb);
1794 (byte >> 4) & 0x03, 2, 2);
1796 (byte >> 2) & 0x03, 2, 2);
1807 for (n = nb_samples >> (1 - st); n > 0; n--) {
1808 int v = bytestream2_get_byteu(&gb);
1816 for (n = nb_samples >> 1; n > 0; n--) {
1817 int v = bytestream2_get_byteu(&gb);
1825 int samples_per_block;
1829 samples_per_block = avctx->extradata[0] / 16;
1830 blocks = nb_samples / avctx->extradata[0];
1832 samples_per_block = nb_samples / 16;
1836 for (m = 0; m < blocks; m++) {
1838 int prev1 = c->status[channel].sample1;
1839 int prev2 = c->status[channel].sample2;
1841 samples = samples_p[channel] + m * 16;
1843 for (i = 0; i < samples_per_block; i++) {
1844 int byte = bytestream2_get_byteu(&gb);
1845 int scale = 1 << (byte >> 4);
1846 int index = byte & 0xf;
1851 for (n = 0; n < 16; n++) {
1857 byte = bytestream2_get_byteu(&gb);
1861 sampledat = ((prev1 * factor1 + prev2 * factor2) >> 11) +
1869 c->status[channel].sample1 = prev1;
1870 c->status[channel].sample2 = prev2;
1882 #define THP_GET16(g) \
1884 avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE ? \
1885 bytestream2_get_le16u(&(g)) : \
1886 bytestream2_get_be16u(&(g)), 16)
1897 for (n = 0; n < 16; n++)
1901 for (n = 0; n < 16; n++)
1904 if (!c->has_status) {
1916 for (ch = 0; ch < avctx->channels; ch++) {
1917 samples = samples_p[ch];
1920 for (i = 0; i < (nb_samples + 13) / 14; i++) {
1921 int byte = bytestream2_get_byteu(&gb);
1922 int index = (byte >> 4) & 7;
1923 unsigned int exp = byte & 0x0F;
1928 for (n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
1934 byte = bytestream2_get_byteu(&gb);
1938 sampledat = ((c->status[ch].sample1 * factor1
1939 + c->status[ch].sample2 * factor2) >> 11) + sampledat * (1 << exp);
1941 c->status[ch].sample2 = c->status[ch].sample1;
1942 c->status[ch].sample1 = *samples++;
1953 for (i = 0; i < nb_samples / 28; i++) {
1957 header = bytestream2_get_byteu(&gb);
1961 for (n = 0; n < 28; n++) {
1966 prev = (c->status[channel].sample1 * 0x3c);
1969 prev = (c->status[channel].sample1 * 0x73) - (c->status[channel].sample2 * 0x34);
1972 prev = (c->status[channel].sample1 * 0x62) - (c->status[channel].sample2 * 0x37);
1980 byte = bytestream2_get_byteu(&gb);
1986 sampledat = ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
1989 c->status[channel].sample1 = sampledat;
2000 samples = samples_p[channel] + block * nb_samples_per_block;
2004 for (i = 0; i < nb_samples_per_block / 28; i++) {
2007 filter = bytestream2_get_byteu(&gb);
2012 flag = bytestream2_get_byteu(&gb);
2015 for (n = 0; n < 28; n++) {
2022 byte = bytestream2_get_byteu(&gb);
2026 scale = scale * (1 << 12);
2063 control = bytestream2_get_byteu(&gb);
2064 shift = (control >> 4) + 2;
2066 for (n = 0; n < 16; n++) {
2067 int sample = bytestream2_get_byteu(&gb);
2075 for (n = 0; n < nb_samples * avctx->channels; n++) {
2076 int v = bytestream2_get_byteu(&gb);
2081 for (n = nb_samples / 2; n > 0; n--) {
2083 int v = bytestream2_get_byteu(&gb);
2154 #define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_) \
2155 AVCodec ff_ ## name_ ## _decoder = { \
2157 .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
2158 .type = AVMEDIA_TYPE_AUDIO, \
2160 .priv_data_size = sizeof(ADPCMDecodeContext), \
2161 .init = adpcm_decode_init, \
2162 .decode = adpcm_decode_frame, \
2163 .flush = adpcm_flush, \
2164 .capabilities = AV_CODEC_CAP_DR1, \
2165 .sample_fmts = sample_fmts_, \
2166 .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE, \
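The registration macro above is what turns each expand/decode routine into an exported AVCodec (ff_<name>_decoder). The listing stops before the instantiation list, but each supported variant expands it roughly along these lines (illustrative sketch, not the exact lines from adpcm.c):

/* Illustrative instantiations of ADPCM_DECODER(); the real file contains
 * one such line per supported ADPCM variant, pairing a codec ID with the
 * sample-format list it needs (interleaved, planar, or both). */
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_QT, sample_fmts_s16p, adpcm_ima_qt, "ADPCM IMA QuickTime");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_MS,     sample_fmts_both, adpcm_ms,     "ADPCM Microsoft");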