64 #define FREEZE_INTERVAL 128
82 (s->block_size & (s->block_size - 1))) {
88 int frontier, max_paths;
90 if ((unsigned)avctx->trellis > 16U) {
106 frontier = 1 << avctx->trellis;
142 bytestream_put_le16(&extradata, avctx->frame_size);
143 bytestream_put_le16(&extradata, 7);
144 for (i = 0; i < 7; i++) {
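Lines 142-144 above start the MS ADPCM extradata block: samples per block, the coefficient count of 7, then a loop over the seven predictor coefficient pairs. As a hedged illustration of that layout, the standalone helper below builds the same 32-byte structure from the standard MS ADPCM coefficient table; the helper and its names are illustrative, not part of adpcmenc.c.

/* Illustrative only: builds the MS ADPCM extradata layout written above
 * (wSamplesPerBlock, wNumCoef = 7, then seven little-endian int16 coefficient
 * pairs). FFmpeg stores the coefficients divided by 4, hence the encoder's
 * "* 4"; here the full-scale standard values are used directly. */
#include <stdint.h>
#include <stddef.h>

static uint8_t *put_le16(uint8_t *p, int v)
{
    *p++ = v & 0xff;
    *p++ = (v >> 8) & 0xff;
    return p;
}

static size_t write_ms_adpcm_extradata(uint8_t out[32], int samples_per_block)
{
    static const int coeff1[7] = { 256,  512, 0, 192, 240,  460,  392 };
    static const int coeff2[7] = {   0, -256, 0,  64,   0, -208, -232 };
    uint8_t *p = out;

    p = put_le16(p, samples_per_block); /* wSamplesPerBlock */
    p = put_le16(p, 7);                 /* wNumCoef         */
    for (int i = 0; i < 7; i++) {
        p = put_le16(p, coeff1[i]);
        p = put_le16(p, coeff2[i]);
    }
    return (size_t)(p - out);           /* 32 bytes */
}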
231 const int sign = (delta < 0) * 8;
234 int diff = (step * nibble) >> 2;
238 nibble = sign | nibble;
240 c->prev_sample += diff;
251 int nibble = 8*(delta < 0);
273 c->prev_sample -= diff;
275 c->prev_sample += diff;
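Lines 231-275 come from the IMA-family per-sample compressors (the ALP fast path and the QuickTime bit-by-bit variant). For orientation, here is a minimal, self-contained sketch of one classic IMA ADPCM encode step; the tables are the standard IMA step and index tables, and all names are local to the sketch rather than FFmpeg's.

/* Minimal sketch of one IMA ADPCM encode step, mirroring the logic excerpted
 * above: derive a 4-bit code from the prediction error, then update the
 * predictor and the step index exactly as the decoder will. */
#include <stdint.h>

static const int16_t ima_step_table[89] = {
        7,     8,     9,    10,    11,    12,    13,    14,    16,    17,
       19,    21,    23,    25,    28,    31,    34,    37,    41,    45,
       50,    55,    60,    66,    73,    80,    88,    97,   107,   118,
      130,   143,   157,   173,   190,   209,   230,   253,   279,   307,
      337,   371,   408,   449,   494,   544,   598,   658,   724,   796,
      876,   963,  1060,  1166,  1282,  1411,  1552,  1707,  1878,  2066,
     2272,  2499,  2749,  3024,  3327,  3660,  4026,  4428,  4871,  5358,
     5894,  6484,  7132,  7845,  8630,  9493, 10442, 11487, 12635, 13899,
    15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
};
static const int8_t ima_index_table[16] = {
    -1, -1, -1, -1, 2, 4, 6, 8, -1, -1, -1, -1, 2, 4, 6, 8
};

static int clip(int v, int lo, int hi) { return v < lo ? lo : v > hi ? hi : v; }

/* Encode one sample: returns the 4-bit code, updates *predictor and *index. */
static uint8_t ima_encode_sample(int *predictor, int *index, int16_t sample)
{
    int step   = ima_step_table[*index];
    int delta  = sample - *predictor;
    int nibble = (delta < 0) ? 8 : 0;   /* sign bit, as in "8*(delta < 0)" */
    int diff   = step >> 3;

    if (delta < 0) delta = -delta;
    if (delta >= step)        { nibble |= 4; delta -= step;      diff += step;      }
    if (delta >= (step >> 1)) { nibble |= 2; delta -= step >> 1; diff += step >> 1; }
    if (delta >= (step >> 2)) { nibble |= 1;                     diff += step >> 2; }

    *predictor += (nibble & 8) ? -diff : diff;   /* cf. lines 273/275 above */
    *predictor  = clip(*predictor, -32768, 32767);
    *index      = clip(*index + ima_index_table[nibble], 0, 88);
    return (uint8_t)nibble;
}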
289 ((c->sample2) * (c->coeff2))) / 64;
293 bias = c->idelta / 2;
295 bias = -c->idelta / 2;
297 nibble = (nibble + bias) / c->idelta;
300 predictor += ((nibble & 0x08) ? (nibble - 0x10) : nibble) * c->idelta;
302 c->sample2 = c->sample1;
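Lines 289-302 are from adpcm_ms_compress_sample(). The sketch below shows the complete MS ADPCM quantization step in isolation, including the idelta adaptation the excerpt omits; it follows FFmpeg's convention of coefficients pre-divided by 4 (hence the /64 instead of the canonical /256), and the struct and names are stand-ins, not the file's.

/* Self-contained sketch of the MS ADPCM per-sample step. c->idelta is assumed
 * to be >= 16, as the encoder enforces before each block. */
#include <stdint.h>

static const int adapt_table[16] = {
    230, 230, 230, 230, 307, 409, 512, 614,
    768, 614, 512, 409, 307, 230, 230, 230
};

typedef struct { int sample1, sample2, coeff1, coeff2, idelta; } MsAdpcmCh;

static int clip16(int v) { return v < -32768 ? -32768 : v > 32767 ? 32767 : v; }

static uint8_t ms_adpcm_encode_sample(MsAdpcmCh *c, int16_t sample)
{
    /* predict from the two previous decoded samples (coeffs pre-divided by 4) */
    int predictor = (c->sample1 * c->coeff1 + c->sample2 * c->coeff2) / 64;
    int nibble    = sample - predictor;
    int bias      = nibble >= 0 ? c->idelta / 2 : -c->idelta / 2;

    nibble = (nibble + bias) / c->idelta;    /* quantize to a signed 4-bit code */
    if (nibble >  7) nibble =  7;
    if (nibble < -8) nibble = -8;
    nibble &= 0x0F;

    /* reconstruct exactly as the decoder will */
    predictor += ((nibble & 0x08) ? nibble - 0x10 : nibble) * c->idelta;
    c->sample2 = c->sample1;
    c->sample1 = clip16(predictor);

    /* adapt the quantization step */
    c->idelta = (adapt_table[nibble] * c->idelta) >> 8;
    if (c->idelta < 16)
        c->idelta = 16;
    return (uint8_t)nibble;
}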
335 const int16_t *samples, uint8_t *dst,
340 const int frontier = 1 << avctx->trellis;
347 int pathn = 0, froze = -1, i, j, k, generation = 0;
349 memset(hash, 0xff, 65536 * sizeof(*hash));
351 memset(nodep_buf, 0, 2 * frontier * sizeof(*nodep_buf));
352 nodes[0] = node_buf + frontier;
355 nodes[0]->step = c->step_index;
364 nodes[0]->step = c->idelta;
367 nodes[0]->step = 127;
370 nodes[0]->step = c->step;
375 for (i = 0; i < n; i++) {
380 memset(nodes_next, 0, frontier * sizeof(TrellisNode*));
381 for (j = 0; j < frontier && nodes[j]; j++) {
384 const int range = (j < frontier / 2) ? 1 : 0;
385 const int step = nodes[j]->step;
388 const int predictor = ((nodes[j]->sample1 * c->coeff1) +
389                        (nodes[j]->sample2 * c->coeff2)) / 64;
391 const int nmin = av_clip(div-range, -8, 6);
392 const int nmax = av_clip(div+range, -7, 7);
393 for (nidx = nmin; nidx <= nmax; nidx++) {
394 const int nibble = nidx & 0xf;
395 int dec_sample = predictor + nidx * step;
396 #define STORE_NODE(NAME, STEP_INDEX)\
402 dec_sample = av_clip_int16(dec_sample);\
403 d = sample - dec_sample;\
404 ssd = nodes[j]->ssd + d*(unsigned)d;\
409 if (ssd < nodes[j]->ssd)\
422 h = &hash[(uint16_t) dec_sample];\
423 if (*h == generation)\
425 if (heap_pos < frontier) {\
430 pos = (frontier >> 1) +\
431 (heap_pos & ((frontier >> 1) - 1));\
432 if (ssd > nodes_next[pos]->ssd)\
437 u = nodes_next[pos];\
439 av_assert1(pathn < FREEZE_INTERVAL << avctx->trellis);\
441 nodes_next[pos] = u;\
445 u->step = STEP_INDEX;\
446 u->sample2 = nodes[j]->sample1;\
447 u->sample1 = dec_sample;\
448 paths[u->path].nibble = nibble;\
449 paths[u->path].prev = nodes[j]->path;\
453 int parent = (pos - 1) >> 1;\
454 if (nodes_next[parent]->ssd <= ssd)\
456 FFSWAP(TrellisNode*, nodes_next[parent], nodes_next[pos]);\
467 #define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
468 const int predictor = nodes[j]->sample1;\
469 const int div = (sample - predictor) * 4 / STEP_TABLE;\
470 int nmin = av_clip(div - range, -7, 6);\
471 int nmax = av_clip(div + range, -6, 7);\
476 for (nidx = nmin; nidx <= nmax; nidx++) {\
477 const int nibble = nidx < 0 ? 7 - nidx : nidx;\
478 int dec_sample = predictor +\
480 ff_adpcm_yamaha_difflookup[nibble]) / 8;\
481 STORE_NODE(NAME, STEP_INDEX);\
499 if (generation == 255) {
500 memset(hash, 0xff, 65536 * sizeof(*hash));
505 if (nodes[0]->ssd > (1 << 28)) {
506 for (j = 1; j < frontier && nodes[j]; j++)
507 nodes[j]->ssd -= nodes[0]->ssd;
513 p = &paths[nodes[0]->path];
514 for (k = i; k > froze; k--) {
523 memset(nodes + 1, 0, (frontier - 1) * sizeof(TrellisNode*));
527 p = &paths[nodes[0]->path];
528 for (i = n - 1; i > froze; i--) {
536 c->step_index = nodes[0]->step;
537 c->step = nodes[0]->step;
538 c->idelta = nodes[0]->step;
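The trellis search above (adpcm_compress_trellis) stores, for every surviving node, an index into a path array whose entries record the emitted nibble and the index of the previous entry; lines 513-528 walk that chain backwards to emit the winning nibble sequence. A hedged sketch of that back-tracking step, with illustrative stand-ins for the file's own types:

/* Recover the nibbles of the path ending at 'tail' into dst[0..count-1],
 * newest sample first, by following the prev links backwards. */
#include <stdint.h>

typedef struct { uint8_t nibble; int prev; } PathEntry;

static void backtrack_path(const PathEntry *paths, int tail,
                           uint8_t *dst, int count)
{
    for (int k = count - 1; k >= 0; k--) {
        dst[k] = paths[tail].nibble;
        tail   = paths[tail].prev;
    }
}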
551 return (nibble >> shift) & 0x0F;
555 const int16_t *samples, int nsamples,
567 for (int n = 0; n < nsamples; n++) {
584 int n, i, ch, st, pkt_size, ret;
585 const int16_t *samples;
591 samples = (const int16_t *)frame->data[0];
612 for (ch = 0; ch < avctx->channels; ch++) {
626 for (ch = 0; ch < avctx->channels; ch++) {
628 buf + ch * blocks * 8, &c->status[ch],
631 for (i = 0; i < blocks; i++) {
632 for (ch = 0; ch < avctx->channels; ch++) {
633 uint8_t *buf1 = buf + ch * blocks * 8 + i * 8;
634 for (j = 0; j < 8; j += 2)
635 *dst++ = buf1[j] | (buf1[j + 1] << 4);
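Line 635 packs two IMA WAV codes with the earlier sample in the low nibble; other paths later in the frame encoder (e.g. lines 793 and 800) put the earlier sample in the high nibble instead. The two packing orders, as tiny illustrative helpers (names are not the file's):

#include <stdint.h>

static uint8_t pack_low_first(uint8_t first, uint8_t second)  /* IMA WAV style */
{
    return (uint8_t)((first & 0x0F) | ((second & 0x0F) << 4));
}

static uint8_t pack_high_first(uint8_t first, uint8_t second) /* QT/SWF style */
{
    return (uint8_t)(((first & 0x0F) << 4) | (second & 0x0F));
}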
640 for (i = 0; i < blocks; i++) {
641 for (ch = 0; ch < avctx->channels; ch++) {
643 const int16_t *smp = &samples_p[ch][1 + i * 8];
644 for (j = 0; j < 8; j += 2) {
659 for (ch = 0; ch < avctx->channels; ch++) {
667 for (i = 0; i < 64; i++)
671 for (i = 0; i < 64; i += 2) {
692 for (ch = 0; ch < avctx->channels; ch++) {
708 for (ch = 0; ch < avctx->channels; ch++) {
734 c->status[i].prev_sample = samples[i];
744 buf + n, &c->status[1], n,
746 for (i = 0; i < n; i++) {
758 samples[2 * i + 1]));
772 if (c->status[i].idelta < 16)
773 c->status[i].idelta = 16;
774 bytestream_put_le16(&dst, c->status[i].idelta);
777 c->status[i].sample2 = *samples++;
779 c->status[i].sample1 = *samples++;
780 bytestream_put_le16(&dst, c->status[i].sample1);
783 bytestream_put_le16(&dst, c->status[i].sample2);
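Lines 772-783 emit the per-channel MS ADPCM block header: the clamped initial idelta followed by the two seed samples, each as little-endian int16 (the predictor index byte is written just before this excerpt). A standalone sketch of that header layout, with hypothetical names:

/* Illustrative only: lay out the MS ADPCM block header for 'channels'
 * channels, in the order bPredictor bytes, then iDelta, iSamp1 and iSamp2
 * as little-endian int16 per channel. */
#include <stdint.h>

typedef struct { int predictor_idx, idelta, sample1, sample2; } MsCh;

static uint8_t *write_ms_block_header(uint8_t *dst, const MsCh *ch, int channels)
{
    for (int i = 0; i < channels; i++)
        *dst++ = (uint8_t)ch[i].predictor_idx;       /* bPredictor     */
    for (int i = 0; i < channels; i++) {              /* iDelta (LE16)  */
        *dst++ = ch[i].idelta & 0xff;
        *dst++ = (ch[i].idelta >> 8) & 0xff;
    }
    for (int i = 0; i < channels; i++) {              /* iSamp1 (LE16)  */
        *dst++ = ch[i].sample1 & 0xff;
        *dst++ = (ch[i].sample1 >> 8) & 0xff;
    }
    for (int i = 0; i < channels; i++) {              /* iSamp2 (LE16)  */
        *dst++ = ch[i].sample2 & 0xff;
        *dst++ = (ch[i].sample2 >> 8) & 0xff;
    }
    return dst;
}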
792 for (i = 0; i < n; i += 2)
793 *dst++ = (buf[i] << 4) | buf[i + 1];
799 for (i = 0; i < n; i++)
800 *dst++ = (buf[i] << 4) | buf[n + i];
804 for (i = 7 * avctx->channels; i < avctx->block_align; i++) {
821 for (i = 0; i < n; i += 2)
822 *dst++ = buf[i] | (buf[i + 1] << 4);
828 for (i = 0; i < n; i++)
829 *dst++ = buf[i] | (buf[n + i] << 4);
833 for (n *= avctx->channels; n > 0; n--) {
848 for (ch = 0; ch < avctx->channels; ch++) {
862 c->status[0].prev_sample = *samples;
863 bytestream_put_le16(&dst, c->status[0].prev_sample);
864 bytestream_put_byte(&dst, c->status[0].step_index);
865 bytestream_put_byte(&dst, 0);
875 for (i = 0; i < n; i++)
876 bytestream_put_byte(&dst, (buf[2 * i] << 4) | buf[2 * i + 1]);
884 bytestream_put_byte(&dst, nibble);
889 bytestream_put_byte(&dst, nibble);
900 for (ch = 0; ch < avctx->channels; ch++) {
901 int64_t error = INT64_MAX, tmperr = INT64_MAX;
903 int saved1 = c->status[ch].sample1;
904 int saved2 = c->status[ch].sample2;
907 for (int s = 2; s < 18 && tmperr != 0; s++) {
908 for (int f = 0; f < 2 && tmperr != 0; f++) {
909 c->status[ch].sample1 = saved1;
910 c->status[ch].sample2 = saved2;
913 if (tmperr < error) {
922 c->status[ch].sample1 = saved1;
923 c->status[ch].sample2 = saved2;
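Lines 900-923 belong to the Argonaut (ADPCM_ARGO) path in adpcm_encode_frame(): every (shift, flag) pair is trial-encoded, the channel's predictor state is restored before each trial, and the pair with the lowest accumulated error is kept. A hedged sketch of that brute-force structure follows; trial_encode is a hypothetical stand-in for the real per-block compressor.

#include <stdint.h>

typedef struct { int sample1, sample2; } ChState;

typedef int64_t (*trial_encode_fn)(ChState *st, int shift, int flag);

static void argo_pick_params(ChState *st, trial_encode_fn trial_encode,
                             int *best_shift, int *best_flag)
{
    const ChState saved = *st;          /* predictor state before the block */
    int64_t best_err = INT64_MAX;

    for (int s = 2; s < 18 && best_err != 0; s++) {
        for (int f = 0; f < 2 && best_err != 0; f++) {
            int64_t err;
            *st = saved;                /* restore state for each trial     */
            err = trial_encode(st, s, f);
            if (err < best_err) {
                best_err    = err;
                *best_shift = s;
                *best_flag  = f;
            }
        }
    }
    *st = saved;                        /* the winner is then re-encoded    */
}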
935 avpkt->size = pkt_size;
950 .name = "block_size",
951 .help = "set the block size",
954 .default_val = {.i64 = 1024},
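The AVOption above declares "block_size" as a private encoder option with a default of 1024. A hedged usage sketch of setting it through the generic AVOptions API follows; the choice of the adpcm_ima_wav encoder and the parameter values are illustrative, and error handling is omitted.

/* Sketch: AV_OPT_SEARCH_CHILDREN lets av_opt_set() reach the encoder's
 * priv_data (where "block_size" lives) from the codec context. */
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>

static AVCodecContext *open_adpcm_encoder(void)
{
    const AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_ADPCM_IMA_WAV);
    AVCodecContext *ctx  = avcodec_alloc_context3(codec);

    ctx->sample_rate    = 44100;
    ctx->channels       = 2;
    ctx->channel_layout = AV_CH_LAYOUT_STEREO;
    ctx->sample_fmt     = AV_SAMPLE_FMT_S16P;

    av_opt_set(ctx, "block_size", "512", AV_OPT_SEARCH_CHILDREN);

    avcodec_open2(ctx, codec, NULL);
    return ctx;
}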
962 #define ADPCM_ENCODER(id_, name_, sample_fmts_, capabilities_, long_name_) \
963 static const AVClass name_ ## _encoder_class = { \
964 .class_name = #name_, \
965 .item_name = av_default_item_name, \
967 .version = LIBAVUTIL_VERSION_INT, \
970 AVCodec ff_ ## name_ ## _encoder = { \
972 .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
973 .type = AVMEDIA_TYPE_AUDIO, \
975 .priv_data_size = sizeof(ADPCMEncodeContext), \
976 .init = adpcm_encode_init, \
977 .encode2 = adpcm_encode_frame, \
978 .close = adpcm_encode_close, \
979 .sample_fmts = sample_fmts_, \
980 .capabilities = capabilities_, \
981 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_INIT_THREADSAFE, \
982 .priv_class = &name_ ## _encoder_class, \
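ADPCM_ENCODER() stamps out one AVClass/AVCodec pair per supported codec; the file ends with one invocation per codec ID. A representative invocation (illustrative of the pattern, not quoted verbatim from the file) showing the argument order:

/* Argument order follows the macro's parameter list: codec id, short name,
 * sample format list, capability flags, long name. Flags left at 0 here. */
ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, sample_fmts_p, 0,
              "ADPCM IMA WAV");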