Modify av_audio_convert() to use AVAudioConvert context struct; add av_audio_convert_alloc() and av_audio_convert_free() support functions.

Originally committed as revision 14496 to svn://svn.ffmpeg.org/ffmpeg/trunk
Peter Ross 17 years ago
parent 8a464e7580
commit 82cee279a5
libavcodec/audioconvert.c (46)
libavcodec/audioconvert.h (34)

libavcodec/audioconvert.c:

@@ -70,27 +70,51 @@ void avcodec_sample_fmt_string (char *buf, int buf_size, int sample_fmt)
     }
 }
 
-int av_audio_convert(void *maybe_dspcontext_or_something_av_convert_specific,
-                     void *out[6], int out_stride[6], enum SampleFormat out_fmt,
-                     void * in[6], int  in_stride[6], enum SampleFormat  in_fmt, int len){
+struct AVAudioConvert {
+    int in_channels, out_channels;
+    int fmt_pair;
+};
+
+AVAudioConvert *av_audio_convert_alloc(enum SampleFormat out_fmt, int out_channels,
+                                       enum SampleFormat in_fmt, int in_channels,
+                                       const float *matrix, int flags)
+{
+    AVAudioConvert *ctx;
+    if (in_channels != out_channels)
+        return NULL;  /* FIXME: not supported */
+    ctx = av_malloc(sizeof(AVAudioConvert));
+    if (!ctx)
+        return NULL;
+    ctx->in_channels  = in_channels;
+    ctx->out_channels = out_channels;
+    ctx->fmt_pair = out_fmt + SAMPLE_FMT_NB*in_fmt;
+    return ctx;
+}
+
+void av_audio_convert_free(AVAudioConvert *ctx)
+{
+    av_free(ctx);
+}
+
+int av_audio_convert(AVAudioConvert *ctx,
+                           void * const out[6], const int out_stride[6],
+                     const void * const  in[6], const int  in_stride[6], int len)
+{
     int ch;
-    const int isize= FFMIN( in_fmt+1, 4);
-    const int osize= FFMIN(out_fmt+1, 4);
-    const int fmt_pair= out_fmt + 5*in_fmt;
 
     //FIXME optimize common cases
 
-    for(ch=0; ch<6; ch++){
-        const int is=  in_stride[ch] * isize;
-        const int os= out_stride[ch] * osize;
+    for(ch=0; ch<ctx->out_channels; ch++){
+        const int is=  in_stride[ch];
+        const int os= out_stride[ch];
         uint8_t *pi=  in[ch];
         uint8_t *po= out[ch];
-        uint8_t *end= po + os;
+        uint8_t *end= po + os*len;
 
         if(!out[ch])
             continue;
 
 #define CONV(ofmt, otype, ifmt, expr)\
-if(fmt_pair == ofmt + 5*ifmt){\
+if(ctx->fmt_pair == ofmt + SAMPLE_FMT_NB*ifmt){\
     do{\
         *(otype*)po = expr; pi += is; po += os;\
     }while(po < end);\
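
For reference, this is roughly what a single CONV() expansion does for a float to int16_t channel after this change, with the per-sample stride now taken directly in bytes. It is a standalone sketch, not part of the patch; the function name and the scaling expression are illustrative.

#include <stdint.h>
#include <math.h>

/* Illustrative sketch (not from the patch): one CONV() expansion for a
 * float -> int16_t channel.  Both pointers advance by a per-sample stride
 * given in bytes, matching the reworked av_audio_convert() loop. */
static void conv_flt_to_s16_channel(uint8_t *po, int os,        /* output base, stride in bytes */
                                    const uint8_t *pi, int is,  /* input base, stride in bytes  */
                                    int len)
{
    uint8_t *end = po + os * len;
    while (po < end) {
        /* Scale a [-1.0, 1.0) float up to the int16_t range; production
         * code would also clip out-of-range input. */
        *(int16_t *)po = (int16_t)lrintf(*(const float *)pi * (1 << 15));
        pi += is;
        po += os;
    }
}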

libavcodec/audioconvert.h:

@@ -54,4 +54,38 @@ const char *avcodec_get_sample_fmt_name(int sample_fmt);
  */
 enum SampleFormat avcodec_get_sample_fmt(const char* name);
 
+struct AVAudioConvert;
+typedef struct AVAudioConvert AVAudioConvert;
+
+/**
+ * Create an audio sample format converter context
+ * @param out_fmt Output sample format
+ * @param out_channels Number of output channels
+ * @param in_fmt Input sample format
+ * @param in_channels Number of input channels
+ * @param[in] matrix Channel mixing matrix (of dimension in_channels*out_channels). Set to NULL to ignore.
+ * @param flags See FF_MM_xx
+ * @return NULL on error
+ */
+AVAudioConvert *av_audio_convert_alloc(enum SampleFormat out_fmt, int out_channels,
+                                       enum SampleFormat in_fmt, int in_channels,
+                                       const float *matrix, int flags);
+
+/**
+ * Free audio sample format converter context
+ */
+void av_audio_convert_free(AVAudioConvert *ctx);
+
+/**
+ * Convert between audio sample formats
+ * @param[in] out array of output buffers for each channel. Set to NULL to skip processing of the given channel.
+ * @param[in] out_stride distance between consecutive output samples (measured in bytes)
+ * @param[in] in array of input buffers for each channel
+ * @param[in] in_stride distance between consecutive input samples (measured in bytes)
+ * @param len length of the audio frame (measured in samples)
+ */
+int av_audio_convert(AVAudioConvert *ctx,
+                           void * const out[6], const int out_stride[6],
+                     const void * const  in[6], const int  in_stride[6], int len);
+
 #endif /* FFMPEG_AUDIOCONVERT_H */
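
A minimal usage sketch of the new context API follows. It assumes the libavcodec include path and the existing SAMPLE_FMT_FLT / SAMPLE_FMT_S16 values of enum SampleFormat; the caller function and interleaved buffer layout are hypothetical, and the return value of av_audio_convert() is ignored here because its semantics are not visible in this excerpt.

#include <stdint.h>
#include "libavcodec/audioconvert.h"  /* assumed include path */

/* Hypothetical caller: convert `len` interleaved stereo float samples to
 * interleaved int16_t using the context API added by this commit. */
int convert_stereo_flt_to_s16(const float *in_buf, int16_t *out_buf, int len)
{
    AVAudioConvert *ctx = av_audio_convert_alloc(SAMPLE_FMT_S16, 2,
                                                 SAMPLE_FMT_FLT, 2,
                                                 NULL /* no mixing matrix */, 0);
    if (!ctx)
        return -1;

    /* Per-channel base pointers into the interleaved buffers; unused
     * channel slots stay NULL so they are skipped. */
    const void *in[6]  = { in_buf,  in_buf  + 1 };
    void       *out[6] = { out_buf, out_buf + 1 };

    /* Strides are the distance in bytes between consecutive samples of
     * the same channel. */
    int in_stride[6]  = { 2 * sizeof(float),   2 * sizeof(float)   };
    int out_stride[6] = { 2 * sizeof(int16_t), 2 * sizeof(int16_t) };

    av_audio_convert(ctx, out, out_stride, in, in_stride, len);
    av_audio_convert_free(ctx);
    return 0;
}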
