miscellaneous typo fixes

pull/8/head
Diego Biurrun 12 years ago
parent 6906b19346
commit 511cf612ac
Changed files:
  1. configure (2)
  2. doc/Doxyfile (2)
  3. doc/developer.texi (2)
  4. doc/indevs.texi (2)
  5. doc/rate_distortion.txt (2)
  6. doc/viterbi.txt (4)
  7. libavcodec/4xm.c (2)
  8. libavcodec/aacpsy.c (4)
  9. libavcodec/ac3dec.c (2)
 10. libavcodec/ac3enc.c (2)
 11. libavcodec/acelp_filters.h (2)
 12. libavcodec/avcodec.h (2)
 13. libavcodec/bitstream.c (2)
 14. libavcodec/eac3dec.c (2)
 15. libavcodec/ffv1dec.c (2)
 16. libavcodec/flicvideo.c (2)
 17. libavcodec/g726.c (2)
 18. libavcodec/h264_direct.c (2)
 19. libavcodec/indeo3data.h (4)
 20. libavcodec/lagarith.c (4)
 21. libavcodec/libfdk-aacenc.c (2)
 22. libavcodec/libtheoraenc.c (2)
 23. libavcodec/mpeg4videoenc.c (4)
 24. libavcodec/parser.c (2)
 25. libavcodec/pngenc.c (2)
 26. libavcodec/ratecontrol.c (2)
 27. libavcodec/resample.c (2)
 28. libavcodec/rv10.c (2)
 29. libavcodec/shorten.c (3)
 30. libavcodec/thread.h (2)
 31. libavcodec/vda_h264.c (2)
 32. libavcodec/vorbisdec.c (2)
 33. libavcodec/vp8dsp.h (2)
 34. libavcodec/wmaprodec.c (4)
 35. libavdevice/dv1394.h (2)
 36. libavformat/avformat.h (2)
 37. libavformat/aviobuf.c (2)
 38. libavformat/dvenc.c (6)
 39. libavformat/hls.c (2)
 40. libavformat/hlsproto.c (2)
 41. libavformat/http.h (2)
 42. libavformat/rtpdec_jpeg.c (2)
 43. libavformat/smoothstreamingenc.c (2)
 44. libavformat/spdifenc.c (2)
 45. libavformat/wtv.c (2)
 46. libavformat/xmv.c (2)
 47. libavresample/avresample-test.c (2)
 48. libswscale/ppc/yuv2yuv_altivec.c (2)
 49. libswscale/swscale.c (2)
 50. tests/audiogen.c (2)
 51. tools/patcheck (4)

configure
@@ -1305,7 +1305,7 @@ HAVE_LIST="
 xmm_clobbers
 "
-# options emitted with CONFIG_ prefix but not available on command line
+# options emitted with CONFIG_ prefix but not available on the command line
 CONFIG_EXTRA="
 aandcttables
 ac3dsp

doc/Doxyfile
@@ -288,7 +288,7 @@ TYPEDEF_HIDES_STRUCT = NO
 # causing a significant performance penality.
 # If the system has enough physical memory increasing the cache will improve the
 # performance by keeping more symbols in memory. Note that the value works on
-# a logarithmic scale so increasing the size by one will rougly double the
+# a logarithmic scale so increasing the size by one will roughly double the
 # memory usage. The cache size is given by this formula:
 # 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
 # corresponding to a cache size of 2^16 = 65536 symbols
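A quick check of the formula quoted in that context: with the default SYMBOL_CACHE_SIZE of 0 the cache holds 2^16 = 65536 symbols, a setting of 1 gives 2^17 = 131072, and the maximum of 9 gives 2^25 = 33554432, so each increment does indeed roughly double the memory spent on the symbol cache.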

doc/developer.texi
@@ -201,7 +201,7 @@ For exported names, each library has its own prefixes. Just check the existing
 code and name accordingly.
 @end itemize
-@subsection Miscellanous conventions
+@subsection Miscellaneous conventions
 @itemize @bullet
 @item
 fprintf and printf are forbidden in libavformat and libavcodec,

doc/indevs.texi
@@ -300,7 +300,7 @@ The filename passed as input has the syntax:
 @var{hostname}:@var{display_number}.@var{screen_number} specifies the
 X11 display name of the screen to grab from. @var{hostname} can be
-ommitted, and defaults to "localhost". The environment variable
+omitted, and defaults to "localhost". The environment variable
 @env{DISPLAY} contains the default display name.
 @var{x_offset} and @var{y_offset} specify the offsets of the grabbed

doc/rate_distortion.txt
@@ -23,7 +23,7 @@ Let's consider the problem of minimizing:
 rate is the filesize
 distortion is the quality
-lambda is a fixed value choosen as a tradeoff between quality and filesize
+lambda is a fixed value chosen as a tradeoff between quality and filesize
 Is this equivalent to finding the best quality for a given max
 filesize? The answer is yes. For each filesize limit there is some lambda
 factor for which minimizing above will get you the best quality (using your
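For readers skimming the hunk: the quantity being minimized a few lines above this context is the usual Lagrangian rate-distortion cost. In standard textbook notation (an assumption about the convention, not text quoted from the file):

    J(\lambda) = D + \lambda \cdot R

where R is the rate (the filesize), D the distortion (the loss in quality) and \lambda the fixed tradeoff factor the corrected line refers to. Sweeping \lambda from 0 to infinity traces out the optimal quality-versus-filesize curve, which is why minimizing J for a suitable \lambda is equivalent to getting the best quality under a maximum filesize.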

doc/viterbi.txt
@@ -85,8 +85,8 @@ here are some edges we could choose from:
 / \
 O-----2--4--O
-Finding the new best pathes and scores for each point of our new column is
-trivial given we know the previous column best pathes and scores:
+Finding the new best paths and scores for each point of our new column is
+trivial given we know the previous column best paths and scores:
 O-----0-----8
 \
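The two lines fixed above describe the dynamic-programming step at the heart of the Viterbi algorithm: every node of the new column keeps only the cheapest path that reaches it. A minimal C sketch of that column update, with hypothetical names, a hypothetical state count and an edge-cost table (not code from the repository):

    #include <limits.h>

    #define NUM_STATES 4  /* hypothetical number of trellis nodes per column */

    /* One column update: given the best score reaching each node of the
     * previous column, compute the best score and predecessor for each
     * node of the new column. cost[i][j] is the edge score from previous
     * node i to new node j, or INT_MAX if there is no such edge. */
    static void viterbi_column_update(const int prev_score[NUM_STATES],
                                      const int cost[NUM_STATES][NUM_STATES],
                                      int new_score[NUM_STATES],
                                      int best_prev[NUM_STATES])
    {
        for (int j = 0; j < NUM_STATES; j++) {
            new_score[j] = INT_MAX;
            best_prev[j] = -1;
            for (int i = 0; i < NUM_STATES; i++) {
                if (prev_score[i] == INT_MAX || cost[i][j] == INT_MAX)
                    continue;
                int score = prev_score[i] + cost[i][j];
                if (score < new_score[j]) { /* keep only the cheapest path into j */
                    new_score[j] = score;
                    best_prev[j] = i;
                }
            }
        }
    }

Recording best_prev for every column is what lets a final backtracking pass recover the chosen path once the last column has been scored.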

libavcodec/4xm.c
@@ -796,7 +796,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
 cfrm->size + data_size + FF_INPUT_BUFFER_PADDING_SIZE);
 // explicit check needed as memcpy below might not catch a NULL
 if (!cfrm->data) {
-av_log(f->avctx, AV_LOG_ERROR, "realloc falure");
+av_log(f->avctx, AV_LOG_ERROR, "realloc failure");
 return -1;
 }

libavcodec/aacpsy.c
@@ -592,7 +592,7 @@ static void psy_3gpp_analyze_channel(FFPsyContext *ctx, int channel,
 for (w = 0; w < wi->num_windows*16; w += 16) {
 AacPsyBand *bands = &pch->band[w];
-//5.4.2.3 "Spreading" & 5.4.3 "Spreaded Energy Calculation"
+/* 5.4.2.3 "Spreading" & 5.4.3 "Spread Energy Calculation" */
 spread_en[0] = bands[0].energy;
 for (g = 1; g < num_bands; g++) {
 bands[g].thr = FFMAX(bands[g].thr, bands[g-1].thr * coeffs[g].spread_hi[0]);
@@ -612,7 +612,7 @@ static void psy_3gpp_analyze_channel(FFPsyContext *ctx, int channel,
 band->thr = FFMAX(PSY_3GPP_RPEMIN*band->thr, FFMIN(band->thr,
 PSY_3GPP_RPELEV*pch->prev_band[w+g].thr_quiet));
-/* 5.6.1.3.1 "Prepatory steps of the perceptual entropy calculation" */
+/* 5.6.1.3.1 "Preparatory steps of the perceptual entropy calculation" */
 pe += calc_pe_3gpp(band);
 a += band->pe_const;
 active_lines += band->active_lines;

libavcodec/ac3dec.c
@@ -546,7 +546,7 @@ static void decode_transform_coeffs(AC3DecodeContext *s, int blk)
 for (ch = 1; ch <= s->channels; ch++) {
 /* transform coefficients for full-bandwidth channel */
 decode_transform_coeffs_ch(s, blk, ch, &m);
-/* tranform coefficients for coupling channel come right after the
+/* transform coefficients for coupling channel come right after the
 coefficients for the first coupled channel*/
 if (s->channel_in_cpl[ch]) {
 if (!got_cplchan) {

libavcodec/ac3enc.c
@@ -659,7 +659,7 @@ static void count_frame_bits_fixed(AC3EncodeContext *s)
 * bit allocation parameters do not change between blocks
 * no delta bit allocation
 * no skipped data
-* no auxilliary data
+* no auxiliary data
 * no E-AC-3 metadata
 */

libavcodec/acelp_filters.h
@@ -32,7 +32,7 @@
 * the coefficients are scaled by 2^15.
 * This array only contains the right half of the filter.
 * This filter is likely identical to the one used in G.729, though this
-* could not be determined from the original comments with certainity.
+* could not be determined from the original comments with certainty.
 */
 extern const int16_t ff_acelp_interp_filter[61];

libavcodec/avcodec.h
@@ -2292,7 +2292,7 @@ typedef struct AVCodecContext {
 /**
 * ratecontrol qmin qmax limiting method
-* 0-> clipping, 1-> use a nice continous function to limit qscale wthin qmin/qmax.
+* 0-> clipping, 1-> use a nice continuous function to limit qscale wthin qmin/qmax.
 * - encoding: Set by user.
 * - decoding: unused
 */

libavcodec/bitstream.c
@@ -169,7 +169,7 @@ static int build_table(VLC *vlc, int table_nb_bits, int nb_codes,
 table[i][0] = -1; //codes
 }
-/* first pass: map codes and compute auxillary table sizes */
+/* first pass: map codes and compute auxiliary table sizes */
 for (i = 0; i < nb_codes; i++) {
 n = codes[i].bits;
 code = codes[i].code;

libavcodec/eac3dec.c
@@ -491,7 +491,7 @@ int ff_eac3_parse_header(AC3DecodeContext *s)
 s->skip_syntax = get_bits1(gbc);
 parse_spx_atten_data = get_bits1(gbc);
-/* coupling strategy occurance and coupling use per block */
+/* coupling strategy occurrence and coupling use per block */
 num_cpl_blocks = 0;
 if (s->channel_mode > 1) {
 for (blk = 0; blk < s->num_blocks; blk++) {

libavcodec/ffv1dec.c
@@ -824,7 +824,7 @@ static int ffv1_decode_frame(AVCodecContext *avctx, void *data,
 } else {
 if (!f->key_frame_ok) {
 av_log(avctx, AV_LOG_ERROR,
-"Cant decode non keyframe without valid keyframe\n");
+"Cannot decode non-keyframe without valid keyframe\n");
 return AVERROR_INVALIDDATA;
 }
 p->key_frame = 0;

libavcodec/flicvideo.c
@@ -581,7 +581,7 @@ static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
 }
 /* Now FLX is strange, in that it is "byte" as opposed to "pixel" run length compressed.
-* This does not give us any good oportunity to perform word endian conversion
+* This does not give us any good opportunity to perform word endian conversion
 * during decompression. So if it is required (i.e., this is not a LE target, we do
 * a second pass over the line here, swapping the bytes.
 */

libavcodec/g726.c
@@ -34,7 +34,7 @@
 /**
 * G.726 11bit float.
 * G.726 Standard uses rather odd 11bit floating point arithmentic for
-* numerous occasions. It's a mistery to me why they did it this way
+* numerous occasions. It's a mystery to me why they did it this way
 * instead of simply using 32bit integer arithmetic.
 */
 typedef struct Float11 {

libavcodec/h264_direct.c
@@ -86,7 +86,7 @@ static void fill_colmap(H264Context *h, int map[2][16+32], int list, int field,
 if (!interl)
 poc |= 3;
-else if( interl && (poc&3) == 3) //FIXME store all MBAFF references so this isnt needed
+else if( interl && (poc&3) == 3) // FIXME: store all MBAFF references so this is not needed
 poc= (poc&~3) + rfield + 1;
 for(j=start; j<end; j++){

libavcodec/indeo3data.h
@@ -235,7 +235,7 @@
 /**
 * Pack two delta values (a,b) into one 16bit word
-* according with endianess of the host machine.
+* according with endianness of the host machine.
 */
 #if HAVE_BIGENDIAN
 #define PD(a,b) (((a) << 8) + (b))
@@ -282,7 +282,7 @@ static const int16_t delta_tab_3_5[79] = { TAB_3_5 };
 /**
 * Pack four delta values (a,a,b,b) into one 32bit word
-* according with endianess of the host machine.
+* according with endianness of the host machine.
 */
 #if HAVE_BIGENDIAN
 #define PD(a,b) (((a) << 24) + ((a) << 16) + ((b) << 8) + (b))
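A quick arithmetic check of the big-endian PD() forms quoted in the two hunks above (the PD2/PD4 names below are made up for the example; both macros are simply called PD in the header, and the corresponding little-endian variants are not shown in this context):

    #define PD2(a, b) (((a) << 8) + (b))
    #define PD4(a, b) (((a) << 24) + ((a) << 16) + ((b) << 8) + (b))

    /* PD2(0x12, 0x34) == 0x1234 and PD4(0x12, 0x34) == 0x12123434,
     * i.e. the packed word carries the deltas as bytes a,b and a,a,b,b. */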

libavcodec/lagarith.c
@@ -198,8 +198,8 @@ static int lag_read_prob_header(lag_rac *rac, GetBitContext *gb)
 }
 /* Comment from reference source:
 * if (b & 0x80 == 0) { // order of operations is 'wrong'; it has been left this way
-* // since the compression change is negligable and fixing it
-* // breaks backwards compatibilty
+* // since the compression change is negligible and fixing it
+* // breaks backwards compatibility
 * b =- (signed int)b;
 * b &= 0xFF;
 * } else {

libavcodec/libfdk-aacenc.c
@@ -257,7 +257,7 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
 }
 if ((err = aacEncoder_SetParam(s->handle, AACENC_BANDWIDTH,
 avctx->cutoff)) != AACENC_OK) {
-av_log(avctx, AV_LOG_ERROR, "Unable to set the encoder bandwith to %d: %s\n",
+av_log(avctx, AV_LOG_ERROR, "Unable to set the encoder bandwidth to %d: %s\n",
 avctx->cutoff, aac_get_error(err));
 goto error;
 }

libavcodec/libtheoraenc.c
@@ -338,7 +338,7 @@ static int encode_frame(AVCodecContext* avc_context, AVPacket *pkt,
 memcpy(pkt->data, o_packet.packet, o_packet.bytes);
 // HACK: assumes no encoder delay, this is true until libtheora becomes
-// multithreaded (which will be disabled unless explictly requested)
+// multithreaded (which will be disabled unless explicitly requested)
 pkt->pts = pkt->dts = frame->pts;
 avc_context->coded_frame->key_frame = !(o_packet.granulepos & h->keyframe_mask);
 if (avc_context->coded_frame->key_frame)

libavcodec/mpeg4videoenc.c
@@ -89,7 +89,7 @@ static inline int get_block_rate(MpegEncContext * s, DCTELEM block[64], int bloc
 * @param[in,out] block MB coefficients, these will be restored
 * @param[in] dir ac prediction direction for each 8x8 block
 * @param[out] st scantable for each 8x8 block
-* @param[in] zigzag_last_index index refering to the last non zero coefficient in zigzag order
+* @param[in] zigzag_last_index index referring to the last non zero coefficient in zigzag order
 */
 static inline void restore_ac_coeffs(MpegEncContext * s, DCTELEM block[6][64], const int dir[6], uint8_t *st[6], const int zigzag_last_index[6])
 {
@@ -120,7 +120,7 @@ static inline void restore_ac_coeffs(MpegEncContext * s, DCTELEM block[6][64], c
 * @param[in,out] block MB coefficients, these will be updated if 1 is returned
 * @param[in] dir ac prediction direction for each 8x8 block
 * @param[out] st scantable for each 8x8 block
-* @param[out] zigzag_last_index index refering to the last non zero coefficient in zigzag order
+* @param[out] zigzag_last_index index referring to the last non zero coefficient in zigzag order
 */
 static inline int decide_ac_pred(MpegEncContext * s, DCTELEM block[6][64], const int dir[6], uint8_t *st[6], int zigzag_last_index[6])
 {

libavcodec/parser.c
@@ -96,7 +96,7 @@ void ff_fetch_timestamp(AVCodecParserContext *s, int off, int remove){
 if ( s->cur_offset + off >= s->cur_frame_offset[i]
 && (s->frame_offset < s->cur_frame_offset[i] ||
 (!s->frame_offset && !s->next_frame_offset)) // first field/frame
-//check is disabled because mpeg-ts doesnt send complete PES packets
+// check disabled since MPEG-TS does not send complete PES packets
 && /*s->next_frame_offset + off <*/ s->cur_frame_end[i]){
 s->dts= s->cur_frame_dts[i];
 s->pts= s->cur_frame_pts[i];

libavcodec/pngenc.c
@@ -367,7 +367,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
 int pass;
 for(pass = 0; pass < NB_PASSES; pass++) {
-/* NOTE: a pass is completely omited if no pixels would be
+/* NOTE: a pass is completely omitted if no pixels would be
 output */
 pass_row_size = ff_png_pass_row_size(pass, bits_per_pixel, avctx->width);
 if (pass_row_size > 0) {

libavcodec/ratecontrol.c
@@ -799,7 +799,7 @@ static int init_pass2(MpegEncContext *s)
 AVCodecContext *a= s->avctx;
 int i, toobig;
 double fps= 1/av_q2d(s->avctx->time_base);
-double complexity[5]={0,0,0,0,0}; // aproximate bits at quant=1
+double complexity[5]={0,0,0,0,0}; // approximate bits at quant=1
 uint64_t const_bits[5]={0,0,0,0,0}; // quantizer independent bits
 uint64_t all_const_bits;
 uint64_t all_available_bits= (uint64_t)(s->bit_rate*(double)rcc->num_entries/fps);

libavcodec/resample.c
@@ -350,7 +350,7 @@ int audio_resample(ReSampleContext *s, short *output, short *input, int nb_sampl
 if (av_audio_convert(s->convert_ctx[1], obuf, ostride,
 ibuf, istride, nb_samples1 * s->output_channels) < 0) {
 av_log(s->resample_context, AV_LOG_ERROR,
-"Audio sample format convertion failed\n");
+"Audio sample format conversion failed\n");
 return 0;
 }
 }

libavcodec/rv10.c
@@ -706,7 +706,7 @@ static int rv10_decode_frame(AVCodecContext *avctx,
 *got_frame = 1;
 ff_print_debug_info(s, pict);
 }
-s->current_picture_ptr= NULL; //so we can detect if frame_end wasnt called (find some nicer solution...)
+s->current_picture_ptr= NULL; // so we can detect if frame_end was not called (find some nicer solution...)
 }
 return avpkt->size;

libavcodec/shorten.c
@@ -528,7 +528,8 @@ static int shorten_decode_frame(AVCodecContext *avctx, void *data,
 /* get Rice code for residual decoding */
 if (cmd != FN_ZERO) {
 residual_size = get_ur_golomb_shorten(&s->gb, ENERGYSIZE);
-/* this is a hack as version 0 differed in defintion of get_sr_golomb_shorten */
+/* This is a hack as version 0 differed in the definition
+ * of get_sr_golomb_shorten(). */
 if (s->version == 0)
 residual_size--;
 }

libavcodec/thread.h
@@ -43,7 +43,7 @@ void ff_thread_flush(AVCodecContext *avctx);
 * Returns the next available frame in picture. *got_picture_ptr
 * will be 0 if none is available.
 * The return value on success is the size of the consumed packet for
-* compatiblity with avcodec_decode_video2(). This means the decoder
+* compatibility with avcodec_decode_video2(). This means the decoder
 * has to consume the full packet.
 *
 * Parameters are the same as avcodec_decode_video2().

libavcodec/vda_h264.c
@@ -281,7 +281,7 @@ int ff_vda_create_decoder(struct vda_context *vda_ctx,
 #endif
 /* Each VCL NAL in the bistream sent to the decoder
-* is preceeded by a 4 bytes length header.
+* is preceded by a 4 bytes length header.
 * Change the avcC atom header if needed, to signal headers of 4 bytes. */
 if (extradata_size >= 4 && (extradata[4] & 0x03) != 0x03) {
 uint8_t *rw_extradata;

libavcodec/vorbisdec.c
@@ -1233,7 +1233,7 @@ static int vorbis_floor1_decode(vorbis_context *vc,
 if (highroom < lowroom) {
 room = highroom * 2;
 } else {
-room = lowroom * 2; // SPEC mispelling
+room = lowroom * 2; // SPEC misspelling
 }
 if (val) {
 floor1_flag[low_neigh_offs] = 1;

libavcodec/vp8dsp.h
@@ -73,7 +73,7 @@ typedef struct VP8DSPContext {
 * second dimension: 0 if no vertical interpolation is needed;
 * 1 4-tap vertical interpolation filter (my & 1)
 * 2 6-tap vertical interpolation filter (!(my & 1))
-* third dimension: same as second dimention, for horizontal interpolation
+* third dimension: same as second dimension, for horizontal interpolation
 * so something like put_vp8_epel_pixels_tab[width>>3][2*!!my-(my&1)][2*!!mx-(mx&1)](..., mx, my)
 */
 vp8_mc_func put_vp8_epel_pixels_tab[3][3][3];
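A quick check of the index expression quoted in that context: 2*!!my - (my&1) evaluates to 0 for my == 0 (no vertical filtering), to 1 for any odd my (the 4-tap filter, my & 1) and to 2 for any even nonzero my (the 6-tap filter, !(my & 1)); the same expression over mx selects the horizontal filter in the third dimension, and width>>3 maps the 4, 8 and 16 pixel block widths to indices 0, 1 and 2.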

libavcodec/wmaprodec.c
@@ -533,7 +533,7 @@ static int decode_tilehdr(WMAProDecodeCtx *s)
 int c;
 /* Should never consume more than 3073 bits (256 iterations for the
-* while loop when always the minimum amount of 128 samples is substracted
+* while loop when always the minimum amount of 128 samples is subtracted
 * from missing samples in the 8 channel case).
 * 1 + BLOCK_MAX_SIZE * MAX_CHANNELS / BLOCK_MIN_SIZE * (MAX_CHANNELS + 4)
 */
@@ -1089,7 +1089,7 @@ static int decode_subframe(WMAProDecodeCtx *s)
 s->channels_for_cur_subframe = 0;
 for (i = 0; i < s->avctx->channels; i++) {
 const int cur_subframe = s->channel[i].cur_subframe;
-/** substract already processed samples */
+/** subtract already processed samples */
 total_samples -= s->channel[i].decoded_samples;
 /** and count if there are multiple subframes that match our profile */
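The 3073-bit bound mentioned in the first hunk above is consistent with the formula quoted next to it: it charges MAX_CHANNELS + 4 = 12 bits for each of the 256 while-loop iterations it mentions (256 being BLOCK_MAX_SIZE * MAX_CHANNELS / BLOCK_MIN_SIZE in the 8 channel case), and 256 * 12 + 1 = 3073.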

libavdevice/dv1394.h
@@ -186,7 +186,7 @@
 where copy_DV_frame() reads or writes on the dv1394 file descriptor
 (read/write mode) or copies data to/from the mmap ringbuffer and
 then calls ioctl(DV1394_SUBMIT_FRAMES) to notify dv1394 that new
-frames are availble (mmap mode).
+frames are available (mmap mode).
 reset_dv1394() is called in the event of a buffer
 underflow/overflow or a halt in the DV stream (e.g. due to a 1394

libavformat/avformat.h
@@ -1532,7 +1532,7 @@ enum AVCodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
 * @ingroup libavf
 * @{
 *
-* Miscelaneous utility functions related to both muxing and demuxing
+* Miscellaneous utility functions related to both muxing and demuxing
 * (or neither).
 */

libavformat/aviobuf.c
@@ -368,7 +368,7 @@ static void fill_buffer(AVIOContext *s)
 int max_buffer_size = s->max_packet_size ?
 s->max_packet_size : IO_BUFFER_SIZE;
-/* can't fill the buffer without read_packet, just set EOF if appropiate */
+/* can't fill the buffer without read_packet, just set EOF if appropriate */
 if (!s->read_packet && s->buf_ptr >= s->buf_end)
 s->eof_reached = 1;

libavformat/dvenc.c
@@ -47,9 +47,9 @@ struct DVMuxContext {
 AVFifoBuffer *audio_data[2]; /* FIFO for storing excessive amounts of PCM */
 int frames; /* current frame number */
 int64_t start_time; /* recording start time */
-int has_audio; /* frame under contruction has audio */
-int has_video; /* frame under contruction has video */
-uint8_t frame_buf[DV_MAX_FRAME_SIZE]; /* frame under contruction */
+int has_audio; /* frame under construction has audio */
+int has_video; /* frame under construction has video */
+uint8_t frame_buf[DV_MAX_FRAME_SIZE]; /* frame under construction */
 };
 static const int dv_aaux_packs_dist[12][9] = {

libavformat/hls.c
@@ -42,7 +42,7 @@
 * An apple http stream consists of a playlist with media segment files,
 * played sequentially. There may be several playlists with the same
 * video content, in different bandwidth variants, that are played in
-* parallel (preferrably only one bandwidth variant at a time). In this case,
+* parallel (preferably only one bandwidth variant at a time). In this case,
 * the user supplied the url to a main playlist that only lists the variant
 * playlists.
 *

libavformat/hlsproto.c
@@ -36,7 +36,7 @@
 * An apple http stream consists of a playlist with media segment files,
 * played sequentially. There may be several playlists with the same
 * video content, in different bandwidth variants, that are played in
-* parallel (preferrably only one bandwidth variant at a time). In this case,
+* parallel (preferably only one bandwidth variant at a time). In this case,
 * the user supplied the url to a main playlist that only lists the variant
 * playlists.
 *

libavformat/http.h
@@ -40,7 +40,7 @@ void ff_http_init_auth_state(URLContext *dest, const URLContext *src);
 *
 * @param h pointer to the ressource
 * @param uri uri used to perform the request
-* @return a negative value if an error condition occured, 0
+* @return a negative value if an error condition occurred, 0
 * otherwise
 */
 int ff_http_do_new_request(URLContext *h, const char *uri);

libavformat/rtpdec_jpeg.c
@@ -370,7 +370,7 @@ static int jpeg_parse_packet(AVFormatContext *ctx, PayloadContext *jpeg,
 /* Prepare the JPEG packet. */
 if ((ret = ff_rtp_finalize_packet(pkt, &jpeg->frame, st->index)) < 0) {
 av_log(ctx, AV_LOG_ERROR,
-"Error occured when getting frame buffer.\n");
+"Error occurred when getting frame buffer.\n");
 return ret;
 }

libavformat/smoothstreamingenc.c
@@ -51,7 +51,7 @@ typedef struct {
 char dirname[1024];
 uint8_t iobuf[32768];
 URLContext *out; // Current output stream where all output is written
-URLContext *out2; // Auxillary output stream where all output also is written
+URLContext *out2; // Auxiliary output stream where all output is also written
 URLContext *tail_out; // The actual main output stream, if we're currently seeked back to write elsewhere
 int64_t tail_pos, cur_pos, cur_start_pos;
 int packets_written;

libavformat/spdifenc.c
@@ -339,7 +339,7 @@ static int spdif_header_mpeg(AVFormatContext *s, AVPacket *pkt)
 ctx->data_type = mpeg_data_type [version & 1][layer];
 ctx->pkt_offset = spdif_mpeg_pkt_offset[version & 1][layer];
 }
-// TODO Data type dependant info (normal/karaoke, dynamic range control)
+// TODO Data type dependent info (normal/karaoke, dynamic range control)
 return 0;
 }

libavformat/wtv.c
@@ -221,7 +221,7 @@ static AVIOContext * wtvfile_open_sector(int first_sector, uint64_t length, int
 }
 wf->length = length;
-/* seek to intial sector */
+/* seek to initial sector */
 wf->position = 0;
 if (avio_seek(s->pb, (int64_t)wf->sectors[0] << WTV_SECTOR_BITS, SEEK_SET) < 0) {
 av_free(wf->sectors);

libavformat/xmv.c
@@ -298,7 +298,7 @@ static int xmv_process_packet_header(AVFormatContext *s)
 * short for every audio track. But as playing around with XMV files with
 * ADPCM audio showed, taking the extra 4 bytes from the audio data gives
 * you either completely distorted audio or click (when skipping the
-* remaining 68 bytes of the ADPCM block). Substracting 4 bytes for every
+* remaining 68 bytes of the ADPCM block). Subtracting 4 bytes for every
 * audio track from the video data works at least for the audio. Probably
 * some alignment thing?
 * The video data has (always?) lots of padding, so it should work out...

libavresample/avresample-test.c
@@ -100,7 +100,7 @@ static void audiogen(AVLFG *rnd, void **data, enum AVSampleFormat sample_fmt,
 a += M_PI * 1000.0 * 2.0 / sample_rate;
 }
-/* 1 second of varing frequency between 100 and 10000 Hz */
+/* 1 second of varying frequency between 100 and 10000 Hz */
 a = 0;
 for (i = 0; i < 1 * sample_rate && k < nb_samples; i++, k++) {
 v = sin(a) * 0.30;

libswscale/ppc/yuv2yuv_altivec.c
@@ -1,5 +1,5 @@
 /*
-* AltiVec-enhanced yuv-to-yuv convertion routines.
+* AltiVec-enhanced yuv-to-yuv conversion routines.
 *
 * Copyright (C) 2004 Romain Dolbeau <romain@dolbeau.org>
 * based on the equivalent C code in swscale.c

libswscale/swscale.c
@@ -163,7 +163,7 @@ static void hScale8To19_c(SwsContext *c, int16_t *_dst, int dstW,
 }
 }
-// FIXME all pal and rgb srcFormats could do this convertion as well
+// FIXME all pal and rgb srcFormats could do this conversion as well
 // FIXME all scalers more complex than bilinear could do half of this transform
 static void chrRangeToJpeg_c(int16_t *dstU, int16_t *dstV, int width)
 {

tests/audiogen.c
@@ -189,7 +189,7 @@ int main(int argc, char **argv)
 a += (1000 * FRAC_ONE) / sample_rate;
 }
-/* 1 second of varing frequency between 100 and 10000 Hz */
+/* 1 second of varying frequency between 100 and 10000 Hz */
 a = 0;
 for (i = 0; i < 1 * sample_rate; i++) {
 v = (int_cos(a) * 10000) >> FRAC_BITS;

tools/patcheck
@@ -19,7 +19,7 @@ echo This tool is intended to help a human check/review patches it is very far f
 echo being free of false positives and negatives, its output are just hints of what
 echo may or may not be bad. When you use it and it misses something or detects
 echo something wrong, fix it and send a patch to the libav-devel mailing list.
-echo License:GPL Autor: Michael Niedermayer
+echo License:GPL Author: Michael Niedermayer
 ERE_PRITYP='(unsigned *|)(char|short|long|int|long *int|short *int|void|float|double|(u|)int(8|16|32|64)_t)'
 ERE_TYPES='(const|static|av_cold|inline| *)*('$ERE_PRITYP'|[a-zA-Z][a-zA-Z0-9_]*)[* ]{1,}[a-zA-Z][a-zA-Z0-9_]*'
@@ -158,7 +158,7 @@ cat $* | tr '\n' '@' | $EGREP --color=always -o '[^a-zA-Z0-9_]([a-zA-Z0-9_]*) *=
 cat $TMP | tr '@' '\n'
-# doesnt work
+# does not work
 #cat $* | tr '\n' '@' | $EGREP -o '[^a-zA-Z_0-9]([a-zA-Z][a-zA-Z_0-9]*) *=[^=].*\1' | $EGREP -o '[^a-zA-Z_0-9]([a-zA-Z][a-zA-Z_0-9]*) *=[^=].*\1 *=[^=]' >$TMP && printf "\nPossibly written 2x before read\n"
 #cat $TMP | tr '@' '\n'
