/*
 * simple math operations
 * Copyright (c) 2001, 2002 Fabrice Bellard
 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_MATHOPS_H
#define AVCODEC_MATHOPS_H

#include <stdint.h>

#include "libavutil/common.h"
#include "libavutil/reverse.h"
#include "config.h"

#define MAX_NEG_CROP 1024
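
/* Lookup tables shared across libavcodec (defined outside this header):
 * ff_inverse feeds FASTDIV below, ff_sqrt_tab feeds ff_sqrt, ff_crop_tab
 * clamps values to 0..255 with MAX_NEG_CROP of headroom on each side, and
 * the ff_zigzag_* tables are coefficient scan orders. */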
extern const uint32_t ff_inverse[257];
extern const uint8_t ff_sqrt_tab[256];
extern const uint8_t ff_crop_tab[256 + 2 * MAX_NEG_CROP];
extern const uint8_t ff_zigzag_direct[64];
extern const uint8_t ff_zigzag_scan[16+1];
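
/* Architecture-specific headers may pre-define any of the macros and inline
 * functions below; everything that follows is only a generic fallback. */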
#if ARCH_ARM
#   include "arm/mathops.h"
#elif ARCH_AVR32
#   include "avr32/mathops.h"
#elif ARCH_MIPS
#   include "mips/mathops.h"
#elif ARCH_PPC
#   include "ppc/mathops.h"
#elif ARCH_X86
#   include "x86/mathops.h"
#endif

/* generic implementation */
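
/* MUL64: full-precision signed 32x32 -> 64 bit multiply.
 * MULL:  the same product arithmetically shifted right by s bits
 *        (i.e. a fixed-point multiply with s fractional bits). */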
#ifndef MUL64
#   define MUL64(a,b) ((int64_t)(a) * (int64_t)(b))
#endif

#ifndef MULL
#   define MULL(a,b,s) (MUL64(a, b) >> (s))
#endif
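
/* MULH / UMULH: high 32 bits of the signed / unsigned 32x32 -> 64 product. */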
#ifndef MULH
static av_always_inline int MULH(int a, int b){
    return MUL64(a, b) >> 32;
}
#endif

#ifndef UMULH
static av_always_inline unsigned UMULH(unsigned a, unsigned b){
    return ((uint64_t)(a) * (uint64_t)(b)) >> 32;
}
#endif
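
/* 64-bit multiply-accumulate and multiply-subtract: d += a*b and d -= a*b. */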
#ifndef MAC64
#   define MAC64(d, a, b) ((d) += MUL64(a, b))
#endif

#ifndef MLS64
#   define MLS64(d, a, b) ((d) -= MUL64(a, b))
#endif

/* signed 16x16 -> 32 multiply add accumulate */
#ifndef MAC16
#   define MAC16(rt, ra, rb) rt += (ra) * (rb)
#endif

/* signed 16x16 -> 32 multiply */
#ifndef MUL16
#   define MUL16(ra, rb) ((ra) * (rb))
#endif
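
/* signed 16x16 -> 32 multiply subtract */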
#ifndef MLS16
#   define MLS16(rt, ra, rb) ((rt) -= (ra) * (rb))
#endif

/* median of 3 */
#ifndef mid_pred
#define mid_pred mid_pred
static inline av_const int mid_pred(int a, int b, int c)
{
#if 0
    int t= (a-b)&((a-b)>>31);
    a-=t;
    b+=t;
    b-= (b-c)&((b-c)>>31);
    b+= (a-b)&((a-b)>>31);

    return b;
#else
    if(a>b){
        if(c>b){
            if(c>a) b=a;
            else    b=c;
        }
    }else{
        if(b>c){
            if(c>a) b=c;
            else    b=a;
        }
    }
    return b;
#endif
}
#endif
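
/* median of 4 (mean of the two central values) */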
#ifndef median4
#define median4 median4
static inline av_const int median4(int a, int b, int c, int d)
{
    if (a < b) {
        if (c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
        else       return (FFMIN(b, c) + FFMAX(a, d)) / 2;
    } else {
        if (c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
        else       return (FFMIN(a, c) + FFMAX(b, d)) / 2;
    }
}
#endif
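
/* sign-extend the low 'bits' bits of val to a full int,
 * e.g. sign_extend(0xFF, 8) == -1 */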
#ifndef sign_extend
static inline av_const int sign_extend(int val, unsigned bits)
{
    unsigned shift = 8 * sizeof(int) - bits;
    union { unsigned u; int s; } v = { (unsigned) val << shift };
    return v.s >> shift;
}
#endif
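
/* keep only the low 'bits' bits of val, clearing everything above,
 * e.g. zero_extend(0x1FF, 8) == 0xFF */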
#ifndef zero_extend
static inline av_const unsigned zero_extend(unsigned val, unsigned bits)
{
    return (val << ((8 * sizeof(int)) - bits)) >> ((8 * sizeof(int)) - bits);
}
#endif
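
/* if y < x, copy y, b and d over x, a and c respectively */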
#ifndef COPY3_IF_LT
#define COPY3_IF_LT(x, y, a, b, c, d)\
if ((y) < (x)) {\
    (x) = (y);\
    (a) = (b);\
    (c) = (d);\
}
#endif
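
/* branchless absolute value: mask receives the sign mask of level (all ones
 * if negative, zero otherwise) and level is replaced by its absolute value,
 * e.g. int m, v = -5; MASK_ABS(m, v); leaves m == -1 and v == 5 */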
#ifndef MASK_ABS
#define MASK_ABS(mask, level) do {        \
        mask  = level >> 31;              \
        level = (level ^ mask) - mask;    \
    } while (0)
#endif
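
/* shift a right by (32 - s) bits, arithmetically (NEG_SSR32)
 * or logically (NEG_USR32) */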
#ifndef NEG_SSR32
#   define NEG_SSR32(a,s) ((( int32_t)(a))>>(32-(s)))
#endif

#ifndef NEG_USR32
#   define NEG_USR32(a,s) (((uint32_t)(a))>>(32-(s)))
#endif
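
/* pack 2 or 4 byte/halfword values into one integer, ordered so the result
 * matches native byte order in memory; the *_S* variants mask signed inputs */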
#if HAVE_BIGENDIAN
#   ifndef PACK_2U8
#       define PACK_2U8(a,b)     (((a) <<  8) | (b))
#   endif
#   ifndef PACK_4U8
#       define PACK_4U8(a,b,c,d) (((a) << 24) | ((b) << 16) | ((c) <<  8) | (d))
#   endif
#   ifndef PACK_2U16
#       define PACK_2U16(a,b)    (((a) << 16) | (b))
#   endif
#else
#   ifndef PACK_2U8
#       define PACK_2U8(a,b)     (((b) <<  8) | (a))
#   endif
#   ifndef PACK_4U8
#       define PACK_4U8(a,b,c,d) (((d) << 24) | ((c) << 16) | ((b) <<  8) | (a))
#   endif
#   ifndef PACK_2U16
#       define PACK_2U16(a,b)    (((b) << 16) | (a))
#   endif
#endif

#ifndef PACK_2S8
#   define PACK_2S8(a,b)     PACK_2U8((a)&255, (b)&255)
#endif
#ifndef PACK_4S8
#   define PACK_4S8(a,b,c,d) PACK_4U8((a)&255, (b)&255, (c)&255, (d)&255)
#endif
#ifndef PACK_2S16
#   define PACK_2S16(a,b)    PACK_2U16((a)&0xffff, (b)&0xffff)
#endif
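
/* fast unsigned division: multiply by a precomputed reciprocal (roughly
 * 2^32 / b) from ff_inverse[] and keep the high 32 bits, avoiding a divide */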
#ifndef FASTDIV
#   define FASTDIV(a,b) ((uint32_t)((((uint64_t)a) * ff_inverse[b]) >> 32))
#endif /* FASTDIV */
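
/* table-assisted integer square root */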
#ifndef ff_sqrt
#define ff_sqrt ff_sqrt
static inline av_const unsigned int ff_sqrt(unsigned int a)
{
    unsigned int b;

    if (a < 255) return (ff_sqrt_tab[a + 1] - 1) >> 4;
    else if (a < (1 << 12)) b = ff_sqrt_tab[a >> 4] >> 2;
#if !CONFIG_SMALL
    else if (a < (1 << 14)) b = ff_sqrt_tab[a >> 6] >> 1;
    else if (a < (1 << 16)) b = ff_sqrt_tab[a >> 8]     ;
#endif
    else {
        int s = av_log2_16bit(a >> 16) >> 1;
        unsigned int c = a >> (s + 2);
        b = ff_sqrt_tab[c >> (s + 8)];
        b = FASTDIV(c,b) + (b << s);
    }

    return b - (a < b * b);
}
#endif
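
/* square of a single-precision float */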
static inline av_const float ff_sqrf(float a)
{
    return a*a;
}
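
/* reinterpret the bits of an unsigned byte as a signed byte through a union,
 * avoiding the implementation-defined behaviour of a plain cast */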
static inline int8_t ff_u8_to_s8(uint8_t a)
{
    union {
        uint8_t u8;
        int8_t  s8;
    } b;
    b.u8 = a;
    return b.s8;
}
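
/* reverse the bit order of a 32-bit word using the ff_reverse byte table */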
static av_always_inline uint32_t bitswap_32(uint32_t x)
{
    return (uint32_t)ff_reverse[ x        & 0xFF] << 24 |
           (uint32_t)ff_reverse[(x >>  8) & 0xFF] << 16 |
           (uint32_t)ff_reverse[(x >> 16) & 0xFF] <<  8 |
           (uint32_t)ff_reverse[ x >> 24];
}

#endif /* AVCODEC_MATHOPS_H */