@@ -34,11 +34,11 @@
 static av_always_inline av_const int FASTDIV(int a, int b)
 {
     int r, t;
-    __asm__ volatile ("cmp     %3, #2               \n\t"
-                      "ldr     %1, [%4, %3, lsl #2] \n\t"
-                      "lsrle   %0, %2, #1           \n\t"
-                      "smmulgt %0, %1, %2           \n\t"
-                      : "=&r"(r), "=&r"(t) : "r"(a), "r"(b), "r"(ff_inverse));
+    __asm__ ("cmp     %3, #2               \n\t"
+             "ldr     %1, [%4, %3, lsl #2] \n\t"
+             "lsrle   %0, %2, #1           \n\t"
+             "smmulgt %0, %1, %2           \n\t"
+             : "=&r"(r), "=&r"(t) : "r"(a), "r"(b), "r"(ff_inverse));
     return r;
 }
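
Note: the hunk above only drops the volatile qualifier; the instruction sequence is unchanged. Since the asm has no side effects and all of its results go through declared outputs, volatile is unnecessary and removing it lets the compiler schedule or eliminate the statement normally. For readers unfamiliar with the trick, FASTDIV divides without a hardware divider: the cmp/lsrle pair handles divisors of at most 2 with a plain shift, and smmulgt otherwise keeps the high 32 bits of a * ff_inverse[b]. The sketch below is a hedged, portable rendering of that idea; the helper name fastdiv_c is invented here, and the table size and rounding are assumed to mirror FFmpeg's generic fallback rather than taken from this patch.

    #include <stdint.h>

    /* Sketch only (not part of the patch): reciprocal-multiply division.
     * Assumes ff_inverse[b] holds 2^32/b rounded up, as in FFmpeg's table,
     * and that inputs stay within the ranges the generic FASTDIV accepts. */
    extern const uint32_t ff_inverse[257];   /* table size assumed */

    static inline int fastdiv_c(int a, int b)
    {
        return (uint32_t)(((uint64_t)a * ff_inverse[b]) >> 32);
    }
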
@@ -46,7 +46,7 @@ static av_always_inline av_const int FASTDIV(int a, int b)
 static av_always_inline av_const uint8_t av_clip_uint8_arm(int a)
 {
     unsigned x;
-    __asm__ volatile ("usat %0, #8, %1" : "=r"(x) : "r"(a));
+    __asm__ ("usat %0, #8, %1" : "=r"(x) : "r"(a));
     return x;
 }
@@ -54,7 +54,7 @@ static av_always_inline av_const uint8_t av_clip_uint8_arm(int a)
 static av_always_inline av_const uint8_t av_clip_int8_arm(int a)
 {
     unsigned x;
-    __asm__ volatile ("ssat %0, #8, %1" : "=r"(x) : "r"(a));
+    __asm__ ("ssat %0, #8, %1" : "=r"(x) : "r"(a));
     return x;
 }
@@ -62,7 +62,7 @@ static av_always_inline av_const uint8_t av_clip_int8_arm(int a)
 static av_always_inline av_const uint16_t av_clip_uint16_arm(int a)
 {
     unsigned x;
-    __asm__ volatile ("usat %0, #16, %1" : "=r"(x) : "r"(a));
+    __asm__ ("usat %0, #16, %1" : "=r"(x) : "r"(a));
     return x;
 }
@@ -70,7 +70,7 @@ static av_always_inline av_const uint16_t av_clip_uint16_arm(int a)
 static av_always_inline av_const int16_t av_clip_int16_arm(int a)
 {
     int x;
-    __asm__ volatile ("ssat %0, #16, %1" : "=r"(x) : "r"(a));
+    __asm__ ("ssat %0, #16, %1" : "=r"(x) : "r"(a));
     return x;
 }
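
Note: each of the clip helpers above is a single saturating instruction; USAT clamps a signed input into an unsigned n-bit range, SSAT into a signed n-bit range. The plain C sketch below shows the intended result for one unsigned and one signed width; the helper names clip_uint8_c and clip_int16_c are invented for illustration.

    #include <stdint.h>

    /* Sketch only: what USAT #8 and SSAT #16 are expected to produce. */
    static inline uint8_t clip_uint8_c(int a)   /* usat %0, #8, %1  */
    {
        if (a < 0)   return 0;
        if (a > 255) return 255;
        return a;
    }

    static inline int16_t clip_int16_c(int a)   /* ssat %0, #16, %1 */
    {
        if (a < -32768) return -32768;
        if (a >  32767) return  32767;
        return a;
    }
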
@@ -80,8 +80,8 @@ static av_always_inline av_const int16_t av_clip_int16_arm(int a)
 static av_always_inline av_const int FASTDIV(int a, int b)
 {
     int r, t;
-    __asm__ volatile ("umull %1, %0, %2, %3"
-                      : "=&r"(r), "=&r"(t) : "r"(a), "r"(ff_inverse[b]));
+    __asm__ ("umull %1, %0, %2, %3"
+             : "=&r"(r), "=&r"(t) : "r"(a), "r"(ff_inverse[b]));
     return r;
 }
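
Note: this pre-ARMv6 FASTDIV variant computes the same thing as the fastdiv_c sketch above, only without the small-divisor shortcut: UMULL forms the full 64-bit unsigned product of a and ff_inverse[b] in a register pair, and only the high word is kept as the quotient. As a rough sanity check with the usual table contents (values assumed, not from this patch), FASTDIV(1000, 3) is expected to come out as 333, the truncated quotient.
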
@@ -91,11 +91,11 @@ static av_always_inline av_const int FASTDIV(int a, int b)
 static av_always_inline av_const int32_t av_clipl_int32_arm(int64_t a)
 {
     int x, y;
-    __asm__ volatile ("adds  %1, %R2, %Q2, lsr #31 \n\t"
-                      "mvnne %1, #1<<31            \n\t"
-                      "moveq %0, %Q2               \n\t"
-                      "eorne %0, %1, %R2, asr #31  \n\t"
-                      : "=r"(x), "=&r"(y) : "r"(a));
+    __asm__ ("adds  %1, %R2, %Q2, lsr #31 \n\t"
+             "mvnne %1, #1<<31            \n\t"
+             "moveq %0, %Q2               \n\t"
+             "eorne %0, %1, %R2, asr #31  \n\t"
+             : "=r"(x), "=&r"(y) : "r"(a));
     return x;
 }
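
Note: av_clipl_int32_arm saturates a 64-bit value to the int32_t range: adds checks whether the high word is just the sign extension of bit 31 of the low word, and when it is not, the mvnne/eorne pair builds INT32_MAX or INT32_MIN from the sign of the high half. Below is a hedged C sketch of the intended behaviour; the name clipl_int32_c is invented here.

    #include <stdint.h>

    /* Sketch only: the saturation the four-instruction sequence aims for. */
    static inline int32_t clipl_int32_c(int64_t a)
    {
        if (a < INT32_MIN) return INT32_MIN;
        if (a > INT32_MAX) return INT32_MAX;
        return (int32_t)a;
    }
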