diff --git a/libavcodec/arm/mathops.h b/libavcodec/arm/mathops.h
index 858e73d677..3a7a1f3ee9 100644
--- a/libavcodec/arm/mathops.h
+++ b/libavcodec/arm/mathops.h
@@ -97,7 +97,7 @@ static inline av_const int MUL16(int ra, int rb)
 static inline av_const int mid_pred(int a, int b, int c)
 {
     int m;
-    __asm__ volatile (
+    __asm__ (
         "mov   %0, %2  \n\t"
         "cmp   %1, %2  \n\t"
         "movgt %0, %1  \n\t"
diff --git a/libavcodec/arm/vp56_arith.h b/libavcodec/arm/vp56_arith.h
index 8785a77860..9ce3fd0d91 100644
--- a/libavcodec/arm/vp56_arith.h
+++ b/libavcodec/arm/vp56_arith.h
@@ -31,25 +31,25 @@ static inline int vp56_rac_get_prob_armv6(VP56RangeCoder *c, int pr)
     unsigned high      = c->high << shift;
     unsigned bit;
 
-    __asm__ volatile ("adds    %3,  %3,  %0           \n"
-                      "cmpcs   %7,  %4                \n"
-                      "ldrcsh  %2,  [%4], #2          \n"
-                      "rsb     %0,  %6,  #256         \n"
-                      "smlabb  %0,  %5,  %6,  %0      \n"
-                      "rev16cs %2,  %2                \n"
-                      "orrcs   %1,  %1,  %2,  lsl %3  \n"
-                      "subcs   %3,  %3,  #16          \n"
-                      "lsr     %0,  %0,  #8           \n"
-                      "cmp     %1,  %0,  lsl #16      \n"
-                      "subge   %1,  %1,  %0,  lsl #16 \n"
-                      "subge   %0,  %5,  %0           \n"
-                      "movge   %2,  #1                \n"
-                      "movlt   %2,  #0                \n"
-                      : "=&r"(c->high), "=&r"(c->code_word), "=&r"(bit),
-                        "+&r"(c->bits), "+&r"(c->buffer)
-                      : "r"(high), "r"(pr), "r"(c->end - 1),
-                        "0"(shift), "1"(code_word)
-                      : "cc");
+    __asm__ ("adds    %3,  %3,  %0           \n"
+             "cmpcs   %7,  %4                \n"
+             "ldrcsh  %2,  [%4], #2          \n"
+             "rsb     %0,  %6,  #256         \n"
+             "smlabb  %0,  %5,  %6,  %0      \n"
+             "rev16cs %2,  %2                \n"
+             "orrcs   %1,  %1,  %2,  lsl %3  \n"
+             "subcs   %3,  %3,  #16          \n"
+             "lsr     %0,  %0,  #8           \n"
+             "cmp     %1,  %0,  lsl #16      \n"
+             "subge   %1,  %1,  %0,  lsl #16 \n"
+             "subge   %0,  %5,  %0           \n"
+             "movge   %2,  #1                \n"
+             "movlt   %2,  #0                \n"
+             : "=&r"(c->high), "=&r"(c->code_word), "=&r"(bit),
+               "+&r"(c->bits), "+&r"(c->buffer)
+             : "r"(high), "r"(pr), "r"(c->end - 1),
+               "0"(shift), "1"(code_word)
+             : "cc");
 
     return bit;
 }
@@ -63,20 +63,20 @@ static inline int vp56_rac_get_prob_branchy_armv6(VP56RangeCoder *c, int pr)
     unsigned low;
     unsigned tmp;
 
-    __asm__ volatile ("adds    %3,  %3,  %0           \n"
-                      "cmpcs   %7,  %4                \n"
-                      "ldrcsh  %2,  [%4], #2          \n"
-                      "rsb     %0,  %6,  #256         \n"
-                      "smlabb  %0,  %5,  %6,  %0      \n"
-                      "rev16cs %2,  %2                \n"
-                      "orrcs   %1,  %1,  %2,  lsl %3  \n"
-                      "subcs   %3,  %3,  #16          \n"
-                      "lsr     %0,  %0,  #8           \n"
-                      "lsl     %2,  %0,  #16          \n"
-                      : "=&r"(low), "+&r"(code_word), "=&r"(tmp),
-                        "+&r"(c->bits), "+&r"(c->buffer)
-                      : "r"(high), "r"(pr), "r"(c->end - 1), "0"(shift)
-                      : "cc");
+    __asm__ ("adds    %3,  %3,  %0           \n"
+             "cmpcs   %7,  %4                \n"
+             "ldrcsh  %2,  [%4], #2          \n"
+             "rsb     %0,  %6,  #256         \n"
+             "smlabb  %0,  %5,  %6,  %0      \n"
+             "rev16cs %2,  %2                \n"
+             "orrcs   %1,  %1,  %2,  lsl %3  \n"
+             "subcs   %3,  %3,  #16          \n"
+             "lsr     %0,  %0,  #8           \n"
+             "lsl     %2,  %0,  #16          \n"
+             : "=&r"(low), "+&r"(code_word), "=&r"(tmp),
+               "+&r"(c->bits), "+&r"(c->buffer)
+             : "r"(high), "r"(pr), "r"(c->end - 1), "0"(shift)
+             : "cc");
 
     if (code_word >= tmp) {
         c->high      = high - low;