From 42d32cf53cd0aa0da7cf7a89c8b46adaf761936c Mon Sep 17 00:00:00 2001
From: Janne Grunau <janne-libav@jannau.net>
Date: Sat, 24 Sep 2011 13:05:55 +0200
Subject: [PATCH 1/5] rv34: NEON optimised inverse transform functions

Signed-off-by: Mans Rullgard <mans@mansr.com>
---
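Note (not part of the commit): the asm below follows the scalar RV34 4x4
inverse transform, a butterfly with coefficients 13, 7 and 17 applied
first to rows, then to columns. A rough C sketch of the rounding variant,
reconstructed from the z0..z3 comments in rv34dsp_neon.S (the name
rv34_inv_transform_ref and the DCTELEM typedef are illustrative only):

    #include <stdint.h>

    typedef int16_t DCTELEM;    /* assumption: 16-bit coefficients */

    static void rv34_inv_transform_ref(DCTELEM *block)
    {
        int temp[16], i;

        /* row pass: z0/z1 use 13, z2/z3 use 7 and 17 */
        for (i = 0; i < 4; i++) {
            const int z0 = 13 * (block[i + 8*0] +      block[i + 8*2]);
            const int z1 = 13 * (block[i + 8*0] -      block[i + 8*2]);
            const int z2 =  7 *  block[i + 8*1] - 17 * block[i + 8*3];
            const int z3 = 17 *  block[i + 8*1] +  7 * block[i + 8*3];
            temp[4*i + 0] = z0 + z3;
            temp[4*i + 1] = z1 + z2;
            temp[4*i + 2] = z1 - z2;
            temp[4*i + 3] = z0 - z3;
        }
        /* column pass with rounding; the vrshrn #10 in the asm folds in
         * the +0x200 used here */
        for (i = 0; i < 4; i++) {
            const int z0 = 13 * (temp[4*0 + i] +      temp[4*2 + i]) + 0x200;
            const int z1 = 13 * (temp[4*0 + i] -      temp[4*2 + i]) + 0x200;
            const int z2 =  7 *  temp[4*1 + i] - 17 * temp[4*3 + i];
            const int z3 = 17 *  temp[4*1 + i] +  7 * temp[4*3 + i];
            block[i*8 + 0] = (z0 + z3) >> 10;
            block[i*8 + 1] = (z1 + z2) >> 10;
            block[i*8 + 2] = (z1 - z2) >> 10;
            block[i*8 + 3] = (z0 - z3) >> 10;
        }
    }

The _noround variant instead scales the column results by 3 and shifts by
11 without rounding, matching the "*3 >> 11" comments in the asm.
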
 libavcodec/arm/Makefile            |   6 ++
 libavcodec/arm/rv34dsp_init_neon.c |  33 +++++++++
 libavcodec/arm/rv34dsp_neon.S      | 109 +++++++++++++++++++++++++++++
 libavcodec/rv34dsp.c               |   3 +
 libavcodec/rv34dsp.h               |   2 +
 5 files changed, 153 insertions(+)
 create mode 100644 libavcodec/arm/rv34dsp_init_neon.c
 create mode 100644 libavcodec/arm/rv34dsp_neon.S

diff --git a/libavcodec/arm/Makefile b/libavcodec/arm/Makefile
index 3374f0e2bd..9199faea3b 100644
--- a/libavcodec/arm/Makefile
+++ b/libavcodec/arm/Makefile
@@ -62,6 +62,12 @@ NEON-OBJS-$(CONFIG_AC3DSP)             += arm/ac3dsp_neon.o
 NEON-OBJS-$(CONFIG_DCA_DECODER)        += arm/dcadsp_neon.o             \
                                           arm/synth_filter_neon.o       \
 
+NEON-OBJS-$(CONFIG_RV30_DECODER)       += arm/rv34dsp_init_neon.o       \
+                                          arm/rv34dsp_neon.o            \
+
+NEON-OBJS-$(CONFIG_RV40_DECODER)       += arm/rv34dsp_init_neon.o       \
+                                          arm/rv34dsp_neon.o            \
+
 NEON-OBJS-$(CONFIG_VP3_DECODER)        += arm/vp3dsp_neon.o
 
 NEON-OBJS-$(CONFIG_VP5_DECODER)        += arm/vp56dsp_neon.o            \
diff --git a/libavcodec/arm/rv34dsp_init_neon.c b/libavcodec/arm/rv34dsp_init_neon.c
new file mode 100644
index 0000000000..9a09fde7a9
--- /dev/null
+++ b/libavcodec/arm/rv34dsp_init_neon.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2011 Janne Grunau <janne-libav@jannau.net>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "libavcodec/avcodec.h"
+#include "libavcodec/rv34dsp.h"
+
+void ff_rv34_inv_transform_neon(DCTELEM *block);
+void ff_rv34_inv_transform_noround_neon(DCTELEM *block);
+
+void ff_rv34dsp_init_neon(RV34DSPContext *c, DSPContext* dsp)
+{
+    c->rv34_inv_transform_tab[0] = ff_rv34_inv_transform_neon;
+    c->rv34_inv_transform_tab[1] = ff_rv34_inv_transform_noround_neon;
+}
diff --git a/libavcodec/arm/rv34dsp_neon.S b/libavcodec/arm/rv34dsp_neon.S
new file mode 100644
index 0000000000..f700f5c321
--- /dev/null
+++ b/libavcodec/arm/rv34dsp_neon.S
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2011 Janne Grunau <janne-libav@jannau.net>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "asm.S"
+
+.macro rv34_inv_transform
+        mov             r1,  #16
+        vld1.16         {d28}, [r0,:64], r1     @ block[i+8*0]
+        vld1.16         {d29}, [r0,:64], r1     @ block[i+8*1]
+        vld1.16         {d30}, [r0,:64], r1     @ block[i+8*2]
+        vld1.16         {d31}, [r0,:64], r1     @ block[i+8*3]
+        vmov.s16        d0,  #13
+        vshll.s16       q12, d29, #3
+        vshll.s16       q13, d29, #4
+        vshll.s16       q9,  d31, #3
+        vshll.s16       q1,  d31, #4
+        vmull.s16       q10, d28, d0
+        vmlal.s16       q10, d30, d0
+        vmull.s16       q11, d28, d0
+        vmlsl.s16       q11, d30, d0
+        vsubw.s16       q12, q12, d29   @ z2 = block[i+8*1]*7
+        vaddw.s16       q13, q13, d29   @ z3 = block[i+8*1]*17
+        vsubw.s16       q9,  q9,  d31
+        vaddw.s16       q1,  q1,  d31
+        vadd.s32        q13, q13, q9    @ z3 = 17*block[i+8*1] +  7*block[i+8*3]
+        vsub.s32        q12, q12, q1    @ z2 = 7*block[i+8*1]  - 17*block[i+8*3]
+        vadd.s32        q1,  q10, q13   @ z0 + z3
+        vadd.s32        q2,  q11, q12   @ z1 + z2
+        vsub.s32        q8,  q10, q13   @ z0 - z3
+        vsub.s32        q3,  q11, q12   @ z1 - z2
+        vtrn.32         q1,  q2
+        vtrn.32         q3,  q8
+        vswp            d3,  d6
+        vswp            d5,  d16
+        vmov.s32        d0,  #13
+        vadd.s32        q10, q1,  q3
+        vsub.s32        q11, q1,  q3
+        vshl.s32        q12, q2,  #3
+        vshl.s32        q9,  q2,  #4
+        vmul.s32        q13, q11, d0[0]
+        vshl.s32        q11, q8,  #4
+        vadd.s32        q9,  q9,  q2
+        vshl.s32        q15, q8,  #3
+        vsub.s32        q12, q12, q2
+        vadd.s32        q11, q11, q8
+        vmul.s32        q14, q10, d0[0]
+        vsub.s32        q8,  q15, q8
+        vsub.s32        q12, q12, q11
+        vadd.s32        q9,  q9,  q8
+        vadd.s32        q2,  q13, q12   @ z1 + z2
+        vadd.s32        q1,  q14, q9    @ z0 + z3
+        vsub.s32        q3,  q13, q12   @ z1 - z2
+        vsub.s32        q15, q14, q9    @ z0 - z3
+.endm
+
+/* void ff_rv34_inv_transform_neon(DCTELEM *block); */
+function ff_rv34_inv_transform_neon, export=1
+        mov             r2,  r0
+        rv34_inv_transform
+        vrshrn.s32      d1,  q2,  #10   @ (z1 + z2) >> 10
+        vrshrn.s32      d0,  q1,  #10   @ (z0 + z3) >> 10
+        vrshrn.s32      d2,  q3,  #10   @ (z1 - z2) >> 10
+        vrshrn.s32      d3,  q15, #10   @ (z0 - z3) >> 10
+        vst4.16         {d0[0], d1[0], d2[0], d3[0]}, [r2,:64], r1
+        vst4.16         {d0[1], d1[1], d2[1], d3[1]}, [r2,:64], r1
+        vst4.16         {d0[2], d1[2], d2[2], d3[2]}, [r2,:64], r1
+        vst4.16         {d0[3], d1[3], d2[3], d3[3]}, [r2,:64], r1
+        bx              lr
+endfunc
+
+/* void ff_rv34_inv_transform_noround_neon(DCTELEM *block); */
+function ff_rv34_inv_transform_noround_neon, export=1
+        mov             r2,  r0
+        rv34_inv_transform
+        vshl.s32        q11, q2,  #1
+        vshl.s32        q10, q1,  #1
+        vshl.s32        q12, q3,  #1
+        vshl.s32        q13, q15, #1
+        vadd.s32        q11, q11, q2
+        vadd.s32        q10, q10, q1
+        vadd.s32        q12, q12, q3
+        vadd.s32        q13, q13, q15
+        vshrn.s32       d0,  q10, #11   @ (z0 + z3)*3 >> 11
+        vshrn.s32       d1,  q11, #11   @ (z1 + z2)*3 >> 11
+        vshrn.s32       d2,  q12, #11   @ (z1 - z2)*3 >> 11
+        vshrn.s32       d3,  q13, #11   @ (z0 - z3)*3 >> 11
+        vst4.16         {d0[0], d1[0], d2[0], d3[0]}, [r2,:64], r1
+        vst4.16         {d0[1], d1[1], d2[1], d3[1]}, [r2,:64], r1
+        vst4.16         {d0[2], d1[2], d2[2], d3[2]}, [r2,:64], r1
+        vst4.16         {d0[3], d1[3], d2[3], d3[3]}, [r2,:64], r1
+        bx              lr
+endfunc
diff --git a/libavcodec/rv34dsp.c b/libavcodec/rv34dsp.c
index 59038a7a31..1f4cea8544 100644
--- a/libavcodec/rv34dsp.c
+++ b/libavcodec/rv34dsp.c
@@ -103,4 +103,7 @@ static void rv34_inv_transform_noround_c(DCTELEM *block){
 av_cold void ff_rv34dsp_init(RV34DSPContext *c, DSPContext* dsp) {
     c->rv34_inv_transform_tab[0] = rv34_inv_transform_c;
     c->rv34_inv_transform_tab[1] = rv34_inv_transform_noround_c;
+
+    if (HAVE_NEON)
+        ff_rv34dsp_init_neon(c, dsp);
 }
diff --git a/libavcodec/rv34dsp.h b/libavcodec/rv34dsp.h
index 4ade05060f..a1636e6eb5 100644
--- a/libavcodec/rv34dsp.h
+++ b/libavcodec/rv34dsp.h
@@ -56,6 +56,8 @@ void ff_rv30dsp_init(RV34DSPContext *c, DSPContext* dsp);
 void ff_rv34dsp_init(RV34DSPContext *c, DSPContext* dsp);
 void ff_rv40dsp_init(RV34DSPContext *c, DSPContext* dsp);
 
+void ff_rv34dsp_init_neon(RV34DSPContext *c, DSPContext *dsp);
+
 void ff_rv40dsp_init_x86(RV34DSPContext *c, DSPContext *dsp);
 
 #endif /* AVCODEC_RV34DSP_H */

From f054a82727728e813861851648e109cd24574178 Mon Sep 17 00:00:00 2001
From: Mans Rullgard <mans@mansr.com>
Date: Tue, 6 Dec 2011 12:44:05 +0000
Subject: [PATCH 2/5] ARM: move NEON H264 chroma mc to a separate file

This allows sharing code with the rv40 version of these functions.

Signed-off-by: Mans Rullgard <mans@mansr.com>
---
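Note (not part of the commit): the moved macros implement the H.264
1/8-pel bilinear chroma filter; the prologue derives the four bilinear
weights in r4, r12, r6 and r7 before broadcasting them into d0-d3. For
orientation, a rough scalar equivalent of the mc8/mc4 inner loop (the
name put_chroma_mc_ref and the size parameter are illustrative only):

    #include <stdint.h>

    static void put_chroma_mc_ref(uint8_t *dst, const uint8_t *src,
                                  int stride, int h, int x, int y, int size)
    {
        /* the weights the asm prologue derives from x and y */
        const int A = (8 - x) * (8 - y);
        const int B =      x  * (8 - y);
        const int C = (8 - x) *      y;
        const int D =      x  *      y;
        int i, j;

        for (j = 0; j < h; j++) {
            for (i = 0; i < size; i++)
                dst[i] = (A * src[i]          + B * src[i + 1] +
                          C * src[i + stride] + D * src[i + stride + 1] +
                          32) >> 6;
            dst += stride;
            src += stride;
        }
    }

Moving this into its own file lets the rv40 version (next patch) reuse
most of the code, swapping only the final rounding step.
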
 libavcodec/arm/Makefile       |   1 +
 libavcodec/arm/h264cmc_neon.S | 360 ++++++++++++++++++++++++++++++++++
 libavcodec/arm/h264dsp_neon.S | 339 --------------------------------
 3 files changed, 361 insertions(+), 339 deletions(-)
 create mode 100644 libavcodec/arm/h264cmc_neon.S

diff --git a/libavcodec/arm/Makefile b/libavcodec/arm/Makefile
index 9199faea3b..c125a59078 100644
--- a/libavcodec/arm/Makefile
+++ b/libavcodec/arm/Makefile
@@ -54,6 +54,7 @@ NEON-OBJS-$(CONFIG_RDFT)               += arm/rdft_neon.o               \
 
 NEON-OBJS-$(CONFIG_H264DSP)            += arm/h264dsp_neon.o            \
                                           arm/h264idct_neon.o           \
+                                          arm/h264cmc_neon.o            \
 
 NEON-OBJS-$(CONFIG_H264PRED)           += arm/h264pred_neon.o           \
 
diff --git a/libavcodec/arm/h264cmc_neon.S b/libavcodec/arm/h264cmc_neon.S
new file mode 100644
index 0000000000..e10adaca10
--- /dev/null
+++ b/libavcodec/arm/h264cmc_neon.S
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "asm.S"
+
+/* chroma_mc8(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
+.macro  h264_chroma_mc8 type
+function ff_\type\()_h264_chroma_mc8_neon, export=1
+        push            {r4-r7, lr}
+        ldrd            r4,  [sp, #20]
+  .ifc \type,avg
+        mov             lr,  r0
+  .endif
+        pld             [r1]
+        pld             [r1, r2]
+
+A       muls            r7,  r4,  r5
+T       mul             r7,  r4,  r5
+T       cmp             r7,  #0
+        rsb             r6,  r7,  r5,  lsl #3
+        rsb             r12, r7,  r4,  lsl #3
+        sub             r4,  r7,  r4,  lsl #3
+        sub             r4,  r4,  r5,  lsl #3
+        add             r4,  r4,  #64
+
+        beq             2f
+
+        add             r5,  r1,  r2
+
+        vdup.8          d0,  r4
+        lsl             r4,  r2,  #1
+        vdup.8          d1,  r12
+        vld1.8          {d4, d5}, [r1], r4
+        vdup.8          d2,  r6
+        vld1.8          {d6, d7}, [r5], r4
+        vdup.8          d3,  r7
+
+        vext.8          d5,  d4,  d5,  #1
+        vext.8          d7,  d6,  d7,  #1
+
+1:      pld             [r5]
+        vmull.u8        q8,  d4,  d0
+        vmlal.u8        q8,  d5,  d1
+        vld1.8          {d4, d5}, [r1], r4
+        vmlal.u8        q8,  d6,  d2
+        vext.8          d5,  d4,  d5,  #1
+        vmlal.u8        q8,  d7,  d3
+        vmull.u8        q9,  d6,  d0
+        subs            r3,  r3,  #2
+        vmlal.u8        q9,  d7,  d1
+        vmlal.u8        q9,  d4,  d2
+        vmlal.u8        q9,  d5,  d3
+        vrshrn.u16      d16, q8,  #6
+        vld1.8          {d6, d7}, [r5], r4
+        pld             [r1]
+        vrshrn.u16      d17, q9,  #6
+  .ifc \type,avg
+        vld1.8          {d20}, [lr,:64], r2
+        vld1.8          {d21}, [lr,:64], r2
+        vrhadd.u8       q8,  q8,  q10
+  .endif
+        vext.8          d7,  d6,  d7,  #1
+        vst1.8          {d16}, [r0,:64], r2
+        vst1.8          {d17}, [r0,:64], r2
+        bgt             1b
+
+        pop             {r4-r7, pc}
+
+2:      tst             r6,  r6
+        add             r12, r12, r6
+        vdup.8          d0,  r4
+        vdup.8          d1,  r12
+
+        beq             4f
+
+        add             r5,  r1,  r2
+        lsl             r4,  r2,  #1
+        vld1.8          {d4}, [r1], r4
+        vld1.8          {d6}, [r5], r4
+
+3:      pld             [r5]
+        vmull.u8        q8,  d4,  d0
+        vmlal.u8        q8,  d6,  d1
+        vld1.8          {d4}, [r1], r4
+        vmull.u8        q9,  d6,  d0
+        vmlal.u8        q9,  d4,  d1
+        vld1.8          {d6}, [r5], r4
+        vrshrn.u16      d16, q8,  #6
+        vrshrn.u16      d17, q9,  #6
+  .ifc \type,avg
+        vld1.8          {d20}, [lr,:64], r2
+        vld1.8          {d21}, [lr,:64], r2
+        vrhadd.u8       q8,  q8,  q10
+  .endif
+        subs            r3,  r3,  #2
+        pld             [r1]
+        vst1.8          {d16}, [r0,:64], r2
+        vst1.8          {d17}, [r0,:64], r2
+        bgt             3b
+
+        pop             {r4-r7, pc}
+
+4:      vld1.8          {d4, d5}, [r1], r2
+        vld1.8          {d6, d7}, [r1], r2
+        vext.8          d5,  d4,  d5,  #1
+        vext.8          d7,  d6,  d7,  #1
+
+5:      pld             [r1]
+        subs            r3,  r3,  #2
+        vmull.u8        q8,  d4,  d0
+        vmlal.u8        q8,  d5,  d1
+        vld1.8          {d4, d5}, [r1], r2
+        vmull.u8        q9,  d6,  d0
+        vmlal.u8        q9,  d7,  d1
+        pld             [r1]
+        vext.8          d5,  d4,  d5,  #1
+        vrshrn.u16      d16, q8,  #6
+        vrshrn.u16      d17, q9,  #6
+  .ifc \type,avg
+        vld1.8          {d20}, [lr,:64], r2
+        vld1.8          {d21}, [lr,:64], r2
+        vrhadd.u8       q8,  q8,  q10
+  .endif
+        vld1.8          {d6, d7}, [r1], r2
+        vext.8          d7,  d6,  d7,  #1
+        vst1.8          {d16}, [r0,:64], r2
+        vst1.8          {d17}, [r0,:64], r2
+        bgt             5b
+
+        pop             {r4-r7, pc}
+endfunc
+.endm
+
+/* chroma_mc4(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
+.macro  h264_chroma_mc4 type
+function ff_\type\()_h264_chroma_mc4_neon, export=1
+        push            {r4-r7, lr}
+        ldrd            r4,  [sp, #20]
+  .ifc \type,avg
+        mov             lr,  r0
+  .endif
+        pld             [r1]
+        pld             [r1, r2]
+
+A       muls            r7,  r4,  r5
+T       mul             r7,  r4,  r5
+T       cmp             r7,  #0
+        rsb             r6,  r7,  r5,  lsl #3
+        rsb             r12, r7,  r4,  lsl #3
+        sub             r4,  r7,  r4,  lsl #3
+        sub             r4,  r4,  r5,  lsl #3
+        add             r4,  r4,  #64
+
+        beq             2f
+
+        add             r5,  r1,  r2
+
+        vdup.8          d0,  r4
+        lsl             r4,  r2,  #1
+        vdup.8          d1,  r12
+        vld1.8          {d4},     [r1], r4
+        vdup.8          d2,  r6
+        vld1.8          {d6},     [r5], r4
+        vdup.8          d3,  r7
+
+        vext.8          d5,  d4,  d5,  #1
+        vext.8          d7,  d6,  d7,  #1
+        vtrn.32         d4,  d5
+        vtrn.32         d6,  d7
+
+        vtrn.32         d0,  d1
+        vtrn.32         d2,  d3
+
+1:      pld             [r5]
+        vmull.u8        q8,  d4,  d0
+        vmlal.u8        q8,  d6,  d2
+        vld1.8          {d4},     [r1], r4
+        vext.8          d5,  d4,  d5,  #1
+        vtrn.32         d4,  d5
+        vmull.u8        q9,  d6,  d0
+        vmlal.u8        q9,  d4,  d2
+        vld1.8          {d6},     [r5], r4
+        vadd.i16        d16, d16, d17
+        vadd.i16        d17, d18, d19
+        vrshrn.u16      d16, q8,  #6
+        subs            r3,  r3,  #2
+        pld             [r1]
+  .ifc \type,avg
+        vld1.32         {d20[0]}, [lr,:32], r2
+        vld1.32         {d20[1]}, [lr,:32], r2
+        vrhadd.u8       d16, d16, d20
+  .endif
+        vext.8          d7,  d6,  d7,  #1
+        vtrn.32         d6,  d7
+        vst1.32         {d16[0]}, [r0,:32], r2
+        vst1.32         {d16[1]}, [r0,:32], r2
+        bgt             1b
+
+        pop             {r4-r7, pc}
+
+2:      tst             r6,  r6
+        add             r12, r12, r6
+        vdup.8          d0,  r4
+        vdup.8          d1,  r12
+        vtrn.32         d0,  d1
+
+        beq             4f
+
+        vext.32         d1,  d0,  d1,  #1
+        add             r5,  r1,  r2
+        lsl             r4,  r2,  #1
+        vld1.32         {d4[0]},  [r1], r4
+        vld1.32         {d4[1]},  [r5], r4
+
+3:      pld             [r5]
+        vmull.u8        q8,  d4,  d0
+        vld1.32         {d4[0]},  [r1], r4
+        vmull.u8        q9,  d4,  d1
+        vld1.32         {d4[1]},  [r5], r4
+        vadd.i16        d16, d16, d17
+        vadd.i16        d17, d18, d19
+        vrshrn.u16      d16, q8,  #6
+  .ifc \type,avg
+        vld1.32         {d20[0]}, [lr,:32], r2
+        vld1.32         {d20[1]}, [lr,:32], r2
+        vrhadd.u8       d16, d16, d20
+  .endif
+        subs            r3,  r3,  #2
+        pld             [r1]
+        vst1.32         {d16[0]}, [r0,:32], r2
+        vst1.32         {d16[1]}, [r0,:32], r2
+        bgt             3b
+
+        pop             {r4-r7, pc}
+
+4:      vld1.8          {d4},     [r1], r2
+        vld1.8          {d6},     [r1], r2
+        vext.8          d5,  d4,  d5,  #1
+        vext.8          d7,  d6,  d7,  #1
+        vtrn.32         d4,  d5
+        vtrn.32         d6,  d7
+
+5:      vmull.u8        q8,  d4,  d0
+        vmull.u8        q9,  d6,  d0
+        subs            r3,  r3,  #2
+        vld1.8          {d4},     [r1], r2
+        vext.8          d5,  d4,  d5,  #1
+        vtrn.32         d4,  d5
+        vadd.i16        d16, d16, d17
+        vadd.i16        d17, d18, d19
+        pld             [r1]
+        vrshrn.u16      d16, q8,  #6
+  .ifc \type,avg
+        vld1.32         {d20[0]}, [lr,:32], r2
+        vld1.32         {d20[1]}, [lr,:32], r2
+        vrhadd.u8       d16, d16, d20
+  .endif
+        vld1.8          {d6},     [r1], r2
+        vext.8          d7,  d6,  d7,  #1
+        vtrn.32         d6,  d7
+        pld             [r1]
+        vst1.32         {d16[0]}, [r0,:32], r2
+        vst1.32         {d16[1]}, [r0,:32], r2
+        bgt             5b
+
+        pop             {r4-r7, pc}
+endfunc
+.endm
+
+.macro  h264_chroma_mc2 type
+function ff_\type\()_h264_chroma_mc2_neon, export=1
+        push            {r4-r6, lr}
+        ldr             r4,  [sp, #16]
+        ldr             lr,  [sp, #20]
+        pld             [r1]
+        pld             [r1, r2]
+        orrs            r5,  r4,  lr
+        beq             2f
+
+        mul             r5,  r4,  lr
+        rsb             r6,  r5,  lr,  lsl #3
+        rsb             r12, r5,  r4,  lsl #3
+        sub             r4,  r5,  r4,  lsl #3
+        sub             r4,  r4,  lr,  lsl #3
+        add             r4,  r4,  #64
+        vdup.8          d0,  r4
+        vdup.8          d2,  r12
+        vdup.8          d1,  r6
+        vdup.8          d3,  r5
+        vtrn.16         q0,  q1
+1:
+        vld1.32         {d4[0]},  [r1], r2
+        vld1.32         {d4[1]},  [r1], r2
+        vrev64.32       d5,  d4
+        vld1.32         {d5[1]},  [r1]
+        vext.8          q3,  q2,  q2,  #1
+        vtrn.16         q2,  q3
+        vmull.u8        q8,  d4,  d0
+        vmlal.u8        q8,  d5,  d1
+  .ifc \type,avg
+        vld1.16         {d18[0]}, [r0,:16], r2
+        vld1.16         {d18[1]}, [r0,:16]
+        sub             r0,  r0,  r2
+  .endif
+        vtrn.32         d16, d17
+        vadd.i16        d16, d16, d17
+        vrshrn.u16      d16, q8,  #6
+  .ifc \type,avg
+        vrhadd.u8       d16, d16, d18
+  .endif
+        vst1.16         {d16[0]}, [r0,:16], r2
+        vst1.16         {d16[1]}, [r0,:16], r2
+        subs            r3,  r3,  #2
+        bgt             1b
+        pop             {r4-r6, pc}
+2:
+  .ifc \type,put
+        ldrh_post       r5,  r1,  r2
+        strh_post       r5,  r0,  r2
+        ldrh_post       r6,  r1,  r2
+        strh_post       r6,  r0,  r2
+  .else
+        vld1.16         {d16[0]}, [r1], r2
+        vld1.16         {d16[1]}, [r1], r2
+        vld1.16         {d18[0]}, [r0,:16], r2
+        vld1.16         {d18[1]}, [r0,:16]
+        sub             r0,  r0,  r2
+        vrhadd.u8       d16, d16, d18
+        vst1.16         {d16[0]}, [r0,:16], r2
+        vst1.16         {d16[1]}, [r0,:16], r2
+  .endif
+        subs            r3,  r3,  #2
+        bgt             2b
+        pop             {r4-r6, pc}
+endfunc
+.endm
+
+        h264_chroma_mc8 put
+        h264_chroma_mc8 avg
+        h264_chroma_mc4 put
+        h264_chroma_mc4 avg
+        h264_chroma_mc2 put
+        h264_chroma_mc2 avg
diff --git a/libavcodec/arm/h264dsp_neon.S b/libavcodec/arm/h264dsp_neon.S
index e23f7b149a..a4abf66494 100644
--- a/libavcodec/arm/h264dsp_neon.S
+++ b/libavcodec/arm/h264dsp_neon.S
@@ -21,345 +21,6 @@
 #include "asm.S"
 #include "neon.S"
 
-/* chroma_mc8(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
-.macro  h264_chroma_mc8 type
-function ff_\type\()_h264_chroma_mc8_neon, export=1
-        push            {r4-r7, lr}
-        ldrd            r4,  [sp, #20]
-  .ifc \type,avg
-        mov             lr,  r0
-  .endif
-        pld             [r1]
-        pld             [r1, r2]
-
-A       muls            r7,  r4,  r5
-T       mul             r7,  r4,  r5
-T       cmp             r7,  #0
-        rsb             r6,  r7,  r5,  lsl #3
-        rsb             r12, r7,  r4,  lsl #3
-        sub             r4,  r7,  r4,  lsl #3
-        sub             r4,  r4,  r5,  lsl #3
-        add             r4,  r4,  #64
-
-        beq             2f
-
-        add             r5,  r1,  r2
-
-        vdup.8          d0,  r4
-        lsl             r4,  r2,  #1
-        vdup.8          d1,  r12
-        vld1.8          {d4, d5}, [r1], r4
-        vdup.8          d2,  r6
-        vld1.8          {d6, d7}, [r5], r4
-        vdup.8          d3,  r7
-
-        vext.8          d5,  d4,  d5,  #1
-        vext.8          d7,  d6,  d7,  #1
-
-1:      pld             [r5]
-        vmull.u8        q8,  d4,  d0
-        vmlal.u8        q8,  d5,  d1
-        vld1.8          {d4, d5}, [r1], r4
-        vmlal.u8        q8,  d6,  d2
-        vext.8          d5,  d4,  d5,  #1
-        vmlal.u8        q8,  d7,  d3
-        vmull.u8        q9,  d6,  d0
-        subs            r3,  r3,  #2
-        vmlal.u8        q9,  d7,  d1
-        vmlal.u8        q9,  d4,  d2
-        vmlal.u8        q9,  d5,  d3
-        vrshrn.u16      d16, q8,  #6
-        vld1.8          {d6, d7}, [r5], r4
-        pld             [r1]
-        vrshrn.u16      d17, q9,  #6
-  .ifc \type,avg
-        vld1.8          {d20}, [lr,:64], r2
-        vld1.8          {d21}, [lr,:64], r2
-        vrhadd.u8       q8,  q8,  q10
-  .endif
-        vext.8          d7,  d6,  d7,  #1
-        vst1.8          {d16}, [r0,:64], r2
-        vst1.8          {d17}, [r0,:64], r2
-        bgt             1b
-
-        pop             {r4-r7, pc}
-
-2:      tst             r6,  r6
-        add             r12, r12, r6
-        vdup.8          d0,  r4
-        vdup.8          d1,  r12
-
-        beq             4f
-
-        add             r5,  r1,  r2
-        lsl             r4,  r2,  #1
-        vld1.8          {d4}, [r1], r4
-        vld1.8          {d6}, [r5], r4
-
-3:      pld             [r5]
-        vmull.u8        q8,  d4,  d0
-        vmlal.u8        q8,  d6,  d1
-        vld1.8          {d4}, [r1], r4
-        vmull.u8        q9,  d6,  d0
-        vmlal.u8        q9,  d4,  d1
-        vld1.8          {d6}, [r5], r4
-        vrshrn.u16      d16, q8,  #6
-        vrshrn.u16      d17, q9,  #6
-  .ifc \type,avg
-        vld1.8          {d20}, [lr,:64], r2
-        vld1.8          {d21}, [lr,:64], r2
-        vrhadd.u8       q8,  q8,  q10
-  .endif
-        subs            r3,  r3,  #2
-        pld             [r1]
-        vst1.8          {d16}, [r0,:64], r2
-        vst1.8          {d17}, [r0,:64], r2
-        bgt             3b
-
-        pop             {r4-r7, pc}
-
-4:      vld1.8          {d4, d5}, [r1], r2
-        vld1.8          {d6, d7}, [r1], r2
-        vext.8          d5,  d4,  d5,  #1
-        vext.8          d7,  d6,  d7,  #1
-
-5:      pld             [r1]
-        subs            r3,  r3,  #2
-        vmull.u8        q8,  d4,  d0
-        vmlal.u8        q8,  d5,  d1
-        vld1.8          {d4, d5}, [r1], r2
-        vmull.u8        q9,  d6,  d0
-        vmlal.u8        q9,  d7,  d1
-        pld             [r1]
-        vext.8          d5,  d4,  d5,  #1
-        vrshrn.u16      d16, q8,  #6
-        vrshrn.u16      d17, q9,  #6
-  .ifc \type,avg
-        vld1.8          {d20}, [lr,:64], r2
-        vld1.8          {d21}, [lr,:64], r2
-        vrhadd.u8       q8,  q8,  q10
-  .endif
-        vld1.8          {d6, d7}, [r1], r2
-        vext.8          d7,  d6,  d7,  #1
-        vst1.8          {d16}, [r0,:64], r2
-        vst1.8          {d17}, [r0,:64], r2
-        bgt             5b
-
-        pop             {r4-r7, pc}
-endfunc
-.endm
-
-/* chroma_mc4(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
-.macro  h264_chroma_mc4 type
-function ff_\type\()_h264_chroma_mc4_neon, export=1
-        push            {r4-r7, lr}
-        ldrd            r4,  [sp, #20]
-  .ifc \type,avg
-        mov             lr,  r0
-  .endif
-        pld             [r1]
-        pld             [r1, r2]
-
-A       muls            r7,  r4,  r5
-T       mul             r7,  r4,  r5
-T       cmp             r7,  #0
-        rsb             r6,  r7,  r5,  lsl #3
-        rsb             r12, r7,  r4,  lsl #3
-        sub             r4,  r7,  r4,  lsl #3
-        sub             r4,  r4,  r5,  lsl #3
-        add             r4,  r4,  #64
-
-        beq             2f
-
-        add             r5,  r1,  r2
-
-        vdup.8          d0,  r4
-        lsl             r4,  r2,  #1
-        vdup.8          d1,  r12
-        vld1.8          {d4},     [r1], r4
-        vdup.8          d2,  r6
-        vld1.8          {d6},     [r5], r4
-        vdup.8          d3,  r7
-
-        vext.8          d5,  d4,  d5,  #1
-        vext.8          d7,  d6,  d7,  #1
-        vtrn.32         d4,  d5
-        vtrn.32         d6,  d7
-
-        vtrn.32         d0,  d1
-        vtrn.32         d2,  d3
-
-1:      pld             [r5]
-        vmull.u8        q8,  d4,  d0
-        vmlal.u8        q8,  d6,  d2
-        vld1.8          {d4},     [r1], r4
-        vext.8          d5,  d4,  d5,  #1
-        vtrn.32         d4,  d5
-        vmull.u8        q9,  d6,  d0
-        vmlal.u8        q9,  d4,  d2
-        vld1.8          {d6},     [r5], r4
-        vadd.i16        d16, d16, d17
-        vadd.i16        d17, d18, d19
-        vrshrn.u16      d16, q8,  #6
-        subs            r3,  r3,  #2
-        pld             [r1]
-  .ifc \type,avg
-        vld1.32         {d20[0]}, [lr,:32], r2
-        vld1.32         {d20[1]}, [lr,:32], r2
-        vrhadd.u8       d16, d16, d20
-  .endif
-        vext.8          d7,  d6,  d7,  #1
-        vtrn.32         d6,  d7
-        vst1.32         {d16[0]}, [r0,:32], r2
-        vst1.32         {d16[1]}, [r0,:32], r2
-        bgt             1b
-
-        pop             {r4-r7, pc}
-
-2:      tst             r6,  r6
-        add             r12, r12, r6
-        vdup.8          d0,  r4
-        vdup.8          d1,  r12
-        vtrn.32         d0,  d1
-
-        beq             4f
-
-        vext.32         d1,  d0,  d1,  #1
-        add             r5,  r1,  r2
-        lsl             r4,  r2,  #1
-        vld1.32         {d4[0]},  [r1], r4
-        vld1.32         {d4[1]},  [r5], r4
-
-3:      pld             [r5]
-        vmull.u8        q8,  d4,  d0
-        vld1.32         {d4[0]},  [r1], r4
-        vmull.u8        q9,  d4,  d1
-        vld1.32         {d4[1]},  [r5], r4
-        vadd.i16        d16, d16, d17
-        vadd.i16        d17, d18, d19
-        vrshrn.u16      d16, q8,  #6
-  .ifc \type,avg
-        vld1.32         {d20[0]}, [lr,:32], r2
-        vld1.32         {d20[1]}, [lr,:32], r2
-        vrhadd.u8       d16, d16, d20
-  .endif
-        subs            r3,  r3,  #2
-        pld             [r1]
-        vst1.32         {d16[0]}, [r0,:32], r2
-        vst1.32         {d16[1]}, [r0,:32], r2
-        bgt             3b
-
-        pop             {r4-r7, pc}
-
-4:      vld1.8          {d4},     [r1], r2
-        vld1.8          {d6},     [r1], r2
-        vext.8          d5,  d4,  d5,  #1
-        vext.8          d7,  d6,  d7,  #1
-        vtrn.32         d4,  d5
-        vtrn.32         d6,  d7
-
-5:      vmull.u8        q8,  d4,  d0
-        vmull.u8        q9,  d6,  d0
-        subs            r3,  r3,  #2
-        vld1.8          {d4},     [r1], r2
-        vext.8          d5,  d4,  d5,  #1
-        vtrn.32         d4,  d5
-        vadd.i16        d16, d16, d17
-        vadd.i16        d17, d18, d19
-        pld             [r1]
-        vrshrn.u16      d16, q8,  #6
-  .ifc \type,avg
-        vld1.32         {d20[0]}, [lr,:32], r2
-        vld1.32         {d20[1]}, [lr,:32], r2
-        vrhadd.u8       d16, d16, d20
-  .endif
-        vld1.8          {d6},     [r1], r2
-        vext.8          d7,  d6,  d7,  #1
-        vtrn.32         d6,  d7
-        pld             [r1]
-        vst1.32         {d16[0]}, [r0,:32], r2
-        vst1.32         {d16[1]}, [r0,:32], r2
-        bgt             5b
-
-        pop             {r4-r7, pc}
-endfunc
-.endm
-
-.macro  h264_chroma_mc2 type
-function ff_\type\()_h264_chroma_mc2_neon, export=1
-        push            {r4-r6, lr}
-        ldr             r4,  [sp, #16]
-        ldr             lr,  [sp, #20]
-        pld             [r1]
-        pld             [r1, r2]
-        orrs            r5,  r4,  lr
-        beq             2f
-
-        mul             r5,  r4,  lr
-        rsb             r6,  r5,  lr,  lsl #3
-        rsb             r12, r5,  r4,  lsl #3
-        sub             r4,  r5,  r4,  lsl #3
-        sub             r4,  r4,  lr,  lsl #3
-        add             r4,  r4,  #64
-        vdup.8          d0,  r4
-        vdup.8          d2,  r12
-        vdup.8          d1,  r6
-        vdup.8          d3,  r5
-        vtrn.16         q0,  q1
-1:
-        vld1.32         {d4[0]},  [r1], r2
-        vld1.32         {d4[1]},  [r1], r2
-        vrev64.32       d5,  d4
-        vld1.32         {d5[1]},  [r1]
-        vext.8          q3,  q2,  q2,  #1
-        vtrn.16         q2,  q3
-        vmull.u8        q8,  d4,  d0
-        vmlal.u8        q8,  d5,  d1
-  .ifc \type,avg
-        vld1.16         {d18[0]}, [r0,:16], r2
-        vld1.16         {d18[1]}, [r0,:16]
-        sub             r0,  r0,  r2
-  .endif
-        vtrn.32         d16, d17
-        vadd.i16        d16, d16, d17
-        vrshrn.u16      d16, q8,  #6
-  .ifc \type,avg
-        vrhadd.u8       d16, d16, d18
-  .endif
-        vst1.16         {d16[0]}, [r0,:16], r2
-        vst1.16         {d16[1]}, [r0,:16], r2
-        subs            r3,  r3,  #2
-        bgt             1b
-        pop             {r4-r6, pc}
-2:
-  .ifc \type,put
-        ldrh_post       r5,  r1,  r2
-        strh_post       r5,  r0,  r2
-        ldrh_post       r6,  r1,  r2
-        strh_post       r6,  r0,  r2
-  .else
-        vld1.16         {d16[0]}, [r1], r2
-        vld1.16         {d16[1]}, [r1], r2
-        vld1.16         {d18[0]}, [r0,:16], r2
-        vld1.16         {d18[1]}, [r0,:16]
-        sub             r0,  r0,  r2
-        vrhadd.u8       d16, d16, d18
-        vst1.16         {d16[0]}, [r0,:16], r2
-        vst1.16         {d16[1]}, [r0,:16], r2
-  .endif
-        subs            r3,  r3,  #2
-        bgt             2b
-        pop             {r4-r6, pc}
-endfunc
-.endm
-
-        h264_chroma_mc8 put
-        h264_chroma_mc8 avg
-        h264_chroma_mc4 put
-        h264_chroma_mc4 avg
-        h264_chroma_mc2 put
-        h264_chroma_mc2 avg
-
         /* H.264 loop filter */
 
 .macro  h264_loop_filter_start

From f5c05b9aa5aeb6079b76f9da452f8ee4050e8955 Mon Sep 17 00:00:00 2001
From: Janne Grunau <janne-libav@jannau.net>
Date: Mon, 5 Dec 2011 21:18:05 +0000
Subject: [PATCH 3/5] rv40: NEON optimised chroma MC

Signed-off-by: Mans Rullgard <mans@mansr.com>
---
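Note (not part of the commit): the only functional difference from the
H.264 paths is the rounding. Where H.264 does (sum + 32) >> 6
(vrshrn #6), the rv40 branches add a bias looked up from the rv40bias
table and then use a plain shift (vadd + vshrn #6). Roughly, with
illustrative names:

    #include <stdint.h>

    /* values copied from the rv40bias table added below */
    static const uint16_t rv40bias[4][4] = {
        {  0, 16, 32, 16 },
        { 32, 28, 32, 28 },
        {  0, 32, 16, 32 },
        { 32, 28, 32, 28 },
    };

    /* sum = A*a + B*b + C*c + D*d as in the h264 chroma filter */
    static inline int rv40_chroma_round(int sum, int x, int y)
    {
        return (sum + rv40bias[y >> 1][x >> 1]) >> 6;
    }

The address arithmetic in the .ifc \codec,rv40 blocks (row stride of 8
bytes, element stride of 2 bytes) matches the [y >> 1][x >> 1] indexing
above.
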
 libavcodec/arm/Makefile            |  2 +
 libavcodec/arm/h264cmc_neon.S      | 80 ++++++++++++++++++++++++++++--
 libavcodec/arm/rv40dsp_init_neon.c | 38 ++++++++++++++
 libavcodec/rv34dsp.h               |  1 +
 libavcodec/rv40dsp.c               |  2 +
 5 files changed, 118 insertions(+), 5 deletions(-)
 create mode 100644 libavcodec/arm/rv40dsp_init_neon.c

diff --git a/libavcodec/arm/Makefile b/libavcodec/arm/Makefile
index c125a59078..a948e6db3f 100644
--- a/libavcodec/arm/Makefile
+++ b/libavcodec/arm/Makefile
@@ -68,6 +68,8 @@ NEON-OBJS-$(CONFIG_RV30_DECODER)       += arm/rv34dsp_init_neon.o       \
 
 NEON-OBJS-$(CONFIG_RV40_DECODER)       += arm/rv34dsp_init_neon.o       \
                                           arm/rv34dsp_neon.o            \
+                                          arm/rv40dsp_init_neon.o       \
+                                          arm/h264cmc_neon.o            \
 
 NEON-OBJS-$(CONFIG_VP3_DECODER)        += arm/vp3dsp_neon.o
 
diff --git a/libavcodec/arm/h264cmc_neon.S b/libavcodec/arm/h264cmc_neon.S
index e10adaca10..a6feadd189 100644
--- a/libavcodec/arm/h264cmc_neon.S
+++ b/libavcodec/arm/h264cmc_neon.S
@@ -21,8 +21,8 @@
 #include "asm.S"
 
 /* chroma_mc8(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
-.macro  h264_chroma_mc8 type
-function ff_\type\()_h264_chroma_mc8_neon, export=1
+.macro  h264_chroma_mc8 type, codec=h264
+function ff_\type\()_\codec\()_chroma_mc8_neon, export=1
         push            {r4-r7, lr}
         ldrd            r4,  [sp, #20]
   .ifc \type,avg
@@ -31,6 +31,15 @@ function ff_\type\()_h264_chroma_mc8_neon, export=1
         pld             [r1]
         pld             [r1, r2]
 
+  .ifc \codec,rv40
+        movrel          r6,  rv40bias
+        lsr             r7,  r5,  #1
+        add             r6,  r6,  r7,  lsl #3
+        lsr             r7,  r4,  #1
+        add             r6,  r6,  r7,  lsl #1
+        vld1.16         {d22[],d23[]}, [r6,:16]
+  .endif
+
 A       muls            r7,  r4,  r5
 T       mul             r7,  r4,  r5
 T       cmp             r7,  #0
@@ -67,10 +76,17 @@ T       cmp             r7,  #0
         vmlal.u8        q9,  d7,  d1
         vmlal.u8        q9,  d4,  d2
         vmlal.u8        q9,  d5,  d3
-        vrshrn.u16      d16, q8,  #6
         vld1.8          {d6, d7}, [r5], r4
         pld             [r1]
+  .ifc \codec,h264
+        vrshrn.u16      d16, q8,  #6
         vrshrn.u16      d17, q9,  #6
+  .else
+        vadd.u16        q8,  q8,  q11
+        vadd.u16        q9,  q9,  q11
+        vshrn.u16       d16, q8,  #6
+        vshrn.u16       d17, q9,  #6
+  .endif
   .ifc \type,avg
         vld1.8          {d20}, [lr,:64], r2
         vld1.8          {d21}, [lr,:64], r2
@@ -102,8 +118,15 @@ T       cmp             r7,  #0
         vmull.u8        q9,  d6,  d0
         vmlal.u8        q9,  d4,  d1
         vld1.8          {d6}, [r5], r4
+  .ifc \codec,h264
         vrshrn.u16      d16, q8,  #6
         vrshrn.u16      d17, q9,  #6
+  .else
+        vadd.u16        q8,  q8,  q11
+        vadd.u16        q9,  q9,  q11
+        vshrn.u16       d16, q8,  #6
+        vshrn.u16       d17, q9,  #6
+  .endif
   .ifc \type,avg
         vld1.8          {d20}, [lr,:64], r2
         vld1.8          {d21}, [lr,:64], r2
@@ -131,8 +154,15 @@ T       cmp             r7,  #0
         vmlal.u8        q9,  d7,  d1
         pld             [r1]
         vext.8          d5,  d4,  d5,  #1
+  .ifc \codec,h264
         vrshrn.u16      d16, q8,  #6
         vrshrn.u16      d17, q9,  #6
+  .else
+        vadd.u16        q8,  q8,  q11
+        vadd.u16        q9,  q9,  q11
+        vshrn.u16       d16, q8,  #6
+        vshrn.u16       d17, q9,  #6
+  .endif
   .ifc \type,avg
         vld1.8          {d20}, [lr,:64], r2
         vld1.8          {d21}, [lr,:64], r2
@@ -149,8 +179,8 @@ endfunc
 .endm
 
 /* chroma_mc4(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
-.macro  h264_chroma_mc4 type
-function ff_\type\()_h264_chroma_mc4_neon, export=1
+.macro  h264_chroma_mc4 type, codec=h264
+function ff_\type\()_\codec\()_chroma_mc4_neon, export=1
         push            {r4-r7, lr}
         ldrd            r4,  [sp, #20]
   .ifc \type,avg
@@ -159,6 +189,15 @@ function ff_\type\()_h264_chroma_mc4_neon, export=1
         pld             [r1]
         pld             [r1, r2]
 
+  .ifc \codec,rv40
+        movrel          r6,  rv40bias
+        lsr             r7,  r5,  #1
+        add             r6,  r6,  r7,  lsl #3
+        lsr             r7,  r4,  #1
+        add             r6,  r6,  r7,  lsl #1
+        vld1.16         {d22[],d23[]}, [r6,:16]
+  .endif
+
 A       muls            r7,  r4,  r5
 T       mul             r7,  r4,  r5
 T       cmp             r7,  #0
@@ -199,7 +238,12 @@ T       cmp             r7,  #0
         vld1.8          {d6},     [r5], r4
         vadd.i16        d16, d16, d17
         vadd.i16        d17, d18, d19
+  .ifc \codec,h264
         vrshrn.u16      d16, q8,  #6
+  .else
+        vadd.u16        q8,  q8,  q11
+        vshrn.u16       d16, q8,  #6
+  .endif
         subs            r3,  r3,  #2
         pld             [r1]
   .ifc \type,avg
@@ -236,7 +280,12 @@ T       cmp             r7,  #0
         vld1.32         {d4[1]},  [r5], r4
         vadd.i16        d16, d16, d17
         vadd.i16        d17, d18, d19
+  .ifc \codec,h264
         vrshrn.u16      d16, q8,  #6
+  .else
+        vadd.u16        q8,  q8,  q11
+        vshrn.u16       d16, q8,  #6
+  .endif
   .ifc \type,avg
         vld1.32         {d20[0]}, [lr,:32], r2
         vld1.32         {d20[1]}, [lr,:32], r2
@@ -266,7 +315,12 @@ T       cmp             r7,  #0
         vadd.i16        d16, d16, d17
         vadd.i16        d17, d18, d19
         pld             [r1]
+  .ifc \codec,h264
         vrshrn.u16      d16, q8,  #6
+  .else
+        vadd.u16        q8,  q8,  q11
+        vshrn.u16       d16, q8,  #6
+  .endif
   .ifc \type,avg
         vld1.32         {d20[0]}, [lr,:32], r2
         vld1.32         {d20[1]}, [lr,:32], r2
@@ -352,9 +406,25 @@ function ff_\type\()_h264_chroma_mc2_neon, export=1
 endfunc
 .endm
 
+#if CONFIG_H264_DECODER
         h264_chroma_mc8 put
         h264_chroma_mc8 avg
         h264_chroma_mc4 put
         h264_chroma_mc4 avg
         h264_chroma_mc2 put
         h264_chroma_mc2 avg
+#endif
+
+#if CONFIG_RV40_DECODER
+const   rv40bias
+        .short           0, 16, 32, 16
+        .short          32, 28, 32, 28
+        .short           0, 32, 16, 32
+        .short          32, 28, 32, 28
+endconst
+
+        h264_chroma_mc8 put, rv40
+        h264_chroma_mc8 avg, rv40
+        h264_chroma_mc4 put, rv40
+        h264_chroma_mc4 avg, rv40
+#endif
diff --git a/libavcodec/arm/rv40dsp_init_neon.c b/libavcodec/arm/rv40dsp_init_neon.c
new file mode 100644
index 0000000000..aa4a88da1a
--- /dev/null
+++ b/libavcodec/arm/rv40dsp_init_neon.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2011 Janne Grunau <janne-libav@jannau.net>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "libavcodec/avcodec.h"
+#include "libavcodec/rv34dsp.h"
+
+void ff_put_rv40_chroma_mc8_neon(uint8_t *, uint8_t *, int, int, int, int);
+void ff_put_rv40_chroma_mc4_neon(uint8_t *, uint8_t *, int, int, int, int);
+
+void ff_avg_rv40_chroma_mc8_neon(uint8_t *, uint8_t *, int, int, int, int);
+void ff_avg_rv40_chroma_mc4_neon(uint8_t *, uint8_t *, int, int, int, int);
+
+void ff_rv40dsp_init_neon(RV34DSPContext *c, DSPContext* dsp)
+{
+    c->put_chroma_pixels_tab[0] = ff_put_rv40_chroma_mc8_neon;
+    c->put_chroma_pixels_tab[1] = ff_put_rv40_chroma_mc4_neon;
+    c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_neon;
+    c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_neon;
+}
diff --git a/libavcodec/rv34dsp.h b/libavcodec/rv34dsp.h
index a1636e6eb5..695af06970 100644
--- a/libavcodec/rv34dsp.h
+++ b/libavcodec/rv34dsp.h
@@ -59,5 +59,6 @@ void ff_rv40dsp_init(RV34DSPContext *c, DSPContext* dsp);
 void ff_rv34dsp_init_neon(RV34DSPContext *c, DSPContext *dsp);
 
 void ff_rv40dsp_init_x86(RV34DSPContext *c, DSPContext *dsp);
+void ff_rv40dsp_init_neon(RV34DSPContext *c, DSPContext *dsp);
 
 #endif /* AVCODEC_RV34DSP_H */
diff --git a/libavcodec/rv40dsp.c b/libavcodec/rv40dsp.c
index f193b6050d..06bdf18c42 100644
--- a/libavcodec/rv40dsp.c
+++ b/libavcodec/rv40dsp.c
@@ -534,4 +534,6 @@ av_cold void ff_rv40dsp_init(RV34DSPContext *c, DSPContext* dsp) {
 
     if (HAVE_MMX)
         ff_rv40dsp_init_x86(c, dsp);
+    if (HAVE_NEON)
+        ff_rv40dsp_init_neon(c, dsp);
 }

From 6c889888662168811389e209bfbc662d70c27627 Mon Sep 17 00:00:00 2001
From: Janne Grunau <janne-libav@jannau.net>
Date: Mon, 5 Dec 2011 21:22:57 +0000
Subject: [PATCH 4/5] rv40: NEON optimised weighted prediction

Signed-off-by: Mans Rullgard <mans@mansr.com>
---
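Note (not part of the commit): judging by the shifts in the rv40_weight
macro (vshrn #9 followed by a rounding vrshrn #5), each source is scaled
and pre-shifted by 9 before the final rounding shift by 5. A rough
scalar sketch (the name rv40_weight_ref and the size parameter are
illustrative only); note that w2 is applied to src1 and w1 to src2:

    #include <stdint.h>

    static void rv40_weight_ref(uint8_t *dst, const uint8_t *src1,
                                const uint8_t *src2, int w1, int w2,
                                int stride, int size)
    {
        int i, j;

        for (j = 0; j < size; j++) {
            for (i = 0; i < size; i++)
                dst[i] = (((w2 * src1[i]) >> 9) +
                          ((w1 * src2[i]) >> 9) + 0x10) >> 5;
            src1 += stride;
            src2 += stride;
            dst  += stride;
        }
    }

The intermediate >> 9 narrows the 32-bit products back to 16 bits so the
final add and rounding shift can run on vectors of u16.
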
 libavcodec/arm/Makefile            |  1 +
 libavcodec/arm/rv40dsp_init_neon.c |  6 +++
 libavcodec/arm/rv40dsp_neon.S      | 85 ++++++++++++++++++++++++++++++
 3 files changed, 92 insertions(+)
 create mode 100644 libavcodec/arm/rv40dsp_neon.S

diff --git a/libavcodec/arm/Makefile b/libavcodec/arm/Makefile
index a948e6db3f..fc1711395b 100644
--- a/libavcodec/arm/Makefile
+++ b/libavcodec/arm/Makefile
@@ -69,6 +69,7 @@ NEON-OBJS-$(CONFIG_RV30_DECODER)       += arm/rv34dsp_init_neon.o       \
 NEON-OBJS-$(CONFIG_RV40_DECODER)       += arm/rv34dsp_init_neon.o       \
                                           arm/rv34dsp_neon.o            \
                                           arm/rv40dsp_init_neon.o       \
+                                          arm/rv40dsp_neon.o            \
                                           arm/h264cmc_neon.o            \
 
 NEON-OBJS-$(CONFIG_VP3_DECODER)        += arm/vp3dsp_neon.o
diff --git a/libavcodec/arm/rv40dsp_init_neon.c b/libavcodec/arm/rv40dsp_init_neon.c
index aa4a88da1a..3a863e1916 100644
--- a/libavcodec/arm/rv40dsp_init_neon.c
+++ b/libavcodec/arm/rv40dsp_init_neon.c
@@ -29,10 +29,16 @@ void ff_put_rv40_chroma_mc4_neon(uint8_t *, uint8_t *, int, int, int, int);
 void ff_avg_rv40_chroma_mc8_neon(uint8_t *, uint8_t *, int, int, int, int);
 void ff_avg_rv40_chroma_mc4_neon(uint8_t *, uint8_t *, int, int, int, int);
 
+void ff_rv40_weight_func_16_neon(uint8_t *, uint8_t *, uint8_t *, int, int, int);
+void ff_rv40_weight_func_8_neon(uint8_t *, uint8_t *, uint8_t *, int, int, int);
+
 void ff_rv40dsp_init_neon(RV34DSPContext *c, DSPContext* dsp)
 {
     c->put_chroma_pixels_tab[0] = ff_put_rv40_chroma_mc8_neon;
     c->put_chroma_pixels_tab[1] = ff_put_rv40_chroma_mc4_neon;
     c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_neon;
     c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_neon;
+
+    c->rv40_weight_pixels_tab[0] = ff_rv40_weight_func_16_neon;
+    c->rv40_weight_pixels_tab[1] = ff_rv40_weight_func_8_neon;
 }
diff --git a/libavcodec/arm/rv40dsp_neon.S b/libavcodec/arm/rv40dsp_neon.S
new file mode 100644
index 0000000000..cafd98add0
--- /dev/null
+++ b/libavcodec/arm/rv40dsp_neon.S
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2011 Janne Grunau <janne-libav@jannau.net>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "asm.S"
+
+.macro  rv40_weight
+        vmovl.u8        q8,  d2
+        vmovl.u8        q9,  d3
+        vmovl.u8        q10, d4
+        vmovl.u8        q11, d5
+        vmull.u16       q2,  d16, d0[2]
+        vmull.u16       q3,  d17, d0[2]
+        vmull.u16       q8,  d18, d0[2]
+        vmull.u16       q9,  d19, d0[2]
+        vmull.u16       q12, d20, d0[0]
+        vmull.u16       q13, d21, d0[0]
+        vmull.u16       q14, d22, d0[0]
+        vmull.u16       q15, d23, d0[0]
+        vshrn.i32       d4,  q2,  #9
+        vshrn.i32       d5,  q3,  #9
+        vshrn.i32       d6,  q8,  #9
+        vshrn.i32       d7,  q9,  #9
+        vshrn.i32       d16, q12, #9
+        vshrn.i32       d17, q13, #9
+        vshrn.i32       d18, q14, #9
+        vshrn.i32       d19, q15, #9
+        vadd.u16        q2,  q2,  q8
+        vadd.u16        q3,  q3,  q9
+        vrshrn.i16      d2,  q2,  #5
+        vrshrn.i16      d3,  q3,  #5
+.endm
+
+/* void ff_rv40_weight_func_16_neon(uint8_t *dst, uint8_t *src1, uint8_t *src2,
+                                    int w1, int w2, int stride) */
+function ff_rv40_weight_func_16_neon, export=1
+        ldr             r12, [sp]
+        vmov            d0,  r3,  r12
+        ldr             r12, [sp, #4]
+        mov             r3,  #16
+1:
+        vld1.8          {q1},     [r1,:128], r12
+        vld1.8          {q2},     [r2,:128], r12
+        rv40_weight
+        vst1.8          {q1},     [r0,:128], r12
+        subs            r3,  r3,  #1
+        bne             1b
+        bx              lr
+endfunc
+
+/* void ff_rv40_weight_func_8_neon(uint8_t *dst, uint8_t *src1, uint8_t *src2,
+                                   int w1, int w2, int stride) */
+function ff_rv40_weight_func_8_neon, export=1
+        ldr             r12, [sp]
+        vmov            d0,  r3,  r12
+        ldr             r12, [sp, #4]
+        mov             r3,  #8
+1:
+        vld1.8          {d2},     [r1,:64], r12
+        vld1.8          {d3},     [r1,:64], r12
+        vld1.8          {d4},     [r2,:64], r12
+        vld1.8          {d5},     [r2,:64], r12
+        rv40_weight
+        vst1.8          {d2},     [r0,:64], r12
+        vst1.8          {d3},     [r0,:64], r12
+        subs            r3,  r3,  #2
+        bne             1b
+        bx              lr
+endfunc

From 4d9ec050a21c953f5a6e31483cf5ebd344afa3fd Mon Sep 17 00:00:00 2001
From: Konstantin Todorov <bioactiv3@abv.bg>
Date: Mon, 5 Dec 2011 18:40:28 +0200
Subject: [PATCH 5/5] Code cleanup - mpegvideo.c - lines 500-1000

Signed-off-by: Ronald S. Bultje <rsbultje@gmail.com>
---
 libavcodec/mpegvideo.c | 542 +++++++++++++++++++++++------------------
 1 file changed, 309 insertions(+), 233 deletions(-)

diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index 46b6232b08..78fae1026d 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -567,46 +567,56 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
     s->input_picture_number = s1->input_picture_number;
 
     memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
-    memcpy(&s->last_picture, &s1->last_picture, (char*)&s1->last_picture_ptr - (char*)&s1->last_picture);
+    memcpy(&s->last_picture, &s1->last_picture,
+           (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
 
-    s->last_picture_ptr     = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
-    s->current_picture_ptr  = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
-    s->next_picture_ptr     = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);
+    s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
+    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
+    s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);
 
-    memcpy(s->prev_pict_types, s1->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
+    memcpy(s->prev_pict_types, s1->prev_pict_types,
+           PREV_PICT_TYPES_BUFFER_SIZE);
 
-    //Error/bug resilience
+    // Error/bug resilience
     s->next_p_frame_damaged = s1->next_p_frame_damaged;
     s->workaround_bugs      = s1->workaround_bugs;
 
-    //MPEG4 timing info
-    memcpy(&s->time_increment_bits, &s1->time_increment_bits, (char*)&s1->shape - (char*)&s1->time_increment_bits);
+    // MPEG4 timing info
+    memcpy(&s->time_increment_bits, &s1->time_increment_bits,
+           (char *) &s1->shape - (char *) &s1->time_increment_bits);
 
-    //B-frame info
-    s->max_b_frames         = s1->max_b_frames;
-    s->low_delay            = s1->low_delay;
-    s->dropable             = s1->dropable;
+    // B-frame info
+    s->max_b_frames = s1->max_b_frames;
+    s->low_delay    = s1->low_delay;
+    s->dropable     = s1->dropable;
 
-    //DivX handling (doesn't work)
-    s->divx_packed          = s1->divx_packed;
+    // DivX handling (doesn't work)
+    s->divx_packed  = s1->divx_packed;
 
-    if(s1->bitstream_buffer){
-        if (s1->bitstream_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
-            av_fast_malloc(&s->bitstream_buffer, &s->allocated_bitstream_buffer_size, s1->allocated_bitstream_buffer_size);
-        s->bitstream_buffer_size  = s1->bitstream_buffer_size;
-        memcpy(s->bitstream_buffer, s1->bitstream_buffer, s1->bitstream_buffer_size);
-        memset(s->bitstream_buffer+s->bitstream_buffer_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
+    if (s1->bitstream_buffer) {
+        if (s1->bitstream_buffer_size +
+            FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
+            av_fast_malloc(&s->bitstream_buffer,
+                           &s->allocated_bitstream_buffer_size,
+                           s1->allocated_bitstream_buffer_size);
+        s->bitstream_buffer_size = s1->bitstream_buffer_size;
+        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
+               s1->bitstream_buffer_size);
+        memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
+               FF_INPUT_BUFFER_PADDING_SIZE);
     }
 
-    //MPEG2/interlacing info
-    memcpy(&s->progressive_sequence, &s1->progressive_sequence, (char*)&s1->rtp_mode - (char*)&s1->progressive_sequence);
+    // MPEG2/interlacing info
+    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
+           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
 
-    if(!s1->first_field){
-        s->last_pict_type= s1->pict_type;
-        if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
+    if (!s1->first_field) {
+        s->last_pict_type = s1->pict_type;
+        if (s1->current_picture_ptr)
+            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
 
         if (s1->pict_type != AV_PICTURE_TYPE_B) {
-            s->last_non_b_pict_type= s1->pict_type;
+            s->last_non_b_pict_type = s1->pict_type;
         }
     }
 
@@ -614,35 +624,40 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
 }
 
 /**
- * sets the given MpegEncContext to common defaults (same for encoding and decoding).
- * the changed fields will not depend upon the prior state of the MpegEncContext.
+ * sets the given MpegEncContext to common defaults
+ * (same for encoding and decoding).
+ * the changed fields will not depend upon the
+ * prior state of the MpegEncContext.
  */
-void MPV_common_defaults(MpegEncContext *s){
-    s->y_dc_scale_table=
-    s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
-    s->chroma_qscale_table= ff_default_chroma_qscale_table;
-    s->progressive_frame= 1;
-    s->progressive_sequence= 1;
-    s->picture_structure= PICT_FRAME;
-
-    s->coded_picture_number = 0;
-    s->picture_number = 0;
-    s->input_picture_number = 0;
+void MPV_common_defaults(MpegEncContext *s)
+{
+    s->y_dc_scale_table      =
+    s->c_dc_scale_table      = ff_mpeg1_dc_scale_table;
+    s->chroma_qscale_table   = ff_default_chroma_qscale_table;
+    s->progressive_frame     = 1;
+    s->progressive_sequence  = 1;
+    s->picture_structure     = PICT_FRAME;
+
+    s->coded_picture_number  = 0;
+    s->picture_number        = 0;
+    s->input_picture_number  = 0;
 
     s->picture_in_gop_number = 0;
 
-    s->f_code = 1;
-    s->b_code = 1;
+    s->f_code                = 1;
+    s->b_code                = 1;
 
-    s->picture_range_start = 0;
-    s->picture_range_end = MAX_PICTURE_COUNT;
+    s->picture_range_start   = 0;
+    s->picture_range_end     = MAX_PICTURE_COUNT;
 }
 
 /**
  * sets the given MpegEncContext to defaults for decoding.
- * the changed fields will not depend upon the prior state of the MpegEncContext.
+ * the changed fields will not depend upon
+ * the prior state of the MpegEncContext.
  */
-void MPV_decode_defaults(MpegEncContext *s){
+void MPV_decode_defaults(MpegEncContext *s)
+{
     MPV_common_defaults(s);
 }
 
@@ -658,157 +673,203 @@ av_cold int MPV_common_init(MpegEncContext *s)
                     s->avctx->active_thread_type & FF_THREAD_SLICE)) ?
                   s->avctx->thread_count : 1;
 
-    if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
+    if (s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
         s->mb_height = (s->height + 31) / 32 * 2;
     else if (s->codec_id != CODEC_ID_H264)
         s->mb_height = (s->height + 15) / 16;
 
-    if(s->avctx->pix_fmt == PIX_FMT_NONE){
-        av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
+    if (s->avctx->pix_fmt == PIX_FMT_NONE) {
+        av_log(s->avctx, AV_LOG_ERROR,
+               "decoding to PIX_FMT_NONE is not supported.\n");
         return -1;
     }
 
-    if((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
-       (s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height))){
+    if ((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
+        (s->avctx->thread_count > MAX_THREADS ||
+         (s->avctx->thread_count > s->mb_height && s->mb_height))) {
         int max_threads = FFMIN(MAX_THREADS, s->mb_height);
-        av_log(s->avctx, AV_LOG_WARNING, "too many threads (%d), reducing to %d\n",
+        av_log(s->avctx, AV_LOG_WARNING,
+               "too many threads (%d), reducing to %d\n",
                s->avctx->thread_count, max_threads);
         threads = max_threads;
     }
 
-    if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx))
+    if ((s->width || s->height) &&
+        av_image_check_size(s->width, s->height, 0, s->avctx))
         return -1;
 
     ff_dct_common_init(s);
 
-    s->flags= s->avctx->flags;
-    s->flags2= s->avctx->flags2;
+    s->flags  = s->avctx->flags;
+    s->flags2 = s->avctx->flags2;
 
     if (s->width && s->height) {
-        s->mb_width  = (s->width  + 15) / 16;
-        s->mb_stride = s->mb_width + 1;
-        s->b8_stride = s->mb_width*2 + 1;
-        s->b4_stride = s->mb_width*4 + 1;
-        mb_array_size= s->mb_height * s->mb_stride;
-        mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
+        s->mb_width   = (s->width + 15) / 16;
+        s->mb_stride  = s->mb_width + 1;
+        s->b8_stride  = s->mb_width * 2 + 1;
+        s->b4_stride  = s->mb_width * 4 + 1;
+        mb_array_size = s->mb_height * s->mb_stride;
+        mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
 
         /* set chroma shifts */
         avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
                                       &(s->chroma_y_shift) );
 
-        /* set default edge pos, will be overriden in decode_header if needed */
-        s->h_edge_pos= s->mb_width*16;
-        s->v_edge_pos= s->mb_height*16;
+        /* set default edge pos, will be overridden
+         * in decode_header if needed */
+        s->h_edge_pos = s->mb_width * 16;
+        s->v_edge_pos = s->mb_height * 16;
 
-        s->mb_num = s->mb_width * s->mb_height;
+        s->mb_num     = s->mb_width * s->mb_height;
 
-        s->block_wrap[0]=
-        s->block_wrap[1]=
-        s->block_wrap[2]=
-        s->block_wrap[3]= s->b8_stride;
-        s->block_wrap[4]=
-        s->block_wrap[5]= s->mb_stride;
+        s->block_wrap[0] =
+        s->block_wrap[1] =
+        s->block_wrap[2] =
+        s->block_wrap[3] = s->b8_stride;
+        s->block_wrap[4] =
+        s->block_wrap[5] = s->mb_stride;
 
-        y_size = s->b8_stride * (2 * s->mb_height + 1);
-        c_size = s->mb_stride * (s->mb_height + 1);
-        yc_size = y_size + 2 * c_size;
+        y_size  = s->b8_stride * (2 * s->mb_height + 1);
+        c_size  = s->mb_stride * (s->mb_height + 1);
+        yc_size = y_size + 2   * c_size;
 
         /* convert fourcc to upper case */
-        s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
+        s->codec_tag          = avpriv_toupper4(s->avctx->codec_tag);
 
-        s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
+        s->stream_codec_tag   = avpriv_toupper4(s->avctx->stream_codec_tag);
 
-        s->avctx->coded_frame= (AVFrame*)&s->current_picture;
+        s->avctx->coded_frame = (AVFrame *)&s->current_picture;
 
-        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error ressilience code looks cleaner with this
-        for(y=0; y<s->mb_height; y++){
-            for(x=0; x<s->mb_width; x++){
-                s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
-            }
-        }
-        s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
+        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
+                          fail); // error resilience code looks cleaner with this
+        for (y = 0; y < s->mb_height; y++)
+            for (x = 0; x < s->mb_width; x++)
+                s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
+
+        s->mb_index2xy[s->mb_height * s->mb_width] =
+                       (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
 
         if (s->encoding) {
             /* Allocate MV tables */
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t), fail)
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t), fail)
-            s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
-            s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
-            s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
-            s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
-            s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
-            s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;
-
-            if(s->msmpeg4_version){
-                FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
+            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
+                              mv_table_size * 2 * sizeof(int16_t), fail);
+            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
+                              mv_table_size * 2 * sizeof(int16_t), fail);
+            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
+                              mv_table_size * 2 * sizeof(int16_t), fail);
+            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
+                              mv_table_size * 2 * sizeof(int16_t), fail);
+            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
+                              mv_table_size * 2 * sizeof(int16_t), fail);
+            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
+                              mv_table_size * 2 * sizeof(int16_t), fail);
+            s->p_mv_table            = s->p_mv_table_base +
+                                       s->mb_stride + 1;
+            s->b_forw_mv_table       = s->b_forw_mv_table_base +
+                                       s->mb_stride + 1;
+            s->b_back_mv_table       = s->b_back_mv_table_base +
+                                       s->mb_stride + 1;
+            s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
+                                       s->mb_stride + 1;
+            s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
+                                       s->mb_stride + 1;
+            s->b_direct_mv_table     = s->b_direct_mv_table_base +
+                                       s->mb_stride + 1;
+
+            if (s->msmpeg4_version) {
+                FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
+                                  2 * 2 * (MAX_LEVEL + 1) *
+                                  (MAX_RUN + 1) * 2 * sizeof(int), fail);
             }
             FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
 
             /* Allocate MB type table */
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type  , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
-
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
-
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix  , 64*32   * sizeof(int), fail)
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix  , 64*32   * sizeof(int), fail)
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
-
-            if(s->avctx->noise_reduction){
-                FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
+            FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
+                              sizeof(uint16_t), fail); // needed for encoding
+
+            FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
+                              sizeof(int), fail);
+
+            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
+                              64 * 32   * sizeof(int), fail);
+            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
+                              64 * 32   * sizeof(int), fail);
+            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
+                              64 * 32 * 2 * sizeof(uint16_t), fail);
+            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
+                              64 * 32 * 2 * sizeof(uint16_t), fail);
+            FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
+                              MAX_PICTURE_COUNT * sizeof(Picture *), fail);
+            FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
+                              MAX_PICTURE_COUNT * sizeof(Picture *), fail);
+
+            if (s->avctx->noise_reduction) {
+                FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
+                                  2 * 64 * sizeof(uint16_t), fail);
             }
         }
     }
 
     s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
-    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, s->picture_count * sizeof(Picture), fail)
-    for(i = 0; i < s->picture_count; i++) {
-        avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
+    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
+                      s->picture_count * sizeof(Picture), fail);
+    for (i = 0; i < s->picture_count; i++) {
+        avcodec_get_frame_defaults((AVFrame *) &s->picture[i]);
     }
 
     if (s->width && s->height) {
-        FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
+        FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
+                          mb_array_size * sizeof(uint8_t), fail);
 
-        if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
+        if (s->codec_id == CODEC_ID_MPEG4 ||
+            (s->flags & CODEC_FLAG_INTERLACED_ME)) {
             /* interlaced direct mode decoding tables */
-            for(i=0; i<2; i++){
+            for (i = 0; i < 2; i++) {
                 int j, k;
-                for(j=0; j<2; j++){
-                    for(k=0; k<2; k++){
-                        FF_ALLOCZ_OR_GOTO(s->avctx,    s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
-                        s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
+                for (j = 0; j < 2; j++) {
+                    for (k = 0; k < 2; k++) {
+                        FF_ALLOCZ_OR_GOTO(s->avctx,
+                                          s->b_field_mv_table_base[i][j][k],
+                                          mv_table_size * 2 * sizeof(int16_t),
+                                          fail);
+                        s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
+                                                       s->mb_stride + 1;
                     }
-                    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
-                    FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
-                    s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
+                    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table[i][j],
+                                      mb_array_size * 2 * sizeof(uint8_t),
+                                      fail);
+                    FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
+                                      mv_table_size * 2 * sizeof(int16_t),
+                                      fail);
+                    s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] +
+                                                s->mb_stride + 1;
                 }
-                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
+                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
+                                  mb_array_size * 2 * sizeof(uint8_t),
+                                  fail);
             }
         }
         if (s->out_format == FMT_H263) {
             /* cbp values */
             FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
-            s->coded_block= s->coded_block_base + s->b8_stride + 1;
+            s->coded_block = s->coded_block_base + s->b8_stride + 1;
 
             /* cbp, ac_pred, pred_dir */
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail)
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
+            FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
+                              mb_array_size * sizeof(uint8_t), fail);
+            FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
+                              mb_array_size * sizeof(uint8_t), fail);
         }
 
         if (s->h263_pred || s->h263_plus || !s->encoding) {
             /* dc values */
-            //MN: we need these for error resilience of intra-frames
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
+            // MN: we need these for error resilience of intra-frames
+            FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
+                              yc_size * sizeof(int16_t), fail);
             s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
             s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
             s->dc_val[2] = s->dc_val[1] + c_size;
-            for(i=0;i<yc_size;i++)
+            for (i = 0; i < yc_size; i++)
                 s->dc_val_base[i] = 1024;
         }
 
@@ -817,39 +878,50 @@ av_cold int MPV_common_init(MpegEncContext *s)
         memset(s->mbintra_table, 1, mb_array_size);
 
         /* init macroblock skip table */
-        FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
-        //Note the +1 is for a quicker mpeg4 slice_end detection
-        FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
-
-        s->parse_context.state= -1;
-        if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
-            s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
-            s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
-            s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
+        FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
+        // Note the + 1 is for a quicker mpeg4 slice_end detection
+        FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types,
+                          PREV_PICT_TYPES_BUFFER_SIZE, fail);
+
+        s->parse_context.state = -1;
+        if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
+            (s->avctx->debug_mv)) {
+            s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
+                        2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
+            s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
+                        2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
+            s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
+                        2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
         }
     }
 
     s->context_initialized = 1;
-    s->thread_context[0]= s;
+    s->thread_context[0]   = s;
 
     if (s->width && s->height) {
-    if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
-        for(i=1; i<threads; i++){
-            s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
-            memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
-        }
+        if (s->encoding || (HAVE_THREADS &&
+                            s->avctx->active_thread_type & FF_THREAD_SLICE)) {
+            for (i = 1; i < threads; i++) {
+                s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
+                memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
+            }
 
-        for(i=0; i<threads; i++){
-            if(init_duplicate_context(s->thread_context[i], s) < 0)
+            for (i = 0; i < threads; i++) {
+                if (init_duplicate_context(s->thread_context[i], s) < 0)
+                    goto fail;
+                s->thread_context[i]->start_mb_y =
+                    (s->mb_height * (i) + s->avctx->thread_count / 2) /
+                    s->avctx->thread_count;
+                s->thread_context[i]->end_mb_y   =
+                    (s->mb_height * (i + 1) + s->avctx->thread_count / 2) /
+                    s->avctx->thread_count;
+            }
+        } else {
+            if (init_duplicate_context(s, s) < 0)
                 goto fail;
-            s->thread_context[i]->start_mb_y= (s->mb_height*(i  ) + s->avctx->thread_count/2) / s->avctx->thread_count;
-            s->thread_context[i]->end_mb_y  = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
+            s->start_mb_y = 0;
+            s->end_mb_y   = s->mb_height;
         }
-    } else {
-        if(init_duplicate_context(s, s) < 0) goto fail;
-        s->start_mb_y = 0;
-        s->end_mb_y   = s->mb_height;
-    }
     }
 
     return 0;
@@ -863,17 +935,18 @@ void MPV_common_end(MpegEncContext *s)
 {
     int i, j, k;
 
-    if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
-        for(i=0; i<s->avctx->thread_count; i++){
+    if (s->encoding || (HAVE_THREADS &&
+        s->avctx->active_thread_type & FF_THREAD_SLICE)) {
+        for (i = 0; i < s->avctx->thread_count; i++) {
             free_duplicate_context(s->thread_context[i]);
         }
-        for(i=1; i<s->avctx->thread_count; i++){
+        for (i = 1; i < s->avctx->thread_count; i++) {
             av_freep(&s->thread_context[i]);
         }
     } else free_duplicate_context(s);
 
     av_freep(&s->parse_context.buffer);
-    s->parse_context.buffer_size=0;
+    s->parse_context.buffer_size = 0;
 
     av_freep(&s->mb_type);
     av_freep(&s->p_mv_table_base);
@@ -882,21 +955,21 @@ void MPV_common_end(MpegEncContext *s)
     av_freep(&s->b_bidir_forw_mv_table_base);
     av_freep(&s->b_bidir_back_mv_table_base);
     av_freep(&s->b_direct_mv_table_base);
-    s->p_mv_table= NULL;
-    s->b_forw_mv_table= NULL;
-    s->b_back_mv_table= NULL;
-    s->b_bidir_forw_mv_table= NULL;
-    s->b_bidir_back_mv_table= NULL;
-    s->b_direct_mv_table= NULL;
-    for(i=0; i<2; i++){
-        for(j=0; j<2; j++){
-            for(k=0; k<2; k++){
+    s->p_mv_table            = NULL;
+    s->b_forw_mv_table       = NULL;
+    s->b_back_mv_table       = NULL;
+    s->b_bidir_forw_mv_table = NULL;
+    s->b_bidir_back_mv_table = NULL;
+    s->b_direct_mv_table     = NULL;
+    for (i = 0; i < 2; i++) {
+        for (j = 0; j < 2; j++) {
+            for (k = 0; k < 2; k++) {
                 av_freep(&s->b_field_mv_table_base[i][j][k]);
-                s->b_field_mv_table[i][j][k]=NULL;
+                s->b_field_mv_table[i][j][k] = NULL;
             }
             av_freep(&s->b_field_select_table[i][j]);
             av_freep(&s->p_field_mv_table_base[i][j]);
-            s->p_field_mv_table[i][j]=NULL;
+            s->p_field_mv_table[i][j] = NULL;
         }
         av_freep(&s->p_field_select_table[i]);
     }
@@ -910,7 +983,7 @@ void MPV_common_end(MpegEncContext *s)
     av_freep(&s->mbskip_table);
     av_freep(&s->prev_pict_types);
     av_freep(&s->bitstream_buffer);
-    s->allocated_bitstream_buffer_size=0;
+    s->allocated_bitstream_buffer_size = 0;
 
     av_freep(&s->avctx->stats_out);
     av_freep(&s->ac_stats);
@@ -925,37 +998,38 @@ void MPV_common_end(MpegEncContext *s)
     av_freep(&s->reordered_input_picture);
     av_freep(&s->dct_offset);
 
-    if(s->picture && !s->avctx->internal->is_copy){
-        for(i=0; i<s->picture_count; i++){
+    if (s->picture && !s->avctx->internal->is_copy) {
+        for (i = 0; i < s->picture_count; i++) {
             free_picture(s, &s->picture[i]);
         }
     }
     av_freep(&s->picture);
-    s->context_initialized = 0;
-    s->last_picture_ptr=
-    s->next_picture_ptr=
-    s->current_picture_ptr= NULL;
-    s->linesize= s->uvlinesize= 0;
+    s->context_initialized      = 0;
+    s->last_picture_ptr         =
+    s->next_picture_ptr         =
+    s->current_picture_ptr      = NULL;
+    s->linesize = s->uvlinesize = 0;
 
-    for(i=0; i<3; i++)
+    for (i = 0; i < 3; i++)
         av_freep(&s->visualization_buffer[i]);
 
-    if(!(s->avctx->active_thread_type&FF_THREAD_FRAME))
+    if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
         avcodec_default_free_buffers(s->avctx);
 }
 
-void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
+void init_rl(RLTable *rl,
+             uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
 {
-    int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
-    uint8_t index_run[MAX_RUN+1];
+    int8_t  max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
+    uint8_t index_run[MAX_RUN + 1];
     int last, run, level, start, end, i;
 
-    /* If table is static, we can quit if rl->max_level[0] is not NULL */
-    if(static_store && rl->max_level[0])
+    /* If table is static, we can quit if rl->max_level[0] is not NULL */
+    if (static_store && rl->max_level[0])
         return;
 
     /* compute max_level[], max_run[] and index_run[] */
-    for(last=0;last<2;last++) {
+    for (last = 0; last < 2; last++) {
         if (last == 0) {
             start = 0;
             end = rl->last;
@@ -967,8 +1041,8 @@ void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
         memset(max_level, 0, MAX_RUN + 1);
         memset(max_run, 0, MAX_LEVEL + 1);
         memset(index_run, rl->n, MAX_RUN + 1);
-        for(i=start;i<end;i++) {
-            run = rl->table_run[i];
+        for (i = start; i < end; i++) {
+            run   = rl->table_run[i];
             level = rl->table_level[i];
             if (index_run[run] == rl->n)
                 index_run[run] = i;
@@ -977,17 +1051,17 @@ void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
             if (run > max_run[level])
                 max_run[level] = run;
         }
-        if(static_store)
+        if (static_store)
             rl->max_level[last] = static_store[last];
         else
             rl->max_level[last] = av_malloc(MAX_RUN + 1);
         memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
-        if(static_store)
-            rl->max_run[last] = static_store[last] + MAX_RUN + 1;
+        if (static_store)
+            rl->max_run[last]   = static_store[last] + MAX_RUN + 1;
         else
-            rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
+            rl->max_run[last]   = av_malloc(MAX_LEVEL + 1);
         memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
-        if(static_store)
+        if (static_store)
             rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
         else
             rl->index_run[last] = av_malloc(MAX_RUN + 1);
@@ -999,77 +1073,79 @@ void init_vlc_rl(RLTable *rl)
 {
     int i, q;
 
-    for(q=0; q<32; q++){
-        int qmul= q*2;
-        int qadd= (q-1)|1;
+    for (q = 0; q < 32; q++) {
+        int qmul = q * 2;
+        int qadd = (q - 1) | 1;
 
-        if(q==0){
-            qmul=1;
-            qadd=0;
+        if (q == 0) {
+            qmul = 1;
+            qadd = 0;
         }
-        for(i=0; i<rl->vlc.table_size; i++){
-            int code= rl->vlc.table[i][0];
-            int len = rl->vlc.table[i][1];
+        for (i = 0; i < rl->vlc.table_size; i++) {
+            int code = rl->vlc.table[i][0];
+            int len  = rl->vlc.table[i][1];
             int level, run;
 
-            if(len==0){ // illegal code
-                run= 66;
-                level= MAX_LEVEL;
-            }else if(len<0){ //more bits needed
-                run= 0;
-                level= code;
-            }else{
-                if(code==rl->n){ //esc
-                    run= 66;
-                    level= 0;
-                }else{
-                    run=   rl->table_run  [code] + 1;
-                    level= rl->table_level[code] * qmul + qadd;
-                    if(code >= rl->last) run+=192;
+            if (len == 0) { // illegal code
+                run   = 66;
+                level = MAX_LEVEL;
+            } else if (len < 0) { // more bits needed
+                run   = 0;
+                level = code;
+            } else {
+                if (code == rl->n) { // esc
+                    run   = 66;
+                    level = 0;
+                } else {
+                    run   = rl->table_run[code] + 1;
+                    level = rl->table_level[code] * qmul + qadd;
+                    if (code >= rl->last) run += 192;
                 }
             }
-            rl->rl_vlc[q][i].len= len;
-            rl->rl_vlc[q][i].level= level;
-            rl->rl_vlc[q][i].run= run;
+            rl->rl_vlc[q][i].len   = len;
+            rl->rl_vlc[q][i].level = level;
+            rl->rl_vlc[q][i].run   = run;
         }
     }
 }
 
-void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
+void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
 {
     int i;
 
     /* release non reference frames */
-    for(i=0; i<s->picture_count; i++){
-        if (s->picture[i].f.data[0] && !s->picture[i].f.reference
-           && (!s->picture[i].owner2 || s->picture[i].owner2 == s)
-           && (remove_current || &s->picture[i] != s->current_picture_ptr)
-           /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
+    for (i = 0; i < s->picture_count; i++) {
+        if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
+            (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
+            (remove_current || &s->picture[i] != s->current_picture_ptr)
+            /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
             free_frame_buffer(s, &s->picture[i]);
         }
     }
 }
 
-int ff_find_unused_picture(MpegEncContext *s, int shared){
+int ff_find_unused_picture(MpegEncContext *s, int shared)
+{
     int i;
 
-    if(shared){
-        for(i=s->picture_range_start; i<s->picture_range_end; i++){
+    if (shared) {
+        for (i = s->picture_range_start; i < s->picture_range_end; i++) {
             if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
                 return i;
         }
-    }else{
-        for(i=s->picture_range_start; i<s->picture_range_end; i++){
+    } else {
+        for (i = s->picture_range_start; i < s->picture_range_end; i++) {
             if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
-                return i; //FIXME
+                return i; // FIXME
         }
-        for(i=s->picture_range_start; i<s->picture_range_end; i++){
+        for (i = s->picture_range_start; i < s->picture_range_end; i++) {
             if (s->picture[i].f.data[0] == NULL)
                 return i;
         }
     }
 
-    av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
+    av_log(s->avctx, AV_LOG_FATAL,
+           "Internal error, picture buffer overflow\n");
     /* We could return -1, but the codec would crash trying to draw into a
      * non-existing frame anyway. This is safer than waiting for a random crash.
      * Also the return of this is never useful, an encoder must only allocate