@@ -208,19 +208,19 @@ void ff_fmt_convert_init_x86(FmtConvertContext *c, AVCodecContext *avctx)
 #if HAVE_YASM
         c->float_interleave = float_interleave_mmx;
 
-        if(mm_flags & AV_CPU_FLAG_3DNOW){
+        if (HAVE_AMD3DNOW && mm_flags & AV_CPU_FLAG_3DNOW) {
             if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                 c->float_to_int16 = ff_float_to_int16_3dnow;
                 c->float_to_int16_interleave = float_to_int16_interleave_3dnow;
             }
         }
-        if(mm_flags & AV_CPU_FLAG_3DNOWEXT){
+        if (HAVE_AMD3DNOWEXT && mm_flags & AV_CPU_FLAG_3DNOWEXT) {
             if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                 c->float_to_int16_interleave = float_to_int16_interleave_3dn2;
             }
         }
 #endif
-        if(mm_flags & AV_CPU_FLAG_SSE){
+        if (HAVE_SSE && mm_flags & AV_CPU_FLAG_SSE) {
             c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse;
 #if HAVE_YASM
             c->float_to_int16 = ff_float_to_int16_sse;
@@ -228,7 +228,7 @@ void ff_fmt_convert_init_x86(FmtConvertContext *c, AVCodecContext *avctx)
             c->float_interleave = float_interleave_sse;
 #endif
         }
-        if(mm_flags & AV_CPU_FLAG_SSE2){
+        if (HAVE_SSE && mm_flags & AV_CPU_FLAG_SSE2) {
             c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse2;
 #if HAVE_YASM
             c->float_to_int16 = ff_float_to_int16_sse2;