@@ -685,6 +685,25 @@ static void read_vuya_A_c(uint8_t *dst, const uint8_t *src, const uint8_t *unuse
        dst[i] = src[i * 4 + 3];
}

static void read_xv36le_Y_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused0, const uint8_t *unused1, int width,
                            uint32_t *unused2, void *opq)
{
    int i;
    for (i = 0; i < width; i++)
        AV_WN16(dst + i * 2, AV_RL16(src + i * 8 + 2) >> 4);
}

static void read_xv36le_UV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src,
                             const uint8_t *unused1, int width, uint32_t *unused2, void *opq)
{
    int i;
    for (i = 0; i < width; i++) {
        AV_WN16(dstU + i * 2, AV_RL16(src + i * 8 + 0) >> 4);
        AV_WN16(dstV + i * 2, AV_RL16(src + i * 8 + 4) >> 4);
    }
}

/* This is almost identical to the previous, end exists only because
 * yuy2ToY/UV)(dst, src + 1, ...) would have 100% unaligned accesses. */
static void uyvyToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
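Background on the new readers: AV_PIX_FMT_XV36LE stores each 4:4:4 pixel as four little-endian 16-bit words, one per component, in U, Y, V, X (unused) order, eight bytes per pixel, with the 12-bit sample in the 12 high bits and zeros in the 4 low bits. Each reader above therefore loads the word at byte offset 0 (U), 2 (Y) or 4 (V) with AV_RL16, shifts right by 4 to right-align the 12-bit value, and stores it as a native-endian 16-bit sample with AV_WN16. The standalone sketch below only illustrates that arithmetic; rl16 and unpack_xv36le_pixel are made-up names, not part of libswscale.

    #include <stdint.h>
    #include <stdio.h>

    /* Plain-C stand-in for AV_RL16: read one little-endian 16-bit word. */
    static uint16_t rl16(const uint8_t *p)
    {
        return (uint16_t)(p[0] | (p[1] << 8));
    }

    /* Unpack one 8-byte XV36LE pixel (U, Y, V, X words) into 12-bit samples. */
    static void unpack_xv36le_pixel(const uint8_t *px,
                                    uint16_t *y, uint16_t *u, uint16_t *v)
    {
        *u = rl16(px + 0) >> 4;  /* same math as AV_RL16(src + i * 8 + 0) >> 4 */
        *y = rl16(px + 2) >> 4;  /* same math as AV_RL16(src + i * 8 + 2) >> 4 */
        *v = rl16(px + 4) >> 4;  /* same math as AV_RL16(src + i * 8 + 4) >> 4 */
    }

    int main(void)
    {
        /* Y=0x200, U=0x800, V=0xfff, each stored MSB-aligned in a 16-bit LE word. */
        const uint8_t px[8] = { 0x00, 0x80, 0x00, 0x20, 0xf0, 0xff, 0x00, 0x00 };
        uint16_t y, u, v;
        unpack_xv36le_pixel(px, &y, &u, &v);
        printf("Y=%#x U=%#x V=%#x\n", y, u, v);  /* prints Y=0x200 U=0x800 V=0xfff */
        return 0;
    }
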
@@ -1381,6 +1400,9 @@ av_cold void ff_sws_init_input_funcs(SwsContext *c)
    case AV_PIX_FMT_AYUV64LE:
        c->chrToYV12 = read_ayuv64le_UV_c;
        break;
    case AV_PIX_FMT_XV36LE:
        c->chrToYV12 = read_xv36le_UV_c;
        break;
    case AV_PIX_FMT_P010LE:
    case AV_PIX_FMT_P210LE:
    case AV_PIX_FMT_P410LE:
@@ -1759,6 +1781,9 @@ av_cold void ff_sws_init_input_funcs(SwsContext *c)
    case AV_PIX_FMT_AYUV64LE:
        c->lumToYV12 = read_ayuv64le_Y_c;
        break;
    case AV_PIX_FMT_XV36LE:
        c->lumToYV12 = read_xv36le_Y_c;
        break;
    case AV_PIX_FMT_YUYV422:
    case AV_PIX_FMT_YVYU422:
    case AV_PIX_FMT_YA8:
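The two switch hunks register the new readers in ff_sws_init_input_funcs: conceptually, lumToYV12 unpacks one row of packed input into 16-bit luma samples and chrToYV12 fills the corresponding U and V rows (XV36 is 4:4:4, so the chroma rows have the same width as luma). The sketch below is a standalone illustration of that calling shape only; luma_reader, chroma_reader, toy_xv36le_to_y, toy_xv36le_to_uv, rl16 and wn16 are made-up names, not libswscale's internal API.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Toy function-pointer types mirroring the parameter layout of the readers
     * above (dst(s), src in the position the patch uses, width, two spare
     * pointer args, opaque). */
    typedef void (*luma_reader)(uint8_t *dst, const uint8_t *src,
                                const uint8_t *unused0, const uint8_t *unused1,
                                int width, uint32_t *unused2, void *opq);
    typedef void (*chroma_reader)(uint8_t *dstU, uint8_t *dstV,
                                  const uint8_t *unused0, const uint8_t *src,
                                  const uint8_t *unused1, int width,
                                  uint32_t *unused2, void *opq);

    static uint16_t rl16(const uint8_t *p) { return (uint16_t)(p[0] | (p[1] << 8)); }
    static void wn16(uint8_t *p, uint16_t v) { memcpy(p, &v, 2); } /* native-endian store */

    /* Stand-ins repeating the loops of read_xv36le_Y_c / read_xv36le_UV_c. */
    static void toy_xv36le_to_y(uint8_t *dst, const uint8_t *src, const uint8_t *u0,
                                const uint8_t *u1, int width, uint32_t *u2, void *opq)
    {
        (void)u0; (void)u1; (void)u2; (void)opq;  /* unused, kept to match the shape */
        for (int i = 0; i < width; i++)
            wn16(dst + i * 2, rl16(src + i * 8 + 2) >> 4);
    }

    static void toy_xv36le_to_uv(uint8_t *dstU, uint8_t *dstV, const uint8_t *u0,
                                 const uint8_t *src, const uint8_t *u1, int width,
                                 uint32_t *u2, void *opq)
    {
        (void)u0; (void)u1; (void)u2; (void)opq;  /* unused, kept to match the shape */
        for (int i = 0; i < width; i++) {
            wn16(dstU + i * 2, rl16(src + i * 8 + 0) >> 4);
            wn16(dstV + i * 2, rl16(src + i * 8 + 4) >> 4);
        }
    }

    int main(void)
    {
        /* The switch cases added above play the role of this selection. */
        luma_reader   to_y  = toy_xv36le_to_y;
        chroma_reader to_uv = toy_xv36le_to_uv;

        /* Two XV36LE pixels: (Y,U,V) = (0x111,0x222,0x333) and (0xfff,0x000,0x800). */
        const uint8_t row[16] = {
            0x20, 0x22, 0x10, 0x11, 0x30, 0x33, 0x00, 0x00,
            0x00, 0x00, 0xf0, 0xff, 0x00, 0x80, 0x00, 0x00,
        };
        uint8_t y[4], u[4], v[4];

        to_y (y,    row, NULL, NULL, 2, NULL, NULL);
        to_uv(u, v, NULL, row, NULL, 2, NULL, NULL);

        for (int i = 0; i < 2; i++) {
            uint16_t ys, us, vs;
            memcpy(&ys, y + i * 2, 2);
            memcpy(&us, u + i * 2, 2);
            memcpy(&vs, v + i * 2, 2);
            printf("pixel %d: Y=%#x U=%#x V=%#x\n",
                   i, (unsigned)ys, (unsigned)us, (unsigned)vs);
        }
        return 0;
    }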