[FFmpeg-devel] [PATCH] sws/aarch64: add {nv12, nv21, yuv420p, yuv422p}_to_{argb, rgba, abgr, bgra}_neon
Clément Bœsch
u at pkh.me
Wed Feb 17 16:18:26 CET 2016
From: Clément Bœsch <clement at stupeflix.com>
---
This code mirrors the ARM version.
It has only been tested under qemu, not on real hardware, so I have no idea
about its performance. For the same reason I didn't add any prefetch
mechanism, even though prefetching had a noticeable impact on performance on
ARM; a sketch of what it could look like is included below.
Last but not least, the 32-bit precision functions are added (and tested) but
are not enabled at all, just like their ARM counterparts.
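For reference, here is a minimal, untested sketch of what such a prefetch
could look like in the luma load path. Both the pldl1strm hint and the #64
read-ahead offset are placeholder guesses that would need benchmarking on
real hardware:

    ld1     {v2.16B}, [x4], #16         // load 16 luma bytes, post-increment
    prfm    pldl1strm, [x4, #64]        // speculative read-ahead into L1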
---
libswscale/aarch64/Makefile | 3 +
libswscale/aarch64/swscale_unscaled.c | 145 +++++++++++++++
libswscale/aarch64/yuv2rgb_neon.S | 347 ++++++++++++++++++++++++++++++++++
libswscale/swscale_internal.h | 1 +
libswscale/swscale_unscaled.c | 2 +
5 files changed, 498 insertions(+)
create mode 100644 libswscale/aarch64/Makefile
create mode 100644 libswscale/aarch64/swscale_unscaled.c
create mode 100644 libswscale/aarch64/yuv2rgb_neon.S
diff --git a/libswscale/aarch64/Makefile b/libswscale/aarch64/Makefile
new file mode 100644
index 0000000..823806e
--- /dev/null
+++ b/libswscale/aarch64/Makefile
@@ -0,0 +1,3 @@
+OBJS += aarch64/swscale_unscaled.o
+
+NEON-OBJS += aarch64/yuv2rgb_neon.o
diff --git a/libswscale/aarch64/swscale_unscaled.c b/libswscale/aarch64/swscale_unscaled.c
new file mode 100644
index 0000000..8b04c2b
--- /dev/null
+++ b/libswscale/aarch64/swscale_unscaled.c
@@ -0,0 +1,145 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "libswscale/swscale.h"
+#include "libswscale/swscale_internal.h"
+#include "libavutil/aarch64/cpu.h"
+
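+/* Build the 4-entry chroma coefficient table (v2r, u2g, v2g, u2b) passed
+ * down to the assembly. For the 16-bit precision path the coefficients are
+ * scaled down by 2^7 so that they fit into int16_t. */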
+#define YUV_TO_RGB_TABLE(precision) \
+ c->yuv2rgb_v2r_coeff / ((precision) == 16 ? 1 << 7 : 1), \
+ c->yuv2rgb_u2g_coeff / ((precision) == 16 ? 1 << 7 : 1), \
+ c->yuv2rgb_v2g_coeff / ((precision) == 16 ? 1 << 7 : 1), \
+ c->yuv2rgb_u2b_coeff / ((precision) == 16 ? 1 << 7 : 1), \
+
+#define DECLARE_FF_YUVX_TO_RGBX_FUNCS(ifmt, ofmt, precision) \
+int ff_##ifmt##_to_##ofmt##_neon_##precision(int w, int h, \
+ uint8_t *dst, int linesize, \
+ const uint8_t *srcY, int linesizeY, \
+ const uint8_t *srcU, int linesizeU, \
+ const uint8_t *srcV, int linesizeV, \
+ const int16_t *table, \
+ int y_offset, \
+ int y_coeff); \
+ \
+static int ifmt##_to_##ofmt##_neon_wrapper_##precision(SwsContext *c, const uint8_t *src[], \
+ int srcStride[], int srcSliceY, int srcSliceH, \
+ uint8_t *dst[], int dstStride[]) { \
+ const int16_t yuv2rgb_table[] = { YUV_TO_RGB_TABLE(precision) }; \
+ \
+ ff_##ifmt##_to_##ofmt##_neon_##precision(c->srcW, srcSliceH, \
+ dst[0] + srcSliceY * dstStride[0], dstStride[0], \
+ src[0], srcStride[0], \
+ src[1], srcStride[1], \
+ src[2], srcStride[2], \
+ yuv2rgb_table, \
+ c->yuv2rgb_y_offset >> 9, \
+ c->yuv2rgb_y_coeff / ((precision) == 16 ? 1 << 7 : 1)); \
+ \
+ return 0; \
+} \
+
+#define DECLARE_FF_YUVX_TO_ALL_RGBX_FUNCS(yuvx, precision) \
+DECLARE_FF_YUVX_TO_RGBX_FUNCS(yuvx, argb, precision) \
+DECLARE_FF_YUVX_TO_RGBX_FUNCS(yuvx, rgba, precision) \
+DECLARE_FF_YUVX_TO_RGBX_FUNCS(yuvx, abgr, precision) \
+DECLARE_FF_YUVX_TO_RGBX_FUNCS(yuvx, bgra, precision) \
+
+#define DECLARE_FF_YUVX_TO_ALL_RGBX_ALL_PRECISION_FUNCS(yuvx) \
+DECLARE_FF_YUVX_TO_ALL_RGBX_FUNCS(yuvx, 16) \
+//DECLARE_FF_YUVX_TO_ALL_RGBX_FUNCS(yuvx, 32) \
+
+DECLARE_FF_YUVX_TO_ALL_RGBX_ALL_PRECISION_FUNCS(yuv420p)
+DECLARE_FF_YUVX_TO_ALL_RGBX_ALL_PRECISION_FUNCS(yuv422p)
+
+#define DECLARE_FF_NVX_TO_RGBX_FUNCS(ifmt, ofmt, precision) \
+int ff_##ifmt##_to_##ofmt##_neon_##precision(int w, int h, \
+ uint8_t *dst, int linesize, \
+ const uint8_t *srcY, int linesizeY, \
+ const uint8_t *srcC, int linesizeC, \
+ const int16_t *table, \
+ int y_offset, \
+ int y_coeff); \
+ \
+static int ifmt##_to_##ofmt##_neon_wrapper_##precision(SwsContext *c, const uint8_t *src[], \
+ int srcStride[], int srcSliceY, int srcSliceH, \
+ uint8_t *dst[], int dstStride[]) { \
+ const int16_t yuv2rgb_table[] = { YUV_TO_RGB_TABLE(precision) }; \
+ \
+ ff_##ifmt##_to_##ofmt##_neon_##precision(c->srcW, srcSliceH, \
+ dst[0] + srcSliceY * dstStride[0], dstStride[0], \
+ src[0], srcStride[0], src[1], srcStride[1], \
+ yuv2rgb_table, \
+ c->yuv2rgb_y_offset >> 9, \
+ c->yuv2rgb_y_coeff / ((precision) == 16 ? 1 << 7 : 1)); \
+ \
+ return 0; \
+} \
+
+#define DECLARE_FF_NVX_TO_ALL_RGBX_FUNCS(nvx, precision) \
+DECLARE_FF_NVX_TO_RGBX_FUNCS(nvx, argb, precision) \
+DECLARE_FF_NVX_TO_RGBX_FUNCS(nvx, rgba, precision) \
+DECLARE_FF_NVX_TO_RGBX_FUNCS(nvx, abgr, precision) \
+DECLARE_FF_NVX_TO_RGBX_FUNCS(nvx, bgra, precision) \
+
+#define DECLARE_FF_NVX_TO_ALL_RGBX_ALL_PRECISION_FUNCS(nvx) \
+DECLARE_FF_NVX_TO_ALL_RGBX_FUNCS(nvx, 16) \
+//DECLARE_FF_NVX_TO_ALL_RGBX_FUNCS(nvx, 32) \
+
+DECLARE_FF_NVX_TO_ALL_RGBX_ALL_PRECISION_FUNCS(nv12)
+DECLARE_FF_NVX_TO_ALL_RGBX_ALL_PRECISION_FUNCS(nv21)
+
+/* We need a 16 pixel width alignment. This constraint could easily be
+ * lifted for the input reads, but the output is 4 bytes per pixel (RGBA),
+ * so the assembly might write up to 4*15 = 60 extra bytes at the end of a
+ * line, which does not fit in the 32-byte buffer alignment padding. */
+#define SET_FF_NVX_TO_RGBX_FUNC(ifmt, IFMT, ofmt, OFMT, accurate_rnd) do { \
+ if (c->srcFormat == AV_PIX_FMT_##IFMT \
+ && c->dstFormat == AV_PIX_FMT_##OFMT \
+ && !(c->srcH & 1) \
+ && !(c->srcW & 15) \
+ && !accurate_rnd) { \
+ c->swscale = ifmt##_to_##ofmt##_neon_wrapper_16; \
+ } \
+} while (0)
+
+#define SET_FF_NVX_TO_ALL_RGBX_FUNC(nvx, NVX, accurate_rnd) do { \
+ SET_FF_NVX_TO_RGBX_FUNC(nvx, NVX, argb, ARGB, accurate_rnd); \
+ SET_FF_NVX_TO_RGBX_FUNC(nvx, NVX, rgba, RGBA, accurate_rnd); \
+ SET_FF_NVX_TO_RGBX_FUNC(nvx, NVX, abgr, ABGR, accurate_rnd); \
+ SET_FF_NVX_TO_RGBX_FUNC(nvx, NVX, bgra, BGRA, accurate_rnd); \
+} while (0)
+
+static void get_unscaled_swscale_neon(SwsContext *c) {
+ int accurate_rnd = c->flags & SWS_ACCURATE_RND;
+
+ SET_FF_NVX_TO_ALL_RGBX_FUNC(nv12, NV12, accurate_rnd);
+ SET_FF_NVX_TO_ALL_RGBX_FUNC(nv21, NV21, accurate_rnd);
+ SET_FF_NVX_TO_ALL_RGBX_FUNC(yuv420p, YUV420P, accurate_rnd);
+ SET_FF_NVX_TO_ALL_RGBX_FUNC(yuv422p, YUV422P, accurate_rnd);
+}
+
+void ff_get_unscaled_swscale_aarch64(SwsContext *c)
+{
+ int cpu_flags = av_get_cpu_flags();
+ if (have_neon(cpu_flags))
+ get_unscaled_swscale_neon(c);
+}
diff --git a/libswscale/aarch64/yuv2rgb_neon.S b/libswscale/aarch64/yuv2rgb_neon.S
new file mode 100644
index 0000000..360168a
--- /dev/null
+++ b/libswscale/aarch64/yuv2rgb_neon.S
@@ -0,0 +1,347 @@
+/*
+ * Copyright (c) 2015 Matthieu Bouron <matthieu.bouron stupeflix.com>
+ * Copyright (c) 2015 Clément Bœsch <clement stupeflix.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+
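+/* Duplicate each chroma sample so it covers two luma samples, then
+ * premultiply U and V with the v2r/u2g/v2g/u2b coefficients held in v1. */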
+.macro compute_premult_16 u v
+ zip1 v4.8H, \u, \u
+ zip2 v5.8H, \u, \u
+ zip1 v6.8H, \v, \v
+ zip2 v7.8H, \v, \v
+
+ mul v20.8H, v6.8H, v1.H[0] // V * v2r (left, red)
+ mul v21.8H, v7.8H, v1.H[0] // V * v2r (right, red)
+ mul v22.8H, v4.8H, v1.H[1] // U * u2g
+ mul v23.8H, v5.8H, v1.H[1] // U * u2g
+ mla v22.8H, v6.8H, v1.H[2] // U * u2g + V * v2g (left, green)
+ mla v23.8H, v7.8H, v1.H[2] // U * u2g + V * v2g (right, green)
+ mul v24.8H, v4.8H, v1.H[3] // U * u2b (left, blue)
+ mul v25.8H, v5.8H, v1.H[3] // U * u2b (right, blue)
+.endm
+
+.macro compute_premult_32 part u v
+ zip\part v4.8H, \u, \u
+ zip\part v5.8H, \v, \v
+
+ smull v20.4S, v5.4H, v1.H[0] // V * v2r (left, red)
+ smull2 v21.4S, v5.8H, v1.H[0] // V * v2r (right, red)
+ smull v22.4S, v4.4H, v1.H[1] // U * u2g
+ smull2 v23.4S, v4.8H, v1.H[1] // U * u2g
+ smlal v22.4S, v5.4H, v1.H[2] // U * u2g + V * v2g (left, green)
+ smlal2 v23.4S, v5.8H, v1.H[2] // U * u2g + V * v2g (right, green)
+ smull v24.4S, v4.4H, v1.H[3] // U * u2b (left, blue)
+ smull2 v25.4S, v4.8H, v1.H[3] // U * u2b (right, blue)
+.endm
+
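+/* Add the luma term (v26/v27) to a premultiplied chroma term and narrow
+ * to 8 bits with a saturating rounding right shift. */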
+.macro compute_color_16 dst_comp1 dst_comp2 pre1 pre2
+ add v28.8H, v26.8H, \pre1
+ add v29.8H, v27.8H, \pre2
+ sqrshrun \dst_comp1, v28.8H, #6
+ sqrshrun \dst_comp2, v29.8H, #6
+.endm
+
+.macro compute_color_32 dst_comp pre1 pre2
+ add v28.4S, v26.4S, \pre1
+ add v29.4S, v27.4S, \pre2
+ sqrshrun v16.4H, v28.4S, #13
+ sqrshrun2 v16.8H, v29.4S, #13
+ uqxtn \dst_comp, v16.8H
+.endm
+
+.macro compute_rgba_16 r1 g1 b1 a1 r2 g2 b2 a2
+ compute_color_16 \r1, \r2, v20.8H, v21.8H
+ compute_color_16 \g1, \g2, v22.8H, v23.8H
+ compute_color_16 \b1, \b2, v24.8H, v25.8H
+ movi \a1, #255
+ movi \a2, #255
+.endm
+
+.macro compute_rgba_32 r g b a
+ compute_color_32 \r, v20.4S, v21.4S
+ compute_color_32 \g, v22.4S, v23.4S
+ compute_color_32 \b, v24.4S, v25.4S
+ movi \a, #255
+.endm
+
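+/* Convert 16 pixels: widen the 16 luma bytes, apply y_offset and y_coeff,
+ * compute the R/G/B components in the channel order requested by \ofmt,
+ * and store them interleaved with st4. */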
+.macro compute_16px_16 dst y ofmt
+ uxtl v26.8H, \y\().8B
+ uxtl2 v27.8H, \y\().16B
+
+ sub v26.8H, v26.8H, v3.8H
+ sub v27.8H, v27.8H, v3.8H
+
+ mul v26.8H, v26.8H, v0.8H
+ mul v27.8H, v27.8H, v0.8H
+
+.ifc \ofmt,argb // 1 2 3 0
+ compute_rgba_16 v5.8B,v6.8B,v7.8B,v4.8B, v17.8B,v18.8B,v19.8B,v16.8B
+.endif
+
+.ifc \ofmt,rgba // 0 1 2 3
+ compute_rgba_16 v4.8B,v5.8B,v6.8B,v7.8B, v16.8B,v17.8B,v18.8B,v19.8B
+.endif
+
+.ifc \ofmt,abgr // 3 2 1 0
+ compute_rgba_16 v7.8B,v6.8B,v5.8B,v4.8B, v19.8B,v18.8B,v17.8B,v16.8B
+.endif
+
+.ifc \ofmt,bgra // 2 1 0 3
+ compute_rgba_16 v6.8B,v5.8B,v4.8B,v7.8B, v18.8B,v17.8B,v16.8B,v19.8B
+.endif
+
+ st4 { v4.8B, v5.8B, v6.8B, v7.8B}, [\dst], #32
+ st4 {v16.8B,v17.8B,v18.8B,v19.8B}, [\dst], #32
+.endm
+
+.macro compute_8px_32 dst half_y ofmt
+ sub \half_y\().8H, \half_y\().8H, v3.8H
+ smull v26.4S, \half_y\().4H, v0.4H // v26 = (srcY - y_offset) * y_coeff (left)
+ smull2 v27.4S, \half_y\().8H, v0.8H // v27 = (srcY - y_offset) * y_coeff (right)
+
+.ifc \ofmt,argb // 1 2 3 0
+ compute_rgba_32 v5.8B,v6.8B,v7.8B,v4.8B
+.endif
+
+.ifc \ofmt,rgba // 0 1 2 3
+ compute_rgba_32 v4.8B,v5.8B,v6.8B,v7.8B
+.endif
+
+.ifc \ofmt,abgr // 3 2 1 0
+ compute_rgba_32 v7.8B,v6.8B,v5.8B,v4.8B
+.endif
+
+.ifc \ofmt,bgra // 2 1 0 3
+ compute_rgba_32 v6.8B,v5.8B,v4.8B,v7.8B
+.endif
+
+ st4 {v4.8B, v5.8B, v6.8B, v7.8B}, [\dst], #32
+.endm
+
+.macro process_1l_16px_16 ofmt
+ compute_premult_16 v18.8H, v19.8H
+ ld1 {v2.16B}, [x4], #16
+ compute_16px_16 x2, v2, \ofmt
+.endm
+
+.macro process_1l_16px_32 ofmt
+ compute_premult_32 1, v18.8H, v19.8H
+ ld1 {v30.16B}, [x4], #16
+ uxtl v2.8H, v30.8B
+ compute_8px_32 x2, v2, \ofmt
+
+ compute_premult_32 2, v18.8H, v19.8H
+ uxtl2 v2.8H, v30.16B
+ compute_8px_32 x2, v2, \ofmt
+.endm
+
+.macro process_2l_16px_16 ofmt
+ compute_premult_16 v18.8H, v19.8H
+
+ ld1 {v2.16B}, [x4], #16 // first line of luma
+ compute_16px_16 x2, v2, \ofmt
+
+ ld1 {v2.16B}, [x12], #16 // second line of luma
+ compute_16px_16 x11, v2, \ofmt
+.endm
+
+.macro process_2l_16px_32 ofmt
+ compute_premult_32 1, v18.8H, v19.8H
+
+ ld1 {v30.16B}, [x4], #16 // first line of luma
+
+ uxtl v2.8H, v30.8B
+ compute_8px_32 x2, v2, \ofmt
+
+ ld1 {v31.16B}, [x12], #16 // second line of luma
+ uxtl v2.8H, v31.8B
+ compute_8px_32 x11, v2, \ofmt
+
+ compute_premult_32 2, v18.8H, v19.8H
+ uxtl2 v2.8H, v30.16B
+ compute_8px_32 x2, v2, \ofmt
+ uxtl2 v2.8H, v31.16B
+ compute_8px_32 x11, v2, \ofmt
+.endm
+
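+/* Per the AAPCS64, the C prototype maps to: w0 = width, w1 = height,
+ * x2 = dst, w3 = linesize, x4 = srcY, w5 = linesizeY, x6 = srcC/srcU,
+ * w7 = linesizeC/linesizeU; the remaining arguments are on the stack. */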
+.macro load_args_nv12
+ ldr x8, [sp] // table
+ ldr w9, [sp, #8] // y_offset
+ ldr w10, [sp, #16] // y_coeff
+ ld1 {v1.1D}, [x8]
+ dup v0.8H, w10
+ dup v3.8H, w9
+ add x11, x2, w3, UXTW // x11 = dst + linesize (dst2)
+ add x12, x4, w5, UXTW // x12 = srcY + linesizeY (srcY2)
+ lsl w3, w3, #1
+ lsl w5, w5, #1
+ lsl w8, w0, #2
+ sub w3, w3, w8 // w3 = linesize * 2 - width * 4 (padding)
+ sub w5, w5, w0 // w5 = linesizeY * 2 - width (paddingY)
+ sub w7, w7, w0 // w7 = linesizeC - width (paddingC)
+.endm
+
+.macro load_args_nv21
+ load_args_nv12
+.endm
+
+.macro load_args_yuv420p
+ ldr x13, [sp] // srcV
+ ldr w14, [sp, #8] // linesizeV
+ ldr x8, [sp, #16] // table
+ ldr w9, [sp, #24] // y_offset
+ ldr w10, [sp, #32] // y_coeff
+ ld1 {v1.1D}, [x8]
+ dup v0.8H, w10
+ dup v3.8H, w9
+ add x11, x2, w3, UXTW // x11 = dst + linesize (dst2)
+ add x12, x4, w5, UXTW // x12 = srcY + linesizeY (srcY2)
+ lsl w3, w3, #1
+ lsl w5, w5, #1
+ lsl w8, w0, #2
+ sub w3, w3, w8 // w3 = linesize * 2 - width * 4 (padding)
+ sub w5, w5, w0 // w5 = linesizeY * 2 - width (paddingY)
+ sub w7, w7, w0, lsr #1 // w7 = linesizeU - width / 2 (paddingU)
+ sub w14, w14, w0, lsr #1 // w14 = linesizeV - width / 2 (paddingV)
+.endm
+
+.macro load_args_yuv422p
+ ldr x13, [sp] // srcV
+ ldr w14, [sp, #8] // linesizeV
+ ldr x8, [sp, #16] // table
+ ldr w9, [sp, #24] // y_offset
+ ldr w10, [sp, #32] // y_coeff
+ ld1 {v1.1D}, [x8]
+ dup v0.8H, w10
+ dup v3.8H, w9
+ add x12, x4, w5, UXTW // x12 = srcY + linesizeY (srcY2)
+ lsl w8, w0, #2
+ sub w3, w3, w8 // w3 = linesize - width * 4 (padding)
+ sub w5, w5, w0 // w5 = linesizeY - width (paddingY)
+ sub w7, w7, w0, lsr #1 // w7 = linesizeU - width / 2 (paddingU)
+ sub w14, w14, w0, lsr #1 // w14 = linesizeV - width / 2 (paddingV)
+.endm
+
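+/* Load 16 interleaved chroma bytes, deinterleave them into 8 U and 8 V
+ * samples, bias them around 128 (v5), then convert 16 pixels per luma row. */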
+.macro process_nv12 ofmt precision
+ ld2 {v16.8B, v17.8B}, [x6], #16
+ usubl v18.8H, v16.8B, v5.8B
+ usubl v19.8H, v17.8B, v5.8B
+ process_2l_16px_\precision \ofmt
+.endm
+
+.macro process_nv21 ofmt precision
+ ld2 {v16.8B, v17.8B}, [x6], #16
+ usubl v19.8H, v16.8B, v5.8B
+ usubl v18.8H, v17.8B, v5.8B
+ process_2l_16px_\precision \ofmt
+.endm
+
+.macro process_yuv420p ofmt precision
+ ld1 {v16.8B}, [ x6], #8
+ ld1 {v17.8B}, [x13], #8
+ usubl v18.8H, v16.8B, v5.8B
+ usubl v19.8H, v17.8B, v5.8B
+ process_2l_16px_\precision \ofmt
+.endm
+
+.macro process_yuv422p ofmt precision
+ ld1 {v16.8B}, [ x6], #8
+ ld1 {v17.8B}, [x13], #8
+ usubl v18.8H, v16.8B, v5.8B
+ usubl v19.8H, v17.8B, v5.8B
+ process_1l_16px_\precision \ofmt
+.endm
+
+.macro increment_nv12
+ add x11, x11, w3, UXTW // dst2 += padding
+ add x12, x12, w5, UXTW // srcY2 += paddingY
+ add x6, x6, w7, UXTW // srcC += paddingC
+ subs w1, w1, #2 // height -= 2
+.endm
+
+.macro increment_nv21
+ add x11, x11, w3, UXTW // dst2 += padding
+ add x12, x12, w5, UXTW // srcY2 += paddingY
+ add x6, x6, w7, UXTW // srcC += paddingC
+ subs w1, w1, #2 // height -= 2
+.endm
+
+.macro increment_yuv420p
+ add x11, x11, w3, UXTW // dst2 += padding
+ add x12, x12, w5, UXTW // srcY2 += paddingY
+ add x6, x6, w7, UXTW // srcU += paddingU
+ add x13, x13, w14, UXTW // srcV += paddingV
+ subs w1, w1, #2 // height -= 2
+.endm
+
+.macro increment_yuv422p
+ add x6, x6, w7, UXTW // srcU += paddingU
+ add x13, x13, w14, UXTW // srcV += paddingV
+ subs w1, w1, #1 // height -= 1
+.endm
+
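+/* Main loop: walk the image in 16-pixel blocks, converting two rows per
+ * iteration for nv12/nv21/yuv420p (vertically shared chroma) and a single
+ * row for yuv422p. */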
+.macro declare_func ifmt ofmt precision
+function ff_\ifmt\()_to_\ofmt\()_neon_\precision\(), export=1
+ load_args_\ifmt
+1:
+ mov w8, w0 // w8 = width
+2:
+ movi v5.8B, #128
+ process_\ifmt \ofmt \precision
+ subs w8, w8, #16 // width -= 16
+ b.gt 2b
+ add x2, x2, w3, UXTW // dst += padding
+ add x4, x4, w5, UXTW // srcY += paddingY
+ increment_\ifmt
+ b.gt 1b
+ ret
+endfunc
+.endm
+
+.macro declare_rgb_funcs ifmt precision
+ declare_func \ifmt, argb, \precision
+ declare_func \ifmt, rgba, \precision
+ declare_func \ifmt, abgr, \precision
+ declare_func \ifmt, bgra, \precision
+.endm
+
+declare_rgb_funcs nv12, 16
+declare_rgb_funcs nv21, 16
+declare_rgb_funcs nv12, 32
+declare_rgb_funcs nv21, 32
+declare_rgb_funcs yuv420p, 16
+declare_rgb_funcs yuv420p, 32
+declare_rgb_funcs yuv422p, 16
+declare_rgb_funcs yuv422p, 32
diff --git a/libswscale/swscale_internal.h b/libswscale/swscale_internal.h
index 1e29ec3..f0bab78 100644
--- a/libswscale/swscale_internal.h
+++ b/libswscale/swscale_internal.h
@@ -877,6 +877,7 @@ extern const AVClass ff_sws_context_class;
void ff_get_unscaled_swscale(SwsContext *c);
void ff_get_unscaled_swscale_ppc(SwsContext *c);
void ff_get_unscaled_swscale_arm(SwsContext *c);
+void ff_get_unscaled_swscale_aarch64(SwsContext *c);
/**
* Return function pointer to fastest main scaler path function depending
diff --git a/libswscale/swscale_unscaled.c b/libswscale/swscale_unscaled.c
index 74f3467..4bf2447 100644
--- a/libswscale/swscale_unscaled.c
+++ b/libswscale/swscale_unscaled.c
@@ -1776,6 +1776,8 @@ void ff_get_unscaled_swscale(SwsContext *c)
ff_get_unscaled_swscale_ppc(c);
if (ARCH_ARM)
ff_get_unscaled_swscale_arm(c);
+ if (ARCH_AARCH64)
+ ff_get_unscaled_swscale_aarch64(c);
}
/* Convert the palette to the same packed 32-bit format as the palette */
--
2.7.1