[FFmpeg-devel] [PATCH 2/5] lavc/aarch64: Add neon implementation of vsse16
Hubert Mazur
hum at semihalf.com
Mon Aug 22 18:26:24 EEST 2022
Provide optimized implementation of vsse16 for arm64.
Performance comparison tests are shown below.
- vsse_0_c: 254.4
- vsse_0_neon: 64.7
Benchmarks and tests are run with checkasm tool on AWS Graviton 3.
Signed-off-by: Hubert Mazur <hum at semihalf.com>
---
libavcodec/aarch64/me_cmp_init_aarch64.c | 4 +
libavcodec/aarch64/me_cmp_neon.S | 97 ++++++++++++++++++++++++
2 files changed, 101 insertions(+)
diff --git a/libavcodec/aarch64/me_cmp_init_aarch64.c b/libavcodec/aarch64/me_cmp_init_aarch64.c
index ddc5d05611..7b81e48d16 100644
--- a/libavcodec/aarch64/me_cmp_init_aarch64.c
+++ b/libavcodec/aarch64/me_cmp_init_aarch64.c
@@ -43,6 +43,8 @@ int sse4_neon(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
int vsad16_neon(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2,
ptrdiff_t stride, int h);
+int vsse16_neon(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2,
+ ptrdiff_t stride, int h);
av_cold void ff_me_cmp_init_aarch64(MECmpContext *c, AVCodecContext *avctx)
{
@@ -62,5 +64,7 @@ av_cold void ff_me_cmp_init_aarch64(MECmpContext *c, AVCodecContext *avctx)
c->sse[2] = sse4_neon;
c->vsad[0] = vsad16_neon;
+
+ c->vsse[0] = vsse16_neon;
}
}
diff --git a/libavcodec/aarch64/me_cmp_neon.S b/libavcodec/aarch64/me_cmp_neon.S
index d4c0099854..279bae7cb5 100644
--- a/libavcodec/aarch64/me_cmp_neon.S
+++ b/libavcodec/aarch64/me_cmp_neon.S
@@ -659,3 +659,100 @@ function vsad16_neon, export=1
ret
endfunc
+
+function vsse16_neon, export=1
+ // x0 unused
+ // x1 uint8_t *pix1
+ // x2 uint8_t *pix2
+ // x3 ptrdiff_t stride
+ // w4 int h
+
+ movi v30.4s, #0
+ movi v29.4s, #0
+
+ add x5, x1, x3 // pix1 + stride
+ add x6, x2, x3 // pix2 + stride
+ sub w4, w4, #1 // we need to make h-1 iterations
+ cmp w4, #3 // check if we can make 4 iterations at once
+ b.le 2f
+
+// make 4 iterations at once
+1:
+ // x = pix1[0] - pix2[0] - pix1[0 + stride] + pix2[0 + stride]
+ // res += x * x (no abs needed: squaring makes the sign irrelevant)
+ ld1 {v0.16b}, [x1], x3 // Load pix1[0], first iteration
+ ld1 {v1.16b}, [x2], x3 // Load pix2[0], first iteration
+ ld1 {v2.16b}, [x5], x3 // Load pix1[0 + stride], first iteration
+ usubl v28.8h, v0.8b, v1.8b // Signed difference of pix1[0] - pix2[0], first iteration
+ ld1 {v3.16b}, [x6], x3 // Load pix2[0 + stride], first iteration
+ usubl2 v27.8h, v0.16b, v1.16b // Signed difference of pix1[0] - pix2[0], first iteration
+ usubl v26.8h, v3.8b, v2.8b // Signed difference of pix2[0 + stride] - pix1[0 + stride], first iteration
+ usubl2 v25.8h, v3.16b, v2.16b // Signed difference of pix2[0 + stride] - pix1[0 + stride], first iteration
+ ld1 {v4.16b}, [x1], x3 // Load pix1[0], second iteration
+ sqadd v28.8h, v28.8h, v26.8h // Add first iteration
+ ld1 {v6.16b}, [x5], x3 // Load pix1[0 + stride], second iteration
+ sqadd v27.8h, v27.8h, v25.8h // Add first iteration
+ ld1 {v5.16b}, [x2], x3 // Load pix2[0], second iteration
+ smlal v30.4s, v28.4h, v28.4h // Multiply-accumulate first iteration
+ ld1 {v7.16b}, [x6], x3 // Load pix2[0 + stride], second iteration
+ usubl v26.8h, v4.8b, v5.8b // Signed difference of pix1[0] - pix2[0], second iteration
+ smlal2 v29.4s, v28.8h, v28.8h // Multiply-accumulate first iteration
+ usubl2 v25.8h, v4.16b, v5.16b // Signed difference of pix1[0] - pix2[0], second iteration
+ usubl v24.8h, v7.8b, v6.8b // Signed difference of pix2[0 + stride] - pix1[0 + stride], second iteration
+ smlal v30.4s, v27.4h, v27.4h // Multiply-accumulate first iteration
+ usubl2 v23.8h, v7.16b, v6.16b // Signed difference of pix2[0 + stride] - pix1[0 + stride], second iteration
+ sqadd v24.8h, v26.8h, v24.8h // Add second iteration
+ smlal2 v29.4s, v27.8h, v27.8h // Multiply-accumulate first iteration
+ sqadd v23.8h, v25.8h, v23.8h // Add second iteration
+ ld1 {v18.16b}, [x1], x3 // Load pix1[0], third iteration
+ smlal v30.4s, v24.4h, v24.4h // Multiply-accumulate second iteration
+ ld1 {v31.16b}, [x2], x3 // Load pix2[0], third iteration
+ ld1 {v17.16b}, [x5], x3 // Load pix1[0 + stride], third iteration
+ smlal2 v29.4s, v24.8h, v24.8h // Multiply-accumulate second iteration
+ ld1 {v16.16b}, [x6], x3 // Load pix2[0 + stride], third iteration
+ usubl v22.8h, v18.8b, v31.8b // Signed difference of pix1[0] - pix2[0], third iteration
+ smlal v30.4s, v23.4h, v23.4h // Multiply-accumulate second iteration
+ usubl2 v21.8h, v18.16b, v31.16b // Signed difference of pix1[0] - pix2[0], third iteration
+ usubl v20.8h, v16.8b, v17.8b // Signed difference of pix2[0 + stride] - pix1[0 + stride], third iteration
+ smlal2 v29.4s, v23.8h, v23.8h // Multiply-accumulate second iteration
+ sqadd v20.8h, v22.8h, v20.8h // Add third iteration
+ usubl2 v19.8h, v16.16b, v17.16b // Signed difference of pix2[0 + stride] - pix1[0 + stride], third iteration
+ smlal v30.4s, v20.4h, v20.4h // Multiply-accumulate third iteration
+ sqadd v19.8h, v21.8h, v19.8h // Add third iteration
+ smlal2 v29.4s, v20.8h, v20.8h // Multiply-accumulate third iteration
+ sub w4, w4, #3
+ smlal v30.4s, v19.4h, v19.4h // Multiply-accumulate third iteration
+ cmp w4, #3
+ smlal2 v29.4s, v19.8h, v19.8h // Multiply-accumulate third iteration
+
+ b.ge 1b
+
+ cbz w4, 3f
+
+// process the remaining rows one at a time
+2:
+ ld1 {v0.16b}, [x1], x3
+ ld1 {v1.16b}, [x2], x3
+ ld1 {v2.16b}, [x5], x3
+ usubl v28.8h, v0.8b, v1.8b
+ ld1 {v3.16b}, [x6], x3
+ usubl2 v27.8h, v0.16b, v1.16b
+ usubl v26.8h, v3.8b, v2.8b
+ usubl2 v25.8h, v3.16b, v2.16b
+ sqadd v28.8h, v28.8h, v26.8h
+ sqadd v27.8h, v27.8h, v25.8h
+ smlal v30.4s, v28.4h, v28.4h
+ smlal2 v29.4s, v28.8h, v28.8h
+ subs w4, w4, #1
+ smlal v30.4s, v27.4h, v27.4h
+ smlal2 v29.4s, v27.8h, v27.8h
+
+ b.ne 2b
+
+3:
+ add v30.4s, v30.4s, v29.4s
+ saddlv d17, v30.4s
+ fmov w0, s17
+
+ ret
+endfunc
--
2.34.1
More information about the ffmpeg-devel
mailing list