[FFmpeg-devel] [PATCH 4/4] avcodec/mips: Restructure as per avutil/mips/generic_macros_msa.h
shivraj.patil at imgtec.com
Thu May 28 11:21:24 CEST 2015
From: Shivraj Patil <shivraj.patil at imgtec.com>
This patch modifies the HEVC MC MIPS-SIMD optimized code according to the improved versions of the generic macros.
This patch also adds a new HEVC header file, libavcodec/mips/hevc_macros_msa.h.
Overall, this patch only makes styling changes and brings the code in sync with the latest MIPS-SIMD optimized codebase at our end. A minimal before/after sketch of the restructuring follows the diffstat below.
Signed-off-by: Shivraj Patil <shivraj.patil at imgtec.com>
---
libavcodec/mips/hevc_macros_msa.h | 34 +
libavcodec/mips/hevcdsp_msa.c | 2428 +++++++++++++------------------------
2 files changed, 876 insertions(+), 1586 deletions(-)
create mode 100644 libavcodec/mips/hevc_macros_msa.h
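
Note for reviewers, not part of the change itself: the restructuring folds the
per-vector intrinsic sequences into the multi-vector generic macros from
libavutil/mips/generic_macros_msa.h. A minimal before/after sketch, taken from
hevc_copy_8w_msa in the diff below and assuming msa.h and the generic macros
header are included:

    /* before: one interleave intrinsic and one shift per vector */
    input0 = (v8i16) __msa_ilvr_b(zero, src0);   /* zero-extend low 8 bytes */
    input1 = (v8i16) __msa_ilvr_b(zero, src1);
    input0 <<= 6;
    input1 <<= 6;

    /* after: one generic macro zero-extends both vectors at once */
    ILVR_B2_SH(zero, src0, zero, src1, in0, in1);
    in0 <<= 6;
    in1 <<= 6;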
diff --git a/libavcodec/mips/hevc_macros_msa.h b/libavcodec/mips/hevc_macros_msa.h
new file mode 100644
index 0000000..418c58f
--- /dev/null
+++ b/libavcodec/mips/hevc_macros_msa.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2015 Manojkumar Bhosale (Manojkumar.Bhosale at imgtec.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_MIPS_HEVC_MACROS_MSA_H
+#define AVCODEC_MIPS_HEVC_MACROS_MSA_H
+
+#define HEVC_FILT_8TAP(in0, in1, in2, in3, filt0, filt1, filt2, filt3) \
+( { \
+ v4i32 out_m; \
+ \
+ out_m = __msa_dotp_s_w((v8i16) in0, (v8i16) filt0); \
+ out_m = __msa_dpadd_s_w(out_m, (v8i16) in1, (v8i16) filt1); \
+ DPADD_SH2_SW(in2, in3, filt2, filt3, out_m, out_m); \
+ out_m; \
+} )
+
+#endif /* AVCODEC_MIPS_HEVC_MACROS_MSA_H */
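
For reference, a minimal usage sketch of the HEVC_FILT_8TAP macro added above.
This is illustrative only; the variable names are placeholders, not code from
this patch:

    /* HEVC_FILT_8TAP is a GCC statement expression yielding a v4i32 with
     * four 32-bit 8-tap dot-product sums; it needs msa.h and
     * generic_macros_msa.h (for DPADD_SH2_SW) to be visible. */
    v4i32 sum_r;
    sum_r = HEVC_FILT_8TAP(in10_r, in32_r, in54_r, in76_r, /* ilvr'd v8i16 sample pairs */
                           filt0, filt1, filt2, filt3);    /* splatted v8i16 tap pairs  */
    sum_r >>= 6;                                           /* example follow-on shift   */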
diff --git a/libavcodec/mips/hevcdsp_msa.c b/libavcodec/mips/hevcdsp_msa.c
index fcc344b..1ecef0a 100644
--- a/libavcodec/mips/hevcdsp_msa.c
+++ b/libavcodec/mips/hevcdsp_msa.c
@@ -20,405 +20,181 @@
#include "libavutil/mips/generic_macros_msa.h"
#include "libavcodec/mips/hevcdsp_mips.h"
+#include "libavcodec/mips/hevc_macros_msa.h"
-#define HEVC_FILT_8TAP_DPADD_W(vec0, vec1, vec2, vec3, \
- filt0, filt1, filt2, filt3) \
-( { \
- v4i32 out; \
- \
- out = __msa_dotp_s_w((v8i16) (vec0), (v8i16) (filt0)); \
- out = __msa_dpadd_s_w(out, (v8i16) (vec1), (v8i16) (filt1)); \
- out = __msa_dpadd_s_w(out, (v8i16) (vec2), (v8i16) (filt2)); \
- out = __msa_dpadd_s_w(out, (v8i16) (vec3), (v8i16) (filt3)); \
- out; \
-} )
-
-#define HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3, \
- filt0, filt1, filt2, filt3, \
- var_in) \
-( { \
- v8i16 out; \
- \
- out = __msa_dpadd_s_h((v8i16) (var_in), (v16i8) (vec0), (v16i8) (filt0)); \
- out = __msa_dpadd_s_h(out, (v16i8) (vec1), (v16i8) (filt1)); \
- out = __msa_dpadd_s_h(out, (v16i8) (vec2), (v16i8) (filt2)); \
- out = __msa_dpadd_s_h(out, (v16i8) (vec3), (v16i8) (filt3)); \
- out; \
-} )
-
-static void hevc_copy_4w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
+static void hevc_copy_4w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
int32_t height)
{
v16i8 zero = { 0 };
if (2 == height) {
- uint64_t out0, out1;
v16i8 src0, src1;
- v8i16 input0;
+ v8i16 in0;
- LOAD_2VECS_SB(src, src_stride, src0, src1);
+ LD_SB2(src, src_stride, src0, src1);
src0 = (v16i8) __msa_ilvr_w((v4i32) src1, (v4i32) src0);
-
- input0 = (v8i16) __msa_ilvr_b(zero, src0);
-
- input0 <<= 6;
-
- out0 = __msa_copy_u_d((v2i64) input0, 0);
- out1 = __msa_copy_u_d((v2i64) input0, 1);
-
- STORE_DWORD(dst, out0);
- dst += dst_stride;
- STORE_DWORD(dst, out1);
+ in0 = (v8i16) __msa_ilvr_b(zero, src0);
+ in0 <<= 6;
+ ST8x2_UB(in0, dst, 2 * dst_stride);
} else if (4 == height) {
- uint64_t out0, out1, out2, out3;
v16i8 src0, src1, src2, src3;
- v8i16 input0, input1;
+ v8i16 in0, in1;
- LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
- src0 = (v16i8) __msa_ilvr_w((v4i32) src1, (v4i32) src0);
- src1 = (v16i8) __msa_ilvr_w((v4i32) src3, (v4i32) src2);
-
- input0 = (v8i16) __msa_ilvr_b(zero, src0);
- input1 = (v8i16) __msa_ilvr_b(zero, src1);
-
- input0 <<= 6;
- input1 <<= 6;
-
- out0 = __msa_copy_u_d((v2i64) input0, 0);
- out1 = __msa_copy_u_d((v2i64) input0, 1);
- out2 = __msa_copy_u_d((v2i64) input1, 0);
- out3 = __msa_copy_u_d((v2i64) input1, 1);
-
- STORE_DWORD(dst, out0);
- dst += dst_stride;
- STORE_DWORD(dst, out1);
- dst += dst_stride;
- STORE_DWORD(dst, out2);
- dst += dst_stride;
- STORE_DWORD(dst, out3);
+ ILVR_W2_SB(src1, src0, src3, src2, src0, src1);
+ ILVR_B2_SH(zero, src0, zero, src1, in0, in1);
+ in0 <<= 6;
+ in1 <<= 6;
+ ST8x4_UB(in0, in1, dst, 2 * dst_stride);
} else if (0 == height % 8) {
v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
- uint64_t out0, out1, out2, out3, out4, out5, out6, out7;
- v8i16 input0, input1, input2, input3;
+ v8i16 in0, in1, in2, in3;
uint32_t loop_cnt;
for (loop_cnt = (height >> 3); loop_cnt--;) {
- LOAD_8VECS_SB(src, src_stride,
- src0, src1, src2, src3, src4, src5, src6, src7);
+ LD_SB8(src, src_stride,
+ src0, src1, src2, src3, src4, src5, src6, src7);
src += (8 * src_stride);
- src0 = (v16i8) __msa_ilvr_w((v4i32) src1, (v4i32) src0);
- src1 = (v16i8) __msa_ilvr_w((v4i32) src3, (v4i32) src2);
- src2 = (v16i8) __msa_ilvr_w((v4i32) src5, (v4i32) src4);
- src3 = (v16i8) __msa_ilvr_w((v4i32) src7, (v4i32) src6);
-
- input0 = (v8i16) __msa_ilvr_b(zero, src0);
- input1 = (v8i16) __msa_ilvr_b(zero, src1);
- input2 = (v8i16) __msa_ilvr_b(zero, src2);
- input3 = (v8i16) __msa_ilvr_b(zero, src3);
-
- input0 <<= 6;
- input1 <<= 6;
- input2 <<= 6;
- input3 <<= 6;
-
- out0 = __msa_copy_u_d((v2i64) input0, 0);
- out1 = __msa_copy_u_d((v2i64) input0, 1);
- out2 = __msa_copy_u_d((v2i64) input1, 0);
- out3 = __msa_copy_u_d((v2i64) input1, 1);
- out4 = __msa_copy_u_d((v2i64) input2, 0);
- out5 = __msa_copy_u_d((v2i64) input2, 1);
- out6 = __msa_copy_u_d((v2i64) input3, 0);
- out7 = __msa_copy_u_d((v2i64) input3, 1);
-
- STORE_DWORD(dst, out0);
- dst += dst_stride;
- STORE_DWORD(dst, out1);
- dst += dst_stride;
- STORE_DWORD(dst, out2);
- dst += dst_stride;
- STORE_DWORD(dst, out3);
- dst += dst_stride;
- STORE_DWORD(dst, out4);
- dst += dst_stride;
- STORE_DWORD(dst, out5);
- dst += dst_stride;
- STORE_DWORD(dst, out6);
- dst += dst_stride;
- STORE_DWORD(dst, out7);
- dst += dst_stride;
+ ILVR_W4_SB(src1, src0, src3, src2, src5, src4, src7, src6,
+ src0, src1, src2, src3);
+ ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
+ in0, in1, in2, in3);
+ SLLI_4V(in0, in1, in2, in3, 6);
+ ST8x8_UB(in0, in1, in2, in3, dst, 2 * dst_stride);
+ dst += (8 * dst_stride);
}
}
}
-static void hevc_copy_6w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
+static void hevc_copy_6w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
int32_t height)
{
uint32_t loop_cnt;
- uint64_t out0, out1, out2, out3, out4, out5, out6, out7;
- uint32_t out8, out9, out10, out11, out12, out13, out14, out15;
v16i8 zero = { 0 };
v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
- v8i16 input0, input1, input2, input3, input4, input5, input6, input7;
+ v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
for (loop_cnt = (height >> 3); loop_cnt--;) {
- LOAD_8VECS_SB(src, src_stride,
- src0, src1, src2, src3, src4, src5, src6, src7);
+ LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
src += (8 * src_stride);
- input0 = (v8i16) __msa_ilvr_b(zero, src0);
- input1 = (v8i16) __msa_ilvr_b(zero, src1);
- input2 = (v8i16) __msa_ilvr_b(zero, src2);
- input3 = (v8i16) __msa_ilvr_b(zero, src3);
- input4 = (v8i16) __msa_ilvr_b(zero, src4);
- input5 = (v8i16) __msa_ilvr_b(zero, src5);
- input6 = (v8i16) __msa_ilvr_b(zero, src6);
- input7 = (v8i16) __msa_ilvr_b(zero, src7);
-
- input0 <<= 6;
- input1 <<= 6;
- input2 <<= 6;
- input3 <<= 6;
- input4 <<= 6;
- input5 <<= 6;
- input6 <<= 6;
- input7 <<= 6;
-
- out0 = __msa_copy_u_d((v2i64) input0, 0);
- out1 = __msa_copy_u_d((v2i64) input1, 0);
- out2 = __msa_copy_u_d((v2i64) input2, 0);
- out3 = __msa_copy_u_d((v2i64) input3, 0);
- out4 = __msa_copy_u_d((v2i64) input4, 0);
- out5 = __msa_copy_u_d((v2i64) input5, 0);
- out6 = __msa_copy_u_d((v2i64) input6, 0);
- out7 = __msa_copy_u_d((v2i64) input7, 0);
-
- out8 = __msa_copy_u_w((v4i32) input0, 2);
- out9 = __msa_copy_u_w((v4i32) input1, 2);
- out10 = __msa_copy_u_w((v4i32) input2, 2);
- out11 = __msa_copy_u_w((v4i32) input3, 2);
- out12 = __msa_copy_u_w((v4i32) input4, 2);
- out13 = __msa_copy_u_w((v4i32) input5, 2);
- out14 = __msa_copy_u_w((v4i32) input6, 2);
- out15 = __msa_copy_u_w((v4i32) input7, 2);
-
- STORE_DWORD(dst, out0);
- STORE_WORD(dst + 4, out8);
- dst += dst_stride;
- STORE_DWORD(dst, out1);
- STORE_WORD(dst + 4, out9);
- dst += dst_stride;
- STORE_DWORD(dst, out2);
- STORE_WORD(dst + 4, out10);
- dst += dst_stride;
- STORE_DWORD(dst, out3);
- STORE_WORD(dst + 4, out11);
- dst += dst_stride;
- STORE_DWORD(dst, out4);
- STORE_WORD(dst + 4, out12);
- dst += dst_stride;
- STORE_DWORD(dst, out5);
- STORE_WORD(dst + 4, out13);
- dst += dst_stride;
- STORE_DWORD(dst, out6);
- STORE_WORD(dst + 4, out14);
- dst += dst_stride;
- STORE_DWORD(dst, out7);
- STORE_WORD(dst + 4, out15);
- dst += dst_stride;
+ ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
+ in0, in1, in2, in3);
+ ILVR_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7,
+ in4, in5, in6, in7);
+ SLLI_4V(in0, in1, in2, in3, 6);
+ SLLI_4V(in4, in5, in6, in7, 6);
+ ST12x8_UB(in0, in1, in2, in3, in4, in5, in6, in7, dst, 2 * dst_stride);
+ dst += (8 * dst_stride);
}
}
-static void hevc_copy_8w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
+static void hevc_copy_8w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
int32_t height)
{
v16i8 zero = { 0 };
if (2 == height) {
v16i8 src0, src1;
- v8i16 input0, input1;
+ v8i16 in0, in1;
- LOAD_2VECS_SB(src, src_stride, src0, src1);
+ LD_SB2(src, src_stride, src0, src1);
- input0 = (v8i16) __msa_ilvr_b(zero, src0);
- input1 = (v8i16) __msa_ilvr_b(zero, src1);
-
- input0 <<= 6;
- input1 <<= 6;
-
- STORE_2VECS_SH(dst, dst_stride, input0, input1);
+ ILVR_B2_SH(zero, src0, zero, src1, in0, in1);
+ in0 <<= 6;
+ in1 <<= 6;
+ ST_SH2(in0, in1, dst, dst_stride);
} else if (4 == height) {
v16i8 src0, src1, src2, src3;
- v8i16 input0, input1, input2, input3;
-
- LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
+ v8i16 in0, in1, in2, in3;
- input0 = (v8i16) __msa_ilvr_b(zero, src0);
- input1 = (v8i16) __msa_ilvr_b(zero, src1);
- input2 = (v8i16) __msa_ilvr_b(zero, src2);
- input3 = (v8i16) __msa_ilvr_b(zero, src3);
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
- input0 <<= 6;
- input1 <<= 6;
- input2 <<= 6;
- input3 <<= 6;
-
- STORE_4VECS_SH(dst, dst_stride, input0, input1, input2, input3);
+ ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
+ in0, in1, in2, in3);
+ SLLI_4V(in0, in1, in2, in3, 6);
+ ST_SH4(in0, in1, in2, in3, dst, dst_stride);
} else if (6 == height) {
v16i8 src0, src1, src2, src3, src4, src5;
- v8i16 input0, input1, input2, input3, input4, input5;
-
- LOAD_6VECS_SB(src, src_stride, src0, src1, src2, src3, src4, src5);
-
- input0 = (v8i16) __msa_ilvr_b(zero, src0);
- input1 = (v8i16) __msa_ilvr_b(zero, src1);
- input2 = (v8i16) __msa_ilvr_b(zero, src2);
- input3 = (v8i16) __msa_ilvr_b(zero, src3);
- input4 = (v8i16) __msa_ilvr_b(zero, src4);
- input5 = (v8i16) __msa_ilvr_b(zero, src5);
-
- input0 <<= 6;
- input1 <<= 6;
- input2 <<= 6;
- input3 <<= 6;
- input4 <<= 6;
- input5 <<= 6;
-
- STORE_6VECS_SH(dst, dst_stride,
- input0, input1, input2, input3, input4, input5);
+ v8i16 in0, in1, in2, in3, in4, in5;
+
+ LD_SB6(src, src_stride, src0, src1, src2, src3, src4, src5);
+
+ ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
+ in0, in1, in2, in3);
+ ILVR_B2_SH(zero, src4, zero, src5, in4, in5);
+ SLLI_4V(in0, in1, in2, in3, 6);
+ in4 <<= 6;
+ in5 <<= 6;
+ ST_SH6(in0, in1, in2, in3, in4, in5, dst, dst_stride);
} else if (0 == height % 8) {
uint32_t loop_cnt;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
- v8i16 input0, input1, input2, input3;
- v8i16 input4, input5, input6, input7;
+ v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
for (loop_cnt = (height >> 3); loop_cnt--;) {
- LOAD_8VECS_SB(src, src_stride,
- src0, src1, src2, src3, src4, src5, src6, src7);
+ LD_SB8(src, src_stride,
+ src0, src1, src2, src3, src4, src5, src6, src7);
src += (8 * src_stride);
- input0 = (v8i16) __msa_ilvr_b(zero, src0);
- input1 = (v8i16) __msa_ilvr_b(zero, src1);
- input2 = (v8i16) __msa_ilvr_b(zero, src2);
- input3 = (v8i16) __msa_ilvr_b(zero, src3);
- input4 = (v8i16) __msa_ilvr_b(zero, src4);
- input5 = (v8i16) __msa_ilvr_b(zero, src5);
- input6 = (v8i16) __msa_ilvr_b(zero, src6);
- input7 = (v8i16) __msa_ilvr_b(zero, src7);
-
- input0 <<= 6;
- input1 <<= 6;
- input2 <<= 6;
- input3 <<= 6;
- input4 <<= 6;
- input5 <<= 6;
- input6 <<= 6;
- input7 <<= 6;
-
- STORE_8VECS_SH(dst, dst_stride,
- input0, input1, input2, input3,
- input4, input5, input6, input7);
+ ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
+ in0, in1, in2, in3);
+ ILVR_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7,
+ in4, in5, in6, in7);
+ SLLI_4V(in0, in1, in2, in3, 6);
+ SLLI_4V(in4, in5, in6, in7, 6);
+ ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, dst, dst_stride);
dst += (8 * dst_stride);
}
}
}
-static void hevc_copy_12w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
+static void hevc_copy_12w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
int32_t height)
{
uint32_t loop_cnt;
- uint64_t dst_val0, dst_val1, dst_val2, dst_val3;
v16i8 zero = { 0 };
v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
- v8i16 input0, input1;
- v8i16 input0_r, input1_r, input2_r, input3_r;
+ v8i16 in0, in1, in0_r, in1_r, in2_r, in3_r;
for (loop_cnt = (height >> 3); loop_cnt--;) {
- LOAD_8VECS_SB(src, src_stride,
- src0, src1, src2, src3, src4, src5, src6, src7);
+ LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
src += (8 * src_stride);
- input0_r = (v8i16) __msa_ilvr_b(zero, src0);
- input1_r = (v8i16) __msa_ilvr_b(zero, src1);
- input2_r = (v8i16) __msa_ilvr_b(zero, src2);
- input3_r = (v8i16) __msa_ilvr_b(zero, src3);
-
- input0_r <<= 6;
- input1_r <<= 6;
- input2_r <<= 6;
- input3_r <<= 6;
-
- src0 = (v16i8) __msa_ilvl_w((v4i32) src1, (v4i32) src0);
- src1 = (v16i8) __msa_ilvl_w((v4i32) src3, (v4i32) src2);
-
- input0 = (v8i16) __msa_ilvr_b(zero, src0);
- input1 = (v8i16) __msa_ilvr_b(zero, src1);
-
- input0 <<= 6;
- input1 <<= 6;
-
- dst_val0 = __msa_copy_u_d((v2i64) input0, 0);
- dst_val1 = __msa_copy_u_d((v2i64) input0, 1);
- dst_val2 = __msa_copy_u_d((v2i64) input1, 0);
- dst_val3 = __msa_copy_u_d((v2i64) input1, 1);
-
- STORE_4VECS_SH(dst, dst_stride, input0_r, input1_r, input2_r, input3_r);
-
- STORE_DWORD(dst + 8, dst_val0);
- dst += dst_stride;
- STORE_DWORD(dst + 8, dst_val1);
- dst += dst_stride;
- STORE_DWORD(dst + 8, dst_val2);
- dst += dst_stride;
- STORE_DWORD(dst + 8, dst_val3);
- dst += dst_stride;
-
- input0_r = (v8i16) __msa_ilvr_b(zero, src4);
- input1_r = (v8i16) __msa_ilvr_b(zero, src5);
- input2_r = (v8i16) __msa_ilvr_b(zero, src6);
- input3_r = (v8i16) __msa_ilvr_b(zero, src7);
-
- input0_r <<= 6;
- input1_r <<= 6;
- input2_r <<= 6;
- input3_r <<= 6;
-
- src0 = (v16i8) __msa_ilvl_w((v4i32) src5, (v4i32) src4);
- src1 = (v16i8) __msa_ilvl_w((v4i32) src7, (v4i32) src6);
-
- input0 = (v8i16) __msa_ilvr_b(zero, src0);
- input1 = (v8i16) __msa_ilvr_b(zero, src1);
-
- input0 <<= 6;
- input1 <<= 6;
-
- dst_val0 = __msa_copy_u_d((v2i64) input0, 0);
- dst_val1 = __msa_copy_u_d((v2i64) input0, 1);
- dst_val2 = __msa_copy_u_d((v2i64) input1, 0);
- dst_val3 = __msa_copy_u_d((v2i64) input1, 1);
-
- STORE_4VECS_SH(dst, dst_stride, input0_r, input1_r, input2_r, input3_r);
+ ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
+ in0_r, in1_r, in2_r, in3_r);
+ SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
+ ILVL_W2_SB(src1, src0, src3, src2, src0, src1);
+ ILVR_B2_SH(zero, src0, zero, src1, in0, in1);
+ in0 <<= 6;
+ in1 <<= 6;
+ ST_SH4(in0_r, in1_r, in2_r, in3_r, dst, dst_stride);
+ ST8x4_UB(in0, in1, dst + 8, 2 * dst_stride);
+ dst += (4 * dst_stride);
- STORE_DWORD(dst + 8, dst_val0);
- dst += dst_stride;
- STORE_DWORD(dst + 8, dst_val1);
- dst += dst_stride;
- STORE_DWORD(dst + 8, dst_val2);
- dst += dst_stride;
- STORE_DWORD(dst + 8, dst_val3);
- dst += dst_stride;
+ ILVR_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7,
+ in0_r, in1_r, in2_r, in3_r);
+ SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
+ ILVL_W2_SB(src5, src4, src7, src6, src0, src1);
+ ILVR_B2_SH(zero, src0, zero, src1, in0, in1);
+ in0 <<= 6;
+ in1 <<= 6;
+ ST_SH4(in0_r, in1_r, in2_r, in3_r, dst, dst_stride);
+ ST8x4_UB(in0, in1, dst + 8, 2 * dst_stride);
+ dst += (4 * dst_stride);
}
}
-static void hevc_copy_16multx8mult_msa(uint8_t * __restrict src,
+static void hevc_copy_16multx8mult_msa(uint8_t *src,
int32_t src_stride,
- int16_t * __restrict dst,
+ int16_t *dst,
int32_t dst_stride,
int32_t height,
int32_t width)
@@ -428,64 +204,36 @@ static void hevc_copy_16multx8mult_msa(uint8_t * __restrict src,
uint32_t loop_cnt, cnt;
v16i8 zero = { 0 };
v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
- v8i16 input0_r, input1_r, input2_r, input3_r;
- v8i16 input0_l, input1_l, input2_l, input3_l;
+ v8i16 in0_r, in1_r, in2_r, in3_r;
+ v8i16 in0_l, in1_l, in2_l, in3_l;
for (cnt = (width >> 4); cnt--;) {
src_tmp = src;
dst_tmp = dst;
for (loop_cnt = (height >> 3); loop_cnt--;) {
- LOAD_8VECS_SB(src_tmp, src_stride,
- src0, src1, src2, src3, src4, src5, src6, src7);
+ LD_SB8(src_tmp, src_stride,
+ src0, src1, src2, src3, src4, src5, src6, src7);
src_tmp += (8 * src_stride);
- input0_r = (v8i16) __msa_ilvr_b(zero, src0);
- input0_l = (v8i16) __msa_ilvl_b(zero, src0);
- input1_r = (v8i16) __msa_ilvr_b(zero, src1);
- input1_l = (v8i16) __msa_ilvl_b(zero, src1);
- input2_r = (v8i16) __msa_ilvr_b(zero, src2);
- input2_l = (v8i16) __msa_ilvl_b(zero, src2);
- input3_r = (v8i16) __msa_ilvr_b(zero, src3);
- input3_l = (v8i16) __msa_ilvl_b(zero, src3);
-
- input0_r <<= 6;
- input0_l <<= 6;
- input1_r <<= 6;
- input1_l <<= 6;
- input2_r <<= 6;
- input2_l <<= 6;
- input3_r <<= 6;
- input3_l <<= 6;
-
- STORE_4VECS_SH(dst_tmp, dst_stride,
- input0_r, input1_r, input2_r, input3_r);
- STORE_4VECS_SH((dst_tmp + 8), dst_stride,
- input0_l, input1_l, input2_l, input3_l);
+ ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
+ in0_r, in1_r, in2_r, in3_r);
+ ILVL_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
+ in0_l, in1_l, in2_l, in3_l);
+ SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
+ SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
+ ST_SH4(in0_r, in1_r, in2_r, in3_r, dst_tmp, dst_stride);
+ ST_SH4(in0_l, in1_l, in2_l, in3_l, (dst_tmp + 8), dst_stride);
dst_tmp += (4 * dst_stride);
- input0_r = (v8i16) __msa_ilvr_b(zero, src4);
- input0_l = (v8i16) __msa_ilvl_b(zero, src4);
- input1_r = (v8i16) __msa_ilvr_b(zero, src5);
- input1_l = (v8i16) __msa_ilvl_b(zero, src5);
- input2_r = (v8i16) __msa_ilvr_b(zero, src6);
- input2_l = (v8i16) __msa_ilvl_b(zero, src6);
- input3_r = (v8i16) __msa_ilvr_b(zero, src7);
- input3_l = (v8i16) __msa_ilvl_b(zero, src7);
-
- input0_r <<= 6;
- input0_l <<= 6;
- input1_r <<= 6;
- input1_l <<= 6;
- input2_r <<= 6;
- input2_l <<= 6;
- input3_r <<= 6;
- input3_l <<= 6;
-
- STORE_4VECS_SH(dst_tmp, dst_stride,
- input0_r, input1_r, input2_r, input3_r);
- STORE_4VECS_SH((dst_tmp + 8), dst_stride,
- input0_l, input1_l, input2_l, input3_l);
+ ILVR_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7,
+ in0_r, in1_r, in2_r, in3_r);
+ ILVL_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7,
+ in0_l, in1_l, in2_l, in3_l);
+ SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
+ SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
+ ST_SH4(in0_r, in1_r, in2_r, in3_r, dst_tmp, dst_stride);
+ ST_SH4(in0_l, in1_l, in2_l, in3_l, (dst_tmp + 8), dst_stride);
dst_tmp += (4 * dst_stride);
}
@@ -494,459 +242,298 @@ static void hevc_copy_16multx8mult_msa(uint8_t * __restrict src,
}
}
-static void hevc_copy_16w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
+static void hevc_copy_16w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
int32_t height)
{
v16i8 zero = { 0 };
if (4 == height) {
v16i8 src0, src1, src2, src3;
- v8i16 input0_r, input1_r, input2_r, input3_r;
- v8i16 input0_l, input1_l, input2_l, input3_l;
-
- LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
-
- input0_r = (v8i16) __msa_ilvr_b(zero, src0);
- input0_l = (v8i16) __msa_ilvl_b(zero, src0);
- input1_r = (v8i16) __msa_ilvr_b(zero, src1);
- input1_l = (v8i16) __msa_ilvl_b(zero, src1);
- input2_r = (v8i16) __msa_ilvr_b(zero, src2);
- input2_l = (v8i16) __msa_ilvl_b(zero, src2);
- input3_r = (v8i16) __msa_ilvr_b(zero, src3);
- input3_l = (v8i16) __msa_ilvl_b(zero, src3);
-
- input0_r <<= 6;
- input0_l <<= 6;
- input1_r <<= 6;
- input1_l <<= 6;
- input2_r <<= 6;
- input2_l <<= 6;
- input3_r <<= 6;
- input3_l <<= 6;
-
- STORE_4VECS_SH(dst, dst_stride, input0_r, input1_r, input2_r, input3_r);
- STORE_4VECS_SH((dst + 8), dst_stride,
- input0_l, input1_l, input2_l, input3_l);
+ v8i16 in0_r, in1_r, in2_r, in3_r;
+ v8i16 in0_l, in1_l, in2_l, in3_l;
+
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+
+ ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
+ in0_r, in1_r, in2_r, in3_r);
+ ILVL_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
+ in0_l, in1_l, in2_l, in3_l);
+ SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
+ SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
+ ST_SH4(in0_r, in1_r, in2_r, in3_r, dst, dst_stride);
+ ST_SH4(in0_l, in1_l, in2_l, in3_l, (dst + 8), dst_stride);
} else if (12 == height) {
v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
v16i8 src8, src9, src10, src11;
- v8i16 input0_r, input1_r, input2_r, input3_r;
- v8i16 input0_l, input1_l, input2_l, input3_l;
+ v8i16 in0_r, in1_r, in2_r, in3_r;
+ v8i16 in0_l, in1_l, in2_l, in3_l;
- LOAD_8VECS_SB(src, src_stride,
- src0, src1, src2, src3, src4, src5, src6, src7);
+ LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
src += (8 * src_stride);
-
- LOAD_4VECS_SB(src, src_stride, src8, src9, src10, src11);
-
- input0_r = (v8i16) __msa_ilvr_b(zero, src0);
- input0_l = (v8i16) __msa_ilvl_b(zero, src0);
- input1_r = (v8i16) __msa_ilvr_b(zero, src1);
- input1_l = (v8i16) __msa_ilvl_b(zero, src1);
- input2_r = (v8i16) __msa_ilvr_b(zero, src2);
- input2_l = (v8i16) __msa_ilvl_b(zero, src2);
- input3_r = (v8i16) __msa_ilvr_b(zero, src3);
- input3_l = (v8i16) __msa_ilvl_b(zero, src3);
-
- input0_r <<= 6;
- input0_l <<= 6;
- input1_r <<= 6;
- input1_l <<= 6;
- input2_r <<= 6;
- input2_l <<= 6;
- input3_r <<= 6;
- input3_l <<= 6;
-
- STORE_4VECS_SH(dst, dst_stride, input0_r, input1_r, input2_r, input3_r);
- STORE_4VECS_SH((dst + 8), dst_stride,
- input0_l, input1_l, input2_l, input3_l);
+ LD_SB4(src, src_stride, src8, src9, src10, src11);
+
+ ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
+ in0_r, in1_r, in2_r, in3_r);
+ ILVL_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
+ in0_l, in1_l, in2_l, in3_l);
+ SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
+ SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
+ ST_SH4(in0_r, in1_r, in2_r, in3_r, dst, dst_stride);
+ ST_SH4(in0_l, in1_l, in2_l, in3_l, (dst + 8), dst_stride);
dst += (4 * dst_stride);
- input0_r = (v8i16) __msa_ilvr_b(zero, src4);
- input0_l = (v8i16) __msa_ilvl_b(zero, src4);
- input1_r = (v8i16) __msa_ilvr_b(zero, src5);
- input1_l = (v8i16) __msa_ilvl_b(zero, src5);
- input2_r = (v8i16) __msa_ilvr_b(zero, src6);
- input2_l = (v8i16) __msa_ilvl_b(zero, src6);
- input3_r = (v8i16) __msa_ilvr_b(zero, src7);
- input3_l = (v8i16) __msa_ilvl_b(zero, src7);
-
- input0_r <<= 6;
- input0_l <<= 6;
- input1_r <<= 6;
- input1_l <<= 6;
- input2_r <<= 6;
- input2_l <<= 6;
- input3_r <<= 6;
- input3_l <<= 6;
-
- STORE_4VECS_SH(dst, dst_stride, input0_r, input1_r, input2_r, input3_r);
- STORE_4VECS_SH((dst + 8), dst_stride,
- input0_l, input1_l, input2_l, input3_l);
+ ILVR_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7,
+ in0_r, in1_r, in2_r, in3_r);
+ ILVL_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7,
+ in0_l, in1_l, in2_l, in3_l);
+ SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
+ SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
+ ST_SH4(in0_r, in1_r, in2_r, in3_r, dst, dst_stride);
+ ST_SH4(in0_l, in1_l, in2_l, in3_l, (dst + 8), dst_stride);
dst += (4 * dst_stride);
- input0_r = (v8i16) __msa_ilvr_b(zero, src8);
- input0_l = (v8i16) __msa_ilvl_b(zero, src8);
- input1_r = (v8i16) __msa_ilvr_b(zero, src9);
- input1_l = (v8i16) __msa_ilvl_b(zero, src9);
- input2_r = (v8i16) __msa_ilvr_b(zero, src10);
- input2_l = (v8i16) __msa_ilvl_b(zero, src10);
- input3_r = (v8i16) __msa_ilvr_b(zero, src11);
- input3_l = (v8i16) __msa_ilvl_b(zero, src11);
-
- input0_r <<= 6;
- input0_l <<= 6;
- input1_r <<= 6;
- input1_l <<= 6;
- input2_r <<= 6;
- input2_l <<= 6;
- input3_r <<= 6;
- input3_l <<= 6;
-
- STORE_4VECS_SH(dst, dst_stride, input0_r, input1_r, input2_r, input3_r);
- STORE_4VECS_SH((dst + 8), dst_stride,
- input0_l, input1_l, input2_l, input3_l);
+ ILVR_B4_SH(zero, src8, zero, src9, zero, src10, zero, src11,
+ in0_r, in1_r, in2_r, in3_r);
+ ILVL_B4_SH(zero, src8, zero, src9, zero, src10, zero, src11,
+ in0_l, in1_l, in2_l, in3_l);
+ SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
+ SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
+ ST_SH4(in0_r, in1_r, in2_r, in3_r, dst, dst_stride);
+ ST_SH4(in0_l, in1_l, in2_l, in3_l, (dst + 8), dst_stride);
} else if (0 == (height % 8)) {
hevc_copy_16multx8mult_msa(src, src_stride, dst, dst_stride,
height, 16);
}
}
-static void hevc_copy_24w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
+static void hevc_copy_24w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
int32_t height)
{
hevc_copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 16);
-
hevc_copy_8w_msa(src + 16, src_stride, dst + 16, dst_stride, height);
}
-static void hevc_copy_32w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
+static void hevc_copy_32w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
int32_t height)
{
hevc_copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 32);
}
-static void hevc_copy_48w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
+static void hevc_copy_48w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
int32_t height)
{
hevc_copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 48);
}
-static void hevc_copy_64w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
+static void hevc_copy_64w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
int32_t height)
{
hevc_copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 64);
}
-static void hevc_hz_8t_4w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
- const int8_t * __restrict filter, int32_t height)
+static void hevc_hz_8t_4w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
{
uint32_t loop_cnt;
- uint64_t out0, out1, out2, out3, out4, out5, out6, out7;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
v8i16 filt0, filt1, filt2, filt3;
v16i8 mask1, mask2, mask3;
- v8u16 const_vec;
v16i8 vec0, vec1, vec2, vec3;
v8i16 dst0, dst1, dst2, dst3;
- v8i16 filter_vec;
+ v8i16 filter_vec, const_vec;
v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20 };
src -= 3;
-
- const_vec = (v8u16) __msa_ldi_h(128);
+ const_vec = __msa_ldi_h(128);
const_vec <<= 6;
- filter_vec = LOAD_SH(filter);
- filt0 = __msa_splati_h(filter_vec, 0);
- filt1 = __msa_splati_h(filter_vec, 1);
- filt2 = __msa_splati_h(filter_vec, 2);
- filt3 = __msa_splati_h(filter_vec, 3);
+ filter_vec = LD_SH(filter);
+ SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
mask1 = mask0 + 2;
mask2 = mask0 + 4;
mask3 = mask0 + 6;
for (loop_cnt = (height >> 3); loop_cnt--;) {
- LOAD_8VECS_SB(src, src_stride,
- src0, src1, src2, src3, src4, src5, src6, src7);
+ LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
src += (8 * src_stride);
-
- XORI_B_8VECS_SB(src0, src1, src2, src3, src4, src5, src6, src7,
- src0, src1, src2, src3, src4, src5, src6, src7, 128);
-
- vec0 = __msa_vshf_b(mask0, src1, src0);
- vec1 = __msa_vshf_b(mask1, src1, src0);
- vec2 = __msa_vshf_b(mask2, src1, src0);
- vec3 = __msa_vshf_b(mask3, src1, src0);
-
- dst0 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- vec0 = __msa_vshf_b(mask0, src3, src2);
- vec1 = __msa_vshf_b(mask1, src3, src2);
- vec2 = __msa_vshf_b(mask2, src3, src2);
- vec3 = __msa_vshf_b(mask3, src3, src2);
-
- dst1 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- vec0 = __msa_vshf_b(mask0, src5, src4);
- vec1 = __msa_vshf_b(mask1, src5, src4);
- vec2 = __msa_vshf_b(mask2, src5, src4);
- vec3 = __msa_vshf_b(mask3, src5, src4);
-
- dst2 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- vec0 = __msa_vshf_b(mask0, src7, src6);
- vec1 = __msa_vshf_b(mask1, src7, src6);
- vec2 = __msa_vshf_b(mask2, src7, src6);
- vec3 = __msa_vshf_b(mask3, src7, src6);
-
- dst3 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- out0 = __msa_copy_u_d((v2i64) dst0, 0);
- out1 = __msa_copy_u_d((v2i64) dst0, 1);
- out2 = __msa_copy_u_d((v2i64) dst1, 0);
- out3 = __msa_copy_u_d((v2i64) dst1, 1);
- out4 = __msa_copy_u_d((v2i64) dst2, 0);
- out5 = __msa_copy_u_d((v2i64) dst2, 1);
- out6 = __msa_copy_u_d((v2i64) dst3, 0);
- out7 = __msa_copy_u_d((v2i64) dst3, 1);
-
- STORE_DWORD(dst, out0);
- dst += dst_stride;
- STORE_DWORD(dst, out1);
- dst += dst_stride;
- STORE_DWORD(dst, out2);
- dst += dst_stride;
- STORE_DWORD(dst, out3);
- dst += dst_stride;
- STORE_DWORD(dst, out4);
- dst += dst_stride;
- STORE_DWORD(dst, out5);
- dst += dst_stride;
- STORE_DWORD(dst, out6);
- dst += dst_stride;
- STORE_DWORD(dst, out7);
- dst += dst_stride;
+ XORI_B8_128_SB(src0, src1, src2, src3, src4, src5, src6, src7);
+
+ VSHF_B4_SB(src0, src1, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst0 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst0, dst0, dst0, dst0);
+ VSHF_B4_SB(src2, src3, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst1 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst1, dst1, dst1, dst1);
+ VSHF_B4_SB(src4, src5, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst2 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst2, dst2, dst2, dst2);
+ VSHF_B4_SB(src6, src7, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst3 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst3, dst3, dst3, dst3);
+
+ ST8x8_UB(dst0, dst1, dst2, dst3, dst, 2 * dst_stride);
+ dst += (8 * dst_stride);
}
}
-static void hevc_hz_8t_8w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
- const int8_t * __restrict filter, int32_t height)
+static void hevc_hz_8t_8w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
{
uint32_t loop_cnt;
v16i8 src0, src1, src2, src3;
v8i16 filt0, filt1, filt2, filt3;
v16i8 mask1, mask2, mask3;
- v8u16 const_vec;
v16i8 vec0, vec1, vec2, vec3;
v8i16 dst0, dst1, dst2, dst3;
- v8i16 filter_vec;
+ v8i16 filter_vec, const_vec;
v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
src -= 3;
-
- const_vec = (v8u16) __msa_ldi_h(128);
+ const_vec = __msa_ldi_h(128);
const_vec <<= 6;
- filter_vec = LOAD_SH(filter);
- filt0 = __msa_splati_h(filter_vec, 0);
- filt1 = __msa_splati_h(filter_vec, 1);
- filt2 = __msa_splati_h(filter_vec, 2);
- filt3 = __msa_splati_h(filter_vec, 3);
+ filter_vec = LD_SH(filter);
+ SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
mask1 = mask0 + 2;
mask2 = mask0 + 4;
mask3 = mask0 + 6;
for (loop_cnt = (height >> 2); loop_cnt--;) {
- LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
src += (4 * src_stride);
-
- XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
-
- vec0 = __msa_vshf_b(mask0, src0, src0);
- vec1 = __msa_vshf_b(mask1, src0, src0);
- vec2 = __msa_vshf_b(mask2, src0, src0);
- vec3 = __msa_vshf_b(mask3, src0, src0);
-
- dst0 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- vec0 = __msa_vshf_b(mask0, src1, src1);
- vec1 = __msa_vshf_b(mask1, src1, src1);
- vec2 = __msa_vshf_b(mask2, src1, src1);
- vec3 = __msa_vshf_b(mask3, src1, src1);
-
- dst1 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- vec0 = __msa_vshf_b(mask0, src2, src2);
- vec1 = __msa_vshf_b(mask1, src2, src2);
- vec2 = __msa_vshf_b(mask2, src2, src2);
- vec3 = __msa_vshf_b(mask3, src2, src2);
-
- dst2 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- vec0 = __msa_vshf_b(mask0, src3, src3);
- vec1 = __msa_vshf_b(mask1, src3, src3);
- vec2 = __msa_vshf_b(mask2, src3, src3);
- vec3 = __msa_vshf_b(mask3, src3, src3);
-
- dst3 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- STORE_SH(dst0, dst);
- dst += dst_stride;
- STORE_SH(dst1, dst);
- dst += dst_stride;
- STORE_SH(dst2, dst);
- dst += dst_stride;
- STORE_SH(dst3, dst);
- dst += dst_stride;
+ XORI_B4_128_SB(src0, src1, src2, src3);
+
+ VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst0 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst0, dst0, dst0, dst0);
+ VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst1 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst1, dst1, dst1, dst1);
+ VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst2 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst2, dst2, dst2, dst2);
+ VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst3 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst3, dst3, dst3, dst3);
+
+ ST_SH4(dst0, dst1, dst2, dst3, dst, dst_stride);
+ dst += (4 * dst_stride);
}
}
-static void hevc_hz_8t_12w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
- const int8_t * __restrict filter, int32_t height)
+static void hevc_hz_8t_12w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
{
hevc_hz_8t_8w_msa(src, src_stride, dst, dst_stride, filter, height);
-
hevc_hz_8t_4w_msa(src + 8, src_stride, dst + 8, dst_stride, filter, height);
}
-static void hevc_hz_8t_16w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
- const int8_t * __restrict filter, int32_t height)
+static void hevc_hz_8t_16w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
{
uint32_t loop_cnt;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
v8i16 filt0, filt1, filt2, filt3;
v16i8 mask1, mask2, mask3;
- v8u16 const_vec;
v16i8 vec0, vec1, vec2, vec3;
v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
- v8i16 filter_vec;
+ v8i16 filter_vec, const_vec;
v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
src -= 3;
-
- const_vec = (v8u16) __msa_ldi_h(128);
+ const_vec = __msa_ldi_h(128);
const_vec <<= 6;
- filter_vec = LOAD_SH(filter);
- filt0 = __msa_splati_h(filter_vec, 0);
- filt1 = __msa_splati_h(filter_vec, 1);
- filt2 = __msa_splati_h(filter_vec, 2);
- filt3 = __msa_splati_h(filter_vec, 3);
+ filter_vec = LD_SH(filter);
+ SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
mask1 = mask0 + 2;
mask2 = mask0 + 4;
mask3 = mask0 + 6;
for (loop_cnt = (height >> 2); loop_cnt--;) {
- LOAD_4VECS_SB(src, src_stride, src0, src2, src4, src6);
- LOAD_4VECS_SB(src + 8, src_stride, src1, src3, src5, src7);
+ LD_SB4(src, src_stride, src0, src2, src4, src6);
+ LD_SB4(src + 8, src_stride, src1, src3, src5, src7);
src += (4 * src_stride);
-
- XORI_B_8VECS_SB(src0, src1, src2, src3, src4, src5, src6, src7,
- src0, src1, src2, src3, src4, src5, src6, src7, 128);
-
- vec0 = __msa_vshf_b(mask0, src0, src0);
- vec1 = __msa_vshf_b(mask1, src0, src0);
- vec2 = __msa_vshf_b(mask2, src0, src0);
- vec3 = __msa_vshf_b(mask3, src0, src0);
-
- dst0 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- vec0 = __msa_vshf_b(mask0, src1, src1);
- vec1 = __msa_vshf_b(mask1, src1, src1);
- vec2 = __msa_vshf_b(mask2, src1, src1);
- vec3 = __msa_vshf_b(mask3, src1, src1);
-
- dst1 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- vec0 = __msa_vshf_b(mask0, src2, src2);
- vec1 = __msa_vshf_b(mask1, src2, src2);
- vec2 = __msa_vshf_b(mask2, src2, src2);
- vec3 = __msa_vshf_b(mask3, src2, src2);
-
- dst2 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- vec0 = __msa_vshf_b(mask0, src3, src3);
- vec1 = __msa_vshf_b(mask1, src3, src3);
- vec2 = __msa_vshf_b(mask2, src3, src3);
- vec3 = __msa_vshf_b(mask3, src3, src3);
-
- dst3 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- vec0 = __msa_vshf_b(mask0, src4, src4);
- vec1 = __msa_vshf_b(mask1, src4, src4);
- vec2 = __msa_vshf_b(mask2, src4, src4);
- vec3 = __msa_vshf_b(mask3, src4, src4);
-
- dst4 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- vec0 = __msa_vshf_b(mask0, src5, src5);
- vec1 = __msa_vshf_b(mask1, src5, src5);
- vec2 = __msa_vshf_b(mask2, src5, src5);
- vec3 = __msa_vshf_b(mask3, src5, src5);
-
- dst5 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- vec0 = __msa_vshf_b(mask0, src6, src6);
- vec1 = __msa_vshf_b(mask1, src6, src6);
- vec2 = __msa_vshf_b(mask2, src6, src6);
- vec3 = __msa_vshf_b(mask3, src6, src6);
-
- dst6 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- vec0 = __msa_vshf_b(mask0, src7, src7);
- vec1 = __msa_vshf_b(mask1, src7, src7);
- vec2 = __msa_vshf_b(mask2, src7, src7);
- vec3 = __msa_vshf_b(mask3, src7, src7);
-
- dst7 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- STORE_SH(dst0, dst);
- STORE_SH(dst1, dst + 8);
- dst += dst_stride;
- STORE_SH(dst2, dst);
- STORE_SH(dst3, dst + 8);
- dst += dst_stride;
- STORE_SH(dst4, dst);
- STORE_SH(dst5, dst + 8);
- dst += dst_stride;
- STORE_SH(dst6, dst);
- STORE_SH(dst7, dst + 8);
- dst += dst_stride;
+ XORI_B8_128_SB(src0, src1, src2, src3, src4, src5, src6, src7);
+
+ VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst0 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst0, dst0, dst0, dst0);
+ VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst1 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst1, dst1, dst1, dst1);
+ VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst2 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst2, dst2, dst2, dst2);
+ VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst3 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst3, dst3, dst3, dst3);
+ VSHF_B4_SB(src4, src4, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst4 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst4, dst4, dst4, dst4);
+ VSHF_B4_SB(src5, src5, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst5 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst5, dst5, dst5, dst5);
+ VSHF_B4_SB(src6, src6, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst6 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst6, dst6, dst6, dst6);
+ VSHF_B4_SB(src7, src7, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst7 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst7, dst7, dst7, dst7);
+
+ ST_SH4(dst0, dst2, dst4, dst6, dst, dst_stride);
+ ST_SH4(dst1, dst3, dst5, dst7, dst + 8, dst_stride);
+ dst += (4 * dst_stride);
}
}
-static void hevc_hz_8t_24w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
- const int8_t * __restrict filter, int32_t height)
+static void hevc_hz_8t_24w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
{
uint32_t loop_cnt;
v16i8 src0, src1, src2, src3;
@@ -954,17 +541,12 @@ static void hevc_hz_8t_24w_msa(uint8_t * __restrict src, int32_t src_stride,
v16i8 mask1, mask2, mask3, mask4, mask5, mask6, mask7;
v16i8 vec0, vec1, vec2, vec3;
v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
- v8i16 filter_vec;
- v8u16 const_vec;
+ v8i16 filter_vec, const_vec;
v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
src -= 3;
-
- filter_vec = LOAD_SH(filter);
- filt0 = __msa_splati_h(filter_vec, 0);
- filt1 = __msa_splati_h(filter_vec, 1);
- filt2 = __msa_splati_h(filter_vec, 2);
- filt3 = __msa_splati_h(filter_vec, 3);
+ filter_vec = LD_SH(filter);
+ SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
mask1 = mask0 + 2;
mask2 = mask0 + 4;
@@ -974,81 +556,59 @@ static void hevc_hz_8t_24w_msa(uint8_t * __restrict src, int32_t src_stride,
mask6 = mask0 + 12;
mask7 = mask0 + 14;
- const_vec = (v8u16) __msa_ldi_h(128);
+ const_vec = __msa_ldi_h(128);
const_vec <<= 6;
for (loop_cnt = (height >> 1); loop_cnt--;) {
- src0 = LOAD_SB(src);
- src1 = LOAD_SB(src + 16);
+ LD_SB2(src, 16, src0, src1);
src += src_stride;
- src2 = LOAD_SB(src);
- src3 = LOAD_SB(src + 16);
+ LD_SB2(src, 16, src2, src3);
src += src_stride;
-
- XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
-
- vec0 = __msa_vshf_b(mask0, src0, src0);
- vec1 = __msa_vshf_b(mask1, src0, src0);
- vec2 = __msa_vshf_b(mask2, src0, src0);
- vec3 = __msa_vshf_b(mask3, src0, src0);
-
- dst0 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- vec0 = __msa_vshf_b(mask4, src1, src0);
- vec1 = __msa_vshf_b(mask5, src1, src0);
- vec2 = __msa_vshf_b(mask6, src1, src0);
- vec3 = __msa_vshf_b(mask7, src1, src0);
-
- dst1 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- vec0 = __msa_vshf_b(mask0, src1, src1);
- vec1 = __msa_vshf_b(mask1, src1, src1);
- vec2 = __msa_vshf_b(mask2, src1, src1);
- vec3 = __msa_vshf_b(mask3, src1, src1);
-
- dst2 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- vec0 = __msa_vshf_b(mask0, src2, src2);
- vec1 = __msa_vshf_b(mask1, src2, src2);
- vec2 = __msa_vshf_b(mask2, src2, src2);
- vec3 = __msa_vshf_b(mask3, src2, src2);
-
- dst3 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- vec0 = __msa_vshf_b(mask4, src3, src2);
- vec1 = __msa_vshf_b(mask5, src3, src2);
- vec2 = __msa_vshf_b(mask6, src3, src2);
- vec3 = __msa_vshf_b(mask7, src3, src2);
-
- dst4 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- vec0 = __msa_vshf_b(mask0, src3, src3);
- vec1 = __msa_vshf_b(mask1, src3, src3);
- vec2 = __msa_vshf_b(mask2, src3, src3);
- vec3 = __msa_vshf_b(mask3, src3, src3);
-
- dst5 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- STORE_SH(dst0, dst);
- STORE_SH(dst1, dst + 8);
- STORE_SH(dst2, dst + 16);
+ XORI_B4_128_SB(src0, src1, src2, src3);
+
+ VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst0 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst0, dst0, dst0, dst0);
+ VSHF_B4_SB(src0, src1, mask4, mask5, mask6, mask7,
+ vec0, vec1, vec2, vec3);
+ dst1 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst1, dst1, dst1, dst1);
+ VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst2 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst2, dst2, dst2, dst2);
+ VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst3 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst3, dst3, dst3, dst3);
+ VSHF_B4_SB(src2, src3, mask4, mask5, mask6, mask7,
+ vec0, vec1, vec2, vec3);
+ dst4 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst4, dst4, dst4, dst4);
+ VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst5 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst5, dst5, dst5, dst5);
+
+ ST_SH2(dst0, dst1, dst, 8);
+ ST_SH(dst2, dst + 16);
dst += dst_stride;
- STORE_SH(dst3, dst);
- STORE_SH(dst4, dst + 8);
- STORE_SH(dst5, dst + 16);
+ ST_SH2(dst3, dst4, dst, 8);
+ ST_SH(dst5, dst + 16);
dst += dst_stride;
}
}
-static void hevc_hz_8t_32w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
- const int8_t * __restrict filter, int32_t height)
+static void hevc_hz_8t_32w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
{
uint32_t loop_cnt;
v16i8 src0, src1, src2;
@@ -1056,17 +616,12 @@ static void hevc_hz_8t_32w_msa(uint8_t * __restrict src, int32_t src_stride,
v16i8 mask1, mask2, mask3, mask4, mask5, mask6, mask7;
v16i8 vec0, vec1, vec2, vec3;
v8i16 dst0, dst1, dst2, dst3;
- v8i16 filter_vec;
- v8u16 const_vec;
+ v8i16 filter_vec, const_vec;
v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
src -= 3;
-
- filter_vec = LOAD_SH(filter);
- filt0 = __msa_splati_h(filter_vec, 0);
- filt1 = __msa_splati_h(filter_vec, 1);
- filt2 = __msa_splati_h(filter_vec, 2);
- filt3 = __msa_splati_h(filter_vec, 3);
+ filter_vec = LD_SH(filter);
+ SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
mask1 = mask0 + 2;
mask2 = mask0 + 4;
@@ -1076,60 +631,44 @@ static void hevc_hz_8t_32w_msa(uint8_t * __restrict src, int32_t src_stride,
mask6 = mask0 + 12;
mask7 = mask0 + 14;
- const_vec = (v8u16) __msa_ldi_h(128);
+ const_vec = __msa_ldi_h(128);
const_vec <<= 6;
for (loop_cnt = height; loop_cnt--;) {
- src0 = LOAD_SB(src);
- src1 = LOAD_SB(src + 16);
- src2 = LOAD_SB(src + 24);
+ LD_SB2(src, 16, src0, src1);
+ src2 = LD_SB(src + 24);
src += src_stride;
-
- XORI_B_3VECS_SB(src0, src1, src2, src0, src1, src2, 128);
-
- vec0 = __msa_vshf_b(mask0, src0, src0);
- vec1 = __msa_vshf_b(mask1, src0, src0);
- vec2 = __msa_vshf_b(mask2, src0, src0);
- vec3 = __msa_vshf_b(mask3, src0, src0);
-
- dst0 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- vec0 = __msa_vshf_b(mask4, src1, src0);
- vec1 = __msa_vshf_b(mask5, src1, src0);
- vec2 = __msa_vshf_b(mask6, src1, src0);
- vec3 = __msa_vshf_b(mask7, src1, src0);
-
- dst1 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- vec0 = __msa_vshf_b(mask0, src1, src1);
- vec1 = __msa_vshf_b(mask1, src1, src1);
- vec2 = __msa_vshf_b(mask2, src1, src1);
- vec3 = __msa_vshf_b(mask3, src1, src1);
-
- dst2 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- vec0 = __msa_vshf_b(mask0, src2, src2);
- vec1 = __msa_vshf_b(mask1, src2, src2);
- vec2 = __msa_vshf_b(mask2, src2, src2);
- vec3 = __msa_vshf_b(mask3, src2, src2);
-
- dst3 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- STORE_SH(dst0, dst);
- STORE_SH(dst1, dst + 8);
- STORE_SH(dst2, dst + 16);
- STORE_SH(dst3, dst + 24);
+ XORI_B3_128_SB(src0, src1, src2);
+
+ VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst0 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst0, dst0, dst0, dst0);
+ VSHF_B4_SB(src0, src1, mask4, mask5, mask6, mask7,
+ vec0, vec1, vec2, vec3);
+ dst1 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst1, dst1, dst1, dst1);
+ VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst2 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst2, dst2, dst2, dst2);
+ VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst3 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst3, dst3, dst3, dst3);
+
+ ST_SH4(dst0, dst1, dst2, dst3, dst, 8);
dst += dst_stride;
}
}
-static void hevc_hz_8t_48w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
- const int8_t * __restrict filter, int32_t height)
+static void hevc_hz_8t_48w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
{
uint32_t loop_cnt;
v16i8 src0, src1, src2, src3;
@@ -1137,17 +676,12 @@ static void hevc_hz_8t_48w_msa(uint8_t * __restrict src, int32_t src_stride,
v16i8 mask1, mask2, mask3, mask4, mask5, mask6, mask7;
v16i8 vec0, vec1, vec2, vec3;
v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
- v8i16 filter_vec;
- v8u16 const_vec;
+ v8i16 filter_vec, const_vec;
v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
src -= 3;
-
- filter_vec = LOAD_SH(filter);
- filt0 = __msa_splati_h(filter_vec, 0);
- filt1 = __msa_splati_h(filter_vec, 1);
- filt2 = __msa_splati_h(filter_vec, 2);
- filt3 = __msa_splati_h(filter_vec, 3);
+ filter_vec = LD_SH(filter);
+ SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
mask1 = mask0 + 2;
mask2 = mask0 + 4;
@@ -1157,79 +691,54 @@ static void hevc_hz_8t_48w_msa(uint8_t * __restrict src, int32_t src_stride,
mask6 = mask0 + 12;
mask7 = mask0 + 14;
- const_vec = (v8u16) __msa_ldi_h(128);
+ const_vec = __msa_ldi_h(128);
const_vec <<= 6;
for (loop_cnt = height; loop_cnt--;) {
- src0 = LOAD_SB(src);
- src1 = LOAD_SB(src + 16);
- src2 = LOAD_SB(src + 32);
- src3 = LOAD_SB(src + 40);
+ LD_SB3(src, 16, src0, src1, src2);
+ src3 = LD_SB(src + 40);
src += src_stride;
-
- XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
-
- vec0 = __msa_vshf_b(mask0, src0, src0);
- vec1 = __msa_vshf_b(mask1, src0, src0);
- vec2 = __msa_vshf_b(mask2, src0, src0);
- vec3 = __msa_vshf_b(mask3, src0, src0);
-
- dst0 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- vec0 = __msa_vshf_b(mask4, src1, src0);
- vec1 = __msa_vshf_b(mask5, src1, src0);
- vec2 = __msa_vshf_b(mask6, src1, src0);
- vec3 = __msa_vshf_b(mask7, src1, src0);
-
- dst1 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- vec0 = __msa_vshf_b(mask0, src1, src1);
- vec1 = __msa_vshf_b(mask1, src1, src1);
- vec2 = __msa_vshf_b(mask2, src1, src1);
- vec3 = __msa_vshf_b(mask3, src1, src1);
-
- dst2 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- vec0 = __msa_vshf_b(mask4, src2, src1);
- vec1 = __msa_vshf_b(mask5, src2, src1);
- vec2 = __msa_vshf_b(mask6, src2, src1);
- vec3 = __msa_vshf_b(mask7, src2, src1);
-
- dst3 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- vec0 = __msa_vshf_b(mask0, src2, src2);
- vec1 = __msa_vshf_b(mask1, src2, src2);
- vec2 = __msa_vshf_b(mask2, src2, src2);
- vec3 = __msa_vshf_b(mask3, src2, src2);
-
- dst4 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- vec0 = __msa_vshf_b(mask0, src3, src3);
- vec1 = __msa_vshf_b(mask1, src3, src3);
- vec2 = __msa_vshf_b(mask2, src3, src3);
- vec3 = __msa_vshf_b(mask3, src3, src3);
-
- dst5 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- STORE_SH(dst0, dst);
- STORE_SH(dst1, dst + 8);
- STORE_SH(dst2, dst + 16);
- STORE_SH(dst3, dst + 24);
- STORE_SH(dst4, dst + 32);
- STORE_SH(dst5, dst + 40);
+ XORI_B4_128_SB(src0, src1, src2, src3);
+
+ VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst0 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst0, dst0, dst0, dst0);
+ VSHF_B4_SB(src0, src1, mask4, mask5, mask6, mask7,
+ vec0, vec1, vec2, vec3);
+ dst1 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst1, dst1, dst1, dst1);
+ VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst2 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst2, dst2, dst2, dst2);
+ VSHF_B4_SB(src1, src2, mask4, mask5, mask6, mask7,
+ vec0, vec1, vec2, vec3);
+ dst3 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst3, dst3, dst3, dst3);
+ VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst4 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst4, dst4, dst4, dst4);
+ VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst5 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst5, dst5, dst5, dst5);
+
+ ST_SH6(dst0, dst1, dst2, dst3, dst4, dst5, dst, 8);
dst += dst_stride;
}
}
-static void hevc_hz_8t_64w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
- const int8_t * __restrict filter, int32_t height)
+static void hevc_hz_8t_64w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
{
uint32_t loop_cnt;
v16i8 src0, src1, src2, src3, src4;
@@ -1237,17 +746,13 @@ static void hevc_hz_8t_64w_msa(uint8_t * __restrict src, int32_t src_stride,
v16i8 mask1, mask2, mask3, mask4, mask5, mask6, mask7;
v16i8 vec0, vec1, vec2, vec3;
v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
- v8i16 filter_vec;
- v8u16 const_vec;
+ v8i16 filter_vec, const_vec;
v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
src -= 3;
- filter_vec = LOAD_SH(filter);
- filt0 = __msa_splati_h(filter_vec, 0);
- filt1 = __msa_splati_h(filter_vec, 1);
- filt2 = __msa_splati_h(filter_vec, 2);
- filt3 = __msa_splati_h(filter_vec, 3);
+ filter_vec = LD_SH(filter);
+ SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
mask1 = mask0 + 2;
mask2 = mask0 + 4;
@@ -1257,110 +762,79 @@ static void hevc_hz_8t_64w_msa(uint8_t * __restrict src, int32_t src_stride,
mask6 = mask0 + 12;
mask7 = mask0 + 14;
- const_vec = (v8u16) __msa_ldi_h(128);
+ const_vec = __msa_ldi_h(128);
const_vec <<= 6;
for (loop_cnt = height; loop_cnt--;) {
- src0 = LOAD_SB(src);
- src1 = LOAD_SB(src + 16);
- src2 = LOAD_SB(src + 32);
- src3 = LOAD_SB(src + 48);
- src4 = LOAD_SB(src + 56);
+ LD_SB4(src, 16, src0, src1, src2, src3);
+ src4 = LD_SB(src + 56);
src += src_stride;
-
- XORI_B_5VECS_SB(src0, src1, src2, src3, src4,
- src0, src1, src2, src3, src4, 128);
-
- vec0 = __msa_vshf_b(mask0, src0, src0);
- vec1 = __msa_vshf_b(mask1, src0, src0);
- vec2 = __msa_vshf_b(mask2, src0, src0);
- vec3 = __msa_vshf_b(mask3, src0, src0);
-
- dst0 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- STORE_SH(dst0, dst);
-
- vec0 = __msa_vshf_b(mask4, src1, src0);
- vec1 = __msa_vshf_b(mask5, src1, src0);
- vec2 = __msa_vshf_b(mask6, src1, src0);
- vec3 = __msa_vshf_b(mask7, src1, src0);
-
- dst1 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- STORE_SH(dst1, dst + 8);
-
- vec0 = __msa_vshf_b(mask0, src1, src1);
- vec1 = __msa_vshf_b(mask1, src1, src1);
- vec2 = __msa_vshf_b(mask2, src1, src1);
- vec3 = __msa_vshf_b(mask3, src1, src1);
-
- dst2 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- STORE_SH(dst2, dst + 16);
-
- vec0 = __msa_vshf_b(mask4, src2, src1);
- vec1 = __msa_vshf_b(mask5, src2, src1);
- vec2 = __msa_vshf_b(mask6, src2, src1);
- vec3 = __msa_vshf_b(mask7, src2, src1);
-
- dst3 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- STORE_SH(dst3, dst + 24);
-
- vec0 = __msa_vshf_b(mask0, src2, src2);
- vec1 = __msa_vshf_b(mask1, src2, src2);
- vec2 = __msa_vshf_b(mask2, src2, src2);
- vec3 = __msa_vshf_b(mask3, src2, src2);
-
- dst4 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- STORE_SH(dst4, dst + 32);
-
- vec0 = __msa_vshf_b(mask4, src3, src2);
- vec1 = __msa_vshf_b(mask5, src3, src2);
- vec2 = __msa_vshf_b(mask6, src3, src2);
- vec3 = __msa_vshf_b(mask7, src3, src2);
-
- dst5 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- STORE_SH(dst5, dst + 40);
-
- vec0 = __msa_vshf_b(mask0, src3, src3);
- vec1 = __msa_vshf_b(mask1, src3, src3);
- vec2 = __msa_vshf_b(mask2, src3, src3);
- vec3 = __msa_vshf_b(mask3, src3, src3);
-
- dst6 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- STORE_SH(dst6, dst + 48);
-
- vec0 = __msa_vshf_b(mask0, src4, src4);
- vec1 = __msa_vshf_b(mask1, src4, src4);
- vec2 = __msa_vshf_b(mask2, src4, src4);
- vec3 = __msa_vshf_b(mask3, src4, src4);
-
- dst7 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- STORE_SH(dst7, dst + 56);
-
+ XORI_B5_128_SB(src0, src1, src2, src3, src4);
+
+ VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst0 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst0, dst0, dst0, dst0);
+ ST_SH(dst0, dst);
+
+ VSHF_B4_SB(src0, src1, mask4, mask5, mask6, mask7,
+ vec0, vec1, vec2, vec3);
+ dst1 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst1, dst1, dst1, dst1);
+ ST_SH(dst1, dst + 8);
+
+ VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst2 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst2, dst2, dst2, dst2);
+ ST_SH(dst2, dst + 16);
+
+ VSHF_B4_SB(src1, src2, mask4, mask5, mask6, mask7,
+ vec0, vec1, vec2, vec3);
+ dst3 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst3, dst3, dst3, dst3);
+ ST_SH(dst3, dst + 24);
+
+ VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst4 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst4, dst4, dst4, dst4);
+ ST_SH(dst4, dst + 32);
+
+ VSHF_B4_SB(src2, src3, mask4, mask5, mask6, mask7,
+ vec0, vec1, vec2, vec3);
+ dst5 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst5, dst5, dst5, dst5);
+ ST_SH(dst5, dst + 40);
+
+ VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst6 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst6, dst6, dst6, dst6);
+ ST_SH(dst6, dst + 48);
+
+ VSHF_B4_SB(src4, src4, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst7 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst7, dst7, dst7, dst7);
+ ST_SH(dst7, dst + 56);
dst += dst_stride;
}
}
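The filter setup and block loads follow the same pattern: four scalar-indexed splats become one SPLATI_H4_SH, and the four 16-byte loads at consecutive offsets in hevc_hz_8t_64w_msa become one LD_SB4 with a "stride" of 16. A sketch of the presumed expansions, again inferred from the removed code (LD_SB wrapping __msa_ld_b is an assumption):

    #define LD_SB(psrc)  __msa_ld_b((void *) (psrc), 0)

    #define LD_SB4(psrc, stride, out0, out1, out2, out3)  \
    {                                                     \
        out0 = LD_SB(psrc);                               \
        out1 = LD_SB((psrc) + (stride));                  \
        out2 = LD_SB((psrc) + 2 * (stride));              \
        out3 = LD_SB((psrc) + 3 * (stride));              \
    }

    #define SPLATI_H4_SH(in, idx0, idx1, idx2, idx3,      \
                         out0, out1, out2, out3)          \
    {                                                     \
        out0 = __msa_splati_h((v8i16) (in), idx0);        \
        out1 = __msa_splati_h((v8i16) (in), idx1);        \
        out2 = __msa_splati_h((v8i16) (in), idx2);        \
        out3 = __msa_splati_h((v8i16) (in), idx3);        \
    }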
-static void hevc_vt_8t_4w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
- const int8_t * __restrict filter, int32_t height)
+static void hevc_vt_8t_4w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
{
int32_t loop_cnt;
- uint64_t out0, out1, out2, out3;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
v16i8 src9, src10, src11, src12, src13, src14;
v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
@@ -1369,183 +843,131 @@ static void hevc_vt_8t_4w_msa(uint8_t * __restrict src, int32_t src_stride,
v16i8 src2110, src4332, src6554, src8776, src10998;
v16i8 src12111110, src14131312;
v8i16 dst10, dst32, dst54, dst76;
- v8i16 filter_vec;
v8i16 filt0, filt1, filt2, filt3;
- v8u16 const_vec;
+ v8i16 filter_vec, const_vec;
src -= (3 * src_stride);
- const_vec = (v8u16) __msa_ldi_h(128);
+ const_vec = __msa_ldi_h(128);
const_vec <<= 6;
- filter_vec = LOAD_SH(filter);
- filt0 = __msa_splati_h(filter_vec, 0);
- filt1 = __msa_splati_h(filter_vec, 1);
- filt2 = __msa_splati_h(filter_vec, 2);
- filt3 = __msa_splati_h(filter_vec, 3);
+ filter_vec = LD_SH(filter);
+ SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
- LOAD_7VECS_SB(src, src_stride,
- src0, src1, src2, src3, src4, src5, src6);
+ LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
src += (7 * src_stride);
-
- ILVR_B_6VECS_SB(src0, src2, src4, src1, src3, src5,
- src1, src3, src5, src2, src4, src6,
- src10_r, src32_r, src54_r, src21_r, src43_r, src65_r);
-
- ILVR_D_3VECS_SB(src2110, src21_r, src10_r, src4332, src43_r, src32_r,
- src6554, src65_r, src54_r);
-
- XORI_B_3VECS_SB(src2110, src4332, src6554, src2110, src4332, src6554, 128);
+ ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1,
+ src10_r, src32_r, src54_r, src21_r);
+ ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
+ ILVR_D3_SB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r,
+ src2110, src4332, src6554);
+ XORI_B3_128_SB(src2110, src4332, src6554);
for (loop_cnt = (height >> 3); loop_cnt--;) {
- LOAD_8VECS_SB(src, src_stride,
- src7, src8, src9, src10, src11, src12, src13, src14);
+ LD_SB8(src, src_stride,
+ src7, src8, src9, src10, src11, src12, src13, src14);
src += (8 * src_stride);
- ILVR_B_8VECS_SB(src6, src7, src8, src9, src10, src11, src12, src13,
- src7, src8, src9, src10, src11, src12, src13, src14,
- src76_r, src87_r, src98_r, src109_r,
- src1110_r, src1211_r, src1312_r, src1413_r);
-
- ILVR_D_4VECS_SB(src8776, src87_r, src76_r, src10998, src109_r, src98_r,
- src12111110, src1211_r, src1110_r,
- src14131312, src1413_r, src1312_r);
-
- XORI_B_4VECS_SB(src8776, src10998, src12111110, src14131312,
- src8776, src10998, src12111110, src14131312, 128);
-
- dst10 = HEVC_FILT_8TAP_DPADD_H(src2110, src4332, src6554, src8776,
- filt0, filt1, filt2, filt3, const_vec);
-
- dst32 = HEVC_FILT_8TAP_DPADD_H(src4332, src6554, src8776, src10998,
- filt0, filt1, filt2, filt3, const_vec);
-
- dst54 = HEVC_FILT_8TAP_DPADD_H(src6554, src8776, src10998, src12111110,
- filt0, filt1, filt2, filt3, const_vec);
-
- dst76 = HEVC_FILT_8TAP_DPADD_H(src8776, src10998,
- src12111110, src14131312,
- filt0, filt1, filt2, filt3, const_vec);
-
- out0 = __msa_copy_u_d((v2i64) dst10, 0);
- out1 = __msa_copy_u_d((v2i64) dst10, 1);
- out2 = __msa_copy_u_d((v2i64) dst32, 0);
- out3 = __msa_copy_u_d((v2i64) dst32, 1);
-
- STORE_DWORD(dst, out0);
- dst += dst_stride;
- STORE_DWORD(dst, out1);
- dst += dst_stride;
- STORE_DWORD(dst, out2);
- dst += dst_stride;
- STORE_DWORD(dst, out3);
- dst += dst_stride;
-
- out0 = __msa_copy_u_d((v2i64) dst54, 0);
- out1 = __msa_copy_u_d((v2i64) dst54, 1);
- out2 = __msa_copy_u_d((v2i64) dst76, 0);
- out3 = __msa_copy_u_d((v2i64) dst76, 1);
-
- STORE_DWORD(dst, out0);
- dst += dst_stride;
- STORE_DWORD(dst, out1);
- dst += dst_stride;
- STORE_DWORD(dst, out2);
- dst += dst_stride;
- STORE_DWORD(dst, out3);
- dst += dst_stride;
+ ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
+ src76_r, src87_r, src98_r, src109_r);
+ ILVR_B4_SB(src11, src10, src12, src11, src13, src12, src14, src13,
+ src1110_r, src1211_r, src1312_r, src1413_r);
+ ILVR_D4_SB(src87_r, src76_r, src109_r, src98_r,
+ src1211_r, src1110_r, src1413_r, src1312_r,
+ src8776, src10998, src12111110, src14131312);
+ XORI_B4_128_SB(src8776, src10998, src12111110, src14131312);
+
+ dst10 = const_vec;
+ DPADD_SB4_SH(src2110, src4332, src6554, src8776,
+ filt0, filt1, filt2, filt3, dst10, dst10, dst10, dst10);
+ dst32 = const_vec;
+ DPADD_SB4_SH(src4332, src6554, src8776, src10998,
+ filt0, filt1, filt2, filt3, dst32, dst32, dst32, dst32);
+ dst54 = const_vec;
+ DPADD_SB4_SH(src6554, src8776, src10998, src12111110,
+ filt0, filt1, filt2, filt3, dst54, dst54, dst54, dst54);
+ dst76 = const_vec;
+ DPADD_SB4_SH(src8776, src10998, src12111110, src14131312,
+ filt0, filt1, filt2, filt3, dst76, dst76, dst76, dst76);
+
+ ST8x8_UB(dst10, dst32, dst54, dst76, dst, 2 * dst_stride);
+ dst += (8 * dst_stride);
src2110 = src10998;
src4332 = src12111110;
src6554 = src14131312;
-
src6 = src14;
}
}
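hevc_vt_8t_4w_msa now emits its eight 4-sample rows with a single ST8x8_UB instead of eight __msa_copy_u_d/STORE_DWORD pairs. The macro takes a byte stride, which is why the call passes 2 * dst_stride for an int16_t destination. A functional sketch (hypothetical helper name; the real macro lives in generic_macros_msa.h):

    #include <stdint.h>
    #include <string.h>
    /* assumes <msa.h> for the v8i16/v2i64 vector typedefs */

    /* Stores the low and high doublewords of four vectors as eight
     * 8-byte rows, i.e. eight rows of four int16 samples each. */
    static inline void st8x8_ub_sketch(v8i16 in0, v8i16 in1, v8i16 in2,
                                       v8i16 in3, uint8_t *pdst,
                                       int32_t stride)
    {
        const v8i16 in[4] = { in0, in1, in2, in3 };

        for (int i = 0; i < 4; i++) {
            uint64_t lo = __msa_copy_u_d((v2i64) in[i], 0);
            uint64_t hi = __msa_copy_u_d((v2i64) in[i], 1);

            memcpy(pdst,          &lo, 8);   /* row 2 * i     */
            memcpy(pdst + stride, &hi, 8);   /* row 2 * i + 1 */
            pdst += 2 * stride;
        }
    }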
-static void hevc_vt_8t_8w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
- const int8_t * __restrict filter, int32_t height)
+static void hevc_vt_8t_8w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
{
int32_t loop_cnt;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
v8i16 dst0_r, dst1_r, dst2_r, dst3_r;
- v8i16 filter_vec;
+ v8i16 filter_vec, const_vec;
v8i16 filt0, filt1, filt2, filt3;
- v8u16 const_vec;
src -= (3 * src_stride);
-
- const_vec = (v8u16) __msa_ldi_h(128);
+ const_vec = __msa_ldi_h(128);
const_vec <<= 6;
- filter_vec = LOAD_SH(filter);
- filt0 = __msa_splati_h(filter_vec, 0);
- filt1 = __msa_splati_h(filter_vec, 1);
- filt2 = __msa_splati_h(filter_vec, 2);
- filt3 = __msa_splati_h(filter_vec, 3);
+ filter_vec = LD_SH(filter);
+ SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
- LOAD_7VECS_SB(src, src_stride,
- src0, src1, src2, src3, src4, src5, src6);
+ LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
src += (7 * src_stride);
-
- XORI_B_7VECS_SB(src0, src1, src2, src3, src4, src5, src6,
- src0, src1, src2, src3, src4, src5, src6, 128);
-
- ILVR_B_6VECS_SB(src0, src2, src4, src1, src3, src5,
- src1, src3, src5, src2, src4, src6,
- src10_r, src32_r, src54_r, src21_r, src43_r, src65_r);
+ XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
+ ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1,
+ src10_r, src32_r, src54_r, src21_r);
+ ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
for (loop_cnt = (height >> 2); loop_cnt--;) {
- LOAD_4VECS_SB(src, src_stride, src7, src8, src9, src10);
+ LD_SB4(src, src_stride, src7, src8, src9, src10);
src += (4 * src_stride);
-
- XORI_B_4VECS_SB(src7, src8, src9, src10, src7, src8, src9, src10, 128);
-
- ILVR_B_4VECS_SB(src6, src7, src8, src9, src7, src8, src9, src10,
- src76_r, src87_r, src98_r, src109_r);
-
- dst0_r = HEVC_FILT_8TAP_DPADD_H(src10_r, src32_r, src54_r, src76_r,
- filt0, filt1, filt2, filt3, const_vec);
-
- dst1_r = HEVC_FILT_8TAP_DPADD_H(src21_r, src43_r, src65_r, src87_r,
- filt0, filt1, filt2, filt3, const_vec);
-
- dst2_r = HEVC_FILT_8TAP_DPADD_H(src32_r, src54_r, src76_r, src98_r,
- filt0, filt1, filt2, filt3, const_vec);
-
- dst3_r = HEVC_FILT_8TAP_DPADD_H(src43_r, src65_r, src87_r, src109_r,
- filt0, filt1, filt2, filt3, const_vec);
-
- STORE_SH(dst0_r, dst);
- dst += dst_stride;
- STORE_SH(dst1_r, dst);
- dst += dst_stride;
- STORE_SH(dst2_r, dst);
- dst += dst_stride;
- STORE_SH(dst3_r, dst);
- dst += dst_stride;
+ XORI_B4_128_SB(src7, src8, src9, src10);
+ ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
+ src76_r, src87_r, src98_r, src109_r);
+
+ dst0_r = const_vec;
+ DPADD_SB4_SH(src10_r, src32_r, src54_r, src76_r,
+ filt0, filt1, filt2, filt3,
+ dst0_r, dst0_r, dst0_r, dst0_r);
+ dst1_r = const_vec;
+ DPADD_SB4_SH(src21_r, src43_r, src65_r, src87_r,
+ filt0, filt1, filt2, filt3,
+ dst1_r, dst1_r, dst1_r, dst1_r);
+ dst2_r = const_vec;
+ DPADD_SB4_SH(src32_r, src54_r, src76_r, src98_r,
+ filt0, filt1, filt2, filt3,
+ dst2_r, dst2_r, dst2_r, dst2_r);
+ dst3_r = const_vec;
+ DPADD_SB4_SH(src43_r, src65_r, src87_r, src109_r,
+ filt0, filt1, filt2, filt3,
+ dst3_r, dst3_r, dst3_r, dst3_r);
+
+ ST_SH4(dst0_r, dst1_r, dst2_r, dst3_r, dst, dst_stride);
+ dst += (4 * dst_stride);
src10_r = src54_r;
src32_r = src76_r;
src54_r = src98_r;
-
src21_r = src65_r;
src43_r = src87_r;
src65_r = src109_r;
-
src6 = src10;
}
}
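On the unsigned-to-signed trick behind XORI_B*_128_SB: the pixels are loaded as unsigned bytes, and xoring with 128 flips each byte's sign bit, mapping [0,255] onto [-128,127] so the signed dot-product instructions can be used. Since an HEVC 8-tap luma filter sums to 64, that -128 bias contributes exactly -(128 << 6) per output sample, which is presumably why every accumulator is preloaded with const_vec = 128 << 6. A sketch of the macro, inferred from the removed __msa_xori_b calls:

    #define XORI_B4_128_SB(in0, in1, in2, in3)         \
    {                                                  \
        in0 = (v16i8) __msa_xori_b((v16u8) in0, 128);  \
        in1 = (v16i8) __msa_xori_b((v16u8) in1, 128);  \
        in2 = (v16i8) __msa_xori_b((v16u8) in2, 128);  \
        in3 = (v16i8) __msa_xori_b((v16u8) in3, 128);  \
    }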
-static void hevc_vt_8t_12w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
- const int8_t * __restrict filter, int32_t height)
+static void hevc_vt_8t_12w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
{
int32_t loop_cnt;
- uint64_t out0, out1, out2, out3;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
@@ -1554,111 +976,85 @@ static void hevc_vt_8t_12w_msa(uint8_t * __restrict src, int32_t src_stride,
v16i8 src21_l, src43_l, src65_l, src87_l, src109_l;
v16i8 src2110, src4332, src6554, src8776, src10998;
v8i16 dst0_l, dst1_l;
- v8i16 filter_vec;
+ v8i16 filter_vec, const_vec;
v8i16 filt0, filt1, filt2, filt3;
- v8u16 const_vec;
src -= (3 * src_stride);
-
- const_vec = (v8u16) __msa_ldi_h(128);
+ const_vec = __msa_ldi_h(128);
const_vec <<= 6;
- filter_vec = LOAD_SH(filter);
- filt0 = __msa_splati_h(filter_vec, 0);
- filt1 = __msa_splati_h(filter_vec, 1);
- filt2 = __msa_splati_h(filter_vec, 2);
- filt3 = __msa_splati_h(filter_vec, 3);
+ filter_vec = LD_SH(filter);
+ SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
- LOAD_7VECS_SB(src, src_stride,
- src0, src1, src2, src3, src4, src5, src6);
+ LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
src += (7 * src_stride);
-
- XORI_B_7VECS_SB(src0, src1, src2, src3, src4, src5, src6,
- src0, src1, src2, src3, src4, src5, src6, 128);
-
- ILVR_B_6VECS_SB(src0, src2, src4, src1, src3, src5,
- src1, src3, src5, src2, src4, src6,
- src10_r, src32_r, src54_r, src21_r, src43_r, src65_r);
-
- ILVL_B_6VECS_SB(src0, src2, src4, src1, src3, src5,
- src1, src3, src5, src2, src4, src6,
- src10_l, src32_l, src54_l, src21_l, src43_l, src65_l);
-
- ILVR_D_3VECS_SB(src2110, src21_l, src10_l, src4332, src43_l, src32_l,
- src6554, src65_l, src54_l);
+ XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
+ ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1,
+ src10_r, src32_r, src54_r, src21_r);
+ ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
+ ILVL_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1,
+ src10_l, src32_l, src54_l, src21_l);
+ ILVL_B2_SB(src4, src3, src6, src5, src43_l, src65_l);
+ ILVR_D3_SB(src21_l, src10_l, src43_l, src32_l, src65_l, src54_l,
+ src2110, src4332, src6554);
for (loop_cnt = (height >> 2); loop_cnt--;) {
- LOAD_4VECS_SB(src, src_stride, src7, src8, src9, src10);
+ LD_SB4(src, src_stride, src7, src8, src9, src10);
src += (4 * src_stride);
-
- XORI_B_4VECS_SB(src7, src8, src9, src10, src7, src8, src9, src10, 128);
-
- ILVR_B_4VECS_SB(src6, src7, src8, src9, src7, src8, src9, src10,
- src76_r, src87_r, src98_r, src109_r);
-
- ILVL_B_4VECS_SB(src6, src7, src8, src9, src7, src8, src9, src10,
- src76_l, src87_l, src98_l, src109_l);
-
- ILVR_D_2VECS_SB(src8776, src87_l, src76_l, src10998, src109_l, src98_l);
-
- dst0_r = HEVC_FILT_8TAP_DPADD_H(src10_r, src32_r, src54_r, src76_r,
- filt0, filt1, filt2, filt3, const_vec);
-
- dst1_r = HEVC_FILT_8TAP_DPADD_H(src21_r, src43_r, src65_r, src87_r,
- filt0, filt1, filt2, filt3, const_vec);
-
- dst2_r = HEVC_FILT_8TAP_DPADD_H(src32_r, src54_r, src76_r, src98_r,
- filt0, filt1, filt2, filt3, const_vec);
-
- dst3_r = HEVC_FILT_8TAP_DPADD_H(src43_r, src65_r, src87_r, src109_r,
- filt0, filt1, filt2, filt3, const_vec);
-
- dst0_l = HEVC_FILT_8TAP_DPADD_H(src2110, src4332, src6554, src8776,
- filt0, filt1, filt2, filt3, const_vec);
-
- dst1_l = HEVC_FILT_8TAP_DPADD_H(src4332, src6554, src8776, src10998,
- filt0, filt1, filt2, filt3, const_vec);
-
- out0 = __msa_copy_u_d((v2i64) dst0_l, 0);
- out1 = __msa_copy_u_d((v2i64) dst0_l, 1);
- out2 = __msa_copy_u_d((v2i64) dst1_l, 0);
- out3 = __msa_copy_u_d((v2i64) dst1_l, 1);
-
- STORE_SH(dst0_r, dst);
- STORE_DWORD(dst + 8, out0);
- dst += dst_stride;
- STORE_SH(dst1_r, dst);
- STORE_DWORD(dst + 8, out1);
- dst += dst_stride;
-
- STORE_SH(dst2_r, dst);
- STORE_DWORD(dst + 8, out2);
- dst += dst_stride;
- STORE_SH(dst3_r, dst);
- STORE_DWORD(dst + 8, out3);
- dst += dst_stride;
+ XORI_B4_128_SB(src7, src8, src9, src10);
+ ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
+ src76_r, src87_r, src98_r, src109_r);
+ ILVL_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
+ src76_l, src87_l, src98_l, src109_l);
+ ILVR_D2_SB(src87_l, src76_l, src109_l, src98_l, src8776, src10998);
+
+ dst0_r = const_vec;
+ DPADD_SB4_SH(src10_r, src32_r, src54_r, src76_r,
+ filt0, filt1, filt2, filt3,
+ dst0_r, dst0_r, dst0_r, dst0_r);
+ dst1_r = const_vec;
+ DPADD_SB4_SH(src21_r, src43_r, src65_r, src87_r,
+ filt0, filt1, filt2, filt3,
+ dst1_r, dst1_r, dst1_r, dst1_r);
+ dst2_r = const_vec;
+ DPADD_SB4_SH(src32_r, src54_r, src76_r, src98_r,
+ filt0, filt1, filt2, filt3,
+ dst2_r, dst2_r, dst2_r, dst2_r);
+ dst3_r = const_vec;
+ DPADD_SB4_SH(src43_r, src65_r, src87_r, src109_r,
+ filt0, filt1, filt2, filt3,
+ dst3_r, dst3_r, dst3_r, dst3_r);
+ dst0_l = const_vec;
+ DPADD_SB4_SH(src2110, src4332, src6554, src8776,
+ filt0, filt1, filt2, filt3,
+ dst0_l, dst0_l, dst0_l, dst0_l);
+ dst1_l = const_vec;
+ DPADD_SB4_SH(src4332, src6554, src8776, src10998,
+ filt0, filt1, filt2, filt3,
+ dst1_l, dst1_l, dst1_l, dst1_l);
+
+ ST_SH4(dst0_r, dst1_r, dst2_r, dst3_r, dst, dst_stride);
+ ST8x4_UB(dst0_l, dst1_l, dst + 8, 2 * dst_stride);
+ dst += (4 * dst_stride);
src10_r = src54_r;
src32_r = src76_r;
src54_r = src98_r;
-
src21_r = src65_r;
src43_r = src87_r;
src65_r = src109_r;
-
src2110 = src6554;
src4332 = src8776;
src6554 = src10998;
-
src6 = src10;
}
}
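The 12-wide variant splits each output row in two: ST_SH4 covers columns 0-7, while the four leftover columns live in the interleaved *_l vectors and are written as four 8-byte chunks at dst + 8 by ST8x4_UB (byte stride again, hence 2 * dst_stride). A functional sketch of that store (hypothetical helper name):

    #include <stdint.h>
    #include <string.h>

    /* Stores two vectors as four 8-byte rows (four int16 samples each). */
    static inline void st8x4_ub_sketch(v8i16 in0, v8i16 in1,
                                       uint8_t *pdst, int32_t stride)
    {
        uint64_t out0 = __msa_copy_u_d((v2i64) in0, 0);
        uint64_t out1 = __msa_copy_u_d((v2i64) in0, 1);
        uint64_t out2 = __msa_copy_u_d((v2i64) in1, 0);
        uint64_t out3 = __msa_copy_u_d((v2i64) in1, 1);

        memcpy(pdst,              &out0, 8);
        memcpy(pdst + stride,     &out1, 8);
        memcpy(pdst + 2 * stride, &out2, 8);
        memcpy(pdst + 3 * stride, &out3, 8);
    }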
-static void hevc_vt_8t_16multx4mult_msa(uint8_t * __restrict src,
+static void hevc_vt_8t_16multx4mult_msa(uint8_t *src,
int32_t src_stride,
- int16_t * __restrict dst,
+ int16_t *dst,
int32_t dst_stride,
- const int8_t * __restrict filter,
+ const int8_t *filter,
int32_t height,
int32_t width)
{
@@ -1672,115 +1068,88 @@ static void hevc_vt_8t_16multx4mult_msa(uint8_t * __restrict src,
v16i8 src10_l, src32_l, src54_l, src76_l, src98_l;
v16i8 src21_l, src43_l, src65_l, src87_l, src109_l;
v8i16 dst0_l, dst1_l, dst2_l, dst3_l;
- v8i16 filter_vec;
+ v8i16 filter_vec, const_vec;
v8i16 filt0, filt1, filt2, filt3;
- v8u16 const_vec;
src -= (3 * src_stride);
-
- const_vec = (v8u16) __msa_ldi_h(128);
+ const_vec = __msa_ldi_h(128);
const_vec <<= 6;
- filter_vec = LOAD_SH(filter);
- filt0 = __msa_splati_h(filter_vec, 0);
- filt1 = __msa_splati_h(filter_vec, 1);
- filt2 = __msa_splati_h(filter_vec, 2);
- filt3 = __msa_splati_h(filter_vec, 3);
+ filter_vec = LD_SH(filter);
+ SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
for (cnt = width >> 4; cnt--;) {
src_tmp = src;
dst_tmp = dst;
- LOAD_7VECS_SB(src_tmp, src_stride,
- src0, src1, src2, src3, src4, src5, src6);
+ LD_SB7(src_tmp, src_stride, src0, src1, src2, src3, src4, src5, src6);
src_tmp += (7 * src_stride);
-
- XORI_B_7VECS_SB(src0, src1, src2, src3, src4, src5, src6,
- src0, src1, src2, src3, src4, src5, src6, 128);
-
- ILVR_B_6VECS_SB(src0, src2, src4, src1, src3, src5,
- src1, src3, src5, src2, src4, src6,
- src10_r, src32_r, src54_r, src21_r, src43_r, src65_r);
-
- ILVL_B_6VECS_SB(src0, src2, src4, src1, src3, src5,
- src1, src3, src5, src2, src4, src6,
- src10_l, src32_l, src54_l, src21_l, src43_l, src65_l);
+ XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
+ ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1,
+ src10_r, src32_r, src54_r, src21_r);
+ ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
+ ILVL_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1,
+ src10_l, src32_l, src54_l, src21_l);
+ ILVL_B2_SB(src4, src3, src6, src5, src43_l, src65_l);
for (loop_cnt = (height >> 2); loop_cnt--;) {
- LOAD_4VECS_SB(src_tmp, src_stride, src7, src8, src9, src10);
+ LD_SB4(src_tmp, src_stride, src7, src8, src9, src10);
src_tmp += (4 * src_stride);
-
- XORI_B_4VECS_SB(src7, src8, src9, src10,
- src7, src8, src9, src10, 128);
-
- ILVR_B_4VECS_SB(src6, src7, src8, src9, src7, src8, src9, src10,
- src76_r, src87_r, src98_r, src109_r);
-
- ILVL_B_4VECS_SB(src6, src7, src8, src9, src7, src8, src9, src10,
- src76_l, src87_l, src98_l, src109_l);
-
- dst0_r = HEVC_FILT_8TAP_DPADD_H(src10_r, src32_r, src54_r, src76_r,
- filt0, filt1, filt2, filt3,
- const_vec);
-
- dst1_r = HEVC_FILT_8TAP_DPADD_H(src21_r, src43_r, src65_r, src87_r,
- filt0, filt1, filt2, filt3,
- const_vec);
-
- dst2_r = HEVC_FILT_8TAP_DPADD_H(src32_r, src54_r, src76_r, src98_r,
- filt0, filt1, filt2, filt3,
- const_vec);
-
- dst3_r = HEVC_FILT_8TAP_DPADD_H(src43_r, src65_r, src87_r, src109_r,
- filt0, filt1, filt2, filt3,
- const_vec);
-
- dst0_l = HEVC_FILT_8TAP_DPADD_H(src10_l, src32_l, src54_l, src76_l,
- filt0, filt1, filt2, filt3,
- const_vec);
-
- dst1_l = HEVC_FILT_8TAP_DPADD_H(src21_l, src43_l, src65_l, src87_l,
- filt0, filt1, filt2, filt3,
- const_vec);
-
- dst2_l = HEVC_FILT_8TAP_DPADD_H(src32_l, src54_l, src76_l, src98_l,
- filt0, filt1, filt2, filt3,
- const_vec);
-
- dst3_l = HEVC_FILT_8TAP_DPADD_H(src43_l, src65_l, src87_l, src109_l,
- filt0, filt1, filt2, filt3,
- const_vec);
-
- STORE_SH(dst0_r, dst_tmp);
- STORE_SH(dst0_l, dst_tmp + 8);
- dst_tmp += dst_stride;
- STORE_SH(dst1_r, dst_tmp);
- STORE_SH(dst1_l, dst_tmp + 8);
- dst_tmp += dst_stride;
-
- STORE_SH(dst2_r, dst_tmp);
- STORE_SH(dst2_l, dst_tmp + 8);
- dst_tmp += dst_stride;
- STORE_SH(dst3_r, dst_tmp);
- STORE_SH(dst3_l, dst_tmp + 8);
- dst_tmp += dst_stride;
+ XORI_B4_128_SB(src7, src8, src9, src10);
+ ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
+ src76_r, src87_r, src98_r, src109_r);
+ ILVL_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
+ src76_l, src87_l, src98_l, src109_l);
+
+ dst0_r = const_vec;
+ DPADD_SB4_SH(src10_r, src32_r, src54_r, src76_r,
+ filt0, filt1, filt2, filt3,
+ dst0_r, dst0_r, dst0_r, dst0_r);
+ dst1_r = const_vec;
+ DPADD_SB4_SH(src21_r, src43_r, src65_r, src87_r,
+ filt0, filt1, filt2, filt3,
+ dst1_r, dst1_r, dst1_r, dst1_r);
+ dst2_r = const_vec;
+ DPADD_SB4_SH(src32_r, src54_r, src76_r, src98_r,
+ filt0, filt1, filt2, filt3,
+ dst2_r, dst2_r, dst2_r, dst2_r);
+ dst3_r = const_vec;
+ DPADD_SB4_SH(src43_r, src65_r, src87_r, src109_r,
+ filt0, filt1, filt2, filt3,
+ dst3_r, dst3_r, dst3_r, dst3_r);
+ dst0_l = const_vec;
+ DPADD_SB4_SH(src10_l, src32_l, src54_l, src76_l,
+ filt0, filt1, filt2, filt3,
+ dst0_l, dst0_l, dst0_l, dst0_l);
+ dst1_l = const_vec;
+ DPADD_SB4_SH(src21_l, src43_l, src65_l, src87_l,
+ filt0, filt1, filt2, filt3,
+ dst1_l, dst1_l, dst1_l, dst1_l);
+ dst2_l = const_vec;
+ DPADD_SB4_SH(src32_l, src54_l, src76_l, src98_l,
+ filt0, filt1, filt2, filt3,
+ dst2_l, dst2_l, dst2_l, dst2_l);
+ dst3_l = const_vec;
+ DPADD_SB4_SH(src43_l, src65_l, src87_l, src109_l,
+ filt0, filt1, filt2, filt3,
+ dst3_l, dst3_l, dst3_l, dst3_l);
+
+ ST_SH4(dst0_r, dst1_r, dst2_r, dst3_r, dst_tmp, dst_stride);
+ ST_SH4(dst0_l, dst1_l, dst2_l, dst3_l, dst_tmp + 8, dst_stride);
+ dst_tmp += (4 * dst_stride);
src10_r = src54_r;
src32_r = src76_r;
src54_r = src98_r;
-
src21_r = src65_r;
src43_r = src87_r;
src65_r = src109_r;
-
src10_l = src54_l;
src32_l = src76_l;
src54_l = src98_l;
-
src21_l = src65_l;
src43_l = src87_l;
src65_l = src109_l;
-
src6 = src10;
}
@@ -1789,214 +1158,166 @@ static void hevc_vt_8t_16multx4mult_msa(uint8_t * __restrict src,
}
}
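For anyone cross-checking the SIMD against the spec: the arithmetic all of these vertical kernels implement is a plain 8-tap FIR down each column, written out as raw 16-bit intermediates with no rounding at this stage (the bias introduced by the xor-128 trick cancels, as noted above). A scalar model of that, a sketch rather than anything in this patch:

    /* Scalar reference for the 8-bit vertical 8-tap MC intermediate. */
    static void hevc_vt_8t_scalar_model(const uint8_t *src, int src_stride,
                                        int16_t *dst, int dst_stride,
                                        const int8_t *filter, int height,
                                        int width)
    {
        src -= 3 * src_stride;   /* same pre-adjustment as the kernels */

        for (int y = 0; y < height; y++) {
            for (int x = 0; x < width; x++) {
                int sum = 0;

                for (int k = 0; k < 8; k++)
                    sum += filter[k] * src[(y + k) * src_stride + x];
                dst[y * dst_stride + x] = (int16_t) sum;
            }
        }
    }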
-static void hevc_vt_8t_16w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
- const int8_t * __restrict filter, int32_t height)
+static void hevc_vt_8t_16w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
{
hevc_vt_8t_16multx4mult_msa(src, src_stride, dst, dst_stride,
filter, height, 16);
}
-static void hevc_vt_8t_24w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
- const int8_t * __restrict filter, int32_t height)
+static void hevc_vt_8t_24w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
{
hevc_vt_8t_16multx4mult_msa(src, src_stride, dst, dst_stride,
filter, height, 16);
-
hevc_vt_8t_8w_msa(src + 16, src_stride, dst + 16, dst_stride,
filter, height);
}
-static void hevc_vt_8t_32w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
- const int8_t * __restrict filter, int32_t height)
+static void hevc_vt_8t_32w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
{
hevc_vt_8t_16multx4mult_msa(src, src_stride, dst, dst_stride,
filter, height, 32);
}
-static void hevc_vt_8t_48w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
- const int8_t * __restrict filter, int32_t height)
+static void hevc_vt_8t_48w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
{
hevc_vt_8t_16multx4mult_msa(src, src_stride, dst, dst_stride,
filter, height, 48);
}
-static void hevc_vt_8t_64w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
- const int8_t * __restrict filter, int32_t height)
+static void hevc_vt_8t_64w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
{
hevc_vt_8t_16multx4mult_msa(src, src_stride, dst, dst_stride,
filter, height, 64);
}
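All of the width wrappers above funnel into hevc_vt_8t_16multx4mult_msa, which walks the block in 16-column tiles; 24 is the only width that is not a multiple of 16, so it is handled as a 16-wide pass plus a trailing 8-wide strip. A hypothetical dispatcher makes the decomposition explicit (sketch only; the actual code wires each width to its own wrapper):

    static void hevc_vt_8t_dispatch_sketch(uint8_t *src, int32_t src_stride,
                                           int16_t *dst, int32_t dst_stride,
                                           const int8_t *filter,
                                           int32_t height, int32_t width)
    {
        int32_t w16 = width & ~15;   /* largest multiple of 16 */

        if (w16)
            hevc_vt_8t_16multx4mult_msa(src, src_stride, dst, dst_stride,
                                        filter, height, w16);
        if (width & 8)               /* e.g. width == 24 */
            hevc_vt_8t_8w_msa(src + w16, src_stride, dst + w16, dst_stride,
                              filter, height);
    }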
-static void hevc_hv_8t_4w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
- const int8_t * __restrict filter_x,
- const int8_t * __restrict filter_y,
+static void hevc_hv_8t_4w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
+ const int8_t *filter_x, const int8_t *filter_y,
int32_t height)
{
uint32_t loop_cnt;
- uint64_t out0, out1;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
- v8i16 filt0, filt1, filt2, filt3, filter_vec;
+ v8i16 filt0, filt1, filt2, filt3;
v4i32 filt_h0, filt_h1, filt_h2, filt_h3;
v16i8 mask1, mask2, mask3;
- v8u16 const_vec;
+ v8i16 filter_vec, const_vec;
v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
v8i16 dst30, dst41, dst52, dst63, dst66, dst87;
v4i32 dst0_r, dst1_r;
v8i16 dst10_r, dst32_r, dst54_r, dst76_r;
v8i16 dst21_r, dst43_r, dst65_r, dst87_r;
- v16i8 tmp;
v16i8 mask0 = {
0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20
};
v8u16 mask4 = { 0, 4, 1, 5, 2, 6, 3, 7 };
src -= ((3 * src_stride) + 3);
+ filter_vec = LD_SH(filter_x);
+ SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
- filter_vec = LOAD_SH(filter_x);
- filt0 = __msa_splati_h(filter_vec, 0);
- filt1 = __msa_splati_h(filter_vec, 1);
- filt2 = __msa_splati_h(filter_vec, 2);
- filt3 = __msa_splati_h(filter_vec, 3);
-
- filter_vec = LOAD_SH(filter_y);
- tmp = __msa_clti_s_b((v16i8) filter_vec, 0);
- filter_vec = (v8i16) __msa_ilvr_b(tmp, (v16i8) filter_vec);
+ filter_vec = LD_SH(filter_y);
+ vec0 = __msa_clti_s_b((v16i8) filter_vec, 0);
+ filter_vec = (v8i16) __msa_ilvr_b(vec0, (v16i8) filter_vec);
- filt_h0 = __msa_splati_w((v4i32) filter_vec, 0);
- filt_h1 = __msa_splati_w((v4i32) filter_vec, 1);
- filt_h2 = __msa_splati_w((v4i32) filter_vec, 2);
- filt_h3 = __msa_splati_w((v4i32) filter_vec, 3);
+ SPLATI_W4_SW(filter_vec, filt_h0, filt_h1, filt_h2, filt_h3);
mask1 = mask0 + 2;
mask2 = mask0 + 4;
mask3 = mask0 + 6;
- const_vec = (v8u16) __msa_ldi_h(128);
+ const_vec = __msa_ldi_h(128);
const_vec <<= 6;
- LOAD_7VECS_SB(src, src_stride,
- src0, src1, src2, src3, src4, src5, src6);
+ LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
src += (7 * src_stride);
-
- XORI_B_7VECS_SB(src0, src1, src2, src3, src4, src5, src6,
- src0, src1, src2, src3, src4, src5, src6, 128);
-
- /* Row 0 Row 1 Row 2 Row 3 */
- vec0 = __msa_vshf_b(mask0, src3, src0);
- vec1 = __msa_vshf_b(mask1, src3, src0);
- vec2 = __msa_vshf_b(mask2, src3, src0);
- vec3 = __msa_vshf_b(mask3, src3, src0);
-
- vec4 = __msa_vshf_b(mask0, src4, src1);
- vec5 = __msa_vshf_b(mask1, src4, src1);
- vec6 = __msa_vshf_b(mask2, src4, src1);
- vec7 = __msa_vshf_b(mask3, src4, src1);
-
- vec8 = __msa_vshf_b(mask0, src5, src2);
- vec9 = __msa_vshf_b(mask1, src5, src2);
- vec10 = __msa_vshf_b(mask2, src5, src2);
- vec11 = __msa_vshf_b(mask3, src5, src2);
-
- vec12 = __msa_vshf_b(mask0, src6, src3);
- vec13 = __msa_vshf_b(mask1, src6, src3);
- vec14 = __msa_vshf_b(mask2, src6, src3);
- vec15 = __msa_vshf_b(mask3, src6, src3);
-
- dst30 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- dst41 = HEVC_FILT_8TAP_DPADD_H(vec4, vec5, vec6, vec7,
- filt0, filt1, filt2, filt3, const_vec);
-
- dst52 = HEVC_FILT_8TAP_DPADD_H(vec8, vec9, vec10, vec11,
- filt0, filt1, filt2, filt3, const_vec);
-
- dst63 = HEVC_FILT_8TAP_DPADD_H(vec12, vec13, vec14, vec15,
- filt0, filt1, filt2, filt3, const_vec);
-
- dst10_r = __msa_ilvr_h(dst41, dst30);
- dst21_r = __msa_ilvr_h(dst52, dst41);
- dst32_r = __msa_ilvr_h(dst63, dst52);
-
+ XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
+
+ VSHF_B4_SB(src0, src3, mask0, mask1, mask2, mask3, vec0, vec1, vec2, vec3);
+ VSHF_B4_SB(src1, src4, mask0, mask1, mask2, mask3, vec4, vec5, vec6, vec7);
+ VSHF_B4_SB(src2, src5, mask0, mask1, mask2, mask3,
+ vec8, vec9, vec10, vec11);
+ VSHF_B4_SB(src3, src6, mask0, mask1, mask2, mask3,
+ vec12, vec13, vec14, vec15);
+ dst30 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst30, dst30, dst30, dst30);
+ dst41 = const_vec;
+ DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2, filt3,
+ dst41, dst41, dst41, dst41);
+ dst52 = const_vec;
+ DPADD_SB4_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2, filt3,
+ dst52, dst52, dst52, dst52);
+ dst63 = const_vec;
+ DPADD_SB4_SH(vec12, vec13, vec14, vec15, filt0, filt1, filt2, filt3,
+ dst63, dst63, dst63, dst63);
+
+ ILVR_H3_SH(dst41, dst30, dst52, dst41, dst63, dst52,
+ dst10_r, dst21_r, dst32_r);
dst43_r = __msa_ilvl_h(dst41, dst30);
dst54_r = __msa_ilvl_h(dst52, dst41);
dst65_r = __msa_ilvl_h(dst63, dst52);
-
dst66 = (v8i16) __msa_splati_d((v2i64) dst63, 1);
for (loop_cnt = height >> 1; loop_cnt--;) {
- LOAD_2VECS_SB(src, src_stride, src7, src8);
+ LD_SB2(src, src_stride, src7, src8);
src += (2 * src_stride);
+ XORI_B2_128_SB(src7, src8);
- src7 = (v16i8) __msa_xori_b((v16u8) src7, 128);
- src8 = (v16i8) __msa_xori_b((v16u8) src8, 128);
-
- vec0 = __msa_vshf_b(mask0, src8, src7);
- vec1 = __msa_vshf_b(mask1, src8, src7);
- vec2 = __msa_vshf_b(mask2, src8, src7);
- vec3 = __msa_vshf_b(mask3, src8, src7);
-
- dst87 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
+ VSHF_B4_SB(src7, src8, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst87 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst87, dst87, dst87, dst87);
dst76_r = __msa_ilvr_h(dst87, dst66);
-
- dst0_r = HEVC_FILT_8TAP_DPADD_W(dst10_r, dst32_r, dst54_r, dst76_r,
- filt_h0, filt_h1, filt_h2, filt_h3);
-
+ dst0_r = HEVC_FILT_8TAP(dst10_r, dst32_r, dst54_r, dst76_r,
+ filt_h0, filt_h1, filt_h2, filt_h3);
dst87_r = __msa_vshf_h((v8i16) mask4, dst87, dst87);
-
- dst1_r = HEVC_FILT_8TAP_DPADD_W(dst21_r, dst43_r, dst65_r, dst87_r,
- filt_h0, filt_h1, filt_h2, filt_h3);
-
+ dst1_r = HEVC_FILT_8TAP(dst21_r, dst43_r, dst65_r, dst87_r,
+ filt_h0, filt_h1, filt_h2, filt_h3);
dst0_r >>= 6;
dst1_r >>= 6;
dst0_r = (v4i32) __msa_pckev_h((v8i16) dst1_r, (v8i16) dst0_r);
-
- out0 = __msa_copy_u_d((v2i64) dst0_r, 0);
- out1 = __msa_copy_u_d((v2i64) dst0_r, 1);
-
- STORE_DWORD(dst, out0);
- dst += dst_stride;
- STORE_DWORD(dst, out1);
- dst += dst_stride;
+ ST8x2_UB(dst0_r, dst, (2 * dst_stride));
+ dst += (2 * dst_stride);
dst10_r = dst32_r;
dst32_r = dst54_r;
dst54_r = dst76_r;
-
dst21_r = dst43_r;
dst43_r = dst65_r;
dst65_r = dst87_r;
-
dst66 = (v8i16) __msa_splati_d((v2i64) dst87, 1);
}
}
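In the 4-wide HV kernel above, the horizontal pass packs two 4-sample rows per vector (dst30 holds rows 3 and 0, dst41 rows 4 and 1, and so on), so the right/left halfword interleaves reconstruct consecutive-row pairs for the vertical HEVC_FILT_8TAP pass; the 32-bit results are then shifted down by 6 and packed back to 16 bits before the 8-byte stores. The ILVR_H* helpers presumably expand like this (sketch, inferred from the removed __msa_ilvr_h calls):

    #define ILVR_H2_SH(in0, in1, in2, in3, out0, out1)    \
    {                                                     \
        out0 = __msa_ilvr_h((v8i16) in0, (v8i16) in1);    \
        out1 = __msa_ilvr_h((v8i16) in2, (v8i16) in3);    \
    }

    #define ILVR_H3_SH(in0, in1, in2, in3, in4, in5,      \
                       out0, out1, out2)                  \
    {                                                     \
        ILVR_H2_SH(in0, in1, in2, in3, out0, out1);       \
        out2 = __msa_ilvr_h((v8i16) in4, (v8i16) in5);    \
    }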
-static void hevc_hv_8t_8multx2mult_msa(uint8_t * __restrict src,
+static void hevc_hv_8t_8multx2mult_msa(uint8_t *src,
int32_t src_stride,
- int16_t * __restrict dst,
+ int16_t *dst,
int32_t dst_stride,
- const int8_t * __restrict filter_x,
- const int8_t * __restrict filter_y,
+ const int8_t *filter_x,
+ const int8_t *filter_y,
int32_t height, int32_t width)
{
uint32_t loop_cnt, cnt;
uint8_t *src_tmp;
int16_t *dst_tmp;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
- v8i16 filt0, filt1, filt2, filt3, filter_vec;
+ v8i16 filt0, filt1, filt2, filt3;
v4i32 filt_h0, filt_h1, filt_h2, filt_h3;
v16i8 mask1, mask2, mask3;
- v8u16 const_vec;
+ v8i16 filter_vec, const_vec;
v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8;
@@ -2005,188 +1326,130 @@ static void hevc_hv_8t_8multx2mult_msa(uint8_t * __restrict src,
v8i16 dst10_l, dst32_l, dst54_l, dst76_l;
v8i16 dst21_r, dst43_r, dst65_r, dst87_r;
v8i16 dst21_l, dst43_l, dst65_l, dst87_l;
- v16i8 tmp;
v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
src -= ((3 * src_stride) + 3);
+ filter_vec = LD_SH(filter_x);
+ SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
- filter_vec = LOAD_SH(filter_x);
- filt0 = __msa_splati_h(filter_vec, 0);
- filt1 = __msa_splati_h(filter_vec, 1);
- filt2 = __msa_splati_h(filter_vec, 2);
- filt3 = __msa_splati_h(filter_vec, 3);
-
- filter_vec = LOAD_SH(filter_y);
- tmp = __msa_clti_s_b((v16i8) filter_vec, 0);
- filter_vec = (v8i16) __msa_ilvr_b(tmp, (v16i8) filter_vec);
+ filter_vec = LD_SH(filter_y);
+ vec0 = __msa_clti_s_b((v16i8) filter_vec, 0);
+ filter_vec = (v8i16) __msa_ilvr_b(vec0, (v16i8) filter_vec);
- filt_h0 = __msa_splati_w((v4i32) filter_vec, 0);
- filt_h1 = __msa_splati_w((v4i32) filter_vec, 1);
- filt_h2 = __msa_splati_w((v4i32) filter_vec, 2);
- filt_h3 = __msa_splati_w((v4i32) filter_vec, 3);
+ SPLATI_W4_SW(filter_vec, filt_h0, filt_h1, filt_h2, filt_h3);
mask1 = mask0 + 2;
mask2 = mask0 + 4;
mask3 = mask0 + 6;
- const_vec = (v8u16) __msa_ldi_h(128);
+ const_vec = __msa_ldi_h(128);
const_vec <<= 6;
for (cnt = width >> 3; cnt--;) {
src_tmp = src;
dst_tmp = dst;
- LOAD_7VECS_SB(src_tmp, src_stride,
- src0, src1, src2, src3, src4, src5, src6);
+ LD_SB7(src_tmp, src_stride, src0, src1, src2, src3, src4, src5, src6);
src_tmp += (7 * src_stride);
-
- XORI_B_7VECS_SB(src0, src1, src2, src3, src4, src5, src6,
- src0, src1, src2, src3, src4, src5, src6, 128);
-
- /* Row 0 Row 1 Row 2 Row 3 */
- vec0 = __msa_vshf_b(mask0, src0, src0);
- vec1 = __msa_vshf_b(mask1, src0, src0);
- vec2 = __msa_vshf_b(mask2, src0, src0);
- vec3 = __msa_vshf_b(mask3, src0, src0);
-
- vec4 = __msa_vshf_b(mask0, src1, src1);
- vec5 = __msa_vshf_b(mask1, src1, src1);
- vec6 = __msa_vshf_b(mask2, src1, src1);
- vec7 = __msa_vshf_b(mask3, src1, src1);
-
- vec8 = __msa_vshf_b(mask0, src2, src2);
- vec9 = __msa_vshf_b(mask1, src2, src2);
- vec10 = __msa_vshf_b(mask2, src2, src2);
- vec11 = __msa_vshf_b(mask3, src2, src2);
-
- vec12 = __msa_vshf_b(mask0, src3, src3);
- vec13 = __msa_vshf_b(mask1, src3, src3);
- vec14 = __msa_vshf_b(mask2, src3, src3);
- vec15 = __msa_vshf_b(mask3, src3, src3);
-
- dst0 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- dst1 = HEVC_FILT_8TAP_DPADD_H(vec4, vec5, vec6, vec7,
- filt0, filt1, filt2, filt3, const_vec);
-
- dst2 = HEVC_FILT_8TAP_DPADD_H(vec8, vec9, vec10, vec11,
- filt0, filt1, filt2, filt3, const_vec);
-
- dst3 = HEVC_FILT_8TAP_DPADD_H(vec12, vec13, vec14, vec15,
- filt0, filt1, filt2, filt3, const_vec);
-
- /* Row 4 Row 5 Row 6 */
- vec0 = __msa_vshf_b(mask0, src4, src4);
- vec1 = __msa_vshf_b(mask1, src4, src4);
- vec2 = __msa_vshf_b(mask2, src4, src4);
- vec3 = __msa_vshf_b(mask3, src4, src4);
-
- vec4 = __msa_vshf_b(mask0, src5, src5);
- vec5 = __msa_vshf_b(mask1, src5, src5);
- vec6 = __msa_vshf_b(mask2, src5, src5);
- vec7 = __msa_vshf_b(mask3, src5, src5);
-
- vec8 = __msa_vshf_b(mask0, src6, src6);
- vec9 = __msa_vshf_b(mask1, src6, src6);
- vec10 = __msa_vshf_b(mask2, src6, src6);
- vec11 = __msa_vshf_b(mask3, src6, src6);
-
- dst4 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3, const_vec);
-
- dst5 = HEVC_FILT_8TAP_DPADD_H(vec4, vec5, vec6, vec7,
- filt0, filt1, filt2, filt3, const_vec);
-
- dst6 = HEVC_FILT_8TAP_DPADD_H(vec8, vec9, vec10, vec11,
- filt0, filt1, filt2, filt3, const_vec);
-
- ILVR_H_6VECS_SH(dst0, dst2, dst4, dst1, dst3, dst5,
- dst1, dst3, dst5, dst2, dst4, dst6,
- dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r);
-
- ILVL_H_6VECS_SH(dst0, dst2, dst4, dst1, dst3, dst5,
- dst1, dst3, dst5, dst2, dst4, dst6,
- dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l);
+ XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
+
+ /* row 0 row 1 row 2 row 3 */
+ VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
+ vec4, vec5, vec6, vec7);
+ VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3,
+ vec8, vec9, vec10, vec11);
+ VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
+ vec12, vec13, vec14, vec15);
+ dst0 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst0, dst0, dst0, dst0);
+ dst1 = const_vec;
+ DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2, filt3,
+ dst1, dst1, dst1, dst1);
+ dst2 = const_vec;
+ DPADD_SB4_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2, filt3,
+ dst2, dst2, dst2, dst2);
+ dst3 = const_vec;
+ DPADD_SB4_SH(vec12, vec13, vec14, vec15, filt0, filt1, filt2, filt3,
+ dst3, dst3, dst3, dst3);
+
+ /* row 4 row 5 row 6 */
+ VSHF_B4_SB(src4, src4, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ VSHF_B4_SB(src5, src5, mask0, mask1, mask2, mask3,
+ vec4, vec5, vec6, vec7);
+ VSHF_B4_SB(src6, src6, mask0, mask1, mask2, mask3,
+ vec8, vec9, vec10, vec11);
+ dst4 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst4, dst4, dst4, dst4);
+ dst5 = const_vec;
+ DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2, filt3,
+ dst5, dst5, dst5, dst5);
+ dst6 = const_vec;
+ DPADD_SB4_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2, filt3,
+ dst6, dst6, dst6, dst6);
+
+ ILVR_H4_SH(dst1, dst0, dst3, dst2, dst5, dst4, dst2, dst1,
+ dst10_r, dst32_r, dst54_r, dst21_r);
+ ILVR_H2_SH(dst4, dst3, dst6, dst5, dst43_r, dst65_r);
+ ILVL_H4_SH(dst1, dst0, dst3, dst2, dst5, dst4, dst2, dst1,
+ dst10_l, dst32_l, dst54_l, dst21_l);
+ ILVL_H2_SH(dst4, dst3, dst6, dst5, dst43_l, dst65_l);
for (loop_cnt = height >> 1; loop_cnt--;) {
- src7 = LOAD_SB(src_tmp);
- src_tmp += src_stride;
-
- src7 = (v16i8) __msa_xori_b((v16u8) src7, 128);
-
- vec0 = __msa_vshf_b(mask0, src7, src7);
- vec1 = __msa_vshf_b(mask1, src7, src7);
- vec2 = __msa_vshf_b(mask2, src7, src7);
- vec3 = __msa_vshf_b(mask3, src7, src7);
-
- dst7 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3,
- const_vec);
-
- dst76_r = __msa_ilvr_h(dst7, dst6);
- dst76_l = __msa_ilvl_h(dst7, dst6);
-
- dst0_r = HEVC_FILT_8TAP_DPADD_W(dst10_r, dst32_r, dst54_r, dst76_r,
- filt_h0, filt_h1, filt_h2, filt_h3);
-
- dst0_l = HEVC_FILT_8TAP_DPADD_W(dst10_l, dst32_l, dst54_l, dst76_l,
- filt_h0, filt_h1, filt_h2, filt_h3);
-
+ LD_SB2(src_tmp, src_stride, src7, src8);
+ XORI_B2_128_SB(src7, src8);
+ src_tmp += 2 * src_stride;
+
+ VSHF_B4_SB(src7, src7, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst7 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst7, dst7, dst7, dst7);
+
+ ILVRL_H2_SH(dst7, dst6, dst76_r, dst76_l);
+ dst0_r = HEVC_FILT_8TAP(dst10_r, dst32_r, dst54_r, dst76_r,
+ filt_h0, filt_h1, filt_h2, filt_h3);
+ dst0_l = HEVC_FILT_8TAP(dst10_l, dst32_l, dst54_l, dst76_l,
+ filt_h0, filt_h1, filt_h2, filt_h3);
dst0_r >>= 6;
dst0_l >>= 6;
dst0_r = (v4i32) __msa_pckev_h((v8i16) dst0_l, (v8i16) dst0_r);
-
- STORE_SW(dst0_r, dst_tmp);
+ ST_SW(dst0_r, dst_tmp);
dst_tmp += dst_stride;
- /* Next row */
- src8 = LOAD_SB(src_tmp);
- src_tmp += src_stride;
-
- src8 = (v16i8) __msa_xori_b((v16u8) src8, 128);
-
- vec0 = __msa_vshf_b(mask0, src8, src8);
- vec1 = __msa_vshf_b(mask1, src8, src8);
- vec2 = __msa_vshf_b(mask2, src8, src8);
- vec3 = __msa_vshf_b(mask3, src8, src8);
-
- dst8 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
- filt0, filt1, filt2, filt3,
- const_vec);
-
- dst87_r = __msa_ilvr_h(dst8, dst7);
- dst87_l = __msa_ilvl_h(dst8, dst7);
+ VSHF_B4_SB(src8, src8, mask0, mask1, mask2, mask3,
+ vec0, vec1, vec2, vec3);
+ dst8 = const_vec;
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
+ dst8, dst8, dst8, dst8);
+ ILVRL_H2_SH(dst8, dst7, dst87_r, dst87_l);
dst6 = dst8;
-
- dst0_r = HEVC_FILT_8TAP_DPADD_W(dst21_r, dst43_r, dst65_r, dst87_r,
- filt_h0, filt_h1, filt_h2, filt_h3);
-
- dst0_l = HEVC_FILT_8TAP_DPADD_W(dst21_l, dst43_l, dst65_l, dst87_l,
- filt_h0, filt_h1, filt_h2, filt_h3);
-
+ dst0_r = HEVC_FILT_8TAP(dst21_r, dst43_r, dst65_r, dst87_r,
+ filt_h0, filt_h1, filt_h2, filt_h3);
+ dst0_l = HEVC_FILT_8TAP(dst21_l, dst43_l, dst65_l, dst87_l,
+ filt_h0, filt_h1, filt_h2, filt_h3);
dst0_r >>= 6;
dst0_l >>= 6;
dst0_r = (v4i32) __msa_pckev_h((v8i16) dst0_l, (v8i16) dst0_r);
-
- STORE_SW(dst0_r, dst_tmp);
+ ST_SW(dst0_r, dst_tmp);
dst_tmp += dst_stride;
dst10_r = dst32_r;
dst32_r = dst54_r;
dst54_r = dst76_r;
-
dst10_l = dst32_l;
dst32_l = dst54_l;
dst54_l = dst76_l;
-
dst21_r = dst43_r;
dst43_r = dst65_r;
dst65_r = dst87_r;
-
dst21_l = dst43_l;
dst43_l = dst65_l;
dst65_l = dst87_l;
@@ -2197,20 +1460,18 @@ static void hevc_hv_8t_8multx2mult_msa(uint8_t * __restrict src,
}
}
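The 8-wide HV kernel does the same dance on full rows: each new row's 16-bit horizontal result is interleaved against the previous one with ILVRL_H2_SH, a combined right/left interleave replacing the separate __msa_ilvr_h/__msa_ilvl_h pair in the old code. A sketch of the presumed expansion:

    #define ILVRL_H2_SH(in0, in1, out0, out1)             \
    {                                                     \
        out0 = __msa_ilvr_h((v8i16) in0, (v8i16) in1);    \
        out1 = __msa_ilvl_h((v8i16) in0, (v8i16) in1);    \
    }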
-static void hevc_hv_8t_8w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
- const int8_t * __restrict filter_x,
- const int8_t * __restrict filter_y,
+static void hevc_hv_8t_8w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
+ const int8_t *filter_x, const int8_t *filter_y,
int32_t height)
{
hevc_hv_8t_8multx2mult_msa(src, src_stride, dst, dst_stride,
filter_x, filter_y, height, 8);
}
-static void hevc_hv_8t_12w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
- const int8_t * __restrict filter_x,
- const int8_t * __restrict filter_y,
+static void hevc_hv_8t_12w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
+ const int8_t *filter_x, const int8_t *filter_y,
int32_t height)
{
hevc_hv_8t_8multx2mult_msa(src, src_stride, dst, dst_stride,
@@ -2220,50 +1481,45 @@ static void hevc_hv_8t_12w_msa(uint8_t * __restrict src, int32_t src_stride,
filter_x, filter_y, height);
}
-static void hevc_hv_8t_16w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
- const int8_t * __restrict filter_x,
- const int8_t * __restrict filter_y,
+static void hevc_hv_8t_16w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
+ const int8_t *filter_x, const int8_t *filter_y,
int32_t height)
{
hevc_hv_8t_8multx2mult_msa(src, src_stride, dst, dst_stride,
filter_x, filter_y, height, 16);
}
-static void hevc_hv_8t_24w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
- const int8_t * __restrict filter_x,
- const int8_t * __restrict filter_y,
+static void hevc_hv_8t_24w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
+ const int8_t *filter_x, const int8_t *filter_y,
int32_t height)
{
hevc_hv_8t_8multx2mult_msa(src, src_stride, dst, dst_stride,
filter_x, filter_y, height, 24);
}
-static void hevc_hv_8t_32w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
- const int8_t * __restrict filter_x,
- const int8_t * __restrict filter_y,
+static void hevc_hv_8t_32w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
+ const int8_t *filter_x, const int8_t *filter_y,
int32_t height)
{
hevc_hv_8t_8multx2mult_msa(src, src_stride, dst, dst_stride,
filter_x, filter_y, height, 32);
}
-static void hevc_hv_8t_48w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
- const int8_t * __restrict filter_x,
- const int8_t * __restrict filter_y,
+static void hevc_hv_8t_48w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
+ const int8_t *filter_x, const int8_t *filter_y,
int32_t height)
{
hevc_hv_8t_8multx2mult_msa(src, src_stride, dst, dst_stride,
filter_x, filter_y, height, 48);
}
-static void hevc_hv_8t_64w_msa(uint8_t * __restrict src, int32_t src_stride,
- int16_t * __restrict dst, int32_t dst_stride,
- const int8_t * __restrict filter_x,
- const int8_t * __restrict filter_y,
+static void hevc_hv_8t_64w_msa(uint8_t *src, int32_t src_stride,
+ int16_t *dst, int32_t dst_stride,
+ const int8_t *filter_x, const int8_t *filter_y,
int32_t height)
{
hevc_hv_8t_8multx2mult_msa(src, src_stride, dst, dst_stride,
filter_x, filter_y, height, 64);
}
--
2.3.2