[FFmpeg-devel] [PATCH] avcodec/mips: MSA (MIPS-SIMD-Arch) optimizations for VP8 functions
shivraj.patil at imgtec.com
Mon Jul 27 12:01:13 CEST 2015
From: Shivraj Patil <shivraj.patil at imgtec.com>
Signed-off-by: Shivraj Patil <shivraj.patil at imgtec.com>
---
 libavcodec/mips/Makefile           |    2 +
 libavcodec/mips/vp8dsp_init_mips.c |  113 ++
 libavcodec/mips/vp8dsp_mips.h      |  172 ++
 libavcodec/mips/vp8dsp_msa.c       | 3408 ++++++++++++++++++++++++++++++++++++
 libavcodec/vp8dsp.c                |    2 +
 libavcodec/vp8dsp.h                |    1 +
 6 files changed, 3698 insertions(+)
create mode 100644 libavcodec/mips/vp8dsp_init_mips.c
create mode 100644 libavcodec/mips/vp8dsp_mips.h
create mode 100644 libavcodec/mips/vp8dsp_msa.c
diff --git a/libavcodec/mips/Makefile b/libavcodec/mips/Makefile
index f543448..5ca8879 100644
--- a/libavcodec/mips/Makefile
+++ b/libavcodec/mips/Makefile
@@ -21,6 +21,7 @@ MIPSFPU-OBJS-$(CONFIG_AAC_ENCODER) += mips/iirfilter_mips.o
OBJS-$(CONFIG_HEVC_DECODER) += mips/hevcdsp_init_mips.o \
mips/hevcpred_init_mips.o
OBJS-$(CONFIG_VP9_DECODER) += mips/vp9dsp_init_mips.o
+OBJS-$(CONFIG_VP8_DECODER) += mips/vp8dsp_init_mips.o
OBJS-$(CONFIG_H264DSP) += mips/h264dsp_init_mips.o
OBJS-$(CONFIG_H264QPEL) += mips/h264qpel_init_mips.o
OBJS-$(CONFIG_H264CHROMA) += mips/h264chroma_init_mips.o
@@ -47,6 +48,7 @@ MSA-OBJS-$(CONFIG_VP9_DECODER) += mips/vp9_mc_msa.o \
mips/vp9_lpf_msa.o \
mips/vp9_idct_msa.o \
mips/vp9_intra_msa.o
+MSA-OBJS-$(CONFIG_VP8_DECODER) += mips/vp8dsp_msa.o
MSA-OBJS-$(CONFIG_H264DSP) += mips/h264dsp_msa.o \
mips/h264idct_msa.o
MSA-OBJS-$(CONFIG_H264QPEL) += mips/h264qpel_msa.o
diff --git a/libavcodec/mips/vp8dsp_init_mips.c b/libavcodec/mips/vp8dsp_init_mips.c
new file mode 100644
index 0000000..58d1b6c
--- /dev/null
+++ b/libavcodec/mips/vp8dsp_init_mips.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2015 Manojkumar Bhosale (Manojkumar.Bhosale at imgtec.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * VP8 compatible video decoder
+ */
+
+#include "config.h"
+#include "libavutil/attributes.h"
+#include "libavcodec/vp8dsp.h"
+#include "vp8dsp_mips.h"
+
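+/*
+ * put_vp8_epel_pixels_tab is indexed as [size][dy][dx] (see the table
+ * description in libavcodec/vp8dsp.h): size 0/1/2 selects 16/8/4 pixel
+ * wide blocks, and dy/dx select the vertical/horizontal filter
+ * (0: none, 1: 4-tap, 2: 6-tap).
+ */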
+#define VP8_MC_MIPS_FUNC(IDX, SIZE) \
+ dsp->put_vp8_epel_pixels_tab[IDX][0][1] = \
+ ff_put_vp8_epel##SIZE##_h4_msa; \
+ dsp->put_vp8_epel_pixels_tab[IDX][0][2] = \
+ ff_put_vp8_epel##SIZE##_h6_msa; \
+ dsp->put_vp8_epel_pixels_tab[IDX][1][0] = \
+ ff_put_vp8_epel##SIZE##_v4_msa; \
+ dsp->put_vp8_epel_pixels_tab[IDX][1][1] = \
+ ff_put_vp8_epel##SIZE##_h4v4_msa; \
+ dsp->put_vp8_epel_pixels_tab[IDX][1][2] = \
+ ff_put_vp8_epel##SIZE##_h6v4_msa; \
+ dsp->put_vp8_epel_pixels_tab[IDX][2][0] = \
+ ff_put_vp8_epel##SIZE##_v6_msa; \
+ dsp->put_vp8_epel_pixels_tab[IDX][2][1] = \
+ ff_put_vp8_epel##SIZE##_h4v6_msa; \
+ dsp->put_vp8_epel_pixels_tab[IDX][2][2] = \
+ ff_put_vp8_epel##SIZE##_h6v6_msa
+
+#define VP8_BILINEAR_MC_MIPS_FUNC(IDX, SIZE) \
+ dsp->put_vp8_bilinear_pixels_tab[IDX][0][1] = \
+ ff_put_vp8_bilinear##SIZE##_h_msa; \
+ dsp->put_vp8_bilinear_pixels_tab[IDX][0][2] = \
+ ff_put_vp8_bilinear##SIZE##_h_msa; \
+ dsp->put_vp8_bilinear_pixels_tab[IDX][1][0] = \
+ ff_put_vp8_bilinear##SIZE##_v_msa; \
+ dsp->put_vp8_bilinear_pixels_tab[IDX][1][1] = \
+ ff_put_vp8_bilinear##SIZE##_hv_msa; \
+ dsp->put_vp8_bilinear_pixels_tab[IDX][1][2] = \
+ ff_put_vp8_bilinear##SIZE##_hv_msa; \
+ dsp->put_vp8_bilinear_pixels_tab[IDX][2][0] = \
+ ff_put_vp8_bilinear##SIZE##_v_msa; \
+ dsp->put_vp8_bilinear_pixels_tab[IDX][2][1] = \
+ ff_put_vp8_bilinear##SIZE##_hv_msa; \
+ dsp->put_vp8_bilinear_pixels_tab[IDX][2][2] = \
+ ff_put_vp8_bilinear##SIZE##_hv_msa
+
+#define VP8_MC_MIPS_COPY(IDX, SIZE) \
+ dsp->put_vp8_epel_pixels_tab[IDX][0][0] = \
+ ff_put_vp8_pixels##SIZE##_msa; \
+ dsp->put_vp8_bilinear_pixels_tab[IDX][0][0] = \
+ ff_put_vp8_pixels##SIZE##_msa
+
+#if HAVE_MSA
+static av_cold void vp8dsp_init_msa(VP8DSPContext *dsp)
+{
+ dsp->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_msa;
+ dsp->vp8_idct_add = ff_vp8_idct_add_msa;
+ dsp->vp8_idct_dc_add = ff_vp8_idct_dc_add_msa;
+ dsp->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_msa;
+ dsp->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_msa;
+
+ VP8_MC_MIPS_FUNC(0, 16);
+ VP8_MC_MIPS_FUNC(1, 8);
+ VP8_MC_MIPS_FUNC(2, 4);
+
+ VP8_BILINEAR_MC_MIPS_FUNC(0, 16);
+ VP8_BILINEAR_MC_MIPS_FUNC(1, 8);
+ VP8_BILINEAR_MC_MIPS_FUNC(2, 4);
+
+ VP8_MC_MIPS_COPY(0, 16);
+ VP8_MC_MIPS_COPY(1, 8);
+
+ dsp->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16_msa;
+ dsp->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16_msa;
+ dsp->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_msa;
+ dsp->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_msa;
+
+ dsp->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16_inner_msa;
+ dsp->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16_inner_msa;
+ dsp->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_msa;
+ dsp->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_msa;
+
+ dsp->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_msa;
+ dsp->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_msa;
+}
+#endif // #if HAVE_MSA
+
+av_cold void ff_vp8dsp_init_mips(VP8DSPContext *dsp)
+{
+#if HAVE_MSA
+ vp8dsp_init_msa(dsp);
+#endif // #if HAVE_MSA
+}
diff --git a/libavcodec/mips/vp8dsp_mips.h b/libavcodec/mips/vp8dsp_mips.h
new file mode 100644
index 0000000..8e715b5
--- /dev/null
+++ b/libavcodec/mips/vp8dsp_mips.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2015 Manojkumar Bhosale (Manojkumar.Bhosale at imgtec.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_MIPS_VP8DSP_MIPS_H
+#define AVCODEC_MIPS_VP8DSP_MIPS_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+void ff_put_vp8_pixels4_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int x, int y);
+void ff_put_vp8_pixels8_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int x, int y);
+void ff_put_vp8_pixels16_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int x, int y);
+
+void ff_put_vp8_epel16_h4_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_epel16_h6_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_epel16_v4_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_epel16_v6_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_epel16_h4v4_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_epel16_h6v4_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_epel16_h4v6_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_epel16_h6v6_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+
+void ff_put_vp8_epel8_h4_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_epel8_h6_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_epel8_v4_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_epel8_v6_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_epel8_h4v4_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_epel8_h6v4_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_epel8_h4v6_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_epel8_h6v6_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+
+void ff_put_vp8_epel4_h4_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_epel4_h6_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_epel4_v4_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_epel4_v6_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_epel4_h4v4_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_epel4_h6v4_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_epel4_h4v6_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_epel4_h6v6_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+
+void ff_put_vp8_bilinear16_h_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_bilinear16_v_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_bilinear16_hv_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+
+void ff_put_vp8_bilinear8_h_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_bilinear8_v_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_bilinear8_hv_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+
+void ff_put_vp8_bilinear4_h_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_bilinear4_v_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+void ff_put_vp8_bilinear4_hv_msa(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
+ int h, int mx, int my);
+
+/* loop filter */
+void ff_vp8_h_loop_filter16_inner_msa(uint8_t *dst, ptrdiff_t stride,
+ int32_t e, int32_t i, int32_t h);
+void ff_vp8_v_loop_filter16_inner_msa(uint8_t *dst, ptrdiff_t stride,
+ int32_t e, int32_t i, int32_t h);
+void ff_vp8_h_loop_filter8uv_inner_msa(uint8_t *dst_u, uint8_t *dst_v,
+ ptrdiff_t stride,
+ int flim_e, int flim_i, int hev_thresh);
+void ff_vp8_v_loop_filter8uv_inner_msa(uint8_t *dst_u, uint8_t *dst_v,
+ ptrdiff_t stride,
+ int flim_e, int flim_i, int hev_thresh);
+void ff_vp8_h_loop_filter16_msa(uint8_t *dst, ptrdiff_t stride,
+ int flim_e, int flim_i, int hev_thresh);
+void ff_vp8_v_loop_filter16_msa(uint8_t *dst, ptrdiff_t stride,
+ int flim_e, int flim_i, int hev_thresh);
+void ff_vp8_h_loop_filter8uv_msa(uint8_t *dst_u, uint8_t *dst_v,
+ ptrdiff_t stride,
+ int flim_e, int flim_i, int hev_thresh);
+void ff_vp8_v_loop_filter8uv_msa(uint8_t *dst_u, uint8_t *dst_v,
+ ptrdiff_t stride,
+ int flim_e, int flim_i, int hev_thresh);
+void ff_vp8_h_loop_filter_simple_msa(uint8_t *dst, ptrdiff_t stride, int flim);
+void ff_vp8_v_loop_filter_simple_msa(uint8_t *dst, ptrdiff_t stride, int flim);
+
+/* IDCT functions */
+void ff_vp8_luma_dc_wht_msa(int16_t block[4][4][16], int16_t dc[16]);
+void ff_vp8_idct_add_msa(uint8_t *dst, int16_t block[16], ptrdiff_t stride);
+void ff_vp8_idct_dc_add_msa(uint8_t *dst, int16_t block[16], ptrdiff_t stride);
+void ff_vp8_idct_dc_add4uv_msa(uint8_t *dst, int16_t block[4][16],
+ ptrdiff_t stride);
+void ff_vp8_idct_dc_add4y_msa(uint8_t *dst, int16_t block[4][16],
+ ptrdiff_t stride);
+
+#endif // #ifndef AVCODEC_MIPS_VP8DSP_MIPS_H
diff --git a/libavcodec/mips/vp8dsp_msa.c b/libavcodec/mips/vp8dsp_msa.c
new file mode 100644
index 0000000..5936da9
--- /dev/null
+++ b/libavcodec/mips/vp8dsp_msa.c
@@ -0,0 +1,3408 @@
+/*
+ * Copyright (c) 2015 Manojkumar Bhosale (Manojkumar.Bhosale at imgtec.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <string.h>
+#include "libavcodec/vp8dsp.h"
+#include "libavutil/mips/generic_macros_msa.h"
+#include "vp8dsp_mips.h"
+
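+/*
+ * Shuffle masks for the VSHF.B based filter macros below: the first row
+ * gathers overlapping byte pairs for 8 pixel wide runs, the remaining two
+ * rows gather pairs for two 4 pixel wide rows held in two source vectors
+ * (indices 16..31 select bytes from the second vshf operand).
+ */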
+static const uint8_t mc_filt_mask_arr[16 * 3] = {
+ /* 8 width cases */
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
+ /* 4 width cases */
+ 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20,
+ /* 4 width cases */
+ 8, 9, 9, 10, 10, 11, 11, 12, 24, 25, 25, 26, 26, 27, 27, 28
+};
+
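+/*
+ * Signed VP8 six-tap subpel coefficients (cf. subpel_filters[] in
+ * libavcodec/vp8dsp.c), zero-padded to eight entries per phase so that
+ * SPLATI_H can broadcast coefficient pairs straight from one loaded vector.
+ */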
+static const int8_t subpel_filters_msa[7][8] = {
+ {-6, 123, 12, -1, 0, 0, 0, 0},
+ {2, -11, 108, 36, -8, 1, 0, 0}, /* New 1/4 pel 6 tap filter */
+ {-9, 93, 50, -6, 0, 0, 0, 0},
+ {3, -16, 77, 77, -16, 3, 0, 0}, /* New 1/2 pel 6 tap filter */
+ {-6, 50, 93, -9, 0, 0, 0, 0},
+ {1, -8, 36, 108, -11, 2, 0, 0}, /* New 1/4 pel 6 tap filter */
+ {-1, 12, 123, -6, 0, 0, 0, 0},
+};
+
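+/* VP8 bilinear weights, (8 - i) * 16 and i * 16 for phases i = 1..7 */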
+static const int8_t bilinear_filters_msa[7][2] = {
+ {112, 16},
+ {96, 32},
+ {80, 48},
+ {64, 64},
+ {48, 80},
+ {32, 96},
+ {16, 112}
+};
+
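+/*
+ * Horizontal 6-tap filtering of one vector pair: gather the taps with the
+ * shuffle masks, accumulate three signed-byte dot products, then apply the
+ * VP8 rounding shift (srari by 7) and saturate to the signed 8-bit range.
+ */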
+#define HORIZ_6TAP_FILT(src0, src1, mask0, mask1, mask2, \
+ filt_h0, filt_h1, filt_h2) \
+( { \
+ v16i8 vec0_m, vec1_m, vec2_m; \
+ v8i16 hz_out_m; \
+ \
+ VSHF_B3_SB(src0, src1, src0, src1, src0, src1, mask0, mask1, mask2, \
+ vec0_m, vec1_m, vec2_m); \
+ hz_out_m = DPADD_SH3_SH(vec0_m, vec1_m, vec2_m, \
+ filt_h0, filt_h1, filt_h2); \
+ \
+ hz_out_m = __msa_srari_h(hz_out_m, 7); \
+ hz_out_m = __msa_sat_s_h(hz_out_m, 7); \
+ \
+ hz_out_m; \
+} )
+
+#define HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, \
+ mask0, mask1, mask2, \
+ filt0, filt1, filt2, \
+ out0, out1) \
+{ \
+ v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m; \
+ \
+ VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m); \
+ DOTP_SB2_SH(vec0_m, vec1_m, filt0, filt0, out0, out1); \
+ VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, vec2_m, vec3_m); \
+ DPADD_SB2_SH(vec2_m, vec3_m, filt1, filt1, out0, out1); \
+ VSHF_B2_SB(src0, src1, src2, src3, mask2, mask2, vec4_m, vec5_m); \
+ DPADD_SB2_SH(vec4_m, vec5_m, filt2, filt2, out0, out1); \
+}
+
+#define HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, \
+ mask0, mask1, mask2, \
+ filt0, filt1, filt2, \
+ out0, out1, out2, out3) \
+{ \
+ v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \
+ \
+ VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0_m, vec1_m); \
+ VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2_m, vec3_m); \
+ DOTP_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt0, filt0, filt0, filt0, \
+ out0, out1, out2, out3); \
+ VSHF_B2_SB(src0, src0, src1, src1, mask1, mask1, vec0_m, vec1_m); \
+ VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec2_m, vec3_m); \
+ VSHF_B2_SB(src0, src0, src1, src1, mask2, mask2, vec4_m, vec5_m); \
+ VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec6_m, vec7_m); \
+ DPADD_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt1, filt1, filt1, filt1, \
+ out0, out1, out2, out3); \
+ DPADD_SB4_SH(vec4_m, vec5_m, vec6_m, vec7_m, filt2, filt2, filt2, filt2, \
+ out0, out1, out2, out3); \
+}
+
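+/* two chained signed-byte dot products: the core step of the 4-tap filters */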
+#define FILT_4TAP_DPADD_S_H(vec0, vec1, filt0, filt1) \
+( { \
+ v8i16 tmp0; \
+ \
+ tmp0 = __msa_dotp_s_h((v16i8) vec0, (v16i8) filt0); \
+ tmp0 = __msa_dpadd_s_h(tmp0, (v16i8) vec1, (v16i8) filt1); \
+ \
+ tmp0; \
+} )
+
+#define HORIZ_4TAP_FILT(src0, src1, mask0, mask1, filt_h0, filt_h1) \
+( { \
+ v16i8 vec0_m, vec1_m; \
+ v8i16 hz_out_m; \
+ \
+ VSHF_B2_SB(src0, src1, src0, src1, mask0, mask1, vec0_m, vec1_m); \
+ hz_out_m = FILT_4TAP_DPADD_S_H(vec0_m, vec1_m, filt_h0, filt_h1); \
+ \
+ hz_out_m = __msa_srari_h(hz_out_m, 7); \
+ hz_out_m = __msa_sat_s_h(hz_out_m, 7); \
+ \
+ hz_out_m; \
+} )
+
+#define HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, \
+ mask0, mask1, filt0, filt1, \
+ out0, out1) \
+{ \
+ v16i8 vec0_m, vec1_m, vec2_m, vec3_m; \
+ \
+ VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m); \
+ DOTP_SB2_SH(vec0_m, vec1_m, filt0, filt0, out0, out1); \
+ VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, vec2_m, vec3_m); \
+ DPADD_SB2_SH(vec2_m, vec3_m, filt1, filt1, out0, out1); \
+}
+
+#define HORIZ_4TAP_8WID_4VECS_FILT(src0, src1, src2, src3, \
+ mask0, mask1, filt0, filt1, \
+ out0, out1, out2, out3) \
+{ \
+ v16i8 vec0_m, vec1_m, vec2_m, vec3_m; \
+ \
+ VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0_m, vec1_m); \
+ VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2_m, vec3_m); \
+ DOTP_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt0, filt0, filt0, filt0, \
+ out0, out1, out2, out3); \
+ VSHF_B2_SB(src0, src0, src1, src1, mask1, mask1, vec0_m, vec1_m); \
+ VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec2_m, vec3_m); \
+ DPADD_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt1, filt1, filt1, filt1, \
+ out0, out1, out2, out3); \
+}
+
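+/*
+ * The common_* helpers below share one pattern: bias the unsigned source
+ * bytes into signed range (xori 128), filter, round and saturate, then
+ * pack back to bytes and undo the bias while storing (PCKEV_XORI128_UB).
+ */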
+static void common_hz_6t_4x4_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter)
+{
+ v16i8 src0, src1, src2, src3, filt0, filt1, filt2;
+ v16u8 mask0, mask1, mask2, out;
+ v8i16 filt, out0, out1;
+
+ mask0 = LD_UB(&mc_filt_mask_arr[16]);
+ src -= 2;
+
+ /* rearranging filter */
+ filt = LD_SH(filter);
+ SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2);
+
+ mask1 = mask0 + 2;
+ mask2 = mask0 + 4;
+
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ XORI_B4_128_SB(src0, src1, src2, src3);
+ HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ filt0, filt1, filt2, out0, out1);
+ SRARI_H2_SH(out0, out1, 7);
+ SAT_SH2_SH(out0, out1, 7);
+ out = PCKEV_XORI128_UB(out0, out1);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+}
+
+static void common_hz_6t_4x8_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter)
+{
+ v16i8 src0, src1, src2, src3, filt0, filt1, filt2;
+ v16u8 mask0, mask1, mask2, out;
+ v8i16 filt, out0, out1, out2, out3;
+
+ mask0 = LD_UB(&mc_filt_mask_arr[16]);
+ src -= 2;
+
+ /* rearranging filter */
+ filt = LD_SH(filter);
+ SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2);
+
+ mask1 = mask0 + 2;
+ mask2 = mask0 + 4;
+
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ XORI_B4_128_SB(src0, src1, src2, src3);
+ src += (4 * src_stride);
+ HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ filt0, filt1, filt2, out0, out1);
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ XORI_B4_128_SB(src0, src1, src2, src3);
+ HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ filt0, filt1, filt2, out2, out3);
+ SRARI_H4_SH(out0, out1, out2, out3, 7);
+ SAT_SH4_SH(out0, out1, out2, out3, 7);
+ out = PCKEV_XORI128_UB(out0, out1);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+ dst += (4 * dst_stride);
+ out = PCKEV_XORI128_UB(out2, out3);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+}
+
+void ff_put_vp8_epel4_h6_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ const int8_t *filter = subpel_filters_msa[mx - 1];
+
+ if (4 == height) {
+ common_hz_6t_4x4_msa(src, src_stride, dst, dst_stride, filter);
+ } else if (8 == height) {
+ common_hz_6t_4x8_msa(src, src_stride, dst, dst_stride, filter);
+ }
+}
+
+void ff_put_vp8_epel8_h6_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter = subpel_filters_msa[mx - 1];
+ v16i8 src0, src1, src2, src3, filt0, filt1, filt2;
+ v16u8 mask0, mask1, mask2, tmp0, tmp1;
+ v8i16 filt, out0, out1, out2, out3;
+
+ mask0 = LD_UB(&mc_filt_mask_arr[0]);
+
+ src -= 2;
+
+ /* rearranging filter */
+ filt = LD_SH(filter);
+ SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2);
+
+ mask1 = mask0 + 2;
+ mask2 = mask0 + 4;
+
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ XORI_B4_128_SB(src0, src1, src2, src3);
+ src += (4 * src_stride);
+ HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ filt0, filt1, filt2, out0, out1, out2, out3);
+ SRARI_H4_SH(out0, out1, out2, out3, 7);
+ SAT_SH4_SH(out0, out1, out2, out3, 7);
+ tmp0 = PCKEV_XORI128_UB(out0, out1);
+ tmp1 = PCKEV_XORI128_UB(out2, out3);
+ ST8x4_UB(tmp0, tmp1, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ for (loop_cnt = (height >> 2) - 1; loop_cnt--;) {
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ XORI_B4_128_SB(src0, src1, src2, src3);
+ src += (4 * src_stride);
+ HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ filt0, filt1, filt2, out0, out1, out2, out3);
+ SRARI_H4_SH(out0, out1, out2, out3, 7);
+ SAT_SH4_SH(out0, out1, out2, out3, 7);
+ tmp0 = PCKEV_XORI128_UB(out0, out1);
+ tmp1 = PCKEV_XORI128_UB(out2, out3);
+ ST8x4_UB(tmp0, tmp1, dst, dst_stride);
+ dst += (4 * dst_stride);
+ }
+}
+
+void ff_put_vp8_epel16_h6_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter = subpel_filters_msa[mx - 1];
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, filt0, filt1, filt2;
+ v16u8 mask0, mask1, mask2, out;
+ v8i16 filt, out0, out1, out2, out3, out4, out5, out6, out7;
+
+ mask0 = LD_UB(&mc_filt_mask_arr[0]);
+ src -= 2;
+
+ /* rearranging filter */
+ filt = LD_SH(filter);
+ SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2);
+
+ mask1 = mask0 + 2;
+ mask2 = mask0 + 4;
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src0, src2, src4, src6);
+ LD_SB4(src + 8, src_stride, src1, src3, src5, src7);
+ XORI_B8_128_SB(src0, src1, src2, src3, src4, src5, src6, src7);
+ src += (4 * src_stride);
+
+ HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ filt0, filt1, filt2, out0, out1, out2, out3);
+ HORIZ_6TAP_8WID_4VECS_FILT(src4, src5, src6, src7, mask0, mask1, mask2,
+ filt0, filt1, filt2, out4, out5, out6, out7);
+ SRARI_H4_SH(out0, out1, out2, out3, 7);
+ SRARI_H4_SH(out4, out5, out6, out7, 7);
+ SAT_SH4_SH(out0, out1, out2, out3, 7);
+ SAT_SH4_SH(out4, out5, out6, out7, 7);
+ out = PCKEV_XORI128_UB(out0, out1);
+ ST_UB(out, dst);
+ dst += dst_stride;
+ out = PCKEV_XORI128_UB(out2, out3);
+ ST_UB(out, dst);
+ dst += dst_stride;
+ out = PCKEV_XORI128_UB(out4, out5);
+ ST_UB(out, dst);
+ dst += dst_stride;
+ out = PCKEV_XORI128_UB(out6, out7);
+ ST_UB(out, dst);
+ dst += dst_stride;
+ }
+}
+
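+/*
+ * The vertical filters interleave neighbouring rows (ILVR/ILVL) so that
+ * each dot product sees vertically adjacent pixel pairs; the 4-wide
+ * variants again pack two rows per vector.
+ */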
+void ff_put_vp8_epel4_v6_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter = subpel_filters_msa[my - 1];
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ v16i8 src10_r, src32_r, src54_r, src76_r, src21_r, src43_r, src65_r;
+ v16i8 src87_r, src2110, src4332, src6554, src8776, filt0, filt1, filt2;
+ v16u8 out;
+ v8i16 filt, out10, out32;
+
+ src -= (2 * src_stride);
+
+ filt = LD_SH(filter);
+ SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2);
+
+ LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
+ src += (5 * src_stride);
+
+ ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r,
+ src32_r, src43_r);
+ ILVR_D2_SB(src21_r, src10_r, src43_r, src32_r, src2110, src4332);
+ XORI_B2_128_SB(src2110, src4332);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src5, src6, src7, src8);
+ src += (4 * src_stride);
+
+ ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r,
+ src65_r, src76_r, src87_r);
+ ILVR_D2_SB(src65_r, src54_r, src87_r, src76_r, src6554, src8776);
+ XORI_B2_128_SB(src6554, src8776);
+ out10 = DPADD_SH3_SH(src2110, src4332, src6554, filt0, filt1, filt2);
+ out32 = DPADD_SH3_SH(src4332, src6554, src8776, filt0, filt1, filt2);
+ SRARI_H2_SH(out10, out32, 7);
+ SAT_SH2_SH(out10, out32, 7);
+ out = PCKEV_XORI128_UB(out10, out32);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ src2110 = src6554;
+ src4332 = src8776;
+ src4 = src8;
+ }
+}
+
+void ff_put_vp8_epel8_v6_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter = subpel_filters_msa[my - 1];
+ v16i8 src0, src1, src2, src3, src4, src7, src8, src9, src10;
+ v16i8 src10_r, src32_r, src76_r, src98_r, src21_r, src43_r, src87_r;
+ v16i8 src109_r, filt0, filt1, filt2;
+ v16u8 tmp0, tmp1;
+ v8i16 filt, out0_r, out1_r, out2_r, out3_r;
+
+ src -= (2 * src_stride);
+
+ filt = LD_SH(filter);
+ SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2);
+
+ LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
+ src += (5 * src_stride);
+
+ XORI_B5_128_SB(src0, src1, src2, src3, src4);
+ ILVR_B4_SB(src1, src0, src3, src2, src2, src1, src4, src3,
+ src10_r, src32_r, src21_r, src43_r);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src7, src8, src9, src10);
+ XORI_B4_128_SB(src7, src8, src9, src10);
+ src += (4 * src_stride);
+
+ ILVR_B4_SB(src7, src4, src8, src7, src9, src8, src10, src9, src76_r,
+ src87_r, src98_r, src109_r);
+ out0_r = DPADD_SH3_SH(src10_r, src32_r, src76_r, filt0, filt1, filt2);
+ out1_r = DPADD_SH3_SH(src21_r, src43_r, src87_r, filt0, filt1, filt2);
+ out2_r = DPADD_SH3_SH(src32_r, src76_r, src98_r, filt0, filt1, filt2);
+ out3_r = DPADD_SH3_SH(src43_r, src87_r, src109_r, filt0, filt1, filt2);
+ SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, 7);
+ SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7);
+ tmp0 = PCKEV_XORI128_UB(out0_r, out1_r);
+ tmp1 = PCKEV_XORI128_UB(out2_r, out3_r);
+ ST8x4_UB(tmp0, tmp1, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ src10_r = src76_r;
+ src32_r = src98_r;
+ src21_r = src87_r;
+ src43_r = src109_r;
+ src4 = src10;
+ }
+}
+
+void ff_put_vp8_epel16_v6_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter = subpel_filters_msa[my - 1];
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ v16i8 src10_r, src32_r, src54_r, src76_r, src21_r, src43_r, src65_r;
+ v16i8 src87_r, src10_l, src32_l, src54_l, src76_l, src21_l, src43_l;
+ v16i8 src65_l, src87_l, filt0, filt1, filt2;
+ v16u8 tmp0, tmp1, tmp2, tmp3;
+ v8i16 out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l, filt;
+
+ src -= (2 * src_stride);
+
+ filt = LD_SH(filter);
+ SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2);
+
+ LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
+ src += (5 * src_stride);
+
+ XORI_B5_128_SB(src0, src1, src2, src3, src4);
+ ILVR_B4_SB(src1, src0, src3, src2, src4, src3, src2, src1, src10_r,
+ src32_r, src43_r, src21_r);
+ ILVL_B4_SB(src1, src0, src3, src2, src4, src3, src2, src1, src10_l,
+ src32_l, src43_l, src21_l);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src5, src6, src7, src8);
+ src += (4 * src_stride);
+
+ XORI_B4_128_SB(src5, src6, src7, src8);
+ ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r,
+ src65_r, src76_r, src87_r);
+ ILVL_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_l,
+ src65_l, src76_l, src87_l);
+ out0_r = DPADD_SH3_SH(src10_r, src32_r, src54_r, filt0, filt1,
+ filt2);
+ out1_r = DPADD_SH3_SH(src21_r, src43_r, src65_r, filt0, filt1,
+ filt2);
+ out2_r = DPADD_SH3_SH(src32_r, src54_r, src76_r, filt0, filt1,
+ filt2);
+ out3_r = DPADD_SH3_SH(src43_r, src65_r, src87_r, filt0, filt1,
+ filt2);
+ out0_l = DPADD_SH3_SH(src10_l, src32_l, src54_l, filt0, filt1,
+ filt2);
+ out1_l = DPADD_SH3_SH(src21_l, src43_l, src65_l, filt0, filt1,
+ filt2);
+ out2_l = DPADD_SH3_SH(src32_l, src54_l, src76_l, filt0, filt1,
+ filt2);
+ out3_l = DPADD_SH3_SH(src43_l, src65_l, src87_l, filt0, filt1,
+ filt2);
+ SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, 7);
+ SRARI_H4_SH(out0_l, out1_l, out2_l, out3_l, 7);
+ SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7);
+ SAT_SH4_SH(out0_l, out1_l, out2_l, out3_l, 7);
+ PCKEV_B4_UB(out0_l, out0_r, out1_l, out1_r, out2_l, out2_r, out3_l,
+ out3_r, tmp0, tmp1, tmp2, tmp3);
+ XORI_B4_128_UB(tmp0, tmp1, tmp2, tmp3);
+ ST_UB4(tmp0, tmp1, tmp2, tmp3, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ src10_r = src54_r;
+ src32_r = src76_r;
+ src21_r = src65_r;
+ src43_r = src87_r;
+ src10_l = src54_l;
+ src32_l = src76_l;
+ src21_l = src65_l;
+ src43_l = src87_l;
+ src4 = src8;
+ }
+}
+
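+/*
+ * The hv variants run the horizontal filter first; the rounded, saturated
+ * intermediate rows are then byte-interleaved (ilvev) and fed to the
+ * vertical filter, with a sliding window of rows carried across iterations.
+ */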
+void ff_put_vp8_epel4_h6v6_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter_horiz = subpel_filters_msa[mx - 1];
+ const int8_t *filter_vert = subpel_filters_msa[my - 1];
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ v16i8 filt_hz0, filt_hz1, filt_hz2;
+ v16u8 mask0, mask1, mask2, out;
+ v8i16 tmp0, tmp1;
+ v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6;
+ v8i16 hz_out7, filt, filt_vt0, filt_vt1, filt_vt2, out0, out1, out2, out3;
+
+ mask0 = LD_UB(&mc_filt_mask_arr[16]);
+ src -= (2 + 2 * src_stride);
+
+ /* rearranging filter */
+ filt = LD_SH(filter_horiz);
+ SPLATI_H3_SB(filt, 0, 1, 2, filt_hz0, filt_hz1, filt_hz2);
+
+ filt = LD_SH(filter_vert);
+ SPLATI_H3_SH(filt, 0, 1, 2, filt_vt0, filt_vt1, filt_vt2);
+
+ mask1 = mask0 + 2;
+ mask2 = mask0 + 4;
+
+ LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
+ src += (5 * src_stride);
+
+ XORI_B5_128_SB(src0, src1, src2, src3, src4);
+ hz_out0 = HORIZ_6TAP_FILT(src0, src1, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out2 = HORIZ_6TAP_FILT(src2, src3, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
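+ /* each HORIZ_6TAP_FILT call above filters two 4-wide rows at once;
+ * sldi_b re-pairs the halves so that consecutive rows line up for
+ * the vertical pass */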
+ hz_out1 = (v8i16) __msa_sldi_b((v16i8) hz_out2, (v16i8) hz_out0, 8);
+ hz_out3 = HORIZ_6TAP_FILT(src3, src4, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, out0, out1);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB2(src, src_stride, src5, src6);
+ src += (2 * src_stride);
+
+ XORI_B2_128_SB(src5, src6);
+ hz_out5 = HORIZ_6TAP_FILT(src5, src6, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out4 = (v8i16) __msa_sldi_b((v16i8) hz_out5, (v16i8) hz_out3, 8);
+
+ LD_SB2(src, src_stride, src7, src8);
+ src += (2 * src_stride);
+
+ XORI_B2_128_SB(src7, src8);
+ hz_out7 = HORIZ_6TAP_FILT(src7, src8, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out6 = (v8i16) __msa_sldi_b((v16i8) hz_out7, (v16i8) hz_out5, 8);
+
+ out2 = (v8i16) __msa_ilvev_b((v16i8) hz_out5, (v16i8) hz_out4);
+ tmp0 = DPADD_SH3_SH(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2);
+
+ out3 = (v8i16) __msa_ilvev_b((v16i8) hz_out7, (v16i8) hz_out6);
+ tmp1 = DPADD_SH3_SH(out1, out2, out3, filt_vt0, filt_vt1, filt_vt2);
+
+ SRARI_H2_SH(tmp0, tmp1, 7);
+ SAT_SH2_SH(tmp0, tmp1, 7);
+ out = PCKEV_XORI128_UB(tmp0, tmp1);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ hz_out3 = hz_out7;
+ out0 = out2;
+ out1 = out3;
+ }
+}
+
+void ff_put_vp8_epel8_h6v6_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter_horiz = subpel_filters_msa[mx - 1];
+ const int8_t *filter_vert = subpel_filters_msa[my - 1];
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ v16i8 filt_hz0, filt_hz1, filt_hz2;
+ v16u8 mask0, mask1, mask2, vec0, vec1;
+ v8i16 filt, filt_vt0, filt_vt1, filt_vt2;
+ v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6;
+ v8i16 hz_out7, hz_out8, out0, out1, out2, out3, out4, out5, out6, out7;
+ v8i16 tmp0, tmp1, tmp2, tmp3;
+
+ mask0 = LD_UB(&mc_filt_mask_arr[0]);
+ src -= (2 + 2 * src_stride);
+
+ /* rearranging filter */
+ filt = LD_SH(filter_horiz);
+ SPLATI_H3_SB(filt, 0, 1, 2, filt_hz0, filt_hz1, filt_hz2);
+
+ mask1 = mask0 + 2;
+ mask2 = mask0 + 4;
+
+ LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
+ src += (5 * src_stride);
+
+ XORI_B5_128_SB(src0, src1, src2, src3, src4);
+ hz_out0 = HORIZ_6TAP_FILT(src0, src0, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out1 = HORIZ_6TAP_FILT(src1, src1, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out2 = HORIZ_6TAP_FILT(src2, src2, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out3 = HORIZ_6TAP_FILT(src3, src3, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out4 = HORIZ_6TAP_FILT(src4, src4, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+
+ filt = LD_SH(filter_vert);
+ SPLATI_H3_SH(filt, 0, 1, 2, filt_vt0, filt_vt1, filt_vt2);
+
+ ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, out0, out1);
+ ILVEV_B2_SH(hz_out1, hz_out2, hz_out3, hz_out4, out3, out4);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src5, src6, src7, src8);
+ src += (4 * src_stride);
+
+ XORI_B4_128_SB(src5, src6, src7, src8);
+ hz_out5 = HORIZ_6TAP_FILT(src5, src5, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ out2 = (v8i16) __msa_ilvev_b((v16i8) hz_out5, (v16i8) hz_out4);
+ tmp0 = DPADD_SH3_SH(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out6 = HORIZ_6TAP_FILT(src6, src6, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ out5 = (v8i16) __msa_ilvev_b((v16i8) hz_out6, (v16i8) hz_out5);
+ tmp1 = DPADD_SH3_SH(out3, out4, out5, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out7 = HORIZ_6TAP_FILT(src7, src7, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ out7 = (v8i16) __msa_ilvev_b((v16i8) hz_out7, (v16i8) hz_out6);
+ tmp2 = DPADD_SH3_SH(out1, out2, out7, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out8 = HORIZ_6TAP_FILT(src8, src8, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ out6 = (v8i16) __msa_ilvev_b((v16i8) hz_out8, (v16i8) hz_out7);
+ tmp3 = DPADD_SH3_SH(out4, out5, out6, filt_vt0, filt_vt1, filt_vt2);
+
+ SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, 7);
+ SAT_SH4_SH(tmp0, tmp1, tmp2, tmp3, 7);
+ vec0 = PCKEV_XORI128_UB(tmp0, tmp1);
+ vec1 = PCKEV_XORI128_UB(tmp2, tmp3);
+ ST8x4_UB(vec0, vec1, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ hz_out4 = hz_out8;
+ out0 = out2;
+ out1 = out7;
+ out3 = out5;
+ out4 = out6;
+ }
+}
+
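+/* the 16-wide h6v6 case is done as two independent 8-wide passes */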
+void ff_put_vp8_epel16_h6v6_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ int32_t multiple8_cnt;
+
+ for (multiple8_cnt = 2; multiple8_cnt--;) {
+ ff_put_vp8_epel8_h6v6_msa(dst, dst_stride, src, src_stride, height,
+ mx, my);
+
+ src += 8;
+ dst += 8;
+ }
+}
+
+static void common_hz_4t_4x2_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter)
+{
+ v16i8 filt0, filt1, src0, src1, mask0, mask1, vec0, vec1;
+ v16u8 out;
+ v8i16 filt, res0;
+
+ mask0 = LD_SB(&mc_filt_mask_arr[16]);
+ src -= 1;
+
+ /* rearranging filter */
+ filt = LD_SH(filter);
+ SPLATI_H2_SB(filt, 0, 1, filt0, filt1);
+
+ mask1 = mask0 + 2;
+
+ LD_SB2(src, src_stride, src0, src1);
+ XORI_B2_128_SB(src0, src1);
+ VSHF_B2_SB(src0, src1, src0, src1, mask0, mask1, vec0, vec1);
+ res0 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt0, filt1);
+ res0 = __msa_srari_h(res0, 7);
+ res0 = __msa_sat_s_h(res0, 7);
+ out = PCKEV_XORI128_UB(res0, res0);
+ ST4x2_UB(out, dst, dst_stride);
+}
+
+static void common_hz_4t_4x4_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter)
+{
+ v16i8 src0, src1, src2, src3, filt0, filt1, mask0, mask1;
+ v8i16 filt, out0, out1;
+ v16u8 out;
+
+ mask0 = LD_SB(&mc_filt_mask_arr[16]);
+ src -= 1;
+
+ /* rearranging filter */
+ filt = LD_SH(filter);
+ SPLATI_H2_SB(filt, 0, 1, filt0, filt1);
+
+ mask1 = mask0 + 2;
+
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ XORI_B4_128_SB(src0, src1, src2, src3);
+ HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,
+ filt0, filt1, out0, out1);
+ SRARI_H2_SH(out0, out1, 7);
+ SAT_SH2_SH(out0, out1, 7);
+ out = PCKEV_XORI128_UB(out0, out1);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+}
+
+static void common_hz_4t_4x8_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter)
+{
+ v16i8 src0, src1, src2, src3, filt0, filt1, mask0, mask1;
+ v16u8 out;
+ v8i16 filt, out0, out1, out2, out3;
+
+ mask0 = LD_SB(&mc_filt_mask_arr[16]);
+ src -= 1;
+
+ /* rearranging filter */
+ filt = LD_SH(filter);
+ SPLATI_H2_SB(filt, 0, 1, filt0, filt1);
+
+ mask1 = mask0 + 2;
+
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ src += (4 * src_stride);
+
+ XORI_B4_128_SB(src0, src1, src2, src3);
+ HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,
+ filt0, filt1, out0, out1);
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ XORI_B4_128_SB(src0, src1, src2, src3);
+ HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,
+ filt0, filt1, out2, out3);
+ SRARI_H4_SH(out0, out1, out2, out3, 7);
+ SAT_SH4_SH(out0, out1, out2, out3, 7);
+ out = PCKEV_XORI128_UB(out0, out1);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+ dst += (4 * dst_stride);
+ out = PCKEV_XORI128_UB(out2, out3);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+}
+
+static void common_hz_4t_4x16_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter)
+{
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
+ v16i8 filt0, filt1, mask0, mask1;
+ v16u8 out;
+ v8i16 filt, out0, out1, out2, out3;
+
+ mask0 = LD_SB(&mc_filt_mask_arr[16]);
+ src -= 1;
+
+ /* rearranging filter */
+ filt = LD_SH(filter);
+ SPLATI_H2_SB(filt, 0, 1, filt0, filt1);
+
+ mask1 = mask0 + 2;
+
+ LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
+ src += (8 * src_stride);
+ XORI_B8_128_SB(src0, src1, src2, src3, src4, src5, src6, src7);
+ HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,
+ filt0, filt1, out0, out1);
+ HORIZ_4TAP_4WID_4VECS_FILT(src4, src5, src6, src7, mask0, mask1,
+ filt0, filt1, out2, out3);
+ SRARI_H4_SH(out0, out1, out2, out3, 7);
+ SAT_SH4_SH(out0, out1, out2, out3, 7);
+ out = PCKEV_XORI128_UB(out0, out1);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+ dst += (4 * dst_stride);
+ out = PCKEV_XORI128_UB(out2, out3);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
+ src += (8 * src_stride);
+ XORI_B8_128_SB(src0, src1, src2, src3, src4, src5, src6, src7);
+ HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,
+ filt0, filt1, out0, out1);
+ HORIZ_4TAP_4WID_4VECS_FILT(src4, src5, src6, src7, mask0, mask1,
+ filt0, filt1, out2, out3);
+ SRARI_H4_SH(out0, out1, out2, out3, 7);
+ SAT_SH4_SH(out0, out1, out2, out3, 7);
+ out = PCKEV_XORI128_UB(out0, out1);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+ dst += (4 * dst_stride);
+ out = PCKEV_XORI128_UB(out2, out3);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+}
+
+void ff_put_vp8_epel4_h4_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ const int8_t *filter = subpel_filters_msa[mx - 1];
+
+ if (2 == height) {
+ common_hz_4t_4x2_msa(src, src_stride, dst, dst_stride, filter);
+ } else if (4 == height) {
+ common_hz_4t_4x4_msa(src, src_stride, dst, dst_stride, filter);
+ } else if (8 == height) {
+ common_hz_4t_4x8_msa(src, src_stride, dst, dst_stride, filter);
+ } else if (16 == height) {
+ common_hz_4t_4x16_msa(src, src_stride, dst, dst_stride, filter);
+ }
+}
+
+static void common_hz_4t_8x2mult_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
+{
+ uint32_t loop_cnt;
+ v16i8 src0, src1, filt0, filt1, mask0, mask1;
+ v16u8 out;
+ v8i16 filt, vec0, vec1, vec2, vec3;
+
+ mask0 = LD_SB(&mc_filt_mask_arr[0]);
+ src -= 1;
+
+ filt = LD_SH(filter);
+ SPLATI_H2_SB(filt, 0, 1, filt0, filt1);
+
+ mask1 = mask0 + 2;
+
+ for (loop_cnt = (height >> 1); loop_cnt--;) {
+ LD_SB2(src, src_stride, src0, src1);
+ src += (2 * src_stride);
+
+ XORI_B2_128_SB(src0, src1);
+ VSHF_B2_SH(src0, src0, src1, src1, mask0, mask0, vec0, vec1);
+ DOTP_SB2_SH(vec0, vec1, filt0, filt0, vec0, vec1);
+ VSHF_B2_SH(src0, src0, src1, src1, mask1, mask1, vec2, vec3);
+ DPADD_SB2_SH(vec2, vec3, filt1, filt1, vec0, vec1);
+ SRARI_H2_SH(vec0, vec1, 7);
+ SAT_SH2_SH(vec0, vec1, 7);
+ out = PCKEV_XORI128_UB(vec0, vec1);
+ ST8x2_UB(out, dst, dst_stride);
+ dst += (2 * dst_stride);
+ }
+}
+
+static void common_hz_4t_8x4mult_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
+{
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src3, filt0, filt1, mask0, mask1;
+ v16u8 tmp0, tmp1;
+ v8i16 filt, out0, out1, out2, out3;
+
+ mask0 = LD_SB(&mc_filt_mask_arr[0]);
+ src -= 1;
+
+ /* rearranging filter */
+ filt = LD_SH(filter);
+ SPLATI_H2_SB(filt, 0, 1, filt0, filt1);
+
+ mask1 = mask0 + 2;
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ src += (4 * src_stride);
+
+ XORI_B4_128_SB(src0, src1, src2, src3);
+ HORIZ_4TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, filt0,
+ filt1, out0, out1, out2, out3);
+ SRARI_H4_SH(out0, out1, out2, out3, 7);
+ SAT_SH4_SH(out0, out1, out2, out3, 7);
+ tmp0 = PCKEV_XORI128_UB(out0, out1);
+ tmp1 = PCKEV_XORI128_UB(out2, out3);
+ ST8x4_UB(tmp0, tmp1, dst, dst_stride);
+ dst += (4 * dst_stride);
+ }
+}
+
+void ff_put_vp8_epel8_h4_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ const int8_t *filter = subpel_filters_msa[mx - 1];
+
+ if ((2 == height) || (6 == height)) {
+ common_hz_4t_8x2mult_msa(src, src_stride, dst, dst_stride, filter,
+ height);
+ } else {
+ common_hz_4t_8x4mult_msa(src, src_stride, dst, dst_stride, filter,
+ height);
+ }
+}
+
+void ff_put_vp8_epel16_h4_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter = subpel_filters_msa[mx - 1];
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
+ v16i8 filt0, filt1, mask0, mask1;
+ v8i16 filt, out0, out1, out2, out3, out4, out5, out6, out7;
+ v16u8 out;
+
+ mask0 = LD_SB(&mc_filt_mask_arr[0]);
+ src -= 1;
+
+ /* rearranging filter */
+ filt = LD_SH(filter);
+ SPLATI_H2_SB(filt, 0, 1, filt0, filt1);
+
+ mask1 = mask0 + 2;
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src0, src2, src4, src6);
+ LD_SB4(src + 8, src_stride, src1, src3, src5, src7);
+ src += (4 * src_stride);
+
+ XORI_B8_128_SB(src0, src1, src2, src3, src4, src5, src6, src7);
+ HORIZ_4TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, filt0,
+ filt1, out0, out1, out2, out3);
+ HORIZ_4TAP_8WID_4VECS_FILT(src4, src5, src6, src7, mask0, mask1, filt0,
+ filt1, out4, out5, out6, out7);
+ SRARI_H4_SH(out0, out1, out2, out3, 7);
+ SRARI_H4_SH(out4, out5, out6, out7, 7);
+ SAT_SH4_SH(out0, out1, out2, out3, 7);
+ SAT_SH4_SH(out4, out5, out6, out7, 7);
+ out = PCKEV_XORI128_UB(out0, out1);
+ ST_UB(out, dst);
+ dst += dst_stride;
+ out = PCKEV_XORI128_UB(out2, out3);
+ ST_UB(out, dst);
+ dst += dst_stride;
+ out = PCKEV_XORI128_UB(out4, out5);
+ ST_UB(out, dst);
+ dst += dst_stride;
+ out = PCKEV_XORI128_UB(out6, out7);
+ ST_UB(out, dst);
+ dst += dst_stride;
+ }
+}
+
+static void common_vt_4t_4x2_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter)
+{
+ v16i8 src0, src1, src2, src3, src4, src10_r, src32_r, src21_r, src43_r;
+ v16i8 src2110, src4332, filt0, filt1;
+ v16u8 out;
+ v8i16 filt, out10;
+
+ src -= src_stride;
+
+ filt = LD_SH(filter);
+ SPLATI_H2_SB(filt, 0, 1, filt0, filt1);
+
+ LD_SB3(src, src_stride, src0, src1, src2);
+ src += (3 * src_stride);
+
+ ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
+ src2110 = (v16i8) __msa_ilvr_d((v2i64) src21_r, (v2i64) src10_r);
+ src2110 = (v16i8) __msa_xori_b((v16u8) src2110, 128);
+ LD_SB2(src, src_stride, src3, src4);
+ ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
+ src4332 = (v16i8) __msa_ilvr_d((v2i64) src43_r, (v2i64) src32_r);
+ src4332 = (v16i8) __msa_xori_b((v16u8) src4332, 128);
+ out10 = FILT_4TAP_DPADD_S_H(src2110, src4332, filt0, filt1);
+ out10 = __msa_srari_h(out10, 7);
+ out10 = __msa_sat_s_h(out10, 7);
+ out = PCKEV_XORI128_UB(out10, out10);
+ ST4x2_UB(out, dst, dst_stride);
+}
+
+static void common_vt_4t_4x4multiple_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
+{
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src3, src4, src5;
+ v16i8 src10_r, src32_r, src54_r, src21_r, src43_r, src65_r;
+ v16i8 src2110, src4332, filt0, filt1;
+ v8i16 filt, out10, out32;
+ v16u8 out;
+
+ src -= src_stride;
+
+ filt = LD_SH(filter);
+ SPLATI_H2_SB(filt, 0, 1, filt0, filt1);
+
+ LD_SB3(src, src_stride, src0, src1, src2);
+ src += (3 * src_stride);
+
+ ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
+
+ src2110 = (v16i8) __msa_ilvr_d((v2i64) src21_r, (v2i64) src10_r);
+ src2110 = (v16i8) __msa_xori_b((v16u8) src2110, 128);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB3(src, src_stride, src3, src4, src5);
+ src += (3 * src_stride);
+ ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
+ src4332 = (v16i8) __msa_ilvr_d((v2i64) src43_r, (v2i64) src32_r);
+ src4332 = (v16i8) __msa_xori_b((v16u8) src4332, 128);
+ out10 = FILT_4TAP_DPADD_S_H(src2110, src4332, filt0, filt1);
+
+ src2 = LD_SB(src);
+ src += (src_stride);
+ ILVR_B2_SB(src5, src4, src2, src5, src54_r, src65_r);
+ src2110 = (v16i8) __msa_ilvr_d((v2i64) src65_r, (v2i64) src54_r);
+ src2110 = (v16i8) __msa_xori_b((v16u8) src2110, 128);
+ out32 = FILT_4TAP_DPADD_S_H(src4332, src2110, filt0, filt1);
+ SRARI_H2_SH(out10, out32, 7);
+ SAT_SH2_SH(out10, out32, 7);
+ out = PCKEV_XORI128_UB(out10, out32);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+ dst += (4 * dst_stride);
+ }
+}
+
+void ff_put_vp8_epel4_v4_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ const int8_t *filter = subpel_filters_msa[my - 1];
+
+ if (2 == height) {
+ common_vt_4t_4x2_msa(src, src_stride, dst, dst_stride, filter);
+ } else {
+ common_vt_4t_4x4multiple_msa(src, src_stride, dst, dst_stride, filter,
+ height);
+ }
+}
+
+static void common_vt_4t_8x2_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter)
+{
+ v16i8 src0, src1, src2, src3, src4;
+ v8i16 src01, src12, src23, src34, tmp0, tmp1, filt, filt0, filt1;
+ v16u8 out;
+
+ src -= src_stride;
+
+ /* rearranging filter_y */
+ filt = LD_SH(filter);
+ SPLATI_H2_SH(filt, 0, 1, filt0, filt1);
+
+ LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
+ XORI_B5_128_SB(src0, src1, src2, src3, src4);
+ ILVR_B2_SH(src1, src0, src3, src2, src01, src23);
+ tmp0 = FILT_4TAP_DPADD_S_H(src01, src23, filt0, filt1);
+ ILVR_B2_SH(src2, src1, src4, src3, src12, src34);
+ tmp1 = FILT_4TAP_DPADD_S_H(src12, src34, filt0, filt1);
+ SRARI_H2_SH(tmp0, tmp1, 7);
+ SAT_SH2_SH(tmp0, tmp1, 7);
+ out = PCKEV_XORI128_UB(tmp0, tmp1);
+ ST8x2_UB(out, dst, dst_stride);
+}
+
+static void common_vt_4t_8x6_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter)
+{
+ uint32_t loop_cnt;
+ uint64_t out0, out1, out2;
+ v16i8 src0, src1, src2, src3, src4, src5;
+ v8i16 vec0, vec1, vec2, vec3, vec4, tmp0, tmp1, tmp2;
+ v8i16 filt, filt0, filt1;
+
+ src -= src_stride;
+
+ /* rearranging filter_y */
+ filt = LD_SH(filter);
+ SPLATI_H2_SH(filt, 0, 1, filt0, filt1);
+
+ LD_SB3(src, src_stride, src0, src1, src2);
+ src += (3 * src_stride);
+
+ XORI_B3_128_SB(src0, src1, src2);
+ ILVR_B2_SH(src1, src0, src2, src1, vec0, vec2);
+
+ for (loop_cnt = 2; loop_cnt--;) {
+ LD_SB3(src, src_stride, src3, src4, src5);
+ src += (3 * src_stride);
+
+ XORI_B3_128_SB(src3, src4, src5);
+ ILVR_B3_SH(src3, src2, src4, src3, src5, src4, vec1, vec3, vec4);
+ tmp0 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt0, filt1);
+ tmp1 = FILT_4TAP_DPADD_S_H(vec2, vec3, filt0, filt1);
+ tmp2 = FILT_4TAP_DPADD_S_H(vec1, vec4, filt0, filt1);
+ SRARI_H2_SH(tmp0, tmp1, 7);
+ tmp2 = __msa_srari_h(tmp2, 7);
+ SAT_SH3_SH(tmp0, tmp1, tmp2, 7);
+ PCKEV_B2_SH(tmp1, tmp0, tmp2, tmp2, tmp0, tmp2);
+ XORI_B2_128_SH(tmp0, tmp2);
+
+ out0 = __msa_copy_u_d((v2i64) tmp0, 0);
+ out1 = __msa_copy_u_d((v2i64) tmp0, 1);
+ out2 = __msa_copy_u_d((v2i64) tmp2, 0);
+ SD(out0, dst);
+ dst += dst_stride;
+ SD(out1, dst);
+ dst += dst_stride;
+ SD(out2, dst);
+ dst += dst_stride;
+
+ src2 = src5;
+ vec0 = vec3;
+ vec2 = vec4;
+ }
+}
+
+static void common_vt_4t_8x4mult_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
+{
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src7, src8, src9, src10;
+ v16i8 src10_r, src72_r, src98_r, src21_r, src87_r, src109_r, filt0, filt1;
+ v16u8 tmp0, tmp1;
+ v8i16 filt, out0_r, out1_r, out2_r, out3_r;
+
+ src -= src_stride;
+
+ filt = LD_SH(filter);
+ SPLATI_H2_SB(filt, 0, 1, filt0, filt1);
+
+ LD_SB3(src, src_stride, src0, src1, src2);
+ src += (3 * src_stride);
+
+ XORI_B3_128_SB(src0, src1, src2);
+ ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src7, src8, src9, src10);
+ src += (4 * src_stride);
+
+ XORI_B4_128_SB(src7, src8, src9, src10);
+ ILVR_B4_SB(src7, src2, src8, src7, src9, src8, src10, src9,
+ src72_r, src87_r, src98_r, src109_r);
+ out0_r = FILT_4TAP_DPADD_S_H(src10_r, src72_r, filt0, filt1);
+ out1_r = FILT_4TAP_DPADD_S_H(src21_r, src87_r, filt0, filt1);
+ out2_r = FILT_4TAP_DPADD_S_H(src72_r, src98_r, filt0, filt1);
+ out3_r = FILT_4TAP_DPADD_S_H(src87_r, src109_r, filt0, filt1);
+ SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, 7);
+ SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7);
+ tmp0 = PCKEV_XORI128_UB(out0_r, out1_r);
+ tmp1 = PCKEV_XORI128_UB(out2_r, out3_r);
+ ST8x4_UB(tmp0, tmp1, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ src10_r = src98_r;
+ src21_r = src109_r;
+ src2 = src10;
+ }
+}
+
+void ff_put_vp8_epel8_v4_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ const int8_t *filter = subpel_filters_msa[my - 1];
+
+ if (2 == height) {
+ common_vt_4t_8x2_msa(src, src_stride, dst, dst_stride, filter);
+ } else if (6 == height) {
+ common_vt_4t_8x6_msa(src, src_stride, dst, dst_stride, filter);
+ } else {
+ common_vt_4t_8x4mult_msa(src, src_stride, dst, dst_stride,
+ filter, height);
+ }
+}
+
+void ff_put_vp8_epel16_v4_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter = subpel_filters_msa[my - 1];
+ v16i8 src0, src1, src2, src3, src4, src5, src6;
+ v16i8 src10_r, src32_r, src54_r, src21_r, src43_r, src65_r, src10_l;
+ v16i8 src32_l, src54_l, src21_l, src43_l, src65_l, filt0, filt1;
+ v16u8 tmp0, tmp1, tmp2, tmp3;
+ v8i16 filt, out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l;
+
+ src -= src_stride;
+
+ filt = LD_SH(filter);
+ SPLATI_H2_SB(filt, 0, 1, filt0, filt1);
+
+ LD_SB3(src, src_stride, src0, src1, src2);
+ src += (3 * src_stride);
+
+ XORI_B3_128_SB(src0, src1, src2);
+ ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
+ ILVL_B2_SB(src1, src0, src2, src1, src10_l, src21_l);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src3, src4, src5, src6);
+ src += (4 * src_stride);
+
+ XORI_B4_128_SB(src3, src4, src5, src6);
+ ILVR_B4_SB(src3, src2, src4, src3, src5, src4, src6, src5,
+ src32_r, src43_r, src54_r, src65_r);
+ ILVL_B4_SB(src3, src2, src4, src3, src5, src4, src6, src5,
+ src32_l, src43_l, src54_l, src65_l);
+ out0_r = FILT_4TAP_DPADD_S_H(src10_r, src32_r, filt0, filt1);
+ out1_r = FILT_4TAP_DPADD_S_H(src21_r, src43_r, filt0, filt1);
+ out2_r = FILT_4TAP_DPADD_S_H(src32_r, src54_r, filt0, filt1);
+ out3_r = FILT_4TAP_DPADD_S_H(src43_r, src65_r, filt0, filt1);
+ out0_l = FILT_4TAP_DPADD_S_H(src10_l, src32_l, filt0, filt1);
+ out1_l = FILT_4TAP_DPADD_S_H(src21_l, src43_l, filt0, filt1);
+ out2_l = FILT_4TAP_DPADD_S_H(src32_l, src54_l, filt0, filt1);
+ out3_l = FILT_4TAP_DPADD_S_H(src43_l, src65_l, filt0, filt1);
+ SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, 7);
+ SRARI_H4_SH(out0_l, out1_l, out2_l, out3_l, 7);
+ SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7);
+ SAT_SH4_SH(out0_l, out1_l, out2_l, out3_l, 7);
+ PCKEV_B4_UB(out0_l, out0_r, out1_l, out1_r, out2_l, out2_r, out3_l,
+ out3_r, tmp0, tmp1, tmp2, tmp3);
+ XORI_B4_128_UB(tmp0, tmp1, tmp2, tmp3);
+ ST_UB4(tmp0, tmp1, tmp2, tmp3, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ src10_r = src54_r;
+ src21_r = src65_r;
+ src10_l = src54_l;
+ src21_l = src65_l;
+ src2 = src6;
+ }
+}
+
+void ff_put_vp8_epel4_h4v4_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter_horiz = subpel_filters_msa[mx - 1];
+ const int8_t *filter_vert = subpel_filters_msa[my - 1];
+ v16i8 src0, src1, src2, src3, src4, src5, src6, filt_hz0, filt_hz1;
+ v16u8 mask0, mask1, out;
+ v8i16 filt, filt_vt0, filt_vt1, tmp0, tmp1, vec0, vec1, vec2;
+ v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5;
+
+ mask0 = LD_UB(&mc_filt_mask_arr[16]);
+ src -= (1 + 1 * src_stride);
+
+ /* rearranging filter */
+ filt = LD_SH(filter_horiz);
+ SPLATI_H2_SB(filt, 0, 1, filt_hz0, filt_hz1);
+
+ mask1 = mask0 + 2;
+
+ LD_SB3(src, src_stride, src0, src1, src2);
+ src += (3 * src_stride);
+
+ XORI_B3_128_SB(src0, src1, src2);
+ hz_out0 = HORIZ_4TAP_FILT(src0, src1, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out1 = HORIZ_4TAP_FILT(src1, src2, mask0, mask1, filt_hz0, filt_hz1);
+ vec0 = (v8i16) __msa_ilvev_b((v16i8) hz_out1, (v16i8) hz_out0);
+
+ filt = LD_SH(filter_vert);
+ SPLATI_H2_SH(filt, 0, 1, filt_vt0, filt_vt1);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src3, src4, src5, src6);
+ src += (4 * src_stride);
+
+ XORI_B2_128_SB(src3, src4);
+ hz_out3 = HORIZ_4TAP_FILT(src3, src4, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out2 = (v8i16) __msa_sldi_b((v16i8) hz_out3, (v16i8) hz_out1, 8);
+ vec1 = (v8i16) __msa_ilvev_b((v16i8) hz_out3, (v16i8) hz_out2);
+ tmp0 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1);
+
+ XORI_B2_128_SB(src5, src6);
+ hz_out5 = HORIZ_4TAP_FILT(src5, src6, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out4 = (v8i16) __msa_sldi_b((v16i8) hz_out5, (v16i8) hz_out3, 8);
+ vec2 = (v8i16) __msa_ilvev_b((v16i8) hz_out5, (v16i8) hz_out4);
+ tmp1 = FILT_4TAP_DPADD_S_H(vec1, vec2, filt_vt0, filt_vt1);
+
+ SRARI_H2_SH(tmp0, tmp1, 7);
+ SAT_SH2_SH(tmp0, tmp1, 7);
+ out = PCKEV_XORI128_UB(tmp0, tmp1);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ hz_out1 = hz_out5;
+ vec0 = vec2;
+ }
+}
+
+void ff_put_vp8_epel8_h4v4_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter_horiz = subpel_filters_msa[mx - 1];
+ const int8_t *filter_vert = subpel_filters_msa[my - 1];
+ v16i8 src0, src1, src2, src3, src4, src5, src6, filt_hz0, filt_hz1;
+ v16u8 mask0, mask1, out0, out1;
+ v8i16 filt, filt_vt0, filt_vt1, tmp0, tmp1, tmp2, tmp3;
+ v8i16 hz_out0, hz_out1, hz_out2, hz_out3;
+ v8i16 vec0, vec1, vec2, vec3, vec4;
+
+ mask0 = LD_UB(&mc_filt_mask_arr[0]);
+ src -= (1 + 1 * src_stride);
+
+ /* rearranging filter */
+ filt = LD_SH(filter_horiz);
+ SPLATI_H2_SB(filt, 0, 1, filt_hz0, filt_hz1);
+
+ mask1 = mask0 + 2;
+
+ LD_SB3(src, src_stride, src0, src1, src2);
+ src += (3 * src_stride);
+
+ XORI_B3_128_SB(src0, src1, src2);
+ hz_out0 = HORIZ_4TAP_FILT(src0, src0, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out1 = HORIZ_4TAP_FILT(src1, src1, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out2 = HORIZ_4TAP_FILT(src2, src2, mask0, mask1, filt_hz0, filt_hz1);
+ ILVEV_B2_SH(hz_out0, hz_out1, hz_out1, hz_out2, vec0, vec2);
+
+ filt = LD_SH(filter_vert);
+ SPLATI_H2_SH(filt, 0, 1, filt_vt0, filt_vt1);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src3, src4, src5, src6);
+ src += (4 * src_stride);
+
+ XORI_B4_128_SB(src3, src4, src5, src6);
+ hz_out3 = HORIZ_4TAP_FILT(src3, src3, mask0, mask1, filt_hz0, filt_hz1);
+ vec1 = (v8i16) __msa_ilvev_b((v16i8) hz_out3, (v16i8) hz_out2);
+ tmp0 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1);
+
+ hz_out0 = HORIZ_4TAP_FILT(src4, src4, mask0, mask1, filt_hz0, filt_hz1);
+ vec3 = (v8i16) __msa_ilvev_b((v16i8) hz_out0, (v16i8) hz_out3);
+ tmp1 = FILT_4TAP_DPADD_S_H(vec2, vec3, filt_vt0, filt_vt1);
+
+ hz_out1 = HORIZ_4TAP_FILT(src5, src5, mask0, mask1, filt_hz0, filt_hz1);
+ vec4 = (v8i16) __msa_ilvev_b((v16i8) hz_out1, (v16i8) hz_out0);
+ tmp2 = FILT_4TAP_DPADD_S_H(vec1, vec4, filt_vt0, filt_vt1);
+
+ hz_out2 = HORIZ_4TAP_FILT(src6, src6, mask0, mask1, filt_hz0, filt_hz1);
+ ILVEV_B2_SH(hz_out3, hz_out0, hz_out1, hz_out2, vec0, vec1);
+ tmp3 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1);
+
+ SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, 7);
+ SAT_SH4_SH(tmp0, tmp1, tmp2, tmp3, 7);
+ out0 = PCKEV_XORI128_UB(tmp0, tmp1);
+ out1 = PCKEV_XORI128_UB(tmp2, tmp3);
+ ST8x4_UB(out0, out1, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ vec0 = vec4;
+ vec2 = vec1;
+ }
+}
+
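+/* The 16-pixel-wide HV cases are computed as two independent 8-wide
+ * columns, reusing the matching 8-wide kernel on src/dst offset by 8. */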
+void ff_put_vp8_epel16_h4v4_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ int32_t multiple8_cnt;
+
+ for (multiple8_cnt = 2; multiple8_cnt--;) {
+ ff_put_vp8_epel8_h4v4_msa(dst, dst_stride, src, src_stride, height,
+ mx, my);
+
+ src += 8;
+ dst += 8;
+ }
+}
+
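+/* 6-tap horizontal, 4-tap vertical. src is backed up by (2 + src_stride):
+ * two columns of left context for the 6-tap window and one row of top
+ * context for the 4-tap vertical filter. */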
+void ff_put_vp8_epel4_h6v4_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter_horiz = subpel_filters_msa[mx - 1];
+ const int8_t *filter_vert = subpel_filters_msa[my - 1];
+ v16i8 src0, src1, src2, src3, src4, src5, src6;
+ v16i8 filt_hz0, filt_hz1, filt_hz2;
+ v16u8 res0, res1, mask0, mask1, mask2;
+ v8i16 filt, filt_vt0, filt_vt1, tmp0, tmp1, vec0, vec1, vec2;
+ v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5;
+
+ mask0 = LD_UB(&mc_filt_mask_arr[16]);
+ src -= (2 + 1 * src_stride);
+
+ /* rearranging filter */
+ filt = LD_SH(filter_horiz);
+ SPLATI_H3_SB(filt, 0, 1, 2, filt_hz0, filt_hz1, filt_hz2);
+
+ mask1 = mask0 + 2;
+ mask2 = mask0 + 4;
+
+ LD_SB3(src, src_stride, src0, src1, src2);
+ src += (3 * src_stride);
+
+ XORI_B3_128_SB(src0, src1, src2);
+ hz_out0 = HORIZ_6TAP_FILT(src0, src1, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out1 = HORIZ_6TAP_FILT(src1, src2, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ vec0 = (v8i16) __msa_ilvev_b((v16i8) hz_out1, (v16i8) hz_out0);
+
+ filt = LD_SH(filter_vert);
+ SPLATI_H2_SH(filt, 0, 1, filt_vt0, filt_vt1);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src3, src4, src5, src6);
+ src += (4 * src_stride);
+
+ XORI_B4_128_SB(src3, src4, src5, src6);
+ hz_out3 = HORIZ_6TAP_FILT(src3, src4, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out2 = (v8i16) __msa_sldi_b((v16i8) hz_out3, (v16i8) hz_out1, 8);
+ vec1 = (v8i16) __msa_ilvev_b((v16i8) hz_out3, (v16i8) hz_out2);
+ tmp0 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1);
+
+ hz_out5 = HORIZ_6TAP_FILT(src5, src6, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out4 = (v8i16) __msa_sldi_b((v16i8) hz_out5, (v16i8) hz_out3, 8);
+ vec2 = (v8i16) __msa_ilvev_b((v16i8) hz_out5, (v16i8) hz_out4);
+ tmp1 = FILT_4TAP_DPADD_S_H(vec1, vec2, filt_vt0, filt_vt1);
+
+ SRARI_H2_SH(tmp0, tmp1, 7);
+ SAT_SH2_SH(tmp0, tmp1, 7);
+ PCKEV_B2_UB(tmp0, tmp0, tmp1, tmp1, res0, res1);
+ XORI_B2_128_UB(res0, res1);
+ ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ hz_out1 = hz_out5;
+ vec0 = vec2;
+ }
+}
+
+void ff_put_vp8_epel8_h6v4_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter_horiz = subpel_filters_msa[mx - 1];
+ const int8_t *filter_vert = subpel_filters_msa[my - 1];
+ v16i8 src0, src1, src2, src3, src4, src5, src6;
+ v16i8 filt_hz0, filt_hz1, filt_hz2, mask0, mask1, mask2;
+ v8i16 filt, filt_vt0, filt_vt1, hz_out0, hz_out1, hz_out2, hz_out3;
+ v8i16 tmp0, tmp1, tmp2, tmp3, vec0, vec1, vec2, vec3;
+ v16u8 out0, out1;
+
+ mask0 = LD_SB(&mc_filt_mask_arr[0]);
+ src -= (2 + src_stride);
+
+ /* rearranging filter */
+ filt = LD_SH(filter_horiz);
+ SPLATI_H3_SB(filt, 0, 1, 2, filt_hz0, filt_hz1, filt_hz2);
+
+ mask1 = mask0 + 2;
+ mask2 = mask0 + 4;
+
+ LD_SB3(src, src_stride, src0, src1, src2);
+ src += (3 * src_stride);
+
+ XORI_B3_128_SB(src0, src1, src2);
+ hz_out0 = HORIZ_6TAP_FILT(src0, src0, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out1 = HORIZ_6TAP_FILT(src1, src1, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out2 = HORIZ_6TAP_FILT(src2, src2, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ ILVEV_B2_SH(hz_out0, hz_out1, hz_out1, hz_out2, vec0, vec2);
+
+ filt = LD_SH(filter_vert);
+ SPLATI_H2_SH(filt, 0, 1, filt_vt0, filt_vt1);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src3, src4, src5, src6);
+ src += (4 * src_stride);
+
+ XORI_B4_128_SB(src3, src4, src5, src6);
+
+ hz_out3 = HORIZ_6TAP_FILT(src3, src3, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ vec1 = (v8i16) __msa_ilvev_b((v16i8) hz_out3, (v16i8) hz_out2);
+ tmp0 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1);
+
+ hz_out0 = HORIZ_6TAP_FILT(src4, src4, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ vec3 = (v8i16) __msa_ilvev_b((v16i8) hz_out0, (v16i8) hz_out3);
+ tmp1 = FILT_4TAP_DPADD_S_H(vec2, vec3, filt_vt0, filt_vt1);
+
+ hz_out1 = HORIZ_6TAP_FILT(src5, src5, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ vec0 = (v8i16) __msa_ilvev_b((v16i8) hz_out1, (v16i8) hz_out0);
+ tmp2 = FILT_4TAP_DPADD_S_H(vec1, vec0, filt_vt0, filt_vt1);
+
+ hz_out2 = HORIZ_6TAP_FILT(src6, src6, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ ILVEV_B2_SH(hz_out3, hz_out0, hz_out1, hz_out2, vec1, vec2);
+ tmp3 = FILT_4TAP_DPADD_S_H(vec1, vec2, filt_vt0, filt_vt1);
+
+ SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, 7);
+ SAT_SH4_SH(tmp0, tmp1, tmp2, tmp3, 7);
+ out0 = PCKEV_XORI128_UB(tmp0, tmp1);
+ out1 = PCKEV_XORI128_UB(tmp2, tmp3);
+ ST8x4_UB(out0, out1, dst, dst_stride);
+ dst += (4 * dst_stride);
+ }
+}
+
+void ff_put_vp8_epel16_h6v4_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ int32_t multiple8_cnt;
+
+ for (multiple8_cnt = 2; multiple8_cnt--;) {
+ ff_put_vp8_epel8_h6v4_msa(dst, dst_stride, src, src_stride, height,
+ mx, my);
+
+ src += 8;
+ dst += 8;
+ }
+}
+
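+/* 4-tap horizontal, 6-tap vertical. Five rows are filtered up front as
+ * vertical context; DPADD_SH3_SH accumulates the three vertical tap pairs
+ * of the 6-tap filter in one pass. */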
+void ff_put_vp8_epel4_h4v6_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter_horiz = subpel_filters_msa[mx - 1];
+ const int8_t *filter_vert = subpel_filters_msa[my - 1];
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ v16i8 filt_hz0, filt_hz1, mask0, mask1;
+ v16u8 out;
+ v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6;
+ v8i16 hz_out7, tmp0, tmp1, out0, out1, out2, out3;
+ v8i16 filt, filt_vt0, filt_vt1, filt_vt2;
+
+ mask0 = LD_SB(&mc_filt_mask_arr[16]);
+
+ src -= (1 + 2 * src_stride);
+
+ /* rearranging filter */
+ filt = LD_SH(filter_horiz);
+ SPLATI_H2_SB(filt, 0, 1, filt_hz0, filt_hz1);
+
+ mask1 = mask0 + 2;
+
+ LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
+ src += (5 * src_stride);
+
+ XORI_B5_128_SB(src0, src1, src2, src3, src4);
+ hz_out0 = HORIZ_4TAP_FILT(src0, src1, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out2 = HORIZ_4TAP_FILT(src2, src3, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out3 = HORIZ_4TAP_FILT(src3, src4, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out1 = (v8i16) __msa_sldi_b((v16i8) hz_out2, (v16i8) hz_out0, 8);
+ ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, out0, out1);
+
+ filt = LD_SH(filter_vert);
+ SPLATI_H3_SH(filt, 0, 1, 2, filt_vt0, filt_vt1, filt_vt2);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src5, src6, src7, src8);
+ XORI_B4_128_SB(src5, src6, src7, src8);
+ src += (4 * src_stride);
+
+ hz_out5 = HORIZ_4TAP_FILT(src5, src6, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out4 = (v8i16) __msa_sldi_b((v16i8) hz_out5, (v16i8) hz_out3, 8);
+ out2 = (v8i16) __msa_ilvev_b((v16i8) hz_out5, (v16i8) hz_out4);
+ tmp0 = DPADD_SH3_SH(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out7 = HORIZ_4TAP_FILT(src7, src8, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out6 = (v8i16) __msa_sldi_b((v16i8) hz_out7, (v16i8) hz_out5, 8);
+ out3 = (v8i16) __msa_ilvev_b((v16i8) hz_out7, (v16i8) hz_out6);
+ tmp1 = DPADD_SH3_SH(out1, out2, out3, filt_vt0, filt_vt1, filt_vt2);
+
+ SRARI_H2_SH(tmp0, tmp1, 7);
+ SAT_SH2_SH(tmp0, tmp1, 7);
+ out = PCKEV_XORI128_UB(tmp0, tmp1);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ hz_out3 = hz_out7;
+ out0 = out2;
+ out1 = out3;
+ }
+}
+
+void ff_put_vp8_epel8_h4v6_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter_horiz = subpel_filters_msa[mx - 1];
+ const int8_t *filter_vert = subpel_filters_msa[my - 1];
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ v16i8 filt_hz0, filt_hz1, mask0, mask1;
+ v8i16 filt, filt_vt0, filt_vt1, filt_vt2, tmp0, tmp1, tmp2, tmp3;
+ v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6;
+ v8i16 hz_out7, hz_out8, out0, out1, out2, out3, out4, out5, out6, out7;
+ v16u8 vec0, vec1;
+
+ mask0 = LD_SB(&mc_filt_mask_arr[0]);
+ src -= (1 + 2 * src_stride);
+
+ /* rearranging filter */
+ filt = LD_SH(filter_horiz);
+ SPLATI_H2_SB(filt, 0, 1, filt_hz0, filt_hz1);
+
+ mask1 = mask0 + 2;
+
+ LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
+ src += (5 * src_stride);
+
+ XORI_B5_128_SB(src0, src1, src2, src3, src4);
+ hz_out0 = HORIZ_4TAP_FILT(src0, src0, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out1 = HORIZ_4TAP_FILT(src1, src1, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out2 = HORIZ_4TAP_FILT(src2, src2, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out3 = HORIZ_4TAP_FILT(src3, src3, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out4 = HORIZ_4TAP_FILT(src4, src4, mask0, mask1, filt_hz0, filt_hz1);
+ ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, out0, out1);
+ ILVEV_B2_SH(hz_out1, hz_out2, hz_out3, hz_out4, out3, out4);
+
+ filt = LD_SH(filter_vert);
+ SPLATI_H3_SH(filt, 0, 1, 2, filt_vt0, filt_vt1, filt_vt2);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src5, src6, src7, src8);
+ src += (4 * src_stride);
+
+ XORI_B4_128_SB(src5, src6, src7, src8);
+
+ hz_out5 = HORIZ_4TAP_FILT(src5, src5, mask0, mask1, filt_hz0, filt_hz1);
+ out2 = (v8i16) __msa_ilvev_b((v16i8) hz_out5, (v16i8) hz_out4);
+ tmp0 = DPADD_SH3_SH(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out6 = HORIZ_4TAP_FILT(src6, src6, mask0, mask1, filt_hz0, filt_hz1);
+ out5 = (v8i16) __msa_ilvev_b((v16i8) hz_out6, (v16i8) hz_out5);
+ tmp1 = DPADD_SH3_SH(out3, out4, out5, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out7 = HORIZ_4TAP_FILT(src7, src7, mask0, mask1, filt_hz0, filt_hz1);
+ out6 = (v8i16) __msa_ilvev_b((v16i8) hz_out7, (v16i8) hz_out6);
+ tmp2 = DPADD_SH3_SH(out1, out2, out6, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out8 = HORIZ_4TAP_FILT(src8, src8, mask0, mask1, filt_hz0, filt_hz1);
+ out7 = (v8i16) __msa_ilvev_b((v16i8) hz_out8, (v16i8) hz_out7);
+ tmp3 = DPADD_SH3_SH(out4, out5, out7, filt_vt0, filt_vt1, filt_vt2);
+
+ SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, 7);
+ SAT_SH4_SH(tmp0, tmp1, tmp2, tmp3, 7);
+ vec0 = PCKEV_XORI128_UB(tmp0, tmp1);
+ vec1 = PCKEV_XORI128_UB(tmp2, tmp3);
+ ST8x4_UB(vec0, vec1, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ hz_out4 = hz_out8;
+ out0 = out2;
+ out1 = out6;
+ out3 = out5;
+ out4 = out7;
+ }
+}
+
+void ff_put_vp8_epel16_h4v6_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ int32_t multiple8_cnt;
+
+ for (multiple8_cnt = 2; multiple8_cnt--;) {
+ ff_put_vp8_epel8_h4v6_msa(dst, dst_stride, src, src_stride, height,
+ mx, my);
+
+ src += 8;
+ dst += 8;
+ }
+}
+
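+/* Bilinear (2-tap) horizontal filters. VSHF_B pairs each pixel with its
+ * right neighbour, DOTP_UB forms p * filt[0] + q * filt[1] as unsigned
+ * halfwords, and SRARI_H(..., 7) applies the (x + 64) >> 7 rounding. */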
+static void common_hz_2t_4x4_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter)
+{
+ v16i8 src0, src1, src2, src3, mask;
+ v16u8 filt0, vec0, vec1, res0, res1;
+ v8u16 vec2, vec3, filt;
+
+ mask = LD_SB(&mc_filt_mask_arr[16]);
+
+ /* rearranging filter */
+ filt = LD_UH(filter);
+ filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0);
+
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1);
+ DOTP_UB2_UH(vec0, vec1, filt0, filt0, vec2, vec3);
+ SRARI_H2_UH(vec2, vec3, 7);
+ PCKEV_B2_UB(vec2, vec2, vec3, vec3, res0, res1);
+ ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
+}
+
+static void common_hz_2t_4x8_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter)
+{
+ v16u8 vec0, vec1, vec2, vec3, filt0;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
+ v16i8 res0, res1, res2, res3;
+ v8u16 vec4, vec5, vec6, vec7, filt;
+
+ mask = LD_SB(&mc_filt_mask_arr[16]);
+
+ /* rearranging filter */
+ filt = LD_UH(filter);
+ filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0);
+
+ LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
+ VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1);
+ VSHF_B2_UB(src4, src5, src6, src7, mask, mask, vec2, vec3);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
+ vec4, vec5, vec6, vec7);
+ SRARI_H4_UH(vec4, vec5, vec6, vec7, 7);
+ PCKEV_B4_SB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7,
+ res0, res1, res2, res3);
+ ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
+ dst += (4 * dst_stride);
+ ST4x4_UB(res2, res3, 0, 1, 0, 1, dst, dst_stride);
+}
+
+void ff_put_vp8_bilinear4_h_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ const int8_t *filter = bilinear_filters_msa[mx - 1];
+
+ if (4 == height) {
+ common_hz_2t_4x4_msa(src, src_stride, dst, dst_stride, filter);
+ } else if (8 == height) {
+ common_hz_2t_4x8_msa(src, src_stride, dst, dst_stride, filter);
+ }
+}
+
+static void common_hz_2t_8x4_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter)
+{
+ v16u8 filt0;
+ v16i8 src0, src1, src2, src3, mask;
+ v8u16 vec0, vec1, vec2, vec3, filt;
+
+ mask = LD_SB(&mc_filt_mask_arr[0]);
+
+ /* rearranging filter */
+ filt = LD_UH(filter);
+ filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0);
+
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
+ VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
+ vec0, vec1, vec2, vec3);
+ SRARI_H4_UH(vec0, vec1, vec2, vec3, 7);
+ PCKEV_B2_SB(vec1, vec0, vec3, vec2, src0, src1);
+ ST8x4_UB(src0, src1, dst, dst_stride);
+}
+
+static void common_hz_2t_8x8mult_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
+{
+ v16u8 filt0;
+ v16i8 src0, src1, src2, src3, mask, out0, out1;
+ v8u16 vec0, vec1, vec2, vec3, filt;
+
+ mask = LD_SB(&mc_filt_mask_arr[0]);
+
+ /* rearranging filter */
+ filt = LD_UH(filter);
+ filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0);
+
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ src += (4 * src_stride);
+
+ VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
+ VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
+ vec0, vec1, vec2, vec3);
+ SRARI_H4_UH(vec0, vec1, vec2, vec3, 7);
+
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ src += (4 * src_stride);
+
+ PCKEV_B2_SB(vec1, vec0, vec3, vec2, out0, out1);
+ ST8x4_UB(out0, out1, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
+ VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
+ vec0, vec1, vec2, vec3);
+ SRARI_H4_UH(vec0, vec1, vec2, vec3, 7);
+ PCKEV_B2_SB(vec1, vec0, vec3, vec2, out0, out1);
+ ST8x4_UB(out0, out1, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ if (16 == height) {
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ src += (4 * src_stride);
+
+ VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
+ VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
+ vec0, vec1, vec2, vec3);
+ SRARI_H4_UH(vec0, vec1, vec2, vec3, 7);
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ src += (4 * src_stride);
+
+ PCKEV_B2_SB(vec1, vec0, vec3, vec2, out0, out1);
+ ST8x4_UB(out0, out1, dst, dst_stride);
+
+ VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
+ VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
+ vec0, vec1, vec2, vec3);
+ SRARI_H4_UH(vec0, vec1, vec2, vec3, 7);
+ PCKEV_B2_SB(vec1, vec0, vec3, vec2, out0, out1);
+ ST8x4_UB(out0, out1, dst + 4 * dst_stride, dst_stride);
+ }
+}
+
+void ff_put_vp8_bilinear8_h_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ const int8_t *filter = bilinear_filters_msa[mx - 1];
+
+ if (4 == height) {
+ common_hz_2t_8x4_msa(src, src_stride, dst, dst_stride, filter);
+ } else {
+ common_hz_2t_8x8mult_msa(src, src_stride, dst, dst_stride, filter,
+ height);
+ }
+}
+
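+/* 16-wide horizontal bilinear: each row is handled as two 8-byte halves
+ * (src and src + 8). The first group of four rows is peeled off, so the
+ * loop runs (height / 4) - 1 more times. */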
+void ff_put_vp8_bilinear16_h_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter = bilinear_filters_msa[mx - 1];
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
+ v16u8 filt0, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+ v8u16 out0, out1, out2, out3, out4, out5, out6, out7, filt;
+
+ mask = LD_SB(&mc_filt_mask_arr[0]);
+
+ loop_cnt = (height >> 2) - 1;
+
+ /* rearranging filter */
+ filt = LD_UH(filter);
+ filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0);
+
+ LD_SB4(src, src_stride, src0, src2, src4, src6);
+ LD_SB4(src + 8, src_stride, src1, src3, src5, src7);
+ src += (4 * src_stride);
+
+ VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1);
+ VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3);
+ VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);
+ VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
+ out0, out1, out2, out3);
+ DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0,
+ out4, out5, out6, out7);
+ SRARI_H4_UH(out0, out1, out2, out3, 7);
+ SRARI_H4_UH(out4, out5, out6, out7, 7);
+ PCKEV_ST_SB(out0, out1, dst);
+ dst += dst_stride;
+ PCKEV_ST_SB(out2, out3, dst);
+ dst += dst_stride;
+ PCKEV_ST_SB(out4, out5, dst);
+ dst += dst_stride;
+ PCKEV_ST_SB(out6, out7, dst);
+ dst += dst_stride;
+
+ for (; loop_cnt--;) {
+ LD_SB4(src, src_stride, src0, src2, src4, src6);
+ LD_SB4(src + 8, src_stride, src1, src3, src5, src7);
+ src += (4 * src_stride);
+
+ VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1);
+ VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3);
+ VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);
+ VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
+ out0, out1, out2, out3);
+ DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0,
+ out4, out5, out6, out7);
+ SRARI_H4_UH(out0, out1, out2, out3, 7);
+ SRARI_H4_UH(out4, out5, out6, out7, 7);
+ PCKEV_ST_SB(out0, out1, dst);
+ dst += dst_stride;
+ PCKEV_ST_SB(out2, out3, dst);
+ dst += dst_stride;
+ PCKEV_ST_SB(out4, out5, dst);
+ dst += dst_stride;
+ PCKEV_ST_SB(out6, out7, dst);
+ dst += dst_stride;
+ }
+}
+
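+/* Bilinear vertical filters: ILVR_B/ILVL_B interleave vertically adjacent
+ * rows so each byte pair (row n, row n + 1) lines up with the splatted
+ * vertical tap pair for DOTP_UB. */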
+static void common_vt_2t_4x4_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter)
+{
+ v16i8 src0, src1, src2, src3, src4;
+ v16i8 src10_r, src32_r, src21_r, src43_r, src2110, src4332;
+ v16u8 filt0;
+ v8i16 filt;
+ v8u16 tmp0, tmp1;
+
+ filt = LD_SH(filter);
+ filt0 = (v16u8) __msa_splati_h(filt, 0);
+
+ LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
+ src += (5 * src_stride);
+
+ ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3,
+ src10_r, src21_r, src32_r, src43_r);
+ ILVR_D2_SB(src21_r, src10_r, src43_r, src32_r, src2110, src4332);
+ DOTP_UB2_UH(src2110, src4332, filt0, filt0, tmp0, tmp1);
+ SRARI_H2_UH(tmp0, tmp1, 7);
+ SAT_UH2_UH(tmp0, tmp1, 7);
+ src2110 = __msa_pckev_b((v16i8) tmp1, (v16i8) tmp0);
+ ST4x4_UB(src2110, src2110, 0, 1, 2, 3, dst, dst_stride);
+}
+
+static void common_vt_2t_4x8_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter)
+{
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ v16i8 src10_r, src32_r, src54_r, src76_r, src21_r, src43_r;
+ v16i8 src65_r, src87_r, src2110, src4332, src6554, src8776;
+ v8u16 tmp0, tmp1, tmp2, tmp3;
+ v16u8 filt0;
+ v8i16 filt;
+
+ filt = LD_SH(filter);
+ filt0 = (v16u8) __msa_splati_h(filt, 0);
+
+ LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
+ src += (8 * src_stride);
+
+ src8 = LD_SB(src);
+ src += src_stride;
+
+ ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r,
+ src32_r, src43_r);
+ ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, src65_r,
+ src76_r, src87_r);
+ ILVR_D4_SB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r,
+ src87_r, src76_r, src2110, src4332, src6554, src8776);
+ DOTP_UB4_UH(src2110, src4332, src6554, src8776, filt0, filt0, filt0, filt0,
+ tmp0, tmp1, tmp2, tmp3);
+ SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, 7);
+ SAT_UH4_UH(tmp0, tmp1, tmp2, tmp3, 7);
+ PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, src2110, src4332);
+ ST4x4_UB(src2110, src2110, 0, 1, 2, 3, dst, dst_stride);
+ ST4x4_UB(src4332, src4332, 0, 1, 2, 3, dst + 4 * dst_stride, dst_stride);
+}
+
+void ff_put_vp8_bilinear4_v_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ const int8_t *filter = bilinear_filters_msa[my - 1];
+
+ if (4 == height) {
+ common_vt_2t_4x4_msa(src, src_stride, dst, dst_stride, filter);
+ } else if (8 == height) {
+ common_vt_2t_4x8_msa(src, src_stride, dst, dst_stride, filter);
+ }
+}
+
+static void common_vt_2t_8x4_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter)
+{
+ v16u8 src0, src1, src2, src3, src4, vec0, vec1, vec2, vec3, filt0;
+ v16i8 out0, out1;
+ v8u16 tmp0, tmp1, tmp2, tmp3;
+ v8i16 filt;
+
+    /* rearranging filter */
+ filt = LD_SH(filter);
+ filt0 = (v16u8) __msa_splati_h(filt, 0);
+
+ LD_UB5(src, src_stride, src0, src1, src2, src3, src4);
+ ILVR_B2_UB(src1, src0, src2, src1, vec0, vec1);
+ ILVR_B2_UB(src3, src2, src4, src3, vec2, vec3);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
+ tmp0, tmp1, tmp2, tmp3);
+ SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, 7);
+ SAT_UH4_UH(tmp0, tmp1, tmp2, tmp3, 7);
+ PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, out0, out1);
+ ST8x4_UB(out0, out1, dst, dst_stride);
+}
+
+static void common_vt_2t_8x8mult_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
+{
+ uint32_t loop_cnt;
+ v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;
+ v16i8 out0, out1;
+ v8u16 tmp0, tmp1, tmp2, tmp3;
+ v8i16 filt;
+
+    /* rearranging filter */
+ filt = LD_SH(filter);
+ filt0 = (v16u8) __msa_splati_h(filt, 0);
+
+ src0 = LD_UB(src);
+ src += src_stride;
+
+ for (loop_cnt = (height >> 3); loop_cnt--;) {
+ LD_UB8(src, src_stride, src1, src2, src3, src4, src5, src6, src7, src8);
+ src += (8 * src_stride);
+
+ ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3,
+ vec0, vec1, vec2, vec3);
+ ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7,
+ vec4, vec5, vec6, vec7);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
+ tmp0, tmp1, tmp2, tmp3);
+ SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, 7);
+ SAT_UH4_UH(tmp0, tmp1, tmp2, tmp3, 7);
+ PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, out0, out1);
+ ST8x4_UB(out0, out1, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0,
+ tmp0, tmp1, tmp2, tmp3);
+ SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, 7);
+ SAT_UH4_UH(tmp0, tmp1, tmp2, tmp3, 7);
+ PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, out0, out1);
+ ST8x4_UB(out0, out1, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ src0 = src8;
+ }
+}
+
+void ff_put_vp8_bilinear8_v_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ const int8_t *filter = bilinear_filters_msa[my - 1];
+
+ if (4 == height) {
+ common_vt_2t_8x4_msa(src, src_stride, dst, dst_stride, filter);
+ } else {
+ common_vt_2t_8x8mult_msa(src, src_stride, dst, dst_stride, filter,
+ height);
+ }
+}
+
+void ff_put_vp8_bilinear16_v_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter = bilinear_filters_msa[my - 1];
+ v16u8 src0, src1, src2, src3, src4;
+ v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;
+ v8u16 tmp0, tmp1, tmp2, tmp3;
+ v8i16 filt;
+
+    /* rearranging filter */
+ filt = LD_SH(filter);
+ filt0 = (v16u8) __msa_splati_h(filt, 0);
+
+ src0 = LD_UB(src);
+ src += src_stride;
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_UB4(src, src_stride, src1, src2, src3, src4);
+ src += (4 * src_stride);
+
+ ILVR_B2_UB(src1, src0, src2, src1, vec0, vec2);
+ ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3);
+ DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
+ SRARI_H2_UH(tmp0, tmp1, 7);
+ SAT_UH2_UH(tmp0, tmp1, 7);
+ PCKEV_ST_SB(tmp0, tmp1, dst);
+ dst += dst_stride;
+
+ ILVR_B2_UB(src3, src2, src4, src3, vec4, vec6);
+ ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7);
+ DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
+ SRARI_H2_UH(tmp2, tmp3, 7);
+ SAT_UH2_UH(tmp2, tmp3, 7);
+ PCKEV_ST_SB(tmp2, tmp3, dst);
+ dst += dst_stride;
+
+ DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1);
+ SRARI_H2_UH(tmp0, tmp1, 7);
+ SAT_UH2_UH(tmp0, tmp1, 7);
+ PCKEV_ST_SB(tmp0, tmp1, dst);
+ dst += dst_stride;
+
+ DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3);
+ SRARI_H2_UH(tmp2, tmp3, 7);
+ SAT_UH2_UH(tmp2, tmp3, 7);
+ PCKEV_ST_SB(tmp2, tmp3, dst);
+ dst += dst_stride;
+
+ src0 = src4;
+ }
+}
+
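+/* Bilinear HV: the horizontal 2-tap pass (HORIZ_2TAP_FILT_UH with shift 7)
+ * already rounds back to 8-bit range, so the vertical pass can reuse byte
+ * dot products on the even-packed intermediates. */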
+static void common_hv_2ht_2vt_4x4_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert)
+{
+ v16i8 src0, src1, src2, src3, src4, mask;
+ v16u8 filt_vt, filt_hz, vec0, vec1, res0, res1;
+ v8u16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, filt, tmp0, tmp1;
+
+ mask = LD_SB(&mc_filt_mask_arr[16]);
+
+ /* rearranging filter */
+ filt = LD_UH(filter_horiz);
+ filt_hz = (v16u8) __msa_splati_h((v8i16) filt, 0);
+
+ filt = LD_UH(filter_vert);
+ filt_vt = (v16u8) __msa_splati_h((v8i16) filt, 0);
+
+ LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
+ hz_out0 = HORIZ_2TAP_FILT_UH(src0, src1, mask, filt_hz, 7);
+ hz_out2 = HORIZ_2TAP_FILT_UH(src2, src3, mask, filt_hz, 7);
+ hz_out4 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, 7);
+ hz_out1 = (v8u16) __msa_sldi_b((v16i8) hz_out2, (v16i8) hz_out0, 8);
+ hz_out3 = (v8u16) __msa_pckod_d((v2i64) hz_out4, (v2i64) hz_out2);
+
+ ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
+ DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1);
+ SRARI_H2_UH(tmp0, tmp1, 7);
+ SAT_UH2_UH(tmp0, tmp1, 7);
+ PCKEV_B2_UB(tmp0, tmp0, tmp1, tmp1, res0, res1);
+ ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
+}
+
+static void common_hv_2ht_2vt_4x8_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert)
+{
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, mask;
+ v16i8 res0, res1, res2, res3;
+ v16u8 filt_hz, filt_vt, vec0, vec1, vec2, vec3;
+ v8u16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6;
+ v8u16 hz_out7, hz_out8, vec4, vec5, vec6, vec7, filt;
+
+ mask = LD_SB(&mc_filt_mask_arr[16]);
+
+ /* rearranging filter */
+ filt = LD_UH(filter_horiz);
+ filt_hz = (v16u8) __msa_splati_h((v8i16) filt, 0);
+
+ filt = LD_UH(filter_vert);
+ filt_vt = (v16u8) __msa_splati_h((v8i16) filt, 0);
+
+ LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
+ src += (8 * src_stride);
+ src8 = LD_SB(src);
+
+ hz_out0 = HORIZ_2TAP_FILT_UH(src0, src1, mask, filt_hz, 7);
+ hz_out2 = HORIZ_2TAP_FILT_UH(src2, src3, mask, filt_hz, 7);
+ hz_out4 = HORIZ_2TAP_FILT_UH(src4, src5, mask, filt_hz, 7);
+ hz_out6 = HORIZ_2TAP_FILT_UH(src6, src7, mask, filt_hz, 7);
+ hz_out8 = HORIZ_2TAP_FILT_UH(src8, src8, mask, filt_hz, 7);
+ SLDI_B3_UH(hz_out2, hz_out4, hz_out6, hz_out0, hz_out2, hz_out4, hz_out1,
+ hz_out3, hz_out5, 8);
+ hz_out7 = (v8u16) __msa_pckod_d((v2i64) hz_out8, (v2i64) hz_out6);
+
+ ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
+ ILVEV_B2_UB(hz_out4, hz_out5, hz_out6, hz_out7, vec2, vec3);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt_vt, filt_vt, filt_vt, filt_vt,
+ vec4, vec5, vec6, vec7);
+ SRARI_H4_UH(vec4, vec5, vec6, vec7, 7);
+ SAT_UH4_UH(vec4, vec5, vec6, vec7, 7);
+ PCKEV_B4_SB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7,
+ res0, res1, res2, res3);
+ ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
+ dst += (4 * dst_stride);
+ ST4x4_UB(res2, res3, 0, 1, 0, 1, dst, dst_stride);
+}
+
+void ff_put_vp8_bilinear4_hv_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ const int8_t *filter_horiz = bilinear_filters_msa[mx - 1];
+ const int8_t *filter_vert = bilinear_filters_msa[my - 1];
+
+ if (4 == height) {
+ common_hv_2ht_2vt_4x4_msa(src, src_stride, dst, dst_stride,
+ filter_horiz, filter_vert);
+ } else if (8 == height) {
+ common_hv_2ht_2vt_4x8_msa(src, src_stride, dst, dst_stride,
+ filter_horiz, filter_vert);
+ }
+}
+
+static void common_hv_2ht_2vt_8x4_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert)
+{
+ v16i8 src0, src1, src2, src3, src4, mask, out0, out1;
+ v16u8 filt_hz, filt_vt, vec0, vec1, vec2, vec3;
+ v8u16 hz_out0, hz_out1, tmp0, tmp1, tmp2, tmp3;
+ v8i16 filt;
+
+ mask = LD_SB(&mc_filt_mask_arr[0]);
+
+ /* rearranging filter */
+ filt = LD_SH(filter_horiz);
+ filt_hz = (v16u8) __msa_splati_h(filt, 0);
+
+ filt = LD_SH(filter_vert);
+ filt_vt = (v16u8) __msa_splati_h(filt, 0);
+
+ LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
+
+ hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, 7);
+ hz_out1 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, 7);
+ vec0 = (v16u8) __msa_ilvev_b((v16i8) hz_out1, (v16i8) hz_out0);
+ tmp0 = __msa_dotp_u_h(vec0, filt_vt);
+
+ hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, 7);
+ vec1 = (v16u8) __msa_ilvev_b((v16i8) hz_out0, (v16i8) hz_out1);
+ tmp1 = __msa_dotp_u_h(vec1, filt_vt);
+
+ hz_out1 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, 7);
+ vec2 = (v16u8) __msa_ilvev_b((v16i8) hz_out1, (v16i8) hz_out0);
+ tmp2 = __msa_dotp_u_h(vec2, filt_vt);
+
+ hz_out0 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, 7);
+ vec3 = (v16u8) __msa_ilvev_b((v16i8) hz_out0, (v16i8) hz_out1);
+ tmp3 = __msa_dotp_u_h(vec3, filt_vt);
+
+ SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, 7);
+ SAT_UH4_UH(tmp0, tmp1, tmp2, tmp3, 7);
+ PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, out0, out1);
+ ST8x4_UB(out0, out1, dst, dst_stride);
+}
+
+static void common_hv_2ht_2vt_8x8mult_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height)
+{
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src3, src4, mask, out0, out1;
+ v16u8 filt_hz, filt_vt, vec0;
+ v8u16 hz_out0, hz_out1, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
+ v8i16 filt;
+
+ mask = LD_SB(&mc_filt_mask_arr[0]);
+
+ /* rearranging filter */
+ filt = LD_SH(filter_horiz);
+ filt_hz = (v16u8) __msa_splati_h(filt, 0);
+
+ filt = LD_SH(filter_vert);
+ filt_vt = (v16u8) __msa_splati_h(filt, 0);
+
+ src0 = LD_SB(src);
+ src += src_stride;
+
+ hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, 7);
+
+ for (loop_cnt = (height >> 3); loop_cnt--;) {
+ LD_SB4(src, src_stride, src1, src2, src3, src4);
+ src += (4 * src_stride);
+
+ hz_out1 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, 7);
+ vec0 = (v16u8) __msa_ilvev_b((v16i8) hz_out1, (v16i8) hz_out0);
+ tmp1 = __msa_dotp_u_h(vec0, filt_vt);
+
+ hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, 7);
+ vec0 = (v16u8) __msa_ilvev_b((v16i8) hz_out0, (v16i8) hz_out1);
+ tmp2 = __msa_dotp_u_h(vec0, filt_vt);
+
+ SRARI_H2_UH(tmp1, tmp2, 7);
+ SAT_UH2_UH(tmp1, tmp2, 7);
+
+ hz_out1 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, 7);
+ vec0 = (v16u8) __msa_ilvev_b((v16i8) hz_out1, (v16i8) hz_out0);
+ tmp3 = __msa_dotp_u_h(vec0, filt_vt);
+
+ hz_out0 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, 7);
+ LD_SB4(src, src_stride, src1, src2, src3, src4);
+ src += (4 * src_stride);
+ vec0 = (v16u8) __msa_ilvev_b((v16i8) hz_out0, (v16i8) hz_out1);
+ tmp4 = __msa_dotp_u_h(vec0, filt_vt);
+
+ SRARI_H2_UH(tmp3, tmp4, 7);
+ SAT_UH2_UH(tmp3, tmp4, 7);
+ PCKEV_B2_SB(tmp2, tmp1, tmp4, tmp3, out0, out1);
+ ST8x4_UB(out0, out1, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ hz_out1 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, 7);
+ vec0 = (v16u8) __msa_ilvev_b((v16i8) hz_out1, (v16i8) hz_out0);
+ tmp5 = __msa_dotp_u_h(vec0, filt_vt);
+
+ hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, 7);
+ vec0 = (v16u8) __msa_ilvev_b((v16i8) hz_out0, (v16i8) hz_out1);
+ tmp6 = __msa_dotp_u_h(vec0, filt_vt);
+
+ hz_out1 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, 7);
+ vec0 = (v16u8) __msa_ilvev_b((v16i8) hz_out1, (v16i8) hz_out0);
+ tmp7 = __msa_dotp_u_h(vec0, filt_vt);
+
+ hz_out0 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, 7);
+ vec0 = (v16u8) __msa_ilvev_b((v16i8) hz_out0, (v16i8) hz_out1);
+ tmp8 = __msa_dotp_u_h(vec0, filt_vt);
+
+ SRARI_H4_UH(tmp5, tmp6, tmp7, tmp8, 7);
+ SAT_UH4_UH(tmp5, tmp6, tmp7, tmp8, 7);
+ PCKEV_B2_SB(tmp6, tmp5, tmp8, tmp7, out0, out1);
+ ST8x4_UB(out0, out1, dst, dst_stride);
+ dst += (4 * dst_stride);
+ }
+}
+
+void ff_put_vp8_bilinear8_hv_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ const int8_t *filter_horiz = bilinear_filters_msa[mx - 1];
+ const int8_t *filter_vert = bilinear_filters_msa[my - 1];
+
+ if (4 == height) {
+ common_hv_2ht_2vt_8x4_msa(src, src_stride, dst, dst_stride,
+ filter_horiz, filter_vert);
+ } else {
+ common_hv_2ht_2vt_8x8mult_msa(src, src_stride, dst, dst_stride,
+ filter_horiz, filter_vert, height);
+ }
+}
+
+void ff_put_vp8_bilinear16_hv_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter_horiz = bilinear_filters_msa[mx - 1];
+ const int8_t *filter_vert = bilinear_filters_msa[my - 1];
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
+ v16u8 filt_hz, filt_vt, vec0, vec1;
+ v8u16 tmp1, tmp2, hz_out0, hz_out1, hz_out2, hz_out3;
+ v8i16 filt;
+
+ mask = LD_SB(&mc_filt_mask_arr[0]);
+
+ /* rearranging filter */
+ filt = LD_SH(filter_horiz);
+ filt_hz = (v16u8) __msa_splati_h(filt, 0);
+
+ filt = LD_SH(filter_vert);
+ filt_vt = (v16u8) __msa_splati_h(filt, 0);
+
+ LD_SB2(src, 8, src0, src1);
+ src += src_stride;
+
+ hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, 7);
+ hz_out2 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, 7);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src0, src2, src4, src6);
+ LD_SB4(src + 8, src_stride, src1, src3, src5, src7);
+ src += (4 * src_stride);
+
+ hz_out1 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, 7);
+ hz_out3 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, 7);
+ ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
+ DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp1, tmp2);
+ SRARI_H2_UH(tmp1, tmp2, 7);
+ SAT_UH2_UH(tmp1, tmp2, 7);
+ PCKEV_ST_SB(tmp1, tmp2, dst);
+ dst += dst_stride;
+
+ hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, 7);
+ hz_out2 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, 7);
+ ILVEV_B2_UB(hz_out1, hz_out0, hz_out3, hz_out2, vec0, vec1);
+ DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp1, tmp2);
+ SRARI_H2_UH(tmp1, tmp2, 7);
+ SAT_UH2_UH(tmp1, tmp2, 7);
+ PCKEV_ST_SB(tmp1, tmp2, dst);
+ dst += dst_stride;
+
+ hz_out1 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, 7);
+ hz_out3 = HORIZ_2TAP_FILT_UH(src5, src5, mask, filt_hz, 7);
+ ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
+ DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp1, tmp2);
+ SRARI_H2_UH(tmp1, tmp2, 7);
+ SAT_UH2_UH(tmp1, tmp2, 7);
+ PCKEV_ST_SB(tmp1, tmp2, dst);
+ dst += dst_stride;
+
+ hz_out0 = HORIZ_2TAP_FILT_UH(src6, src6, mask, filt_hz, 7);
+ hz_out2 = HORIZ_2TAP_FILT_UH(src7, src7, mask, filt_hz, 7);
+ ILVEV_B2_UB(hz_out1, hz_out0, hz_out3, hz_out2, vec0, vec1);
+ DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp1, tmp2);
+ SRARI_H2_UH(tmp1, tmp2, 7);
+ SAT_UH2_UH(tmp1, tmp2, 7);
+ PCKEV_ST_SB(tmp1, tmp2, dst);
+ dst += dst_stride;
+ }
+}
+
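+/* Plain copies (full-pel position): mx/my are ignored. pixels8 moves rows
+ * as 64-bit copies, pixels16 as whole vectors; the height is handled in
+ * the largest available multiple-row groups. */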
+void ff_put_vp8_pixels8_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ int32_t cnt;
+ uint64_t out0, out1, out2, out3, out4, out5, out6, out7;
+ v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
+
+ if (0 == height % 12) {
+ for (cnt = (height / 12); cnt--;) {
+ LD_UB8(src, src_stride,
+ src0, src1, src2, src3, src4, src5, src6, src7);
+ src += (8 * src_stride);
+
+ out0 = __msa_copy_u_d((v2i64) src0, 0);
+ out1 = __msa_copy_u_d((v2i64) src1, 0);
+ out2 = __msa_copy_u_d((v2i64) src2, 0);
+ out3 = __msa_copy_u_d((v2i64) src3, 0);
+ out4 = __msa_copy_u_d((v2i64) src4, 0);
+ out5 = __msa_copy_u_d((v2i64) src5, 0);
+ out6 = __msa_copy_u_d((v2i64) src6, 0);
+ out7 = __msa_copy_u_d((v2i64) src7, 0);
+
+ SD4(out0, out1, out2, out3, dst, dst_stride);
+ dst += (4 * dst_stride);
+ SD4(out4, out5, out6, out7, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ LD_UB4(src, src_stride, src0, src1, src2, src3);
+ src += (4 * src_stride);
+
+ out0 = __msa_copy_u_d((v2i64) src0, 0);
+ out1 = __msa_copy_u_d((v2i64) src1, 0);
+ out2 = __msa_copy_u_d((v2i64) src2, 0);
+ out3 = __msa_copy_u_d((v2i64) src3, 0);
+
+ SD4(out0, out1, out2, out3, dst, dst_stride);
+ dst += (4 * dst_stride);
+ }
+ } else if (0 == height % 8) {
+ for (cnt = height >> 3; cnt--;) {
+ LD_UB8(src, src_stride,
+ src0, src1, src2, src3, src4, src5, src6, src7);
+ src += (8 * src_stride);
+
+ out0 = __msa_copy_u_d((v2i64) src0, 0);
+ out1 = __msa_copy_u_d((v2i64) src1, 0);
+ out2 = __msa_copy_u_d((v2i64) src2, 0);
+ out3 = __msa_copy_u_d((v2i64) src3, 0);
+ out4 = __msa_copy_u_d((v2i64) src4, 0);
+ out5 = __msa_copy_u_d((v2i64) src5, 0);
+ out6 = __msa_copy_u_d((v2i64) src6, 0);
+ out7 = __msa_copy_u_d((v2i64) src7, 0);
+
+ SD4(out0, out1, out2, out3, dst, dst_stride);
+ dst += (4 * dst_stride);
+ SD4(out4, out5, out6, out7, dst, dst_stride);
+ dst += (4 * dst_stride);
+ }
+ } else if (0 == height % 4) {
+ for (cnt = (height / 4); cnt--;) {
+ LD_UB4(src, src_stride, src0, src1, src2, src3);
+ src += (4 * src_stride);
+ out0 = __msa_copy_u_d((v2i64) src0, 0);
+ out1 = __msa_copy_u_d((v2i64) src1, 0);
+ out2 = __msa_copy_u_d((v2i64) src2, 0);
+ out3 = __msa_copy_u_d((v2i64) src3, 0);
+
+ SD4(out0, out1, out2, out3, dst, dst_stride);
+ dst += (4 * dst_stride);
+ }
+ } else if (0 == height % 2) {
+ for (cnt = (height / 2); cnt--;) {
+ LD_UB2(src, src_stride, src0, src1);
+ src += (2 * src_stride);
+ out0 = __msa_copy_u_d((v2i64) src0, 0);
+ out1 = __msa_copy_u_d((v2i64) src1, 0);
+
+ SD(out0, dst);
+ dst += dst_stride;
+ SD(out1, dst);
+ dst += dst_stride;
+ }
+ }
+}
+
+static void copy_16multx8mult_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ int32_t height, int32_t width)
+{
+ int32_t cnt, loop_cnt;
+ uint8_t *src_tmp, *dst_tmp;
+ v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
+
+ for (cnt = (width >> 4); cnt--;) {
+ src_tmp = src;
+ dst_tmp = dst;
+
+ for (loop_cnt = (height >> 3); loop_cnt--;) {
+ LD_UB8(src_tmp, src_stride,
+ src0, src1, src2, src3, src4, src5, src6, src7);
+ src_tmp += (8 * src_stride);
+
+ ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7,
+ dst_tmp, dst_stride);
+ dst_tmp += (8 * dst_stride);
+ }
+
+ src += 16;
+ dst += 16;
+ }
+}
+
+void ff_put_vp8_pixels16_msa(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ int32_t cnt;
+ v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
+
+ if (0 == height % 12) {
+ for (cnt = (height / 12); cnt--;) {
+ LD_UB8(src, src_stride,
+ src0, src1, src2, src3, src4, src5, src6, src7);
+ src += (8 * src_stride);
+ ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7,
+ dst, dst_stride);
+ dst += (8 * dst_stride);
+
+ LD_UB4(src, src_stride, src0, src1, src2, src3);
+ src += (4 * src_stride);
+ ST_UB4(src0, src1, src2, src3, dst, dst_stride);
+ dst += (4 * dst_stride);
+ }
+ } else if (0 == height % 8) {
+ copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 16);
+ } else if (0 == height % 4) {
+ for (cnt = (height >> 2); cnt--;) {
+ LD_UB4(src, src_stride, src0, src1, src2, src3);
+ src += (4 * src_stride);
+
+ ST_UB4(src0, src1, src2, src3, dst, dst_stride);
+ dst += (4 * dst_stride);
+ }
+ }
+}
+
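+/* Simple-filter edge mask, per VP8: a pixel pair is filtered iff
+ * |p0 - q0| * 2 + |p1 - q1| / 2 <= b_limit. Saturating adds keep large
+ * differences from wrapping. */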
+#define VP8_SIMPLE_MASK(p1, p0, q0, q1, b_limit, mask) \
+{ \
+ v16u8 p1_a_sub_q1, p0_a_sub_q0; \
+ \
+ p0_a_sub_q0 = __msa_asub_u_b(p0, q0); \
+ p1_a_sub_q1 = __msa_asub_u_b(p1, q1); \
+ p1_a_sub_q1 = (v16u8) __msa_srli_b((v16i8) p1_a_sub_q1, 1); \
+ p0_a_sub_q0 = __msa_adds_u_b(p0_a_sub_q0, p0_a_sub_q0); \
+ mask = __msa_adds_u_b(p0_a_sub_q0, p1_a_sub_q1); \
+ mask = ((v16u8) mask <= b_limit); \
+}
+
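+/* Normal in-loop filter on the four pixels around an edge, per VP8:
+ *   filt = clamp((clamp(p1 - q1) & hev) + 3 * (q0 - p0)) & mask
+ *   F1 = clamp(filt + 4) >> 3;   F2 = clamp(filt + 3) >> 3
+ *   q0 -= F1;  p0 += F2;  and ((F1 + 1) >> 1) adjusts p1/q1 where !hev.
+ * Pixels are biased by 0x80 so signed saturating byte ops apply; the
+ * 3 * (q0 - p0) term is widened to 16 bits to avoid overflow. */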
+#define VP8_LPF_FILTER4_4W(p1_in_out, p0_in_out, q0_in_out, q1_in_out, \
+ mask_in, hev_in) \
+{ \
+ v16i8 p1_m, p0_m, q0_m, q1_m, q0_sub_p0, filt_sign; \
+ v16i8 filt, filt1, filt2, cnst4b, cnst3b; \
+ v8i16 q0_sub_p0_r, q0_sub_p0_l, filt_l, filt_r, cnst3h; \
+ \
+ p1_m = (v16i8) __msa_xori_b(p1_in_out, 0x80); \
+ p0_m = (v16i8) __msa_xori_b(p0_in_out, 0x80); \
+ q0_m = (v16i8) __msa_xori_b(q0_in_out, 0x80); \
+ q1_m = (v16i8) __msa_xori_b(q1_in_out, 0x80); \
+ \
+ filt = __msa_subs_s_b(p1_m, q1_m); \
+ \
+ filt = filt & (v16i8) hev_in; \
+ \
+ q0_sub_p0 = q0_m - p0_m; \
+ filt_sign = __msa_clti_s_b(filt, 0); \
+ \
+ cnst3h = __msa_ldi_h(3); \
+ q0_sub_p0_r = (v8i16) __msa_ilvr_b(q0_sub_p0, q0_sub_p0); \
+ q0_sub_p0_r = __msa_dotp_s_h((v16i8) q0_sub_p0_r, (v16i8) cnst3h); \
+ filt_r = (v8i16) __msa_ilvr_b(filt_sign, filt); \
+ filt_r += q0_sub_p0_r; \
+ filt_r = __msa_sat_s_h(filt_r, 7); \
+ \
+ q0_sub_p0_l = (v8i16) __msa_ilvl_b(q0_sub_p0, q0_sub_p0); \
+ q0_sub_p0_l = __msa_dotp_s_h((v16i8) q0_sub_p0_l, (v16i8) cnst3h); \
+ filt_l = (v8i16) __msa_ilvl_b(filt_sign, filt); \
+ filt_l += q0_sub_p0_l; \
+ filt_l = __msa_sat_s_h(filt_l, 7); \
+ \
+ filt = __msa_pckev_b((v16i8) filt_l, (v16i8) filt_r); \
+ filt = filt & (v16i8) mask_in; \
+ \
+ cnst4b = __msa_ldi_b(4); \
+ filt1 = __msa_adds_s_b(filt, cnst4b); \
+ filt1 >>= 3; \
+ \
+ cnst3b = __msa_ldi_b(3); \
+ filt2 = __msa_adds_s_b(filt, cnst3b); \
+ filt2 >>= 3; \
+ \
+ q0_m = __msa_subs_s_b(q0_m, filt1); \
+ q0_in_out = __msa_xori_b((v16u8) q0_m, 0x80); \
+ p0_m = __msa_adds_s_b(p0_m, filt2); \
+ p0_in_out = __msa_xori_b((v16u8) p0_m, 0x80); \
+ \
+ filt = __msa_srari_b(filt1, 1); \
+ hev_in = __msa_xori_b((v16u8) hev_in, 0xff); \
+ filt = filt & (v16i8) hev_in; \
+ \
+ q1_m = __msa_subs_s_b(q1_m, filt); \
+ q1_in_out = __msa_xori_b((v16u8) q1_m, 0x80); \
+ p1_m = __msa_adds_s_b(p1_m, filt); \
+ p1_in_out = __msa_xori_b((v16u8) p1_m, 0x80); \
+}
+
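+/* Simple filter: the same F1/F2 update restricted to p0/q0, with the
+ * p1 - q1 term applied unconditionally (no hev gating). */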
+#define VP8_SIMPLE_FILT(p1_in, p0_in, q0_in, q1_in, mask) \
+{ \
+ v16i8 p1_m, p0_m, q0_m, q1_m, q0_sub_p0, q0_sub_p0_sign; \
+ v16i8 filt, filt1, filt2, cnst4b, cnst3b, filt_sign; \
+ v8i16 q0_sub_p0_r, q0_sub_p0_l, filt_l, filt_r, cnst3h; \
+ \
+ p1_m = (v16i8) __msa_xori_b(p1_in, 0x80); \
+ p0_m = (v16i8) __msa_xori_b(p0_in, 0x80); \
+ q0_m = (v16i8) __msa_xori_b(q0_in, 0x80); \
+ q1_m = (v16i8) __msa_xori_b(q1_in, 0x80); \
+ \
+ filt = __msa_subs_s_b(p1_m, q1_m); \
+ \
+ q0_sub_p0 = q0_m - p0_m; \
+ filt_sign = __msa_clti_s_b(filt, 0); \
+ \
+ cnst3h = __msa_ldi_h(3); \
+ q0_sub_p0_sign = __msa_clti_s_b(q0_sub_p0, 0); \
+ q0_sub_p0_r = (v8i16) __msa_ilvr_b(q0_sub_p0_sign, q0_sub_p0); \
+ q0_sub_p0_r *= cnst3h; \
+ filt_r = (v8i16) __msa_ilvr_b(filt_sign, filt); \
+ filt_r += q0_sub_p0_r; \
+ filt_r = __msa_sat_s_h(filt_r, 7); \
+ \
+ q0_sub_p0_l = (v8i16) __msa_ilvl_b(q0_sub_p0_sign, q0_sub_p0); \
+ q0_sub_p0_l *= cnst3h; \
+ filt_l = (v8i16) __msa_ilvl_b(filt_sign, filt); \
+ filt_l += q0_sub_p0_l; \
+ filt_l = __msa_sat_s_h(filt_l, 7); \
+ \
+ filt = __msa_pckev_b((v16i8) filt_l, (v16i8) filt_r); \
+ filt = filt & (v16i8) (mask); \
+ \
+ cnst4b = __msa_ldi_b(4); \
+ filt1 = __msa_adds_s_b(filt, cnst4b); \
+ filt1 >>= 3; \
+ \
+ cnst3b = __msa_ldi_b(3); \
+ filt2 = __msa_adds_s_b(filt, cnst3b); \
+ filt2 >>= 3; \
+ \
+ q0_m = __msa_subs_s_b(q0_m, filt1); \
+ p0_m = __msa_adds_s_b(p0_m, filt2); \
+ q0_in = __msa_xori_b((v16u8) q0_m, 0x80); \
+ p0_in = __msa_xori_b((v16u8) p0_m, 0x80); \
+}
+
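+/* Macroblock (wide) filter, per VP8: where hev is set, the standard F1/F2
+ * update runs on p0/q0; elsewhere u = clamp((k * filt + 63) >> 7) with
+ * k = 27, 18 and 9 adjusts p0/q0, p1/q1 and p2/q2 respectively
+ * (9 * filt is computed as (filt << 3) + filt). */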
+#define VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev) \
+{ \
+ v16i8 p2_m, p1_m, p0_m, q2_m, q1_m, q0_m; \
+ v16i8 filt, q0_sub_p0, cnst4b, cnst3b; \
+ v16i8 u, filt1, filt2, filt_sign, q0_sub_p0_sign; \
+ v8i16 q0_sub_p0_r, q0_sub_p0_l, filt_r, u_r, u_l, filt_l; \
+ v8i16 cnst3h, cnst27h, cnst18h, cnst63h; \
+ \
+ cnst3h = __msa_ldi_h(3); \
+ \
+ p2_m = (v16i8) __msa_xori_b(p2, 0x80); \
+ p1_m = (v16i8) __msa_xori_b(p1, 0x80); \
+ p0_m = (v16i8) __msa_xori_b(p0, 0x80); \
+ q0_m = (v16i8) __msa_xori_b(q0, 0x80); \
+ q1_m = (v16i8) __msa_xori_b(q1, 0x80); \
+ q2_m = (v16i8) __msa_xori_b(q2, 0x80); \
+ \
+ filt = __msa_subs_s_b(p1_m, q1_m); \
+ q0_sub_p0 = q0_m - p0_m; \
+ q0_sub_p0_sign = __msa_clti_s_b(q0_sub_p0, 0); \
+ filt_sign = __msa_clti_s_b(filt, 0); \
+ \
+ /* right part */ \
+ q0_sub_p0_r = (v8i16) __msa_ilvr_b(q0_sub_p0_sign, q0_sub_p0); \
+ q0_sub_p0_r *= cnst3h; \
+ filt_r = (v8i16) __msa_ilvr_b(filt_sign, filt); \
+ filt_r = filt_r + q0_sub_p0_r; \
+ filt_r = __msa_sat_s_h(filt_r, 7); \
+ \
+ /* left part */ \
+ q0_sub_p0_l = (v8i16) __msa_ilvl_b(q0_sub_p0_sign, q0_sub_p0); \
+ q0_sub_p0_l *= cnst3h; \
+ filt_l = (v8i16) __msa_ilvl_b(filt_sign, filt); \
+ filt_l = filt_l + q0_sub_p0_l; \
+ filt_l = __msa_sat_s_h(filt_l, 7); \
+ \
+ /* combine left and right part */ \
+ filt = __msa_pckev_b((v16i8) filt_l, (v16i8) filt_r); \
+ filt = filt & (v16i8) mask; \
+ filt2 = filt & (v16i8) hev; \
+ \
+ /* filt_val &= ~hev */ \
+ hev = __msa_xori_b(hev, 0xff); \
+ filt = filt & (v16i8) hev; \
+ cnst4b = __msa_ldi_b(4); \
+ filt1 = __msa_adds_s_b(filt2, cnst4b); \
+ filt1 >>= 3; \
+ cnst3b = __msa_ldi_b(3); \
+ filt2 = __msa_adds_s_b(filt2, cnst3b); \
+ filt2 >>= 3; \
+ q0_m = __msa_subs_s_b(q0_m, filt1); \
+ p0_m = __msa_adds_s_b(p0_m, filt2); \
+ \
+ filt_sign = __msa_clti_s_b(filt, 0); \
+ ILVRL_B2_SH(filt_sign, filt, filt_r, filt_l); \
+ \
+ cnst27h = __msa_ldi_h(27); \
+ cnst63h = __msa_ldi_h(63); \
+ \
+ /* right part */ \
+ u_r = filt_r * cnst27h; \
+ u_r += cnst63h; \
+ u_r >>= 7; \
+ u_r = __msa_sat_s_h(u_r, 7); \
+ /* left part */ \
+ u_l = filt_l * cnst27h; \
+ u_l += cnst63h; \
+ u_l >>= 7; \
+ u_l = __msa_sat_s_h(u_l, 7); \
+ /* combine left and right part */ \
+ u = __msa_pckev_b((v16i8) u_l, (v16i8) u_r); \
+ q0_m = __msa_subs_s_b(q0_m, u); \
+ q0 = __msa_xori_b((v16u8) q0_m, 0x80); \
+ p0_m = __msa_adds_s_b(p0_m, u); \
+ p0 = __msa_xori_b((v16u8) p0_m, 0x80); \
+ cnst18h = __msa_ldi_h(18); \
+ u_r = filt_r * cnst18h; \
+ u_r += cnst63h; \
+ u_r >>= 7; \
+ u_r = __msa_sat_s_h(u_r, 7); \
+ \
+ /* left part */ \
+ u_l = filt_l * cnst18h; \
+ u_l += cnst63h; \
+ u_l >>= 7; \
+ u_l = __msa_sat_s_h(u_l, 7); \
+ /* combine left and right part */ \
+ u = __msa_pckev_b((v16i8) u_l, (v16i8) u_r); \
+ q1_m = __msa_subs_s_b(q1_m, u); \
+ q1 = __msa_xori_b((v16u8) q1_m, 0x80); \
+ p1_m = __msa_adds_s_b(p1_m, u); \
+ p1 = __msa_xori_b((v16u8) p1_m, 0x80); \
+ u_r = filt_r << 3; \
+ u_r += filt_r + cnst63h; \
+ u_r >>= 7; \
+ u_r = __msa_sat_s_h(u_r, 7); \
+ \
+ /* left part */ \
+ u_l = filt_l << 3; \
+ u_l += filt_l + cnst63h; \
+ u_l >>= 7; \
+ u_l = __msa_sat_s_h(u_l, 7); \
+ /* combine left and right part */ \
+ u = __msa_pckev_b((v16i8) u_l, (v16i8) u_r); \
+ q2_m = __msa_subs_s_b(q2_m, u); \
+ q2 = __msa_xori_b((v16u8) q2_m, 0x80); \
+ p2_m = __msa_adds_s_b(p2_m, u); \
+ p2 = __msa_xori_b((v16u8) p2_m, 0x80); \
+}
+
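+/* Filter mask and high-edge-variance flag, per VP8: hev is set when
+ * |p1 - p0| or |q1 - q0| exceeds thresh; the edge is filtered when every
+ * neighbouring difference is within limit and |p0 - q0| * 2 +
+ * |p1 - q1| / 2 is within b_limit. */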
+#define LPF_MASK_HEV(p3_in, p2_in, p1_in, p0_in, \
+ q0_in, q1_in, q2_in, q3_in, \
+ limit_in, b_limit_in, thresh_in, \
+ hev_out, mask_out, flat_out) \
+{ \
+ v16u8 p3_asub_p2_m, p2_asub_p1_m, p1_asub_p0_m, q1_asub_q0_m; \
+ v16u8 p1_asub_q1_m, p0_asub_q0_m, q3_asub_q2_m, q2_asub_q1_m; \
+ \
+ /* absolute subtraction of pixel values */ \
+ p3_asub_p2_m = __msa_asub_u_b((p3_in), (p2_in)); \
+ p2_asub_p1_m = __msa_asub_u_b((p2_in), (p1_in)); \
+ p1_asub_p0_m = __msa_asub_u_b((p1_in), (p0_in)); \
+ q1_asub_q0_m = __msa_asub_u_b((q1_in), (q0_in)); \
+ q2_asub_q1_m = __msa_asub_u_b((q2_in), (q1_in)); \
+ q3_asub_q2_m = __msa_asub_u_b((q3_in), (q2_in)); \
+ p0_asub_q0_m = __msa_asub_u_b((p0_in), (q0_in)); \
+ p1_asub_q1_m = __msa_asub_u_b((p1_in), (q1_in)); \
+ /* calculation of hev */ \
+ flat_out = __msa_max_u_b(p1_asub_p0_m, q1_asub_q0_m); \
+ hev_out = (thresh_in) < (v16u8) flat_out; \
+ /* calculation of mask */ \
+ p0_asub_q0_m = __msa_adds_u_b(p0_asub_q0_m, p0_asub_q0_m); \
+ p1_asub_q1_m >>= 1; \
+ p0_asub_q0_m = __msa_adds_u_b(p0_asub_q0_m, p1_asub_q1_m); \
+ mask_out = (b_limit_in) < p0_asub_q0_m; \
+ mask_out = __msa_max_u_b(flat_out, mask_out); \
+ p3_asub_p2_m = __msa_max_u_b(p3_asub_p2_m, p2_asub_p1_m); \
+ mask_out = __msa_max_u_b(p3_asub_p2_m, mask_out); \
+ q2_asub_q1_m = __msa_max_u_b(q2_asub_q1_m, q3_asub_q2_m); \
+ mask_out = __msa_max_u_b(q2_asub_q1_m, mask_out); \
+ mask_out = (limit_in) < (v16u8) mask_out; \
+ mask_out = __msa_xori_b(mask_out, 0xff); \
+}
+
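+/* Stores the six filtered pixels (p2..q2) of one transposed row: four
+ * bytes from in0 followed by two bytes from in1 at byte offset stride
+ * (4 at every call site). */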
+#define VP8_ST6x1_UB(in0, in0_idx, in1, in1_idx, pdst, stride) \
+{ \
+ uint16_t tmp0_h; \
+ uint32_t tmp0_w; \
+ \
+ tmp0_w = __msa_copy_u_w((v4i32) in0, in0_idx); \
+ tmp0_h = __msa_copy_u_h((v8i16) in1, in1_idx); \
+ SW(tmp0_w, pdst); \
+ SH(tmp0_h, pdst + stride); \
+}
+
+void ff_vp8_v_loop_filter16_msa(uint8_t *src, ptrdiff_t pitch, int b_limit_in,
+ int limit_in, int thresh_in)
+{
+ uint8_t *temp_src;
+ v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
+ v16u8 mask, hev, flat, thresh, limit, b_limit;
+
+ b_limit = (v16u8) __msa_fill_b(b_limit_in);
+ limit = (v16u8) __msa_fill_b(limit_in);
+ thresh = (v16u8) __msa_fill_b(thresh_in);
+ /* load vector elements */
+ temp_src = src - (pitch << 2);
+ LD_UB8(temp_src, pitch, p3, p2, p1, p0, q0, q1, q2, q3);
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
+ /* store vector elements */
+ temp_src = src - 3 * pitch;
+ ST_UB4(p2, p1, p0, q0, temp_src, pitch);
+ temp_src += (4 * pitch);
+ ST_UB2(q1, q2, temp_src, pitch);
+}
+
+void ff_vp8_v_loop_filter8uv_msa(uint8_t *src_u, uint8_t *src_v,
+ ptrdiff_t pitch, int b_limit_in, int limit_in,
+ int thresh_in)
+{
+ uint8_t *temp_src;
+ uint64_t p2_d, p1_d, p0_d, q0_d, q1_d, q2_d;
+ v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
+ v16u8 mask, hev, flat, thresh, limit, b_limit;
+ v16u8 p3_u, p2_u, p1_u, p0_u, q3_u, q2_u, q1_u, q0_u;
+ v16u8 p3_v, p2_v, p1_v, p0_v, q3_v, q2_v, q1_v, q0_v;
+
+ b_limit = (v16u8) __msa_fill_b(b_limit_in);
+ limit = (v16u8) __msa_fill_b(limit_in);
+ thresh = (v16u8) __msa_fill_b(thresh_in);
+
+ temp_src = src_u - (pitch << 2);
+ LD_UB8(temp_src, pitch, p3_u, p2_u, p1_u, p0_u, q0_u, q1_u, q2_u, q3_u);
+ temp_src = src_v - (pitch << 2);
+ LD_UB8(temp_src, pitch, p3_v, p2_v, p1_v, p0_v, q0_v, q1_v, q2_v, q3_v);
+
+    /* right 8 elements of p3 are u pixels and left 8 elements are v pixels */
+ ILVR_D4_UB(p3_v, p3_u, p2_v, p2_u, p1_v, p1_u, p0_v, p0_u, p3, p2, p1, p0);
+ ILVR_D4_UB(q0_v, q0_u, q1_v, q1_u, q2_v, q2_u, q3_v, q3_u, q0, q1, q2, q3);
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
+
+ p2_d = __msa_copy_u_d((v2i64) p2, 0);
+ p1_d = __msa_copy_u_d((v2i64) p1, 0);
+ p0_d = __msa_copy_u_d((v2i64) p0, 0);
+ q0_d = __msa_copy_u_d((v2i64) q0, 0);
+ q1_d = __msa_copy_u_d((v2i64) q1, 0);
+ q2_d = __msa_copy_u_d((v2i64) q2, 0);
+ src_u -= (pitch * 3);
+ SD4(p2_d, p1_d, p0_d, q0_d, src_u, pitch);
+ src_u += 4 * pitch;
+ SD(q1_d, src_u);
+ src_u += pitch;
+ SD(q2_d, src_u);
+
+ p2_d = __msa_copy_u_d((v2i64) p2, 1);
+ p1_d = __msa_copy_u_d((v2i64) p1, 1);
+ p0_d = __msa_copy_u_d((v2i64) p0, 1);
+ q0_d = __msa_copy_u_d((v2i64) q0, 1);
+ q1_d = __msa_copy_u_d((v2i64) q1, 1);
+ q2_d = __msa_copy_u_d((v2i64) q2, 1);
+ src_v -= (pitch * 3);
+ SD4(p2_d, p1_d, p0_d, q0_d, src_v, pitch);
+ src_v += 4 * pitch;
+ SD(q1_d, src_v);
+ src_v += pitch;
+ SD(q2_d, src_v);
+}
+
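+/* Vertical-edge (h) variant: 16 rows of 8 pixels straddling the edge are
+ * transposed so p3..q3 become vectors, filtered as above, and the six
+ * changed pixels per row are written back with VP8_ST6x1_UB. */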
+void ff_vp8_h_loop_filter16_msa(uint8_t *src, ptrdiff_t pitch, int b_limit_in,
+ int limit_in, int thresh_in)
+{
+ uint8_t *temp_src;
+ v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
+ v16u8 mask, hev, flat, thresh, limit, b_limit;
+ v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8;
+ v16u8 row9, row10, row11, row12, row13, row14, row15;
+ v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+
+ b_limit = (v16u8) __msa_fill_b(b_limit_in);
+ limit = (v16u8) __msa_fill_b(limit_in);
+ thresh = (v16u8) __msa_fill_b(thresh_in);
+ temp_src = src - 4;
+ LD_UB8(temp_src, pitch, row0, row1, row2, row3, row4, row5, row6, row7);
+ temp_src += (8 * pitch);
+ LD_UB8(temp_src, pitch,
+ row8, row9, row10, row11, row12, row13, row14, row15);
+ TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7,
+ row8, row9, row10, row11, row12, row13, row14, row15,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
+ ILVR_B2_SH(p1, p2, q0, p0, tmp0, tmp1);
+ ILVRL_H2_SH(tmp1, tmp0, tmp3, tmp4);
+ ILVL_B2_SH(p1, p2, q0, p0, tmp0, tmp1);
+ ILVRL_H2_SH(tmp1, tmp0, tmp6, tmp7);
+ ILVRL_B2_SH(q2, q1, tmp2, tmp5);
+
+ temp_src = src - 3;
+ VP8_ST6x1_UB(tmp3, 0, tmp2, 0, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp3, 1, tmp2, 1, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp3, 2, tmp2, 2, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp3, 3, tmp2, 3, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp4, 0, tmp2, 4, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp4, 1, tmp2, 5, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp4, 2, tmp2, 6, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp4, 3, tmp2, 7, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp6, 0, tmp5, 0, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp6, 1, tmp5, 1, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp6, 2, tmp5, 2, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp6, 3, tmp5, 3, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp7, 0, tmp5, 4, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp7, 1, tmp5, 5, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp7, 2, tmp5, 6, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp7, 3, tmp5, 7, temp_src, 4);
+}
+
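+/* u rows are loaded into row0..row7 and v rows into row8..row15, so after
+ * the 16x8 transpose the low 8 bytes of each p/q vector come from the u
+ * plane and the high 8 bytes from the v plane; one filter pass then
+ * covers both chroma planes */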
+void ff_vp8_h_loop_filter8uv_msa(uint8_t *src_u, uint8_t *src_v,
+ ptrdiff_t pitch, int b_limit_in, int limit_in,
+ int thresh_in)
+{
+ v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
+ v16u8 mask, hev, flat, thresh, limit, b_limit;
+ v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8;
+ v16u8 row9, row10, row11, row12, row13, row14, row15;
+ v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+
+ b_limit = (v16u8) __msa_fill_b(b_limit_in);
+ limit = (v16u8) __msa_fill_b(limit_in);
+ thresh = (v16u8) __msa_fill_b(thresh_in);
+
+ LD_UB8(src_u - 4, pitch, row0, row1, row2, row3, row4, row5, row6, row7);
+ LD_UB8(src_v - 4, pitch,
+ row8, row9, row10, row11, row12, row13, row14, row15);
+ TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7,
+ row8, row9, row10, row11, row12, row13, row14, row15,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
+
+ ILVR_B2_SH(p1, p2, q0, p0, tmp0, tmp1);
+ ILVRL_H2_SH(tmp1, tmp0, tmp3, tmp4);
+ ILVL_B2_SH(p1, p2, q0, p0, tmp0, tmp1);
+ ILVRL_H2_SH(tmp1, tmp0, tmp6, tmp7);
+ ILVRL_B2_SH(q2, q1, tmp2, tmp5);
+
+ src_u -= 3;
+ VP8_ST6x1_UB(tmp3, 0, tmp2, 0, src_u, 4);
+ src_u += pitch;
+ VP8_ST6x1_UB(tmp3, 1, tmp2, 1, src_u, 4);
+ src_u += pitch;
+ VP8_ST6x1_UB(tmp3, 2, tmp2, 2, src_u, 4);
+ src_u += pitch;
+ VP8_ST6x1_UB(tmp3, 3, tmp2, 3, src_u, 4);
+ src_u += pitch;
+ VP8_ST6x1_UB(tmp4, 0, tmp2, 4, src_u, 4);
+ src_u += pitch;
+ VP8_ST6x1_UB(tmp4, 1, tmp2, 5, src_u, 4);
+ src_u += pitch;
+ VP8_ST6x1_UB(tmp4, 2, tmp2, 6, src_u, 4);
+ src_u += pitch;
+ VP8_ST6x1_UB(tmp4, 3, tmp2, 7, src_u, 4);
+
+ src_v -= 3;
+ VP8_ST6x1_UB(tmp6, 0, tmp5, 0, src_v, 4);
+ src_v += pitch;
+ VP8_ST6x1_UB(tmp6, 1, tmp5, 1, src_v, 4);
+ src_v += pitch;
+ VP8_ST6x1_UB(tmp6, 2, tmp5, 2, src_v, 4);
+ src_v += pitch;
+ VP8_ST6x1_UB(tmp6, 3, tmp5, 3, src_v, 4);
+ src_v += pitch;
+ VP8_ST6x1_UB(tmp7, 0, tmp5, 4, src_v, 4);
+ src_v += pitch;
+ VP8_ST6x1_UB(tmp7, 1, tmp5, 5, src_v, 4);
+ src_v += pitch;
+ VP8_ST6x1_UB(tmp7, 2, tmp5, 6, src_v, 4);
+ src_v += pitch;
+ VP8_ST6x1_UB(tmp7, 3, tmp5, 7, src_v, 4);
+}
+
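+/* the simple filter variant uses only the edge limit (no interior limit
+ * or hev threshold) and modifies just the two pixels next to the edge,
+ * p0 and q0 */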
+void ff_vp8_v_loop_filter_simple_msa(uint8_t *src, ptrdiff_t pitch,
+                                     int b_limit_in)
+{
+ v16u8 p1, p0, q1, q0;
+ v16u8 mask, b_limit;
+
+    b_limit = (v16u8) __msa_fill_b(b_limit_in);
+ /* load vector elements */
+ LD_UB4(src - (pitch << 1), pitch, p1, p0, q0, q1);
+ VP8_SIMPLE_MASK(p1, p0, q0, q1, b_limit, mask);
+ VP8_SIMPLE_FILT(p1, p0, q0, q1, mask);
+ ST_UB2(p0, q0, (src - pitch), pitch);
+}
+
+void ff_vp8_h_loop_filter_simple_msa(uint8_t *src, ptrdiff_t pitch,
+                                     int b_limit_in)
+{
+ uint8_t *temp_src;
+ v16u8 p1, p0, q1, q0;
+ v16u8 mask, b_limit;
+ v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8;
+ v16u8 row9, row10, row11, row12, row13, row14, row15;
+ v8i16 tmp0, tmp1;
+
+    b_limit = (v16u8) __msa_fill_b(b_limit_in);
+ temp_src = src - 2;
+ LD_UB8(temp_src, pitch, row0, row1, row2, row3, row4, row5, row6, row7);
+ temp_src += (8 * pitch);
+ LD_UB8(temp_src, pitch,
+ row8, row9, row10, row11, row12, row13, row14, row15);
+ TRANSPOSE16x4_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7,
+ row8, row9, row10, row11, row12, row13, row14, row15,
+ p1, p0, q0, q1);
+ VP8_SIMPLE_MASK(p1, p0, q0, q1, b_limit, mask);
+ VP8_SIMPLE_FILT(p1, p0, q0, q1, mask);
+ ILVRL_B2_SH(q0, p0, tmp1, tmp0);
+
+ src -= 1;
+ ST2x4_UB(tmp1, 0, src, pitch);
+ src += 4 * pitch;
+ ST2x4_UB(tmp1, 4, src, pitch);
+ src += 4 * pitch;
+ ST2x4_UB(tmp0, 0, src, pitch);
+ src += 4 * pitch;
+ ST2x4_UB(tmp0, 4, src, pitch);
+}
+
+void ff_vp8_v_loop_filter8uv_inner_msa(uint8_t *src_u, uint8_t *src_v,
+ ptrdiff_t pitch, int b_limit_in,
+ int limit_in, int thresh_in)
+{
+ uint64_t p1_d, p0_d, q0_d, q1_d;
+ v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
+ v16u8 mask, hev, flat, thresh, limit, b_limit;
+ v16u8 p3_u, p2_u, p1_u, p0_u, q3_u, q2_u, q1_u, q0_u;
+ v16u8 p3_v, p2_v, p1_v, p0_v, q3_v, q2_v, q1_v, q0_v;
+
+ thresh = (v16u8) __msa_fill_b(thresh_in);
+ limit = (v16u8) __msa_fill_b(limit_in);
+ b_limit = (v16u8) __msa_fill_b(b_limit_in);
+
+ src_u = src_u - (pitch << 2);
+ LD_UB8(src_u, pitch, p3_u, p2_u, p1_u, p0_u, q0_u, q1_u, q2_u, q3_u);
+ src_u += (5 * pitch);
+ src_v = src_v - (pitch << 2);
+ LD_UB8(src_v, pitch, p3_v, p2_v, p1_v, p0_v, q0_v, q1_v, q2_v, q3_v);
+ src_v += (5 * pitch);
+
+    /* right 8 elements of p3 are u pixels and
+       left 8 elements of p3 are v pixels */
+ ILVR_D4_UB(p3_v, p3_u, p2_v, p2_u, p1_v, p1_u, p0_v, p0_u, p3, p2, p1, p0);
+ ILVR_D4_UB(q0_v, q0_u, q1_v, q1_u, q2_v, q2_u, q3_v, q3_u, q0, q1, q2, q3);
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);
+
+ p1_d = __msa_copy_u_d((v2i64) p1, 0);
+ p0_d = __msa_copy_u_d((v2i64) p0, 0);
+ q0_d = __msa_copy_u_d((v2i64) q0, 0);
+ q1_d = __msa_copy_u_d((v2i64) q1, 0);
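+    /* src_u points at the q1 row, so storing with a negative stride writes
+     * q1, q0, p0, p1 walking upward across the edge */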
+    SD4(q1_d, q0_d, p0_d, p1_d, src_u, -pitch);
+
+ p1_d = __msa_copy_u_d((v2i64) p1, 1);
+ p0_d = __msa_copy_u_d((v2i64) p0, 1);
+ q0_d = __msa_copy_u_d((v2i64) q0, 1);
+ q1_d = __msa_copy_u_d((v2i64) q1, 1);
+    SD4(q1_d, q0_d, p0_d, p1_d, src_v, -pitch);
+}
+
+void ff_vp8_h_loop_filter8uv_inner_msa(uint8_t *src_u, uint8_t *src_v,
+ ptrdiff_t pitch, int b_limit_in,
+ int limit_in, int thresh_in)
+{
+ uint8_t *temp_src_u, *temp_src_v;
+ v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
+ v16u8 mask, hev, flat, thresh, limit, b_limit;
+ v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8;
+ v16u8 row9, row10, row11, row12, row13, row14, row15;
+ v4i32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
+
+ thresh = (v16u8) __msa_fill_b(thresh_in);
+ limit = (v16u8) __msa_fill_b(limit_in);
+ b_limit = (v16u8) __msa_fill_b(b_limit_in);
+
+ LD_UB8(src_u - 4, pitch, row0, row1, row2, row3, row4, row5, row6, row7);
+ LD_UB8(src_v - 4, pitch,
+ row8, row9, row10, row11, row12, row13, row14, row15);
+ TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7,
+ row8, row9, row10, row11, row12, row13, row14, row15,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);
+ ILVR_B2_SW(p0, p1, q1, q0, tmp0, tmp1);
+ ILVRL_H2_SW(tmp1, tmp0, tmp2, tmp3);
+ tmp0 = (v4i32) __msa_ilvl_b((v16i8) p0, (v16i8) p1);
+ tmp1 = (v4i32) __msa_ilvl_b((v16i8) q1, (v16i8) q0);
+ ILVRL_H2_SW(tmp1, tmp0, tmp4, tmp5);
+
+ temp_src_u = src_u - 2;
+ ST4x4_UB(tmp2, tmp2, 0, 1, 2, 3, temp_src_u, pitch);
+ temp_src_u += 4 * pitch;
+ ST4x4_UB(tmp3, tmp3, 0, 1, 2, 3, temp_src_u, pitch);
+
+ temp_src_v = src_v - 2;
+ ST4x4_UB(tmp4, tmp4, 0, 1, 2, 3, temp_src_v, pitch);
+ temp_src_v += 4 * pitch;
+ ST4x4_UB(tmp5, tmp5, 0, 1, 2, 3, temp_src_v, pitch);
+}
+
+void ff_vp8_v_loop_filter16_inner_msa(uint8_t *src, ptrdiff_t pitch,
+ int32_t e, int32_t i, int32_t h)
+{
+ v16u8 mask, hev, flat;
+ v16u8 thresh, b_limit, limit;
+ v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
+
+ /* load vector elements */
+ LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3);
+ thresh = (v16u8) __msa_fill_b(h);
+ b_limit = (v16u8) __msa_fill_b(e);
+ limit = (v16u8) __msa_fill_b(i);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);
+
+ ST_UB4(p1, p0, q0, q1, (src - 2 * pitch), pitch);
+}
+
+void ff_vp8_h_loop_filter16_inner_msa(uint8_t *src, ptrdiff_t pitch,
+ int32_t e, int32_t i, int32_t h)
+{
+ v16u8 mask, hev, flat;
+ v16u8 thresh, b_limit, limit;
+ v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
+ v16u8 row0, row1, row2, row3, row4, row5, row6, row7;
+ v16u8 row8, row9, row10, row11, row12, row13, row14, row15;
+ v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
+
+ LD_UB8(src - 4, pitch, row0, row1, row2, row3, row4, row5, row6, row7);
+ LD_UB8(src - 4 + (8 * pitch), pitch,
+ row8, row9, row10, row11, row12, row13, row14, row15);
+ TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7,
+ row8, row9, row10, row11, row12, row13, row14, row15,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+
+ thresh = (v16u8) __msa_fill_b(h);
+ b_limit = (v16u8) __msa_fill_b(e);
+ limit = (v16u8) __msa_fill_b(i);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);
+ ILVR_B2_SH(p0, p1, q1, q0, tmp0, tmp1);
+ ILVRL_H2_SH(tmp1, tmp0, tmp2, tmp3);
+ ILVL_B2_SH(p0, p1, q1, q0, tmp0, tmp1);
+ ILVRL_H2_SH(tmp1, tmp0, tmp4, tmp5);
+
+ src -= 2;
+ ST4x8_UB(tmp2, tmp3, src, pitch);
+ src += (8 * pitch);
+ ST4x8_UB(tmp4, tmp5, src, pitch);
+}
+
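+/* VP8 idct multipliers in Q16 fixed point:
+ *   20091 / 65536 ~= sqrt(2) * cos(pi / 8) - 1
+ *   35468 / 65536 ~= sqrt(2) * sin(pi / 8)
+ * hence x + ((x * 20091) >> 16) ~= x * sqrt(2) * cos(pi / 8)
+ * and   (x * 35468) >> 16       ~= x * sqrt(2) * sin(pi / 8) */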
+static const int cospi8sqrt2minus1 = 20091;
+static const int sinpi8sqrt2 = 35468;
+
+#define VP8_IDCT_1D_W(in0, in1, in2, in3, out0, out1, out2, out3) \
+{ \
+ v4i32 a1_m, b1_m, c1_m, d1_m; \
+ v4i32 c_tmp1_m, c_tmp2_m, d_tmp1_m, d_tmp2_m; \
+ v4i32 const_cospi8sqrt2minus1_m, sinpi8_sqrt2_m; \
+ \
+ const_cospi8sqrt2minus1_m = __msa_fill_w(cospi8sqrt2minus1); \
+ sinpi8_sqrt2_m = __msa_fill_w(sinpi8sqrt2); \
+ a1_m = in0 + in2; \
+ b1_m = in0 - in2; \
+ c_tmp1_m = ((in1) * sinpi8_sqrt2_m) >> 16; \
+ c_tmp2_m = in3 + (((in3) * const_cospi8sqrt2minus1_m) >> 16); \
+ c1_m = c_tmp1_m - c_tmp2_m; \
+ d_tmp1_m = (in1) + (((in1) * const_cospi8sqrt2minus1_m) >> 16); \
+ d_tmp2_m = ((in3) * sinpi8_sqrt2_m) >> 16; \
+ d1_m = d_tmp1_m + d_tmp2_m; \
+ BUTTERFLY_4(a1_m, b1_m, c1_m, d1_m, out0, out1, out2, out3); \
+}
+
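+/* two 1D passes with a transpose in between implement the 2D inverse
+ * transform; the column pass results are rounded by (x + 4) >> 3 before
+ * being added to the prediction and clipped to [0, 255] */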
+void ff_vp8_idct_add_msa(uint8_t *dst, int16_t input[16], ptrdiff_t stride)
+{
+ v8i16 input0, input1;
+ v4i32 in0, in1, in2, in3, hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3;
+ v4i32 res0, res1, res2, res3;
+ v16i8 zero = { 0 };
+ v16i8 pred0, pred1, pred2, pred3, dest0, dest1;
+ v16i8 mask = { 0, 4, 8, 12, 16, 20, 24, 28, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+ /* load short vector elements of 4x4 block */
+ LD_SH2(input, 8, input0, input1);
+ UNPCK_SH_SW(input0, in0, in1);
+ UNPCK_SH_SW(input1, in2, in3);
+ VP8_IDCT_1D_W(in0, in1, in2, in3, hz0, hz1, hz2, hz3);
+ /* transpose the block */
+ TRANSPOSE4x4_SW_SW(hz0, hz1, hz2, hz3, hz0, hz1, hz2, hz3);
+ VP8_IDCT_1D_W(hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3);
+ SRARI_W4_SW(vt0, vt1, vt2, vt3, 3);
+ /* transpose the block */
+ TRANSPOSE4x4_SW_SW(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3);
+ LD_SB4(dst, stride, pred0, pred1, pred2, pred3);
+ ILVR_B4_SW(zero, pred0, zero, pred1, zero, pred2, zero, pred3,
+ res0, res1, res2, res3);
+ ILVR_H4_SW(zero, res0, zero, res1, zero, res2, zero, res3,
+ res0, res1, res2, res3);
+ ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3);
+ res0 = CLIP_SW_0_255(res0);
+ res1 = CLIP_SW_0_255(res1);
+ res2 = CLIP_SW_0_255(res2);
+ res3 = CLIP_SW_0_255(res3);
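+    /* each result vector holds one 4x4 row as 32-bit lanes; the shuffle
+     * mask gathers the low byte of each lane to repack the rows into
+     * bytes before storing */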
+ VSHF_B2_SB(res0, res1, res2, res3, mask, mask, dest0, dest1);
+ ST4x4_UB(dest0, dest1, 0, 1, 0, 1, dst, stride);
+
+ memset(input, 0, 4 * 4 * sizeof(*input));
+}
+
+void ff_vp8_idct_dc_add_msa(uint8_t *dst, int16_t in_dc[16], ptrdiff_t stride)
+{
+ v8i16 vec;
+ v8i16 res0, res1, res2, res3;
+ v16i8 zero = { 0 };
+ v16i8 pred0, pred1, pred2, pred3, dest0, dest1;
+ v16i8 mask = { 0, 2, 4, 6, 16, 18, 20, 22, 0, 0, 0, 0, 0, 0, 0, 0 };
+
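+    /* broadcast the rounded DC value, (dc + 4) >> 3, and add it to every
+     * pixel of the 4x4 prediction block */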
+ vec = __msa_fill_h(in_dc[0]);
+ vec = __msa_srari_h(vec, 3);
+ LD_SB4(dst, stride, pred0, pred1, pred2, pred3);
+ ILVR_B4_SH(zero, pred0, zero, pred1, zero, pred2, zero, pred3,
+ res0, res1, res2, res3);
+ ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3);
+ CLIP_SH4_0_255(res0, res1, res2, res3);
+ VSHF_B2_SB(res0, res1, res2, res3, mask, mask, dest0, dest1);
+ ST4x4_UB(dest0, dest1, 0, 1, 0, 1, dst, stride);
+
+ in_dc[0] = 0;
+}
+
+void ff_vp8_luma_dc_wht_msa(int16_t block[4][4][16], int16_t input[16])
+{
+ int16_t *mb_dq_coeff = &block[0][0][0];
+ v8i16 input0, input1;
+ v4i32 in0, in1, in2, in3, a1, b1, c1, d1;
+ v4i32 hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3;
+
+ /* load short vector elements of 4x4 block */
+ LD_SH2(input, 8, input0, input1);
+ UNPCK_SH_SW(input0, in0, in1);
+ UNPCK_SH_SW(input1, in2, in3);
+ BUTTERFLY_4(in0, in1, in2, in3, a1, b1, c1, d1);
+ BUTTERFLY_4(a1, d1, c1, b1, hz0, hz1, hz3, hz2);
+ /* transpose the block */
+ TRANSPOSE4x4_SW_SW(hz0, hz1, hz2, hz3, hz0, hz1, hz2, hz3);
+ BUTTERFLY_4(hz0, hz1, hz2, hz3, a1, b1, c1, d1);
+ BUTTERFLY_4(a1, d1, c1, b1, vt0, vt1, vt3, vt2);
+ ADD4(vt0, 3, vt1, 3, vt2, 3, vt3, 3, vt0, vt1, vt2, vt3);
+ SRA_4V(vt0, vt1, vt2, vt3, 3);
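+    /* each luma 4x4 block stores 16 coefficients, so consecutive blocks
+     * are 16 shorts apart; the WHT output supplies the DC (index 0) of
+     * each of the 16 blocks, hence the stride of 16 in the scatter below */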
+ mb_dq_coeff[0] = __msa_copy_s_h((v8i16) vt0, 0);
+ mb_dq_coeff[16] = __msa_copy_s_h((v8i16) vt1, 0);
+ mb_dq_coeff[32] = __msa_copy_s_h((v8i16) vt2, 0);
+ mb_dq_coeff[48] = __msa_copy_s_h((v8i16) vt3, 0);
+ mb_dq_coeff[64] = __msa_copy_s_h((v8i16) vt0, 2);
+ mb_dq_coeff[80] = __msa_copy_s_h((v8i16) vt1, 2);
+ mb_dq_coeff[96] = __msa_copy_s_h((v8i16) vt2, 2);
+ mb_dq_coeff[112] = __msa_copy_s_h((v8i16) vt3, 2);
+ mb_dq_coeff[128] = __msa_copy_s_h((v8i16) vt0, 4);
+ mb_dq_coeff[144] = __msa_copy_s_h((v8i16) vt1, 4);
+ mb_dq_coeff[160] = __msa_copy_s_h((v8i16) vt2, 4);
+ mb_dq_coeff[176] = __msa_copy_s_h((v8i16) vt3, 4);
+ mb_dq_coeff[192] = __msa_copy_s_h((v8i16) vt0, 6);
+ mb_dq_coeff[208] = __msa_copy_s_h((v8i16) vt1, 6);
+ mb_dq_coeff[224] = __msa_copy_s_h((v8i16) vt2, 6);
+ mb_dq_coeff[240] = __msa_copy_s_h((v8i16) vt3, 6);
+
+ memset(input, 0, 4 * 4 * sizeof(int16_t));
+}
+
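+/* the four luma blocks of a 16x4 strip lie side by side, 4 pixels apart */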
+void ff_vp8_idct_dc_add4y_msa(uint8_t *dst, int16_t block[4][16],
+ ptrdiff_t stride)
+{
+ ff_vp8_idct_dc_add_msa(dst, &block[0][0], stride);
+ ff_vp8_idct_dc_add_msa(dst + 4, &block[1][0], stride);
+ ff_vp8_idct_dc_add_msa(dst + 8, &block[2][0], stride);
+ ff_vp8_idct_dc_add_msa(dst + 12, &block[3][0], stride);
+}
+
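+/* the four chroma blocks form a 2x2 arrangement, so the bottom pair is
+ * offset by 4 rows (stride * 4) as well as 4 pixels */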
+void ff_vp8_idct_dc_add4uv_msa(uint8_t *dst, int16_t block[4][16],
+ ptrdiff_t stride)
+{
+ ff_vp8_idct_dc_add_msa(dst, &block[0][0], stride);
+ ff_vp8_idct_dc_add_msa(dst + 4, &block[1][0], stride);
+ ff_vp8_idct_dc_add_msa(dst + stride * 4, &block[2][0], stride);
+ ff_vp8_idct_dc_add_msa(dst + stride * 4 + 4, &block[3][0], stride);
+}
diff --git a/libavcodec/vp8dsp.c b/libavcodec/vp8dsp.c
index e1a91bb..07bea69 100644
--- a/libavcodec/vp8dsp.c
+++ b/libavcodec/vp8dsp.c
@@ -735,5 +735,7 @@ av_cold void ff_vp8dsp_init(VP8DSPContext *dsp)
ff_vp8dsp_init_arm(dsp);
if (ARCH_X86)
ff_vp8dsp_init_x86(dsp);
+ if (ARCH_MIPS)
+ ff_vp8dsp_init_mips(dsp);
}
#endif /* CONFIG_VP8_DECODER */
diff --git a/libavcodec/vp8dsp.h b/libavcodec/vp8dsp.h
index 5fdd3af..0401c92 100644
--- a/libavcodec/vp8dsp.h
+++ b/libavcodec/vp8dsp.h
@@ -98,6 +98,7 @@ void ff_vp78dsp_init_x86(VP8DSPContext *c);
void ff_vp8dsp_init(VP8DSPContext *c);
void ff_vp8dsp_init_arm(VP8DSPContext *c);
void ff_vp8dsp_init_x86(VP8DSPContext *c);
+void ff_vp8dsp_init_mips(VP8DSPContext *c);
#define IS_VP7 1
#define IS_VP8 0
--
1.7.9.5