[FFmpeg-devel] [PATCH] avfilter: add mergeplanes
Paul B Mahol
onemda at gmail.com
Tue Oct 1 19:19:18 CEST 2013
Signed-off-by: Paul B Mahol <onemda@gmail.com>
---
doc/filters.texi | 33 ++++
libavfilter/Makefile | 1 +
libavfilter/allfilters.c | 1 +
libavfilter/vf_mergeplanes.c | 356 +++++++++++++++++++++++++++++++++++++++++++
4 files changed, 391 insertions(+)
create mode 100644 libavfilter/vf_mergeplanes.c
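
Notes for reviewers: the mode flags determine both the number of inputs and the
output pixel format; the combinations accepted by init() are y+u+v, r+g+b,
y+u+v+a, r+g+b+a, yuv+a and rgb+a. A couple of illustrative command lines
(untested sketches; the input file names are placeholders, and the chroma
inputs must already have the subsampled dimensions expected by the chosen
output format):

  # merge three grayscale streams as the Y, U and V planes of one stream
  ffmpeg -i y.nut -i u.nut -i v.nut \
         -filter_complex '[0:v][1:v][2:v]mergeplanes=y+u+v[out]' \
         -map '[out]' merged.nut

  # add an alpha plane taken from a grayscale stream to a YUV stream
  ffmpeg -i video.nut -i alpha.nut \
         -filter_complex '[0:v][1:v]mergeplanes=yuv+a[out]' \
         -map '[out]' merged.nut
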
diff --git a/doc/filters.texi b/doc/filters.texi
index 93cdad8..ca0bd2b 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -5248,6 +5248,39 @@ lutyuv=y='bitand(val, 128+64+32)'
@end example
@end itemize
+@section mergeplanes
+
+Merge color channel components from several video streams.
+
+This filter accepts the following options:
+@table @option
+@item mode
+Set flags selecting which planes to merge; the flags determine both the number of inputs and the output pixel format. Default is 'r+g+b+a' (0xF).
+
+Available values for @code{mode} are:
+@table @samp
+@item r
+@item g
+@item b
+@item a
+@item y
+@item u
+@item v
+@item rgb
+@item yuv
+@end table
+@end table
+
+@subsection Examples
+
+@itemize
+@item
+Merge four gray video streams into a single video stream:
+@example
+mergeplanes=y+u+v+a
+@end example
+@end itemize
+
@section mcdeint
Apply motion-compensation deinterlacing.
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index b2d3587..0200e29 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -157,6 +157,7 @@ OBJS-$(CONFIG_LUT_FILTER) += vf_lut.o
OBJS-$(CONFIG_LUTRGB_FILTER) += vf_lut.o
OBJS-$(CONFIG_LUTYUV_FILTER) += vf_lut.o
OBJS-$(CONFIG_MCDEINT_FILTER) += vf_mcdeint.o
+OBJS-$(CONFIG_MERGEPLANES_FILTER) += vf_mergeplanes.o
OBJS-$(CONFIG_MP_FILTER) += vf_mp.o
OBJS-$(CONFIG_MPDECIMATE_FILTER) += vf_mpdecimate.o
OBJS-$(CONFIG_NEGATE_FILTER) += vf_lut.o
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index ed11d67..8f33281 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -153,6 +153,7 @@ void avfilter_register_all(void)
REGISTER_FILTER(LUTRGB, lutrgb, vf);
REGISTER_FILTER(LUTYUV, lutyuv, vf);
REGISTER_FILTER(MCDEINT, mcdeint, vf);
+ REGISTER_FILTER(MERGEPLANES, mergeplanes, vf);
REGISTER_FILTER(MP, mp, vf);
REGISTER_FILTER(MPDECIMATE, mpdecimate, vf);
REGISTER_FILTER(NEGATE, negate, vf);
diff --git a/libavfilter/vf_mergeplanes.c b/libavfilter/vf_mergeplanes.c
new file mode 100644
index 0000000..b68a3ea
--- /dev/null
+++ b/libavfilter/vf_mergeplanes.c
@@ -0,0 +1,356 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "drawutils.h"
+#include "internal.h"
+#include "framesync.h"
+
+typedef struct MergePlanesContext {
+ const AVClass *class;
+ unsigned mode;
+ int nb_inputs;
+
+ int nb_planes[4];
+ int planewidth[4][4];
+ int planeheight[4][4];
+
+ const enum AVPixelFormat *in[4];
+ const enum AVPixelFormat *out;
+
+ FFFrameSync fs;
+ FFFrameSyncIn fsi[3]; /* must be immediately after fs */
+} MergePlanesContext;
+
+#define F_R 0x001
+#define F_G 0x002
+#define F_B 0x004
+#define F_A 0x008
+#define F_Y 0x010
+#define F_U 0x020
+#define F_V 0x040
+#define F_RGB 0x080
+#define F_YUV 0x100
+
+#define OFFSET(x) offsetof(MergePlanesContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption mergeplanes_options[] = {
+ { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_FLAGS, {.i64=0xf}, 0x1, 0x1ff, FLAGS, "flags"},
+ { "r", NULL, 0, AV_OPT_TYPE_CONST, {.i64=F_R}, 0, 0, FLAGS, "flags"},
+ { "g", NULL, 0, AV_OPT_TYPE_CONST, {.i64=F_G}, 0, 0, FLAGS, "flags"},
+ { "b", NULL, 0, AV_OPT_TYPE_CONST, {.i64=F_B}, 0, 0, FLAGS, "flags"},
+ { "a", NULL, 0, AV_OPT_TYPE_CONST, {.i64=F_A}, 0, 0, FLAGS, "flags"},
+ { "y", NULL, 0, AV_OPT_TYPE_CONST, {.i64=F_Y}, 0, 0, FLAGS, "flags"},
+ { "u", NULL, 0, AV_OPT_TYPE_CONST, {.i64=F_U}, 0, 0, FLAGS, "flags"},
+ { "v", NULL, 0, AV_OPT_TYPE_CONST, {.i64=F_V}, 0, 0, FLAGS, "flags"},
+ { "rgb", NULL, 0, AV_OPT_TYPE_CONST, {.i64=F_RGB}, 0, 0, FLAGS, "flags"},
+ { "yuv", NULL, 0, AV_OPT_TYPE_CONST, {.i64=F_YUV}, 0, 0, FLAGS, "flags"},
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(mergeplanes);
+
+static const enum AVPixelFormat gray_pixfmts[] = {
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat yuv_pixfmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat yuva_pixfmts[] = {
+ AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat rgb_pixfmts[] = {
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat rgba_pixfmts[] = {
+ AV_PIX_FMT_GBRAP, AV_PIX_FMT_NONE
+};
+
+static int query_formats(AVFilterContext *ctx)
+{
+ MergePlanesContext *s = ctx->priv;
+ int i;
+
+ for (i = 0; i < s->nb_inputs; i++)
+ ff_formats_ref(ff_make_format_list(s->in[i]), &ctx->inputs[i]->out_formats);
+
+ ff_formats_ref(ff_make_format_list(s->out), &ctx->outputs[0]->in_formats);
+
+ return 0;
+}
+
+static void merge_planes(AVFilterContext *ctx, AVFrame *in[4], AVFrame *out)
+{
+ MergePlanesContext *s = ctx->priv;
+ int i, p, j = 0;
+
+ for (i = 0; i < s->nb_inputs; i++) {
+ for (p = 0; p < s->nb_planes[i]; p++, j++) {
+ av_image_copy_plane(out->data[j], out->linesize[j],
+ in[i]->data[p], in[i]->linesize[p],
+ s->planewidth[i][p], s->planeheight[i][p]);
+ }
+ }
+}
+
+static int process_frame(FFFrameSync *fs)
+{
+ AVFilterContext *ctx = fs->parent;
+ AVFilterLink *outlink = ctx->outputs[0];
+ MergePlanesContext *s = fs->opaque;
+ AVFrame *in[4] = { NULL };
+ AVFrame *out;
+ int i, ret = 0;
+
+ av_assert0(s->nb_inputs <= 4);
+ for (i = 0; i < s->nb_inputs; i++) {
+ if ((ret = ff_framesync_get_frame(&s->fs, i, &in[i], 0)) < 0)
+ return ret;
+ }
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+
+ out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);
+ merge_planes(ctx, in, out);
+ ret = ff_filter_frame(outlink, out);
+
+ return ret;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ MergePlanesContext *s = ctx->priv;
+ FFFrameSyncIn *in = s->fs.in;
+ enum AVPixelFormat pix_fmt;
+ int depth[4][4];
+ int i, j, k;
+
+ ff_framesync_init(&s->fs, ctx, s->nb_inputs);
+ s->fs.opaque = s;
+ s->fs.on_event = process_frame;
+
+ outlink->w = ctx->inputs[0]->w;
+ outlink->h = ctx->inputs[0]->h;
+ outlink->time_base = ctx->inputs[0]->time_base;
+ outlink->frame_rate = ctx->inputs[0]->frame_rate;
+ outlink->sample_aspect_ratio = ctx->inputs[0]->sample_aspect_ratio;
+
+ for (i = 0; i < s->nb_inputs; i++) {
+ AVFilterLink *inlink = ctx->inputs[i];
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ if (outlink->sample_aspect_ratio.num != inlink->sample_aspect_ratio.num ||
+ outlink->sample_aspect_ratio.den != inlink->sample_aspect_ratio.den) {
+ av_log(ctx, AV_LOG_ERROR, "input #%d link %s SAR %d:%d "
+ "do not match output link %s SAR %d:%d\n",
+ i, ctx->input_pads[i].name,
+ inlink->sample_aspect_ratio.num,
+ inlink->sample_aspect_ratio.den,
+ ctx->output_pads[0].name,
+ outlink->sample_aspect_ratio.num,
+ outlink->sample_aspect_ratio.den);
+ return AVERROR(EINVAL);
+ }
+
+ s->planewidth[i][1] =
+ s->planewidth[i][2] = FF_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
+ s->planewidth[i][0] =
+ s->planewidth[i][3] = inlink->w;
+ s->planeheight[i][1] =
+ s->planeheight[i][2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->planeheight[i][0] =
+ s->planeheight[i][3] = inlink->h;
+ s->nb_planes[i] = av_pix_fmt_count_planes(inlink->format);
+ for (j = 0; j < s->nb_planes[i]; j++)
+ depth[i][j] = desc->comp[j].depth_minus1;
+
+ in[i].time_base = inlink->time_base;
+ in[i].sync = 1;
+ in[i].before = EXT_STOP;
+ in[i].after = EXT_STOP;
+ }
+
+ i = 0;
+ while ((pix_fmt = s->out[i]) != AV_PIX_FMT_NONE) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
+ int planewidth[4], planeheight[4];
+ int nb_planes, plane = 0;
+
+ planewidth[1] =
+ planewidth[2] = FF_CEIL_RSHIFT(outlink->w, desc->log2_chroma_w);
+ planewidth[0] =
+ planewidth[3] = outlink->w;
+ planeheight[1] =
+ planeheight[2] = FF_CEIL_RSHIFT(outlink->h, desc->log2_chroma_h);
+ planeheight[0] =
+ planeheight[3] = outlink->h;
+ nb_planes = av_pix_fmt_count_planes(pix_fmt);
+
+ for (j = 0; j < s->nb_inputs; j++) {
+ for (k = 0; k < s->nb_planes[j]; k++, plane++) {
+ if (plane >= nb_planes)
+ goto next;
+ if (desc->comp[plane].depth_minus1 != depth[j][k])
+ goto next;
+ if (planewidth[plane] != s->planewidth[j][k])
+ goto next;
+ if (planeheight[plane] != s->planeheight[j][k])
+ goto next;
+ }
+ }
+
+ break;
+next:
+ i++;
+ }
+
+ if (pix_fmt == AV_PIX_FMT_NONE) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to find format that matches inputs dimensions and/or depth.\n");
+ return AVERROR(EINVAL);
+ }
+ outlink->format = pix_fmt;
+
+ return ff_framesync_configure(&s->fs);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ MergePlanesContext *s = inlink->dst->priv;
+ return ff_framesync_filter_frame(&s->fs, inlink, in);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ MergePlanesContext *s = ctx->priv;
+ int i, ret;
+
+ switch (s->mode) {
+ case F_YUV|F_A:
+ case F_RGB|F_A:
+ s->nb_inputs = 2;
+ s->in[0] = s->mode & F_RGB ? rgb_pixfmts: yuv_pixfmts;
+ s->in[1] = gray_pixfmts;
+ break;
+ case F_Y|F_U|F_V:
+ case F_R|F_G|F_B:
+ s->nb_inputs = 3;
+ s->in[0] = s->in[1] = s->in[2] = gray_pixfmts;
+ break;
+ case F_Y|F_U|F_V|F_A:
+ case F_R|F_G|F_B|F_A:
+ s->nb_inputs = 4;
+ s->in[0] = s->in[1] = s->in[2] = s->in[3] = gray_pixfmts;
+ break;
+ }
+
+ if (!s->nb_inputs)
+ return AVERROR(EINVAL);
+
+ switch (s->mode) {
+ case F_Y|F_U|F_V|F_A:
+ case F_YUV|F_A:
+ s->out = yuva_pixfmts;
+ break;
+ case F_R|F_G|F_B|F_A:
+ case F_RGB|F_A:
+ s->out = rgba_pixfmts;
+ break;
+ case F_Y|F_U|F_V:
+ s->out = yuv_pixfmts;
+ break;
+ case F_R|F_G|F_B:
+ s->out = rgb_pixfmts;
+ break;
+ }
+
+ for (i = 0; i < s->nb_inputs; i++) {
+ AVFilterPad pad = { 0 };
+
+ pad.type = AVMEDIA_TYPE_VIDEO;
+ pad.name = av_asprintf("in%d", i);
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+ pad.filter_frame = filter_frame;
+
+ if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0){
+ av_freep(&pad.name);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ MergePlanesContext *s = outlink->src->priv;
+ return ff_framesync_request_frame(&s->fs, outlink);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ MergePlanesContext *s = ctx->priv;
+ int i;
+
+ ff_framesync_uninit(&s->fs);
+
+ for (i = 0; i < s->nb_inputs; i++)
+ av_freep(&ctx->input_pads[i].name);
+}
+
+static const AVFilterPad mergeplanes_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter avfilter_vf_mergeplanes = {
+ .name = "mergeplanes",
+ .description = NULL_IF_CONFIG_SMALL("Merge planes."),
+ .priv_size = sizeof(MergePlanesContext),
+ .priv_class = &mergeplanes_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = NULL,
+ .outputs = mergeplanes_outputs,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
+};
--
1.7.11.2