[FFmpeg-devel] [PATCH 5/6] avfilter: add paletteuse filter
Clément Bœsch
u at pkh.me
Sun Jan 25 19:55:22 CET 2015
---
doc/filters.texi | 31 +++++
libavfilter/Makefile | 1 +
libavfilter/allfilters.c | 1 +
libavfilter/vf_paletteuse.c | 282 ++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 315 insertions(+)
create mode 100644 libavfilter/vf_paletteuse.c
diff --git a/doc/filters.texi b/doc/filters.texi
index 5e27ae5..77415b4 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -6884,6 +6884,7 @@ pad="2*iw:2*ih:ow-iw:oh-ih"
@end example
@end itemize
+@anchor{palettegen}
@section palettegen
Generate one palette for a whole video stream.
@@ -6908,6 +6909,36 @@ ffmpeg -i input.mkv -vf palettegen palette.png
@end example
@end itemize
+@section paletteuse
+
+Use a palette to downsample an input video stream.
+
+The filter takes two inputs: one video stream and a palette. The palette must
+be a 256-pixel image.
+
+It accepts the following option:
+
+@table @option
+@item dither
+Select dithering mode. Available algorithms are:
+@table @samp
+@item heckbert
+dithering as defined by Paul Heckbert in 1982.
+@end table
+Default is @var{heckbert}.
+@end table
+
+@subsection Examples
+
+@itemize
+@item
+Use a palette (generated for example with @ref{palettegen}) to encode a GIF
+using @command{ffmpeg}:
+@example
+ffmpeg -i input.mkv -i palette.png -lavfi paletteuse output.gif
+@end example
+@end itemize
+
@section perspective
Correct perspective of video not recorded perpendicular to the screen.
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 6ea7e9c..9328453 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -160,6 +160,7 @@ OBJS-$(CONFIG_OVERLAY_FILTER) += vf_overlay.o dualinput.o framesy
OBJS-$(CONFIG_OWDENOISE_FILTER) += vf_owdenoise.o
OBJS-$(CONFIG_PAD_FILTER) += vf_pad.o
OBJS-$(CONFIG_PALETTEGEN_FILTER) += vf_palettegen.o
+OBJS-$(CONFIG_PALETTEUSE_FILTER) += vf_paletteuse.o
OBJS-$(CONFIG_PERMS_FILTER) += f_perms.o
OBJS-$(CONFIG_PERSPECTIVE_FILTER) += vf_perspective.o
OBJS-$(CONFIG_PHASE_FILTER) += vf_phase.o
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index a3443a0..333c05f 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -175,6 +175,7 @@ void avfilter_register_all(void)
REGISTER_FILTER(OWDENOISE, owdenoise, vf);
REGISTER_FILTER(PAD, pad, vf);
REGISTER_FILTER(PALETTEGEN, palettegen, vf);
+ REGISTER_FILTER(PALETTEUSE, paletteuse, vf);
REGISTER_FILTER(PERMS, perms, vf);
REGISTER_FILTER(PERSPECTIVE, perspective, vf);
REGISTER_FILTER(PHASE, phase, vf);
diff --git a/libavfilter/vf_paletteuse.c b/libavfilter/vf_paletteuse.c
new file mode 100644
index 0000000..67b20ff
--- /dev/null
+++ b/libavfilter/vf_paletteuse.c
@@ -0,0 +1,282 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Use a palette to downsample an input video stream.
+ *
+ * @todo add an option for smaller palettes
+ */
+
+#include "libavutil/opt.h"
+#include "dualinput.h"
+#include "avfilter.h"
+
/* Error-diffusion modes selectable through the "dither" option. */
enum dithering_mode {
    DITHERING_NONE,     // no dithering, plain nearest-color mapping
    DITHERING_HECKBERT, // 3-tap error diffusion (Paul Heckbert, 1982)
    NB_DITHERING        // number of modes; not a valid user value
};
+
typedef struct {
    const AVClass *class;
    FFDualInputContext dinput;         // synchronizes the main and palette inputs
    uint32_t map[1<<15];               // RGB555 -> palette entry index (built in load_palette)
    uint32_t palette[AVPALETTE_COUNT]; // 0xAARRGGBB palette entries from the 2nd input
    int palette_loaded;                // set once the palette frame has been read
    enum dithering_mode dither;        // user-selected dithering mode
} PaletteUseContext;
+
#define OFFSET(x) offsetof(PaletteUseContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption paletteuse_options[] = {
    { "dither", "select dithering mode", OFFSET(dither), AV_OPT_TYPE_INT, {.i64=DITHERING_HECKBERT}, 0, NB_DITHERING-1, FLAGS, "dithering_mode" },
        { "heckbert", "dithering as defined by Paul Heckbert in 1982", 0, AV_OPT_TYPE_CONST, {.i64=DITHERING_HECKBERT}, INT_MIN, INT_MAX, FLAGS, "dithering_mode" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(paletteuse);
+
/* Negotiate formats: main input is RGB32, palette input is RGB32
 * (256 pixels, validated in config_input_palette), output is PAL8. */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat in_fmts[] = {AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE};
    static const enum AVPixelFormat inpal_fmts[] = {AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE};
    static const enum AVPixelFormat out_fmts[] = {AV_PIX_FMT_PAL8, AV_PIX_FMT_NONE};
    AVFilterFormats *in = ff_make_format_list(in_fmts);
    AVFilterFormats *inpal = ff_make_format_list(inpal_fmts);
    AVFilterFormats *out = ff_make_format_list(out_fmts);
    // NOTE(review): on alloc failure the lists created before the failing one
    // are leaked; harmless one-shot leak, but could be cleaned up
    if (!in || !inpal || !out)
        return AVERROR(ENOMEM);
    ff_formats_ref(in, &ctx->inputs[0]->out_formats);
    ff_formats_ref(inpal, &ctx->inputs[1]->out_formats);
    ff_formats_ref(out, &ctx->outputs[0]->in_formats);
    return 0;
}
+
+static av_always_inline int dither_color(uint32_t px, int er, int eg, int eb, int scale)
+{
+ // FIXME: can be made faster
+ return av_clip_uint8((px >> 16 & 0xff) + (er * scale / 8)) << 16
+ | av_clip_uint8((px >> 8 & 0xff) + (eg * scale / 8)) << 8
+ | av_clip_uint8((px & 0xff) + (eb * scale / 8));
+}
+
/**
 * Squared Euclidean distance between two packed 0xRRGGBB colors.
 * The top (alpha) byte is ignored.
 */
static int diff(uint32_t c1, uint32_t c2)
{
    // XXX: should we use something smarter?
    const int dr = (int)(c1 >> 16 & 0xff) - (int)(c2 >> 16 & 0xff);
    const int dg = (int)(c1 >>  8 & 0xff) - (int)(c2 >>  8 & 0xff);
    const int db = (int)(c1       & 0xff) - (int)(c2       & 0xff);

    return dr*dr + dg*dg + db*db;
}
+
+static void set_frame(PaletteUseContext *s, AVFrame *out, AVFrame *in)
+{
+ int x, y;
+ const int *map = s->map;
+ const uint32_t *palette = s->palette;
+ uint32_t *src = (uint32_t *)in ->data[0];
+ uint8_t *dst = out->data[0];
+ const int src_linesize = in ->linesize[0] >> 2;
+ const int dst_linesize = out->linesize[0];
+
+ for (y = 0; y < in->height; y++) {
+ for (x = 0; x < in->width; x++) {
+ // XXX: should we avoid this downsampling?
+ const uint8_t r = src[x] >> (16+3) & 0x1f;
+ const uint8_t g = src[x] >> ( 8+3) & 0x1f;
+ const uint8_t b = src[x] >> ( 3) & 0x1f;
+ dst[x] = map[r<<10 | g<<5 | b];
+
+ // XXX: make sure compiler are not dumb and take this out of the
+ // inner loop
+ if (s->dither == DITHERING_HECKBERT) {
+ const uint32_t dstc = palette[dst[x]];
+ const int er = (src[x] >> 16 & 0xff) - (dstc >> 16 & 0xff);
+ const int eg = (src[x] >> 8 & 0xff) - (dstc >> 8 & 0xff);
+ const int eb = (src[x] & 0xff) - (dstc & 0xff);
+ const int right = x < in->width - 1, down = y < in->height - 1;
+
+ if (right) src[ x + 1] = dither_color(src[ x + 1], er, eg, eb, 3);
+ if ( down) src[src_linesize + x ] = dither_color(src[src_linesize + x ], er, eg, eb, 3);
+ if (right && down) src[src_linesize + x + 1] = dither_color(src[src_linesize + x + 1], er, eg, eb, 2);
+ }
+ }
+ src += src_linesize;
+ dst += dst_linesize;
+ }
+}
+
+static AVFrame *apply_palette(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ PaletteUseContext *s = ctx->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+
+ AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return NULL;
+ }
+ av_frame_copy_props(out, in);
+ set_frame(s, out, in);
+ memcpy(out->data[1], s->palette, AVPALETTE_SIZE);
+ av_frame_free(&in);
+ return out;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ int ret;
+ AVFilterContext *ctx = outlink->src;
+ PaletteUseContext *s = ctx->priv;
+
+ outlink->w = ctx->inputs[0]->w;
+ outlink->h = ctx->inputs[0]->h;
+ outlink->time_base = ctx->inputs[0]->time_base;
+ if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0)
+ return ret;
+ return 0;
+}
+
+static int config_input_palette(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+
+ if (inlink->w * inlink->h != AVPALETTE_COUNT) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Palette input must contain exactly %d pixels. "
+ "Specified input has %dx%d=%d pixels\n",
+ AVPALETTE_COUNT, inlink->w, inlink->h,
+ inlink->w * inlink->h);
+ return AVERROR(EINVAL);
+ }
+ return 0;
+}
+
+static int find_nearest_representative(const uint32_t *palette, uint32_t rgb555)
+{
+ int i, pal_id = -1, min_dist = INT_MAX;
+
+ for (i = 0; i < AVPALETTE_COUNT; i++) {
+ if ((palette[i] & 0xff000000) == 0xff000000) {
+ const uint8_t r = ((rgb555>>10 ) * 8423 + (1<<9)) >> 10;
+ const uint8_t g = ((rgb555>> 5 & 0x1f) * 8423 + (1<<9)) >> 10;
+ const uint8_t b = ((rgb555 & 0x1f) * 8423 + (1<<9)) >> 10;
+ const uint32_t color = r<<16 | g<<8 | b;
+ const int d = diff(palette[i], color);
+
+ if (d < min_dist) {
+ pal_id = i;
+ min_dist = d;
+ }
+ }
+ }
+ av_dlog(NULL, "rgb555:%04x (#%08x) => palette[%d] (#%08x)\n",
+ rgb555, (rgb555>>10)<<(16+3) | (rgb555>>5&0x1f)<<(8+3) | (rgb555&0x1f)<<3,
+ pal_id, palette[pal_id]);
+ return pal_id;
+}
+
+static void load_palette(PaletteUseContext *s, const AVFrame *palette_frame)
+{
+ int i, x, y;
+ const uint32_t *p = (const uint32_t *)palette_frame->data[0];
+ const int p_linesize = palette_frame->linesize[0] >> 2;
+
+ i = 0;
+ for (y = 0; y < palette_frame->height; y++) {
+ for (x = 0; x < palette_frame->width; x++)
+ s->palette[i++] = p[x];
+ p += p_linesize;
+ }
+
+ for (i = 0; i < 1<<15; i++)
+ s->map[i] = find_nearest_representative(s->palette, i);
+
+ s->palette_loaded = 1;
+}
+
+static AVFrame *load_apply_palette(AVFilterContext *ctx, AVFrame *main,
+ const AVFrame *second)
+{
+ AVFilterLink *inlink = ctx->inputs[0];
+ PaletteUseContext *s = ctx->priv;
+ if (!s->palette_loaded) {
+ load_palette(s, second);
+ }
+ return apply_palette(inlink, main);
+}
+
/* Frames from both inputs are queued into the dualinput helper, which
 * invokes load_apply_palette once it has a frame from each. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    PaletteUseContext *s = inlink->dst->priv;
    return ff_dualinput_filter_frame(&s->dinput, inlink, in);
}
+
/* Forward output requests to the dualinput helper. */
static int request_frame(AVFilterLink *outlink)
{
    PaletteUseContext *s = outlink->src->priv;
    return ff_dualinput_request_frame(&s->dinput, outlink);
}
+
static av_cold int init(AVFilterContext *ctx)
{
    PaletteUseContext *s = ctx->priv;
    s->dinput.repeatlast = 1; // only 1 frame in the palette
    s->dinput.process = load_apply_palette;
    return 0;
}
+
static const AVFilterPad paletteuse_inputs[] = {
    {
        .name           = "default",
        .type           = AVMEDIA_TYPE_VIDEO,
        .filter_frame   = filter_frame,
        .needs_writable = 1, // for dithering
    },{
        // second input: the 256-pixel palette frame
        .name           = "palette",
        .type           = AVMEDIA_TYPE_VIDEO,
        .config_props   = config_input_palette,
        .filter_frame   = filter_frame,
    },
    { NULL }
};
+
static const AVFilterPad paletteuse_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};
+
AVFilter ff_vf_paletteuse = {
    .name          = "paletteuse",
    .description   = NULL_IF_CONFIG_SMALL("Use a palette to downsample an input video stream."),
    .priv_size     = sizeof(PaletteUseContext),
    .query_formats = query_formats,
    .init          = init,
    .inputs        = paletteuse_inputs,
    .outputs       = paletteuse_outputs,
    .priv_class    = &paletteuse_class,
};
--
2.2.2
More information about the ffmpeg-devel
mailing list