[Ffmpeg-devel] [PATCH, RFC] 16-bit grayscale depth support suite
Kostya
kostya.shishkov
Sat Oct 21 14:39:47 CEST 2006
Here is support for 16-bit grayscale and some trivial patches
adding 16-bit support to some formats. Comments, suggestions and
SWScale support are welcome ;)
-------------- next part --------------
Index: libavutil/avutil.h
===================================================================
--- libavutil/avutil.h (revision 6734)
+++ libavutil/avutil.h (working copy)
@@ -105,6 +105,8 @@
PIX_FMT_RGB32_1, ///< Packed RGB 8:8:8, 32bpp, (msb)8R 8G 8B 8A(lsb), in cpu endianness
PIX_FMT_BGR32_1, ///< Packed RGB 8:8:8, 32bpp, (msb)8B 8G 8R 8A(lsb), in cpu endianness
+ PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian
+ PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian
PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
};
@@ -113,11 +115,13 @@
#define PIX_FMT_BGRA PIX_FMT_BGR32_1
#define PIX_FMT_ARGB PIX_FMT_RGB32
#define PIX_FMT_ABGR PIX_FMT_BGR32
+#define PIX_FMT_GRAY16 PIX_FMT_GRAY16BE
#else
#define PIX_FMT_RGBA PIX_FMT_BGR32
#define PIX_FMT_BGRA PIX_FMT_RGB32
#define PIX_FMT_ARGB PIX_FMT_BGR32_1
#define PIX_FMT_ABGR PIX_FMT_RGB32_1
+#define PIX_FMT_GRAY16 PIX_FMT_GRAY16LE
#endif
#if LIBAVUTIL_VERSION_INT < (50<<16)
Index: libavcodec/utils.c
===================================================================
--- libavcodec/utils.c (revision 6734)
+++ libavcodec/utils.c (working copy)
@@ -177,6 +177,8 @@
case PIX_FMT_YUV422P:
case PIX_FMT_YUV444P:
case PIX_FMT_GRAY8:
+ case PIX_FMT_GRAY16BE:
+ case PIX_FMT_GRAY16LE:
case PIX_FMT_YUVJ420P:
case PIX_FMT_YUVJ422P:
case PIX_FMT_YUVJ444P:
Index: libavcodec/imgconvert.c
===================================================================
--- libavcodec/imgconvert.c (revision 6734)
+++ libavcodec/imgconvert.c (working copy)
@@ -193,6 +193,20 @@
},
/* gray / mono formats */
+ [PIX_FMT_GRAY16BE] = {
+ .name = "gray16be",
+ .nb_channels = 1,
+ .color_type = FF_COLOR_GRAY,
+ .pixel_type = FF_PIXEL_PLANAR,
+ .depth = 16,
+ },
+ [PIX_FMT_GRAY16LE] = {
+ .name = "gray16le",
+ .nb_channels = 1,
+ .color_type = FF_COLOR_GRAY,
+ .pixel_type = FF_PIXEL_PLANAR,
+ .depth = 16,
+ },
[PIX_FMT_GRAY8] = {
.name = "gray",
.nb_channels = 1,
@@ -427,6 +441,8 @@
picture->data[2] = NULL;
picture->linesize[0] = width * 4;
return size * 4;
+ case PIX_FMT_GRAY16BE:
+ case PIX_FMT_GRAY16LE:
case PIX_FMT_BGR555:
case PIX_FMT_BGR565:
case PIX_FMT_RGB555:
@@ -1842,6 +1858,101 @@
gray_to_mono(dst, src, width, height, 0x00);
}
+static void gray_to_gray16le(AVPicture *dst, const AVPicture *src,
+ int width, int height)
+{
+ int x, y, src_wrap, dst_wrap;
+ uint8_t *s, *d;
+ s = src->data[0];
+ src_wrap = src->linesize[0] - width;
+ d = dst->data[0];
+ dst_wrap = dst->linesize[0] - width * 2;
+ for(y=0; y<height; y++){
+ for(x=0; x<width; x++){
+ *d++ = 0;
+ *d++ = *s++;
+ }
+ s += src_wrap;
+ d += dst_wrap;
+ }
+}
+
+static void gray_to_gray16be(AVPicture *dst, const AVPicture *src,
+ int width, int height)
+{
+ int x, y, src_wrap, dst_wrap;
+ uint8_t *s, *d;
+ s = src->data[0];
+ src_wrap = src->linesize[0] - width;
+ d = dst->data[0];
+ dst_wrap = dst->linesize[0] - width * 2;
+ for(y=0; y<height; y++){
+ for(x=0; x<width; x++){
+ *d++ = *s++;
+ *d++ = 0;
+ }
+ s += src_wrap;
+ d += dst_wrap;
+ }
+}
+
+static void gray16be_to_gray(AVPicture *dst, const AVPicture *src,
+ int width, int height)
+{
+ int x, y, src_wrap, dst_wrap;
+ uint8_t *s, *d;
+ s = src->data[0];
+ src_wrap = src->linesize[0] - width * 2;
+ d = dst->data[0];
+ dst_wrap = dst->linesize[0] - width;
+ for(y=0; y<height; y++){
+ for(x=0; x<width; x++){
+ *d++ = *s++;
+ s++;
+ }
+ s += src_wrap;
+ d += dst_wrap;
+ }
+}
+
+static void gray16le_to_gray(AVPicture *dst, const AVPicture *src,
+ int width, int height)
+{
+ int x, y, src_wrap, dst_wrap;
+ uint8_t *s, *d;
+ s = src->data[0];
+ src_wrap = src->linesize[0] - width * 2;
+ d = dst->data[0];
+ dst_wrap = dst->linesize[0] - width;
+ for(y=0; y<height; y++){
+ for(x=0; x<width; x++){
+ s++;
+ *d++ = *s++;
+ }
+ s += src_wrap;
+ d += dst_wrap;
+ }
+}
+
+static void gray16_to_gray16(AVPicture *dst, const AVPicture *src,
+ int width, int height)
+{
+ int x, y, src_wrap, dst_wrap;
+ uint16_t *s, *d;
+ s = src->data[0];
+ src_wrap = (src->linesize[0] - width * 2)/2;
+ d = dst->data[0];
+ dst_wrap = (dst->linesize[0] - width * 2)/2;
+ for(y=0; y<height; y++){
+ for(x=0; x<width; x++){
+ *d++ = bswap_16(*s++);
+ }
+ s += src_wrap;
+ d += dst_wrap;
+ }
+}
+
+
typedef struct ConvertEntry {
void (*convert)(AVPicture *dst,
const AVPicture *src, int width, int height);
@@ -2024,6 +2135,22 @@
.convert = rgb565_to_gray
},
},
+ [PIX_FMT_GRAY16BE] = {
+ [PIX_FMT_GRAY8] = {
+ .convert = gray16be_to_gray
+ },
+ [PIX_FMT_GRAY16LE] = {
+ .convert = gray16_to_gray16
+ },
+ },
+ [PIX_FMT_GRAY16LE] = {
+ [PIX_FMT_GRAY8] = {
+ .convert = gray16le_to_gray
+ },
+ [PIX_FMT_GRAY16BE] = {
+ .convert = gray16_to_gray16
+ },
+ },
[PIX_FMT_GRAY8] = {
[PIX_FMT_RGB555] = {
.convert = gray_to_rgb555
@@ -2046,6 +2173,12 @@
[PIX_FMT_MONOBLACK] = {
.convert = gray_to_monoblack
},
+ [PIX_FMT_GRAY16LE] = {
+ .convert = gray_to_gray16le
+ },
+ [PIX_FMT_GRAY16BE] = {
+ .convert = gray_to_gray16be
+ },
},
[PIX_FMT_MONOWHITE] = {
[PIX_FMT_GRAY8] = {
-------------- next part --------------
Index: libavcodec/png.c
===================================================================
--- libavcodec/png.c (revision 6734)
+++ libavcodec/png.c (working copy)
@@ -567,6 +567,9 @@
} else if (s->bit_depth == 8 &&
s->color_type == PNG_COLOR_TYPE_GRAY) {
avctx->pix_fmt = PIX_FMT_GRAY8;
+ } else if (s->bit_depth == 16 &&
+ s->color_type == PNG_COLOR_TYPE_GRAY) {
+ avctx->pix_fmt = PIX_FMT_GRAY16BE;
} else if (s->bit_depth == 1 &&
s->color_type == PNG_COLOR_TYPE_GRAY) {
avctx->pix_fmt = PIX_FMT_MONOBLACK;
-------------- next part --------------
Index: libavcodec/pnm.c
===================================================================
--- libavcodec/pnm.c (revision 6734)
+++ libavcodec/pnm.c (working copy)
@@ -70,7 +70,7 @@
static int pnm_decode_header(AVCodecContext *avctx, PNMContext * const s){
char buf1[32], tuple_type[32];
- int h, w, depth, maxval;;
+ int h, w, depth, maxval;
pnm_get(s, buf1, sizeof(buf1));
if (!strcmp(buf1, "P4")) {
@@ -142,8 +142,9 @@
return -1;
if (avctx->pix_fmt != PIX_FMT_MONOWHITE) {
pnm_get(s, buf1, sizeof(buf1));
+ if(atoi(buf1) == 65535 && avctx->pix_fmt == PIX_FMT_GRAY8)
+ avctx->pix_fmt = PIX_FMT_GRAY16BE;
}
-
/* more check if YUV420 */
if (avctx->pix_fmt == PIX_FMT_YUV420P) {
if ((avctx->width & 1) != 0)
@@ -194,6 +195,9 @@
case PIX_FMT_GRAY8:
n = avctx->width;
goto do_read;
+ case PIX_FMT_GRAY16BE:
+ n = avctx->width * 2;
+ goto do_read;
case PIX_FMT_MONOWHITE:
case PIX_FMT_MONOBLACK:
n = (avctx->width + 7) >> 3;
@@ -292,6 +296,10 @@
c = '5';
n = avctx->width;
break;
+ case PIX_FMT_GRAY16BE:
+ c = '5';
+ n = avctx->width * 2;
+ break;
case PIX_FMT_RGB24:
c = '6';
n = avctx->width * 3;
@@ -310,7 +318,7 @@
s->bytestream += strlen(s->bytestream);
if (avctx->pix_fmt != PIX_FMT_MONOWHITE) {
snprintf(s->bytestream, s->bytestream_end - s->bytestream,
- "%d\n", 255);
+ "%d\n", (avctx->pix_fmt != PIX_FMT_GRAY16BE) ? 255 : 65535);
s->bytestream += strlen(s->bytestream);
}
@@ -537,7 +545,7 @@
pnm_encode_frame,
NULL, //encode_end,
pnm_decode_frame,
- .pix_fmts= (enum PixelFormat[]){PIX_FMT_GRAY8, -1},
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_GRAY8, PIX_FMT_GRAY16BE, -1},
};
#endif // CONFIG_PGM_ENCODER
-------------- next part --------------
Index: libavcodec/mjpeg.c
===================================================================
--- libavcodec/mjpeg.c (revision 6734)
+++ libavcodec/mjpeg.c (working copy)
@@ -1125,10 +1125,6 @@
av_log(s->avctx, AV_LOG_ERROR, "only 8 bits/component accepted\n");
return -1;
}
- if (s->bits > 8 && s->ls){
- av_log(s->avctx, AV_LOG_ERROR, "only <= 8 bits/component accepted for JPEG-LS\n");
- return -1;
- }
height = get_bits(&s->gb, 16);
width = get_bits(&s->gb, 16);
@@ -1141,6 +1137,10 @@
if (nb_components <= 0 ||
nb_components > MAX_COMPONENTS)
return -1;
+ if (s->ls && !(s->bits <= 8 || nb_components == 1)){
+ av_log(s->avctx, AV_LOG_ERROR, "only <= 8 bits/component or 16-bit gray accepted for JPEG-LS\n");
+ return -1;
+ }
s->nb_components = nb_components;
s->h_max = 1;
s->v_max = 1;
@@ -1223,8 +1223,10 @@
if(s->ls){
if(s->nb_components > 1)
s->avctx->pix_fmt = PIX_FMT_RGB24;
+ else if(s->bits <= 8)
+ s->avctx->pix_fmt = PIX_FMT_GRAY8;
else
- s->avctx->pix_fmt = PIX_FMT_GRAY8;
+ s->avctx->pix_fmt = PIX_FMT_GRAY16;
}
if(s->picture.data[0])
Index: libavcodec/jpeg_ls.c
===================================================================
--- libavcodec/jpeg_ls.c (revision 6734)
+++ libavcodec/jpeg_ls.c (working copy)
@@ -401,11 +401,126 @@
}
}
+/**
+ * Decode one line of image - 16bpp version
+ */
+static inline void ls_decode_line_16bpp(JLSState *state, MJpegDecodeContext *s, uint16_t *last, uint16_t *dst, int last2, int w, int stride, int comp){
+ int i, x = 0;
+ int Ra, Rb, Rc, Rd;
+ int D0, D1, D2;
+
+ while(x < w) {
+ int err, pred;
+
+ /* compute gradients */
+ Ra = x ? dst[x - stride] : last[x];
+ Rb = last[x];
+ Rc = x ? last[x - stride] : last2;
+ Rd = (x >= w - stride) ? last[x] : last[x + stride];
+ D0 = Rd - Rb;
+ D1 = Rb - Rc;
+ D2 = Rc - Ra;
+ /* run mode */
+ if((FFABS(D0) <= state->near) && (FFABS(D1) <= state->near) && (FFABS(D2) <= state->near)) {
+ int r;
+ int RItype;
+
+ /* decode full runs while available */
+ while(get_bits1(&s->gb)) {
+ int r;
+ r = 1 << log2_run[state->run_index[comp]];
+ if(x + r * stride > w) {
+ r = (w - x) / stride;
+ }
+ for(i = 0; i < r; i++) {
+ dst[x] = Ra;
+ x += stride;
+ }
+ /* if EOL reached, we stop decoding */
+ if(r != (1 << log2_run[state->run_index[comp]]))
+ return;
+ if(state->run_index[comp] < 31)
+ state->run_index[comp]++;
+ if(x + stride > w)
+ return;
+ }
+ /* decode aborted run */
+ r = log2_run[state->run_index[comp]];
+ if(r)
+ r = get_bits_long(&s->gb, r);
+ for(i = 0; i < r; i++) {
+ dst[x] = Ra;
+ x += stride;
+ }
+
+ /* decode run termination value */
+ Rb = last[x];
+ RItype = (FFABS(Ra - Rb) <= state->near) ? 1 : 0;
+ err = ls_get_code_runterm(&s->gb, state, RItype, log2_run[state->run_index[comp]]);
+ if(state->run_index[comp])
+ state->run_index[comp]--;
+
+ if(state->near && RItype){
+ pred = Ra + err;
+ } else {
+ if(Rb < Ra)
+ pred = Rb - err;
+ else
+ pred = Rb + err;
+ }
+
+ if(state->near){
+ if(pred < -state->near)
+ pred += state->range * state->twonear;
+ else if(pred > state->maxval + state->near)
+ pred -= state->range * state->twonear;
+ pred = clip(pred, 0, state->maxval);
+ }
+
+ dst[x] = pred;
+ x += stride;
+ } else { /* regular mode */
+ int context, sign;
+
+ context = quantize(state, D0) * 81 + quantize(state, D1) * 9 + quantize(state, D2);
+ pred = mid_pred(Ra, Ra + Rb - Rc, Rb);
+
+ if(context < 0){
+ context = -context;
+ sign = 1;
+ }else{
+ sign = 0;
+ }
+
+ if(sign){
+ pred = clip(pred - state->C[context], 0, state->maxval);
+ err = -ls_get_code_regular(&s->gb, state, context);
+ } else {
+ pred = clip(pred + state->C[context], 0, state->maxval);
+ err = ls_get_code_regular(&s->gb, state, context);
+ }
+
+ /* we have to do something more for near-lossless coding */
+ pred += err;
+ if(state->near) {
+ if(pred < -state->near)
+ pred += state->range * state->twonear;
+ else if(pred > state->maxval + state->near)
+ pred -= state->range * state->twonear;
+ pred = clip(pred, 0, state->maxval);
+ }
+
+ dst[x] = pred;
+ x += stride;
+ }
+ }
+}
+
static int ls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv){
int i, t = 0;
uint8_t *zero, *last, *cur;
JLSState *state;
- int off, stride, width;
+ int off = 0, stride = 1, width, shift;
zero = av_mallocz(s->picture.linesize[0]);
last = zero;
@@ -423,6 +538,11 @@
reset_ls_coding_parameters(state, 0);
ls_init_state(state);
+ if(s->bits <= 8)
+ shift = point_transform + (8 - s->bits);
+ else
+ shift = point_transform + (16 - s->bits);
+
// av_log(s->avctx, AV_LOG_DEBUG, "JPEG-LS params: %ix%i NEAR=%i MV=%i T(%i,%i,%i) RESET=%i, LIMIT=%i, qbpp=%i, RANGE=%i\n",s->width,s->height,state->near,state->maxval,state->T1,state->T2,state->T3,state->reset,state->limit,state->qbpp, state->range);
// av_log(s->avctx, AV_LOG_DEBUG, "JPEG params: ILV=%i Pt=%i BPP=%i, scan = %i\n", ilv, point_transform, s->bits, s->cur_scan);
if(ilv == 0) { /* separate planes */
@@ -431,8 +551,13 @@
width = s->width * stride;
cur += off;
for(i = 0; i < s->height; i++) {
- ls_decode_line(state, s, last, cur, t, width, stride, off);
- t = last[0];
+ if(s->bits <= 8){
+ ls_decode_line(state, s, last, cur, t, width, stride, off);
+ t = last[0];
+ }else{
+ ls_decode_line_16bpp(state, s, last, cur, t, width, stride, off);
+ t = *((uint16_t*)last);
+ }
last = cur;
cur += s->picture.linesize[0];
@@ -466,6 +591,31 @@
return -1;
}
+ if(shift){ /* we need to do point transform or normalize samples */
+ int x, w;
+
+ w = s->width * s->nb_components;
+
+ if(s->bits <= 8){
+ uint8_t *src = s->picture.data[0];
+
+ for(i = 0; i < s->height; i++){
+ for(x = off; x < w; x+= stride){
+ src[x] <<= shift;
+ }
+ src += s->picture.linesize[0];
+ }
+ }else{
+ uint16_t *src = s->picture.data[0];
+
+ for(i = 0; i < s->height; i++){
+ for(x = 0; x < w; x++){
+ src[x] <<= shift;
+ }
+ src += s->picture.linesize[0]/2;
+ }
+ }
+ }
av_free(state);
av_free(zero);
@@ -676,11 +826,110 @@
}
}
+/**
+ * Encode one line of image with 16 bpp sample size
+ */
+static inline void ls_encode_line_16bpp(JLSState *state, PutBitContext *pb, uint16_t *last, uint16_t *cur, int last2, int w, int stride, int comp){
+ int x = 0;
+ int Ra, Rb, Rc, Rd;
+ int D0, D1, D2;
+
+ while(x < w) {
+ int err, pred, sign;
+
+ /* compute gradients */
+ Ra = x ? cur[x - stride] : last[x];
+ Rb = last[x];
+ Rc = x ? last[x - stride] : last2;
+ Rd = (x >= w - stride) ? last[x] : last[x + stride];
+ D0 = Rd - Rb;
+ D1 = Rb - Rc;
+ D2 = Rc - Ra;
+
+ /* run mode */
+ if((FFABS(D0) <= state->near) && (FFABS(D1) <= state->near) && (FFABS(D2) <= state->near)) {
+ int RUNval, RItype, run;
+
+ run = 0;
+ RUNval = Ra;
+ while(x < w && (FFABS(cur[x] - RUNval) <= state->near)){
+ run++;
+ cur[x] = Ra;
+ x += stride;
+ }
+ ls_encode_run(state, pb, run, comp, x < w);
+ if(x >= w)
+ return;
+ Rb = last[x];
+ RItype = (FFABS(Ra - Rb) <= state->near);
+ pred = RItype ? Ra : Rb;
+ err = cur[x] - pred;
+
+ if(!RItype && Ra > Rb)
+ err = -err;
+
+ if(state->near){
+ if(err > 0)
+ err = (state->near + err) / state->twonear;
+ else
+ err = -(state->near - err) / state->twonear;
+
+ if(RItype || (Rb >= Ra))
+ Ra = clip(pred + err * state->twonear, 0, state->maxval);
+ else
+ Ra = clip(pred - err * state->twonear, 0, state->maxval);
+ cur[x] = Ra;
+ }
+ if(err < 0)
+ err += state->range;
+ if(err >= ((state->range + 1) >> 1))
+ err -= state->range;
+
+ ls_encode_runterm(state, pb, RItype, err, log2_run[state->run_index[comp]]);
+
+ if(state->run_index[comp] > 0)
+ state->run_index[comp]--;
+ x += stride;
+ } else { /* regular mode */
+ int context;
+
+ context = quantize(state, D0) * 81 + quantize(state, D1) * 9 + quantize(state, D2);
+ pred = mid_pred(Ra, Ra + Rb - Rc, Rb);
+
+ if(context < 0){
+ context = -context;
+ sign = 1;
+ pred = clip(pred - state->C[context], 0, state->maxval);
+ err = pred - cur[x];
+ }else{
+ sign = 0;
+ pred = clip(pred + state->C[context], 0, state->maxval);
+ err = cur[x] - pred;
+ }
+
+ if(state->near){
+ if(err > 0)
+ err = (state->near + err) / state->twonear;
+ else
+ err = -(state->near - err) / state->twonear;
+ if(!sign)
+ Ra = clip(pred + err * state->twonear, 0, state->maxval);
+ else
+ Ra = clip(pred - err * state->twonear, 0, state->maxval);
+ cur[x] = Ra;
+ }
+
+ ls_encode_regular(state, pb, context, err);
+ x += stride;
+ }
+ }
+}
+
static void ls_store_lse(JLSState *state, PutBitContext *pb){
/* Test if we have default params and don't need to store LSE */
JLSState state2;
memset(&state2, 0, sizeof(JLSState));
- state2.bpp = 8;
+ state2.bpp = state->bpp;
state2.near = state->near;
reset_ls_coding_parameters(&state2, 1);
if(state->T1 == state2.T1 && state->T2 == state2.T2 && state->T3 == state2.T3 && state->reset == state2.reset)
@@ -717,13 +966,16 @@
p->pict_type= FF_I_TYPE;
p->key_frame= 1;
- comps = (avctx->pix_fmt == PIX_FMT_GRAY8) ? 1 : 3;
+ if(avctx->pix_fmt == PIX_FMT_GRAY8 || avctx->pix_fmt == PIX_FMT_GRAY16)
+ comps = 1;
+ else
+ comps = 3;
/* write our own JPEG header, can't use mjpeg_picture_header */
put_marker(&pb, SOI);
put_marker(&pb, SOF48);
put_bits(&pb, 16, 8 + comps * 3); // header size depends on components
- put_bits(&pb, 8, 8); // bpp
+ put_bits(&pb, 8, (avctx->pix_fmt == PIX_FMT_GRAY16) ? 16 : 8); // bpp
put_bits(&pb, 16, avctx->height);
put_bits(&pb, 16, avctx->width);
put_bits(&pb, 8, comps); // components
@@ -747,7 +999,7 @@
state = av_mallocz(sizeof(JLSState));
/* initialize JPEG-LS state from JPEG parameters */
state->near = near;
- state->bpp = 8;
+ state->bpp = (avctx->pix_fmt == PIX_FMT_GRAY16) ? 16 : 8;
reset_ls_coding_parameters(state, 0);
ls_init_state(state);
@@ -765,6 +1017,15 @@
last = cur;
cur += p->linesize[0];
}
+ }else if(avctx->pix_fmt == PIX_FMT_GRAY16){
+ int t = 0;
+
+ for(i = 0; i < avctx->height; i++) {
+ ls_encode_line_16bpp(state, &pb2, last, cur, t, avctx->width, 1, 0);
+ t = *((uint16_t*)last);
+ last = cur;
+ cur += p->linesize[0];
+ }
}else if(avctx->pix_fmt == PIX_FMT_RGB24){
int j, width;
int Rc[3] = {0, 0, 0};
@@ -827,7 +1088,7 @@
c->avctx = ctx;
ctx->coded_frame = &c->picture;
- if(ctx->pix_fmt != PIX_FMT_GRAY8 && ctx->pix_fmt != PIX_FMT_RGB24 && ctx->pix_fmt != PIX_FMT_BGR24){
+ if(ctx->pix_fmt != PIX_FMT_GRAY8 && ctx->pix_fmt != PIX_FMT_GRAY16 && ctx->pix_fmt != PIX_FMT_RGB24 && ctx->pix_fmt != PIX_FMT_BGR24){
av_log(ctx, AV_LOG_ERROR, "Only grayscale and RGB24/BGR24 images are supported\n");
return -1;
}
@@ -842,6 +1103,6 @@
encode_init_ls,
encode_picture_ls,
NULL,
- .pix_fmts= (enum PixelFormat[]){PIX_FMT_BGR24, PIX_FMT_RGB24, PIX_FMT_GRAY8, -1},
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_BGR24, PIX_FMT_RGB24, PIX_FMT_GRAY8, PIX_FMT_GRAY16, -1},
};
#endif
More information about the ffmpeg-devel
mailing list