[PATCH 3/4] Implement dynamic memory allocation for MJPEG's picture
From: anatoly (anatoly.nenashev)
Date: Thu Feb 17 17:16:08 CET 2011
---
libavcodec/jpeglsdec.c | 18 +++++++++---------
libavcodec/mjpegbdec.c | 2 +-
libavcodec/mjpegdec.c | 44 +++++++++++++++++++++++++++-----------------
libavcodec/mjpegdec.h | 2 +-
4 files changed, 38 insertions(+), 28 deletions(-)
diff --git a/libavcodec/jpeglsdec.c b/libavcodec/jpeglsdec.c
index 7278e02..a5fb729 100644
--- a/libavcodec/jpeglsdec.c
+++ b/libavcodec/jpeglsdec.c
@@ -262,9 +262,9 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transfor
JLSState *state;
int off = 0, stride = 1, width, shift;
- zero = av_mallocz(s->picture.linesize[0]);
+ zero = av_mallocz(s->picture_ptr->linesize[0]);
last = zero;
- cur = s->picture.data[0];
+ cur = s->picture_ptr->data[0];
state = av_mallocz(sizeof(JLSState));
/* initialize JPEG-LS state from JPEG parameters */
@@ -299,7 +299,7 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transfor
t = *((uint16_t*)last);
}
last = cur;
- cur += s->picture.linesize[0];
+ cur += s->picture_ptr->linesize[0];
if (s->restart_interval && !--s->restart_count) {
align_get_bits(&s->gb);
@@ -309,7 +309,7 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transfor
} else if(ilv == 1) { /* line interleaving */
int j;
int Rc[3] = {0, 0, 0};
- memset(cur, 0, s->picture.linesize[0]);
+ memset(cur, 0, s->picture_ptr->linesize[0]);
width = s->width * 3;
for(i = 0; i < s->height; i++) {
for(j = 0; j < 3; j++) {
@@ -322,7 +322,7 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transfor
}
}
last = cur;
- cur += s->picture.linesize[0];
+ cur += s->picture_ptr->linesize[0];
}
} else if(ilv == 2) { /* sample interleaving */
av_log(s->avctx, AV_LOG_ERROR, "Sample interleaved images are not supported.\n");
@@ -337,22 +337,22 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transfor
w = s->width * s->nb_components;
if(s->bits <= 8){
- uint8_t *src = s->picture.data[0];
+ uint8_t *src = s->picture_ptr->data[0];
for(i = 0; i < s->height; i++){
for(x = off; x < w; x+= stride){
src[x] <<= shift;
}
- src += s->picture.linesize[0];
+ src += s->picture_ptr->linesize[0];
}
}else{
- uint16_t *src = (uint16_t*) s->picture.data[0];
+ uint16_t *src = (uint16_t*) s->picture_ptr->data[0];
for(i = 0; i < s->height; i++){
for(x = 0; x < w; x++){
src[x] <<= shift;
}
- src += s->picture.linesize[0]/2;
+ src += s->picture_ptr->linesize[0]/2;
}
}
}
diff --git a/libavcodec/mjpegbdec.c b/libavcodec/mjpegbdec.c
index fc045ba..5f86343 100644
--- a/libavcodec/mjpegbdec.c
+++ b/libavcodec/mjpegbdec.c
@@ -129,7 +129,7 @@ read_header:
//XXX FIXME factorize, this looks very similar to the EOI code
- *picture= s->picture;
+ *picture= *s->picture_ptr;
*data_size = sizeof(AVFrame);
if(!s->lossless){
diff --git a/libavcodec/mjpegdec.c b/libavcodec/mjpegdec.c
index 6f517d0..b6f19dd 100644
--- a/libavcodec/mjpegdec.c
+++ b/libavcodec/mjpegdec.c
@@ -81,6 +81,13 @@ av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
{
MJpegDecodeContext *s = avctx->priv_data;
+ if (!s->picture_ptr) {
+ s->picture_ptr = av_mallocz(sizeof(AVFrame));
+ if (!s->picture_ptr)
+ return AVERROR(ENOMEM);
+ s->picture_ptr->reference = 0;
+ }
+
s->avctx = avctx;
dsputil_init(&s->dsp, avctx);
ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);
@@ -282,8 +289,8 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
s->height < ((s->org_height * 3) / 4)) {
s->interlaced = 1;
s->bottom_field = s->interlace_polarity;
- s->picture.interlaced_frame = 1;
- s->picture.top_field_first = !s->interlace_polarity;
+ s->picture_ptr->interlaced_frame = 1;
+ s->picture_ptr->top_field_first = !s->interlace_polarity;
height *= 2;
}
@@ -342,20 +349,19 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
s->avctx->pix_fmt = PIX_FMT_GRAY16;
}
- if(s->picture.data[0])
- s->avctx->release_buffer(s->avctx, &s->picture);
+ if(s->picture_ptr->data[0])
+ s->avctx->release_buffer(s->avctx, s->picture_ptr);
- s->picture.reference= 0;
- if(s->avctx->get_buffer(s->avctx, &s->picture) < 0){
+ if(s->avctx->get_buffer(s->avctx, s->picture_ptr) < 0){
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
- s->picture.pict_type= FF_I_TYPE;
- s->picture.key_frame= 1;
+ s->picture_ptr->pict_type= FF_I_TYPE;
+ s->picture_ptr->key_frame= 1;
s->got_picture = 1;
for(i=0; i<3; i++){
- s->linesize[i]= s->picture.linesize[i] << s->interlaced;
+ s->linesize[i]= s->picture_ptr->linesize[i] << s->interlaced;
}
// printf("%d %d %d %d %d %d\n", s->width, s->height, s->linesize[0], s->linesize[1], s->interlaced, s->avctx->height);
@@ -635,7 +641,7 @@ static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int predictor, int point
}
for(mb_y = 0; mb_y < s->mb_height; mb_y++) {
const int modified_predictor= mb_y ? predictor : 1;
- uint8_t *ptr = s->picture.data[0] + (linesize * mb_y);
+ uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
if (s->interlaced && s->bottom_field)
ptr += linesize >> 1;
@@ -712,7 +718,7 @@ static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point
for(j=0; j<n; j++) {
int pred;
- ptr = s->picture.data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
+ ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
if(y==0 && mb_y==0){
if(x==0 && mb_x==0){
pred= 128 << point_transform;
@@ -752,7 +758,7 @@ static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point
for(j=0; j<n; j++) {
int pred;
- ptr = s->picture.data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
+ ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
*ptr= pred + (mjpeg_decode_dc(s, s->dc_index[i]) << point_transform);
if (++x == h) {
@@ -804,7 +810,7 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, i
}
for(i=0; i < nb_components; i++) {
int c = s->comp_index[i];
- data[c] = s->picture.data[c];
+ data[c] = s->picture_ptr->data[c];
reference_data[c] = reference ? reference->data[c] : NULL;
linesize[c]=s->linesize[c];
s->coefs_finished[c] |= 1;
@@ -891,7 +897,7 @@ static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int s
int mb_x, mb_y;
int EOBRUN = 0;
int c = s->comp_index[0];
- uint8_t* data = s->picture.data[c];
+ uint8_t* data = s->picture_ptr->data[c];
const uint8_t *reference_data = reference ? reference->data[c] : NULL;
int linesize = s->linesize[c];
int last_scan = 0;
@@ -1525,7 +1531,7 @@ eoi_parser:
if (s->bottom_field == !s->interlace_polarity)
goto not_the_end;
}
- *picture = s->picture;
+ *picture = *s->picture_ptr;
*data_size = sizeof(AVFrame);
if(!s->lossless){
@@ -1597,8 +1603,12 @@ av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
MJpegDecodeContext *s = avctx->priv_data;
int i, j;
- if (s->picture.data[0])
- avctx->release_buffer(avctx, &s->picture);
+ if (s->picture_ptr) {
+ if (s->picture_ptr->data[0]) {
+ avctx->release_buffer(avctx, s->picture_ptr);
+ }
+ av_freep(&s->picture_ptr);
+ }
av_free(s->buffer);
av_free(s->qscale_table);
diff --git a/libavcodec/mjpegdec.h b/libavcodec/mjpegdec.h
index 9c78ab2..a1dee59 100644
--- a/libavcodec/mjpegdec.h
+++ b/libavcodec/mjpegdec.h
@@ -80,7 +80,7 @@ typedef struct MJpegDecodeContext {
int h_max, v_max; /* maximum h and v counts */
int quant_index[4]; /* quant table index for each component */
int last_dc[MAX_COMPONENTS]; /* last DEQUANTIZED dc (XXX: am I right to do that ?) */
- AVFrame picture; /* picture structure */
+ AVFrame *picture_ptr; /* pointer to picture structure */
int got_picture; ///< we found a SOF and picture is valid, too.
int linesize[MAX_COMPONENTS]; ///< linesize << interlaced
int8_t *qscale_table;
--
1.7.1
--------------050901080905070707060606--
More information about the ffmpeg-devel mailing list.