[FFmpeg-devel] [PATCH 3/3] lavf/movenc: support iTunes cover art
Matthieu Bouron
matthieu.bouron at gmail.com
Fri Jul 5 14:18:57 CEST 2013
On Thu, Jul 04, 2013 at 01:20:56PM +0200, Matthieu Bouron wrote:
> > On Wed, Jul 3, 2013 at 10:50 PM, Michael Niedermayer <michaelni at gmx.at> wrote:
>
> > On Wed, Jul 03, 2013 at 12:08:38PM +0200, Matthieu Bouron wrote:
> > > On Tue, Jul 02, 2013 at 10:43:39PM +0200, Michael Niedermayer wrote:
> > > > On Tue, Jul 02, 2013 at 09:33:04PM +0200, Matthieu Bouron wrote:
> > > > > > On Tue, Jul 2, 2013 at 7:46 PM, Michael Niedermayer <michaelni at gmx.at> wrote:
> > > > >
> > > > > > On Sun, Jun 30, 2013 at 04:15:46PM +0200, Matthieu Bouron wrote:
> > > > > > > Cover art muxing is done by introducing the -cover_stream_index option
> > > > > > > which takes an output stream index as argument.
> > > > > > > The stream used for the cover art is not muxed as a track in the
> > > > > > > resulting file.
> > > > > > > ---
> > > > > > > libavformat/movenc.c | 157 +++++++++++++++++++++++++++++++++++++++++++----
> > > > > > > libavformat/movenc.h | 5 ++
> > > > > > > libavformat/movenchint.c | 1 +
> > > > > > > 3 files changed, 152 insertions(+), 11 deletions(-)
> > > > > > >
> > > > > > > diff --git a/libavformat/movenc.c b/libavformat/movenc.c
> > > > > > > index 5577530..f411493 100644
> > > > > > > --- a/libavformat/movenc.c
> > > > > > > +++ b/libavformat/movenc.c
> > > > > > > @@ -63,6 +63,7 @@ static const AVOption options[] = {
> > > > > > > { "ism_lookahead", "Number of lookahead entries for ISM files", offsetof(MOVMuxContext, ism_lookahead), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM},
> > > > > > > { "use_editlist", "use edit list", offsetof(MOVMuxContext, use_editlist), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 1, AV_OPT_FLAG_ENCODING_PARAM},
> > > > > > > { "video_track_timescale", "set timescale of all video tracks", offsetof(MOVMuxContext, video_track_timescale), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM},
> > > > > > > + { "cover_stream_index", "video stream index to use for cover art", offsetof(MOVMuxContext, cover_stream_index), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM},
> > > > > > > { NULL },
> > > > > >
> > > > > > isn't AV_DISPOSITION_ATTACHED_PIC enough?
> > > > > > why is this option needed?
> > > > > >
> > > > >
> > > > > I guess AV_DISPOSITION_ATTACHED_PIC could be enough.
> > > > > My idea here was to let the user choose the cover art from any output
> > > > > stream they like with -cover_stream_index. If the input does not have
> > > > > any cover art, can a custom stream be flagged as an attached pic?
> > > >
> > > > If it cannot, then such a feature should be added, but it certainly
> > > > doesn't belong in a single individual muxer.
> > >
> > > You're right, using AV_DISPOSITION_ATTACHED_PIC seems a better solution
> > > than introducing the cover_stream_index.
> > >
> > > Here is a new patch which uses all streams with AV_DISPOSITION_ATTACHED_PIC
> > > as cover art. It works for all modes which use the iTunes metadata (!3gp
> > > and !mov).
> > >
> > > Actually, the cover art codec is only checked at the end (when writing the
> > > metadata). If it is not BMP, MJPEG or PNG, the user is warned that the
> > > cover art is ignored.
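
To make the intended usage concrete: with the disposition-based approach an API
user only has to flag the picture stream before writing the header. A rough,
untested sketch (the helper names, hard-coded dimensions and the PNG assumption
are illustrative only, not part of the patch):

#include <libavformat/avformat.h>

/* Sketch: declare a pre-encoded PNG as cover art on an output context 'oc'. */
static int add_cover_stream(AVFormatContext *oc)
{
    AVStream *st = avformat_new_stream(oc, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_PNG;
    st->codec->width      = 600;                      /* picture dimensions and a time */
    st->codec->height     = 600;                      /* base are still required by    */
    st->codec->time_base  = (AVRational){ 1, 90000 }; /* the generic muxing layer      */
    st->disposition      |= AV_DISPOSITION_ATTACHED_PIC;
    return st->index;
}

/* After avformat_write_header(oc, NULL), feed the image as a single packet;
 * the muxer stores it in the iTunes 'covr' atom instead of creating a track. */
static int write_cover(AVFormatContext *oc, int stream_index,
                       uint8_t *data, int size)
{
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.stream_index = stream_index;
    pkt.data         = data;
    pkt.size         = size;
    pkt.pts          = pkt.dts = 0;
    pkt.flags       |= AV_PKT_FLAG_KEY;
    return av_interleaved_write_frame(oc, &pkt);
}

Command-line users should get the same behaviour for free whenever the demuxer
already marks an embedded picture with AV_DISPOSITION_ATTACHED_PIC (mp3/flac
attached pictures, for instance), as long as the disposition is propagated to
the output stream.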
> > >
> > > Do you think this check should be included in the new mov_stream_is_apic
> > > function, discarding all apic streams that do not have the right codec
> > > (and using them as normal streams)? (IMHO it does not feel right to me to
> > > use them as normal streams.)
> >
> > probably not
> >
> > did you test this with subtitles & chapters?
> >
>
> I'm currently testing it with chapters and rtphint, I'll send an updated
> patch as soon as all problems are fixed.
>
New patch attached, tested with chapters + subtitles + cover art + rtphint.
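
For the cover art part, a quick way to check the result (assuming the demuxer
side exposes the picture as an attached-picture stream) is a small probe like
the following (untested sketch, not part of the patch):

#include <stdio.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

int main(int argc, char **argv)
{
    AVFormatContext *ic = NULL;
    unsigned i;

    if (argc < 2) {
        fprintf(stderr, "usage: %s <file>\n", argv[0]);
        return 1;
    }

    av_register_all();

    if (avformat_open_input(&ic, argv[1], NULL, NULL) < 0 ||
        avformat_find_stream_info(ic, NULL) < 0) {
        fprintf(stderr, "could not open %s\n", argv[1]);
        return 1;
    }

    for (i = 0; i < ic->nb_streams; i++) {
        AVStream *st = ic->streams[i];
        if (st->disposition & AV_DISPOSITION_ATTACHED_PIC)
            printf("stream %u: attached picture, codec %s, %d bytes\n",
                   i, avcodec_get_name(st->codec->codec_id),
                   st->attached_pic.size);
    }

    avformat_close_input(&ic);
    return 0;
}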
TODO: bump lavf version
Regards,
Matthieu
-------------- next part --------------
From 904a00366a0c47229a40b1b82f1ec8284821d61c Mon Sep 17 00:00:00 2001
From: Matthieu Bouron <matthieu.bouron at gmail.com>
Date: Thu, 27 Jun 2013 18:12:50 +0200
Subject: [PATCH] lavf/movenc: support iTunes cover art
Video streams with AV_DISPOSITION_ATTACHED_PIC will be used as cover art
and won't be muxed as normal tracks in the resulting file.
---
libavformat/movenc.c | 192 +++++++++++++++++++++++++++++++++++++++++------
libavformat/movenc.h | 10 ++-
libavformat/movenchint.c | 14 +++-
3 files changed, 188 insertions(+), 28 deletions(-)
diff --git a/libavformat/movenc.c b/libavformat/movenc.c
index 47d1cd7..80e1865 100644
--- a/libavformat/movenc.c
+++ b/libavformat/movenc.c
@@ -104,6 +104,15 @@ static int is_co64_required(const MOVTrack *track)
return 0;
}
+static int mov_stream_is_apic(MOVMuxContext *mov, AVStream *st)
+{
+ if ((mov->mode & MODE_3GP) || (mov->mode & MODE_MOV))
+ return 0;
+ if (st->disposition & AV_DISPOSITION_ATTACHED_PIC)
+ return 1;
+ return 0;
+}
+
/* Chunk offset atom */
static int mov_write_stco_tag(AVIOContext *pb, MOVTrack *track)
{
@@ -2067,6 +2076,46 @@ static int mov_write_int8_metadata(AVFormatContext *s, AVIOContext *pb,
return size;
}
+static int mov_write_covr(AVFormatContext *s, AVIOContext *pb, AVPacketList *covers)
+{
+ int64_t pos = avio_tell(pb);
+
+ avio_wb32(pb, 0);
+ ffio_wfourcc(pb, "covr");
+
+ while (covers) {
+ int type;
+ AVPacket *pkt = &covers->pkt;
+ enum AVCodecID codec_id = s->streams[pkt->stream_index]->codec->codec_id;
+
+ switch (codec_id) {
+ case AV_CODEC_ID_MJPEG:
+ type = 0xD;
+ break;
+ case AV_CODEC_ID_PNG:
+ type = 0xE;
+ break;
+ case AV_CODEC_ID_BMP:
+ type = 0x1B;
+ break;
+ default:
+ covers = covers->next;
+ av_log(s, AV_LOG_ERROR, "unsupported codec %s for cover art, skipping\n", avcodec_get_name(codec_id));
+ continue;
+ }
+
+ avio_wb32(pb, 16 + pkt->size);
+ ffio_wfourcc(pb, "data");
+ avio_wb32(pb, type);
+ avio_wb32(pb, 0);
+ avio_write(pb, pkt->data, pkt->size);
+
+ covers = covers->next;
+ }
+
+ return update_size(pb, pos);
+}
+
/* iTunes meta data list */
static int mov_write_ilst_tag(AVIOContext *pb, MOVMuxContext *mov,
AVFormatContext *s)
@@ -2096,6 +2145,10 @@ static int mov_write_ilst_tag(AVIOContext *pb, MOVMuxContext *mov,
mov_write_int8_metadata (s, pb, "stik", "media_type",1);
mov_write_int8_metadata (s, pb, "hdvd", "hd_video", 1);
mov_write_int8_metadata (s, pb, "pgap", "gapless_playback",1);
+
+ if (mov->covers)
+ mov_write_covr(s, pb, mov->covers);
+
mov_write_trkn_tag(pb, mov, s);
mov_write_tmpo_tag(pb, s);
return update_size(pb, pos);
@@ -2198,7 +2251,7 @@ static int mov_write_udta_tag(AVIOContext *pb, MOVMuxContext *mov,
int i, ret, size;
uint8_t *buf;
- for (i = 0; i < s->nb_streams; i++)
+ for (i = 0; i < mov->nb_input_streams; i++)
if (mov->tracks[i].enc->flags & CODEC_FLAG_BITEXACT) {
return 0;
}
@@ -2341,7 +2394,7 @@ static int mov_write_moov_tag(AVIOContext *pb, MOVMuxContext *mov,
}
if (mov->chapter_track)
- for (i=0; i<s->nb_streams; i++) {
+ for (i=0; i<mov->nb_input_streams; i++) {
mov->tracks[i].tref_tag = MKTAG('c','h','a','p');
mov->tracks[i].tref_id = mov->tracks[mov->chapter_track].track_id;
}
@@ -2366,14 +2419,14 @@ static int mov_write_moov_tag(AVIOContext *pb, MOVMuxContext *mov,
mov_write_iods_tag(pb, mov);
for (i=0; i<mov->nb_streams; i++) {
if (mov->tracks[i].entry > 0 || mov->flags & FF_MOV_FLAG_FRAGMENT) {
- if(i < s->nb_streams){
+ if(i < mov->nb_input_streams){
int codec_type= s->streams[i]->codec->codec_type;
if(codec_type==AVMEDIA_TYPE_AUDIO || codec_type==AVMEDIA_TYPE_SUBTITLE){
mov->tracks[i].secondary= not_first[codec_type];
not_first[codec_type]= 1;
}
}
- mov_write_trak_tag(pb, mov, &(mov->tracks[i]), i < s->nb_streams ? s->streams[i] : NULL);
+ mov_write_trak_tag(pb, mov, &(mov->tracks[i]), i < mov->nb_input_streams ? s->streams[i] : NULL);
}
}
if (mov->flags & FF_MOV_FLAG_FRAGMENT)
@@ -2801,7 +2854,8 @@ static int mov_write_ftyp_tag(AVIOContext *pb, AVFormatContext *s)
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
- if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
+ if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
+ !mov_stream_is_apic(mov, st))
has_video = 1;
if (st->codec->codec_id == AV_CODEC_ID_H264)
has_h264 = 1;
@@ -3008,6 +3062,18 @@ static int get_moov_size(AVFormatContext *s)
return ret;
}
+int ff_mov_get_track_index(MOVMuxContext *mov, int stream_index)
+{
+ int i;
+
+ for (i = 0; i < mov->nb_streams; i++) {
+ MOVTrack *track = &mov->tracks[i];
+ if (track->stream_index == stream_index)
+ return i;
+ }
+ return -1;
+}
+
static int mov_flush_fragment(AVFormatContext *s)
{
MOVMuxContext *mov = s->priv_data;
@@ -3137,12 +3203,17 @@ int ff_mov_write_packet(AVFormatContext *s, AVPacket *pkt)
{
MOVMuxContext *mov = s->priv_data;
AVIOContext *pb = s->pb;
- MOVTrack *trk = &mov->tracks[pkt->stream_index];
- AVCodecContext *enc = trk->enc;
+ int trk_index = ff_mov_get_track_index(mov, pkt->stream_index);
+ MOVTrack *trk;
+ AVCodecContext *enc;
unsigned int samples_in_chunk = 0;
int size= pkt->size;
uint8_t *reformatted_data = NULL;
+ if (trk_index < 0) return 0;
+ trk = &mov->tracks[trk_index];
+ enc = trk->enc;
+
if (mov->flags & FF_MOV_FLAG_FRAGMENT) {
int ret;
if (mov->fragments > 0) {
@@ -3290,11 +3361,16 @@ int ff_mov_write_packet(AVFormatContext *s, AVPacket *pkt)
static int mov_write_single_packet(AVFormatContext *s, AVPacket *pkt)
{
MOVMuxContext *mov = s->priv_data;
- MOVTrack *trk = &mov->tracks[pkt->stream_index];
- AVCodecContext *enc = trk->enc;
+ int trk_index = ff_mov_get_track_index(mov, pkt->stream_index);
+ MOVTrack *trk;
+ AVCodecContext *enc;
int64_t frag_duration = 0;
int size = pkt->size;
+ if (trk_index < 0) return 0;
+ trk = &mov->tracks[trk_index];
+ enc = trk->enc;
+
if (!pkt->size) return 0; /* Discard 0 sized packets */
if (trk->entry && pkt->stream_index < s->nb_streams)
@@ -3343,6 +3419,7 @@ static int mov_write_packet(AVFormatContext *s, AVPacket *pkt)
} else {
int i;
MOVMuxContext *mov = s->priv_data;
+ AVStream *st = s->streams[pkt->stream_index];
if (!pkt->size) return 0; /* Discard 0 sized packets */
@@ -3378,24 +3455,55 @@ static int mov_write_packet(AVFormatContext *s, AVPacket *pkt)
}
}
+ if (mov_stream_is_apic(mov, st)) {
+
+ /* warn only once */
+ if (st->nb_frames == 1) {
+ av_log(s, AV_LOG_WARNING, "Got more than one picture in stream %d,"
+ " ignoring.\n", pkt->stream_index);
+ }
+
+ if (st->nb_frames == 0) {
+ int ret;
+ AVPacketList *last, *covers = av_mallocz(sizeof(AVPacketList));
+ if (!covers)
+ return AVERROR(ENOMEM);
+
+ if ((ret = av_copy_packet(&covers->pkt, pkt)) < 0) {
+ av_freep(&covers);
+ return ret;
+ }
+
+ if (!mov->covers)
+ mov->covers = covers;
+ else {
+ last = mov->covers;
+ while (last->next)
+ last = last->next;
+ last->next = covers;
+ }
+ }
+ }
+
return mov_write_single_packet(s, pkt);
}
}
// QuickTime chapters involve an additional text track with the chapter names
// as samples, and a tref pointing from the other tracks to the chapter one.
-static void mov_create_chapter_track(AVFormatContext *s, int tracknum)
+static void mov_create_chapter_track(AVFormatContext *s, int tracknum, int stream_index)
{
AVIOContext *pb;
MOVMuxContext *mov = s->priv_data;
MOVTrack *track = &mov->tracks[tracknum];
- AVPacket pkt = { .stream_index = tracknum, .flags = AV_PKT_FLAG_KEY };
+ AVPacket pkt = { .stream_index = stream_index, .flags = AV_PKT_FLAG_KEY };
int i, len;
track->mode = mov->mode;
track->tag = MKTAG('t','e','x','t');
track->timescale = MOV_TIMESCALE;
+ track->stream_index = stream_index;
track->enc = avcodec_alloc_context3(NULL);
track->enc->codec_type = AVMEDIA_TYPE_SUBTITLE;
@@ -3463,14 +3571,14 @@ static void mov_create_chapter_track(AVFormatContext *s, int tracknum)
}
}
-static int mov_create_timecode_track(AVFormatContext *s, int index, int src_index, const char *tcstr)
+static int mov_create_timecode_track(AVFormatContext *s, int index, int stream_index, int src_index, const char *tcstr)
{
int ret;
MOVMuxContext *mov = s->priv_data;
MOVTrack *track = &mov->tracks[index];
AVStream *src_st = s->streams[src_index];
AVTimecode tc;
- AVPacket pkt = {.stream_index = index, .flags = AV_PKT_FLAG_KEY, .size = 4};
+ AVPacket pkt = {.stream_index = stream_index, .flags = AV_PKT_FLAG_KEY, .size = 4};
AVRational rate = {src_st->codec->time_base.den, src_st->codec->time_base.num};
/* if the codec time base makes no sense, try to fallback on stream frame rate */
@@ -3489,6 +3597,7 @@ static int mov_create_timecode_track(AVFormatContext *s, int index, int src_inde
track->mode = mov->mode;
track->tag = MKTAG('t','m','c','d');
track->src_track = src_index;
+ track->stream_index = stream_index;
track->timescale = mov->tracks[src_index].timescale;
if (tc.flags & AV_TIMECODE_FLAG_DROPFRAME)
track->timecode_flags |= MOV_TIMECODE_FLAG_DROPFRAME;
@@ -3512,7 +3621,8 @@ static int mov_write_header(AVFormatContext *s)
AVIOContext *pb = s->pb;
MOVMuxContext *mov = s->priv_data;
AVDictionaryEntry *t, *global_tcr = av_dict_get(s->metadata, "timecode", NULL, 0);
- int i, hint_track = 0, tmcd_track = 0;
+ int i, stream_index = -1, hint_track = 0, hint_stream_index = 0;
+ int tmcd_track = 0, tmcd_stream_index = 0;
/* Set the FRAGMENT flag if any of the fragmentation methods are
* enabled. */
@@ -3566,29 +3676,45 @@ static int mov_write_header(AVFormatContext *s)
}
}
- mov->nb_streams = s->nb_streams;
- if (mov->mode & (MODE_MOV|MODE_IPOD) && s->nb_chapters)
+ mov->next_stream_index = mov->nb_streams = s->nb_streams;
+
+ for (i = 0; i < s->nb_streams; i++) {
+ AVStream *st = s->streams[i];
+ if (mov_stream_is_apic(mov, st))
+ mov->nb_streams--;
+ }
+
+ mov->nb_input_streams = mov->nb_streams;
+
+ if (mov->mode & (MODE_MOV|MODE_IPOD) && s->nb_chapters) {
mov->chapter_track = mov->nb_streams++;
+ mov->chapter_stream_index = mov->next_stream_index++;
+ }
if (mov->flags & FF_MOV_FLAG_RTP_HINT) {
/* Add hint tracks for each audio and video stream */
hint_track = mov->nb_streams;
+ hint_stream_index = mov->next_stream_index++;
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
- if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
+ if ((st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
+ !mov_stream_is_apic(mov, st)) ||
st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
mov->nb_streams++;
+ mov->next_stream_index++;
}
}
}
if (mov->mode == MODE_MOV) {
tmcd_track = mov->nb_streams;
+ tmcd_stream_index = mov->next_stream_index;
/* +1 tmcd track for each video stream with a timecode */
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
- if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
+ if ((st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
+ !mov_stream_is_apic(mov, st)) &&
(global_tcr || av_dict_get(st->metadata, "timecode", NULL, 0)))
mov->nb_meta_tmcd++;
}
@@ -3606,6 +3732,7 @@ static int mov_write_header(AVFormatContext *s)
}
mov->nb_streams += mov->nb_meta_tmcd;
+ mov->next_stream_index += mov->nb_meta_tmcd;
}
mov->tracks = av_mallocz(mov->nb_streams*sizeof(*mov->tracks));
@@ -3614,7 +3741,15 @@ static int mov_write_header(AVFormatContext *s)
for(i=0; i<s->nb_streams; i++){
AVStream *st= s->streams[i];
- MOVTrack *track= &mov->tracks[i];
+ MOVTrack *track;
+
+ /* skip cover art streams */
+ if (mov_stream_is_apic(mov, st))
+ continue;
+
+ stream_index++;
+ track = &mov->tracks[stream_index];
+ track->stream_index = i;
AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL,0);
track->enc = st->codec;
@@ -3725,17 +3860,19 @@ static int mov_write_header(AVFormatContext *s)
mov->time += 0x7C25B080; // 1970 based -> 1904 based
if (mov->chapter_track)
- mov_create_chapter_track(s, mov->chapter_track);
+ mov_create_chapter_track(s, mov->chapter_track, mov->chapter_stream_index);
if (mov->flags & FF_MOV_FLAG_RTP_HINT) {
/* Initialize the hint tracks for each audio and video stream */
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
- if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
+ if ((st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
+ !mov_stream_is_apic(mov, st)) ||
st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
- if (ff_mov_init_hinting(s, hint_track, i) < 0)
+ if (ff_mov_init_hinting(s, hint_track, hint_stream_index, i) < 0)
goto error;
hint_track++;
+ hint_stream_index++;
}
}
}
@@ -3751,9 +3888,10 @@ static int mov_write_header(AVFormatContext *s)
t = av_dict_get(st->metadata, "timecode", NULL, 0);
if (!t)
continue;
- if (mov_create_timecode_track(s, tmcd_track, i, t->value) < 0)
+ if (mov_create_timecode_track(s, tmcd_track, tmcd_stream_index, i, t->value) < 0)
goto error;
tmcd_track++;
+ tmcd_stream_index++;
}
}
}
@@ -3872,6 +4010,7 @@ static int mov_write_trailer(AVFormatContext *s)
{
MOVMuxContext *mov = s->priv_data;
AVIOContext *pb = s->pb;
+ AVPacketList *covers = mov->covers;
int64_t moov_pos;
int res = 0;
int i;
@@ -3963,6 +4102,13 @@ static int mov_write_trailer(AVFormatContext *s)
av_freep(&mov->tracks);
+ while (covers) {
+ AVPacketList *next = covers->next;
+ av_free_packet(&covers->pkt);
+ av_freep(&covers);
+ covers = next;
+ }
+
return res;
}
diff --git a/libavformat/movenc.h b/libavformat/movenc.h
index a6571d5..56709fd 100644
--- a/libavformat/movenc.h
+++ b/libavformat/movenc.h
@@ -141,6 +141,8 @@ typedef struct MOVTrack {
int packet_entry;
int slices;
} vc1_info;
+
+ int stream_index;
} MOVTrack;
typedef struct MOVMuxContext {
@@ -172,6 +174,11 @@ typedef struct MOVMuxContext {
int use_editlist;
int video_track_timescale;
+
+ AVPacketList *covers;
+ int nb_input_streams; ///< number of input streams used
+ int next_stream_index; ///< next stream index available
+ int chapter_stream_index; ///< chapter stream index
} MOVMuxContext;
#define FF_MOV_FLAG_RTP_HINT 1
@@ -185,10 +192,11 @@ typedef struct MOVMuxContext {
int ff_mov_write_packet(AVFormatContext *s, AVPacket *pkt);
-int ff_mov_init_hinting(AVFormatContext *s, int index, int src_index);
+int ff_mov_init_hinting(AVFormatContext *s, int index, int stream_index, int src_index);
int ff_mov_add_hinted_packet(AVFormatContext *s, AVPacket *pkt,
int track_index, int sample,
uint8_t *sample_data, int sample_size);
void ff_mov_close_hinting(MOVTrack *track);
+int ff_mov_get_track_index(MOVMuxContext *mov, int stream_index);
#endif /* AVFORMAT_MOVENC_H */
diff --git a/libavformat/movenchint.c b/libavformat/movenchint.c
index cc90f0b..86a40c2 100644
--- a/libavformat/movenchint.c
+++ b/libavformat/movenchint.c
@@ -20,22 +20,28 @@
*/
#include "movenc.h"
+#include "libavutil/avassert.h"
#include "libavutil/intreadwrite.h"
#include "internal.h"
#include "rtpenc_chain.h"
#include "avio_internal.h"
#include "rtp.h"
-int ff_mov_init_hinting(AVFormatContext *s, int index, int src_index)
+int ff_mov_init_hinting(AVFormatContext *s, int index, int stream_index, int src_index)
{
MOVMuxContext *mov = s->priv_data;
MOVTrack *track = &mov->tracks[index];
- MOVTrack *src_track = &mov->tracks[src_index];
AVStream *src_st = s->streams[src_index];
+ int src_track_index = ff_mov_get_track_index(mov, src_index);
+ MOVTrack *src_track;
int ret = AVERROR(ENOMEM);
+ av_assert0(src_track_index >= 0);
+ src_track = &mov->tracks[src_track_index];
+
track->tag = MKTAG('r','t','p',' ');
- track->src_track = src_index;
+ track->src_track = src_track_index;
+ track->stream_index = stream_index;
track->enc = avcodec_alloc_context3(NULL);
if (!track->enc)
@@ -434,7 +440,7 @@ int ff_mov_add_hinted_packet(AVFormatContext *s, AVPacket *pkt,
hint_pkt.size = size = avio_close_dyn_buf(hintbuf, &buf);
hint_pkt.data = buf;
hint_pkt.pts = hint_pkt.dts;
- hint_pkt.stream_index = track_index;
+ hint_pkt.stream_index = trk->stream_index;
if (pkt->flags & AV_PKT_FLAG_KEY)
hint_pkt.flags |= AV_PKT_FLAG_KEY;
if (count > 0)
--
1.8.3.2