[FFmpeg-devel] [PATCH] avformat: Implement DM PAR Muxer/Demuxer

Tom Needham 06needhamt at gmail.com
Fri Jul 5 23:44:57 EEST 2019


Samples are available from:

https://transfernow.net/131xk9g4u0jt
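
For reference once the patch is applied: the binary and audio stream demuxers
register themselves under the names "adbinary" and "adaudio" (see the
AVInputFormat definitions below), and PAR (de)muxing needs a build configured
with --enable-libparreader. A rough way to exercise the samples, using
placeholder file names, would be:

    ./ffprobe -f adbinary samples/capture.adbinary
    ./ffmpeg -f adbinary -i samples/capture.adbinary -c copy out.mkv

Forcing the input format with -f should only be needed where probing does not
pick the demuxer up on its own.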

Signed-off-by: Thomas Needham <06needhamt at gmail.com>
---
 Changelog                     |    1 +
 configure                     |    7 +
 libavformat/Makefile          |   10 +
 libavformat/adaudio.c         |  137 ++++
 libavformat/adbinary.c        |  609 ++++++++++++++++
 libavformat/adcommon.c        | 1106 +++++++++++++++++++++++++++++
 libavformat/adffmpeg_errors.h |   94 +++
 libavformat/adjfif.c          |  551 ++++++++++++++
 libavformat/adjfif.h          |   41 ++
 libavformat/admime.c          |  822 +++++++++++++++++++++
 libavformat/adpic.h           |  116 +++
 libavformat/adraw.c           |  131 ++++
 libavformat/allformats.c      |    7 +
 libavformat/ds.c              | 1262 +++++++++++++++++++++++++++++++++
 libavformat/ds.h              |  135 ++++
 libavformat/ds_exports.h      |  173 +++++
 libavformat/dsenc.c           |  488 +++++++++++++
 libavformat/dsenc.h           |   35 +
 libavformat/dspic.c           |  317 +++++++++
 libavformat/libpar.c          | 1030 +++++++++++++++++++++++++++
 libavformat/libpar.h          |   40 ++
 libavformat/netvu.c           |  214 ++++++
 libavformat/netvu.h           |   21 +
 libavformat/version.h         |    4 +-
 24 files changed, 7349 insertions(+), 2 deletions(-)
 create mode 100644 libavformat/adaudio.c
 create mode 100644 libavformat/adbinary.c
 create mode 100644 libavformat/adcommon.c
 create mode 100644 libavformat/adffmpeg_errors.h
 create mode 100644 libavformat/adjfif.c
 create mode 100644 libavformat/adjfif.h
 create mode 100644 libavformat/admime.c
 create mode 100644 libavformat/adpic.h
 create mode 100644 libavformat/adraw.c
 create mode 100644 libavformat/ds.c
 create mode 100644 libavformat/ds.h
 create mode 100644 libavformat/ds_exports.h
 create mode 100644 libavformat/dsenc.c
 create mode 100644 libavformat/dsenc.h
 create mode 100644 libavformat/dspic.c
 create mode 100644 libavformat/libpar.c
 create mode 100644 libavformat/libpar.h
 create mode 100644 libavformat/netvu.c
 create mode 100644 libavformat/netvu.h

diff --git a/Changelog b/Changelog
index 86167b76a1..41d12c092e 100644
--- a/Changelog
+++ b/Changelog
@@ -35,6 +35,7 @@ version <next>:
 - IFV demuxer
 - derain filter
 - deesser filter
+- AD Holdings PAR muxer and demuxer


 version 4.1:
diff --git a/configure b/configure
index 7cea9d4d73..39c4356c00 100755
--- a/configure
+++ b/configure
@@ -317,6 +317,7 @@ External library support:
   --enable-vapoursynth     enable VapourSynth demuxer [no]
   --disable-xlib           disable xlib [autodetect]
   --disable-zlib           disable zlib [autodetect]
+  --enable-libparreader    enable PAR (de)muxing via libparreader [no]

   The following libraries provide various hardware acceleration features:
   --disable-amf            disable AMF video encoding code [autodetect]
@@ -1720,6 +1721,7 @@ EXTERNAL_LIBRARY_NONFREE_LIST="
     libfdk_aac
     openssl
     libtls
+    libparreader
 "

 EXTERNAL_LIBRARY_VERSION3_LIST="
@@ -2768,6 +2770,8 @@ on2avc_decoder_select="mdct"
 opus_decoder_deps="swresample"
 opus_decoder_select="mdct15"
 opus_encoder_select="audio_frame_queue mdct15"
+libparreader_demuxer_deps="libparreader"
+libparreader_muxer_deps="libparreader"
 png_decoder_deps="zlib"
 png_encoder_deps="zlib"
 png_encoder_select="llvidencdsp"
@@ -6136,7 +6140,10 @@ for func in $COMPLEX_FUNCS; do
 done

 # these are off by default, so fail if requested and not available
+
 enabled cuda_nvcc         && { check_nvcc || die "ERROR: failed checking for nvcc."; }
+enabled libparreader      && require libparreader "parreader.h parreader_types.h" parReader_getPicStructSize -lparreader
+enabled cuda_sdk          && require cuda_sdk cuda.h cuCtxCreate -lcuda
 enabled chromaprint       && require chromaprint chromaprint.h chromaprint_get_version -lchromaprint
 enabled decklink          && { require_headers DeckLinkAPI.h &&
                                { test_cpp_condition DeckLinkAPIVersion.h "BLACKMAGIC_DECKLINK_API_VERSION >= 0x0a090500" || die "ERROR: Decklink API version must be >= 10.9.5."; } }
diff --git a/libavformat/Makefile b/libavformat/Makefile
index a434b005a4..363159009d 100644
--- a/libavformat/Makefile
+++ b/libavformat/Makefile
@@ -569,11 +569,21 @@ OBJS-$(CONFIG_YUV4MPEGPIPE_DEMUXER)      += yuv4mpegdec.o
 OBJS-$(CONFIG_YUV4MPEGPIPE_MUXER)        += yuv4mpegenc.o

 # external library muxers/demuxers
+OBJS-$(CONFIG_ADAUDIO_DEMUXER)           += adaudio.o
+OBJS-$(CONFIG_ADBINARY_DEMUXER)          += adbinary.o adcommon.o adjfif.o
+OBJS-$(CONFIG_ADMIME_DEMUXER)            += admime.o adcommon.o adjfif.o
+OBJS-$(CONFIG_ADRAW_DEMUXER)             += adraw.o adcommon.o adjfif.o
 OBJS-$(CONFIG_AVISYNTH_DEMUXER)          += avisynth.o
 OBJS-$(CONFIG_CHROMAPRINT_MUXER)         += chromaprint.o
+OBJS-$(CONFIG_DM_PROTOCOL)               += dsenc.o ds.o
+OBJS-$(CONFIG_DSPIC_DEMUXER)             += ds.o dspic.o adcommon.o
 OBJS-$(CONFIG_LIBGME_DEMUXER)            += libgme.o
 OBJS-$(CONFIG_LIBMODPLUG_DEMUXER)        += libmodplug.o
 OBJS-$(CONFIG_LIBOPENMPT_DEMUXER)        += libopenmpt.o
+OBJS-$(CONFIG_LIBPARREADER_DEMUXER)      += libpar.o adcommon.o
+OBJS-$(CONFIG_LIBPARREADER_MUXER)        += libpar.o adcommon.o
+OBJS-$(CONFIG_LIBMODPLUG_DEMUXER)        += libmodplug.o
+OBJS-$(CONFIG_NETVU_PROTOCOL)            += netvu.o
 OBJS-$(CONFIG_VAPOURSYNTH_DEMUXER)       += vapoursynth.o

 # protocols I/O
diff --git a/libavformat/adaudio.c b/libavformat/adaudio.c
new file mode 100644
index 0000000000..0c28497953
--- /dev/null
+++ b/libavformat/adaudio.c
@@ -0,0 +1,137 @@
+/*
+ * AD-Holdings demuxer for AD audio stream format
+ * Copyright (c) 2006-2010 AD-Holdings plc
+ * Modified for FFmpeg by Tom Needham <06needhamt at gmail.com> (2018)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * AD-Holdings demuxer for AD audio stream format
+ */
+
+#include "avformat.h"
+#include "ds_exports.h"
+#include "adpic.h"
+
+
+#define SIZEOF_RTP_HEADER       12
+
+
+static int adaudio_probe(AVProbeData *p);
+static int adaudio_read_header(AVFormatContext *s);
+static int adaudio_read_packet(struct AVFormatContext *s, AVPacket *pkt);
+
+
+static int adaudio_probe(AVProbeData *p)
+{
+    if( p->buf_size < 4 )
+        return 0;
+
+    /* Check the value of the first byte and the payload byte */
+    if(
+        p->buf[0] == 0x80 &&
+        (
+            p->buf[1] == RTP_PAYLOAD_TYPE_8000HZ_ADPCM ||
+            p->buf[1] == RTP_PAYLOAD_TYPE_11025HZ_ADPCM ||
+            p->buf[1] == RTP_PAYLOAD_TYPE_16000HZ_ADPCM ||
+            p->buf[1] == RTP_PAYLOAD_TYPE_22050HZ_ADPCM
+        )
+    ) {
+        return AVPROBE_SCORE_MAX;
+    }
+
+    return 0;
+}
+
+static int adaudio_read_header(AVFormatContext *s)
+{
+    s->ctx_flags |= AVFMTCTX_NOHEADER;
+
+    return 0;
+}
+
+static int adaudio_read_packet(struct AVFormatContext *s, AVPacket *pkt)
+{
+    AVIOContext *         ioContext = s->pb;
+    int                   retVal = AVERROR(EIO);
+    int                   packetSize = 0;
+    int                   sampleSize = 0;
+    AVStream *            st = NULL;
+    int                   isPacketAlloced = 0;
+#ifdef AD_SIDEDATA_IN_PRIV
+    struct ADFrameData *    frameData = NULL;
+#endif
+
+    /* Get the next packet */
+    if( (packetSize = ioContext->read_packet( ioContext->opaque, ioContext->buf_ptr, ioContext->buffer_size )) > 0 ) {
+        /* Validate the 12 byte RTP header as best we can */
+        if( ioContext->buf_ptr[1] == RTP_PAYLOAD_TYPE_8000HZ_ADPCM ||
+            ioContext->buf_ptr[1] == RTP_PAYLOAD_TYPE_11025HZ_ADPCM ||
+            ioContext->buf_ptr[1] == RTP_PAYLOAD_TYPE_16000HZ_ADPCM ||
+            ioContext->buf_ptr[1] == RTP_PAYLOAD_TYPE_22050HZ_ADPCM
+          ) {
+            /* Calculate the size of the sample data */
+            sampleSize = packetSize - SIZEOF_RTP_HEADER;
+
+            /* Create a new AVPacket */
+            if( av_new_packet( pkt, sampleSize ) >= 0 ) {
+                isPacketAlloced = 1;
+
+                /* Copy data into packet */
+                audiodata_network2host(pkt->data, &ioContext->buf_ptr[SIZEOF_RTP_HEADER], sampleSize);
+
+                /* Configure stream info */
+                if( (st = ad_get_audio_stream(s, NULL)) != NULL ) {
+                    pkt->stream_index = st->index;
+                    pkt->duration =  ((int)(AV_TIME_BASE * 1.0));
+
+#ifdef AD_SIDEDATA_IN_PRIV
+                    if( (frameData = av_malloc(sizeof(*frameData))) != NULL )  {
+                        /* Set the frame info up */
+                        frameData->frameType = RTPAudio;
+                        frameData->frameData = (void*)(&ioContext->buf_ptr[1]);
+                        frameData->additionalData = NULL;
+
+                        pkt->priv = (void*)frameData;
+                        retVal = 0;
+                    }
+                    else
+                        retVal = AVERROR(ENOMEM);
+#endif
+                }
+            }
+        }
+    }
+
+    /* Check whether we need to release the packet data we allocated */
+    if( retVal < 0 && isPacketAlloced != 0 ) {
+        av_free_packet( pkt );
+    }
+
+    return retVal;
+}
+
+
+AVInputFormat ff_adaudio_demuxer = {
+    .name           = "adaudio",
+    .long_name      = NULL_IF_CONFIG_SMALL("AD-Holdings audio format"),
+    .read_probe     = adaudio_probe,
+    .read_header    = adaudio_read_header,
+    .read_packet    = adaudio_read_packet,
+};
diff --git a/libavformat/adbinary.c b/libavformat/adbinary.c
new file mode 100644
index 0000000000..5808d41c8f
--- /dev/null
+++ b/libavformat/adbinary.c
@@ -0,0 +1,609 @@
+/*
+ * AD-Holdings demuxer for AD stream format (binary)
+ * Copyright (c) 2006-2010 AD-Holdings plc
+ * Modified for FFmpeg by Tom Needham <06needhamt at gmail.com> (2018)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * AD-Holdings demuxer for AD stream format (binary)
+ */
+
+#include <strings.h>
+
+#include "avformat.h"
+#include "libavutil/avstring.h"
+#include "libavutil/intreadwrite.h"
+
+#include "adpic.h"
+#include "adffmpeg_errors.h"
+
+
+enum pkt_offsets { PKT_DATATYPE,
+                   PKT_DATACHANNEL,
+                   PKT_SIZE_BYTE_0,
+                   PKT_SIZE_BYTE_1,
+                   PKT_SIZE_BYTE_2,
+                   PKT_SIZE_BYTE_3,
+                   PKT_SEPARATOR_SIZE
+                 };
+
+typedef struct  {    // PRC 002
+    uint32_t t;
+    uint16_t ms;
+    uint16_t mode;
+} MinimalAudioHeader;
+
+
+static void audioheader_network2host(struct NetVuAudioData *dst, const uint8_t *src)
+{
+    dst->version              = AV_RB32(src);
+    dst->mode                 = AV_RB32(src + 4);
+    dst->channel              = AV_RB32(src + 8);
+    dst->sizeOfAdditionalData = AV_RB32(src + 12);
+    dst->sizeOfAudioData      = AV_RB32(src + 16);
+    dst->seconds              = AV_RB32(src + 20);
+    dst->msecs                = AV_RB32(src + 24);
+    if ((void*)dst != (const void*)src) // Copy additionalData pointer if needed
+        memcpy(&dst->additionalData, src + 28, sizeof(unsigned char *));
+}
+
+/**
+ * MPEG4 or H264 video frame with a Netvu header
+ */
+static int adbinary_mpeg(AVFormatContext *s,
+                         AVPacket *pkt,
+                         struct NetVuImageData *vidDat,
+                         char **txtDat)
+{
+    static const int hdrSize = NetVuImageDataHeaderSize;
+    AVIOContext *pb = s->pb;
+    int textSize = 0;
+    int n, status, errorVal = 0;
+
+    n = avio_read(pb, (uint8_t *)vidDat, hdrSize);
+    if (n < hdrSize) {
+        av_log(s, AV_LOG_ERROR, "%s: short of data reading header, "
+                                "expected %d, read %d\n",
+               __func__, hdrSize, n);
+        return ADFFMPEG_AD_ERROR_MPEG4_PIC_BODY;
+    }
+    ad_network2host(vidDat, (uint8_t *)vidDat);
+    if (!pic_version_valid(vidDat->version)) {
+        av_log(s, AV_LOG_ERROR, "%s: invalid pic version 0x%08X\n", __func__,
+               vidDat->version);
+        return ADFFMPEG_AD_ERROR_MPEG4_PIC_VERSION_VALID;
+    }
+
+    // Get the additional text block
+    textSize = vidDat->start_offset;
+    *txtDat = av_malloc(textSize + 1);
+
+    if (*txtDat == NULL)  {
+        av_log(s, AV_LOG_ERROR, "%s: Failed to allocate memory for text\n", __func__);
+        return AVERROR(ENOMEM);
+    }
+
+    // Copy the additional text block
+    n = avio_get_str(pb, textSize, *txtDat, textSize+1);
+    if (n < textSize)
+        avio_skip(pb, textSize - n);
+
+    status = av_get_packet(pb, pkt, vidDat->size);
+    if (status < 0)  {
+        av_log(s, AV_LOG_ERROR, "%s: av_get_packet (size %d) failed, status %d\n",
+               __func__, vidDat->size, status);
+        return ADFFMPEG_AD_ERROR_MPEG4_NEW_PACKET;
+    }
+
+    if ( (vidDat->vid_format == PIC_MODE_MPEG4_411_I) || (vidDat->vid_format == PIC_MODE_MPEG4_411_GOV_I) )
+        pkt->flags |= AV_PKT_FLAG_KEY;
+
+    return errorVal;
+}
+
+/**
+ * MPEG4 or H264 video frame with a minimal header
+ */
+static int adbinary_mpeg_minimal(AVFormatContext *s,
+                                AVPacket *pkt, int size, int channel,
+                                struct NetVuImageData *vidDat, char **text_data,
+                                int adDataType)
+{
+    static const int titleLen  = sizeof(vidDat->title) / sizeof(vidDat->title[0]);
+    AdContext*       adContext = s->priv_data;
+    AVIOContext *    pb        = s->pb;
+    int              dataSize  = size - (4 + 2);
+    int              errorVal  = 0;
+
+    // Get the minimal video header and copy into generic video data structure
+    memset(vidDat, 0, sizeof(struct NetVuImageData));
+    vidDat->session_time  = avio_rb32(pb);
+    vidDat->milliseconds  = avio_rb16(pb);
+
+    if ( pb->error || (vidDat->session_time == 0) )  {
+        av_log(s, AV_LOG_ERROR, "%s: Reading header, errorcode %d\n",
+               __func__, pb->error);
+        return ADFFMPEG_AD_ERROR_MPEG4_MINIMAL_GET_BUFFER;
+    }
+    vidDat->version = PIC_VERSION;
+    vidDat->cam = channel + 1;
+    vidDat->utc_offset = adContext->utc_offset;
+    snprintf(vidDat->title, titleLen, "Camera %d", vidDat->cam);
+
+    // Now get the main frame data into a new packet
+    errorVal = av_get_packet(pb, pkt, dataSize);
+    if( errorVal < 0 )  {
+        av_log(s, AV_LOG_ERROR, "%s: av_get_packet (size %d) failed, status %d\n",
+               __func__, dataSize, errorVal);
+        return ADFFMPEG_AD_ERROR_MPEG4_MINIMAL_NEW_PACKET;
+    }
+
+    if (adContext->streamDatatype == 0)  {
+        //if (adDataType == AD_DATATYPE_MININAL_H264)
+        //    adContext->streamDatatype = PIC_MODE_H264I;
+        //else
+        adContext->streamDatatype = mpegOrH264(AV_RB32(pkt->data));
+    }
+    vidDat->vid_format = adContext->streamDatatype;
+
+    return errorVal;
+}
+
+/**
+ * Audio frame with a Netvu header
+ */
+static int ad_read_audio(AVFormatContext *s,
+                         AVPacket *pkt, int size,
+                         struct NetVuAudioData *data,
+                         enum AVCodecID codec_id)
+{
+    AVIOContext *pb = s->pb;
+    int status;
+
+    // Get the fixed size portion of the audio header
+    size = NetVuAudioDataHeaderSize - sizeof(unsigned char *);
+    if (avio_read( pb, (uint8_t*)data, size) != size)
+        return ADFFMPEG_AD_ERROR_AUDIO_ADPCM_GET_BUFFER;
+
+    // endian fix it...
+    audioheader_network2host(data, (uint8_t*)data);
+
+    // Now get the additional bytes
+    if( data->sizeOfAdditionalData > 0 ) {
+        data->additionalData = av_malloc( data->sizeOfAdditionalData );
+        if( data->additionalData == NULL )
+            return AVERROR(ENOMEM);
+
+        if (avio_read( pb, data->additionalData, data->sizeOfAdditionalData) != data->sizeOfAdditionalData)
+            return ADFFMPEG_AD_ERROR_AUDIO_ADPCM_GET_BUFFER2;
+    }
+    else
+        data->additionalData = NULL;
+
+    status = av_get_packet(pb, pkt, data->sizeOfAudioData);
+    if (status  < 0)  {
+        av_log(s, AV_LOG_ERROR, "%s: av_get_packet (size %d) failed, status %d\n",
+               __func__, data->sizeOfAudioData, status);
+        return ADFFMPEG_AD_ERROR_AUDIO_ADPCM_MIME_NEW_PACKET;
+    }
+
+    if (codec_id == AV_CODEC_ID_ADPCM_IMA_WAV)
+        audiodata_network2host(pkt->data, pkt->data, data->sizeOfAudioData);
+
+    return status;
+}
+
+/**
+ * Audio frame with a minimal header
+ */
+static int adbinary_audio_minimal(AVFormatContext *s,
+                                  AVPacket *pkt, int size,
+                                  struct NetVuAudioData *data)
+{
+    AVIOContext *pb = s->pb;
+    int dataSize = size - (4 + 2 + 2);
+    int status;
+
+    // Get the minimal audio header and copy into generic audio data structure
+    memset(data, 0, sizeof(struct NetVuAudioData));
+    data->seconds = avio_rb32(pb);
+    data->msecs   = avio_rb16(pb);
+    data->mode    = avio_rb16(pb);
+    if ( pb->error || (data->seconds == 0) )  {
+        av_log(s, AV_LOG_ERROR, "%s: Reading header, errorcode %d\n",
+               __func__, pb->error);
+        return ADFFMPEG_AD_ERROR_MINIMAL_AUDIO_ADPCM_GET_BUFFER;
+    }
+
+    // Now get the main frame data into a new packet
+    status = av_get_packet(pb, pkt, dataSize);
+    if (status < 0)  {
+        av_log(s, AV_LOG_ERROR, "%s: av_get_packet (size %d) failed, status %d\n",
+               __func__, dataSize, status);
+        return ADFFMPEG_AD_ERROR_MINIMAL_AUDIO_ADPCM_NEW_PACKET;
+    }
+
+    audiodata_network2host(pkt->data, pkt->data, dataSize);
+
+    return status;
+}
+
+
+
+/**
+ * Identify if the stream is an AD binary stream
+ */
+static int adbinary_probe(AVProbeData *p)
+{
+    int score = 0;
+    unsigned char *dataPtr;
+    uint32_t dataSize;
+    int bufferSize = p->buf_size;
+    uint8_t *bufPtr = p->buf;
+
+    // Netvu protocol can only send adbinary or admime
+    if ( (p->filename) && (av_stristart(p->filename, "netvu://", NULL) == 1))
+        score += AVPROBE_SCORE_MAX / 4;
+
+    while ((bufferSize >= PKT_SEPARATOR_SIZE) && (score < AVPROBE_SCORE_MAX))  {
+        dataSize =  (bufPtr[PKT_SIZE_BYTE_0] << 24) +
+                    (bufPtr[PKT_SIZE_BYTE_1] << 16) +
+                    (bufPtr[PKT_SIZE_BYTE_2] << 8 ) +
+                    (bufPtr[PKT_SIZE_BYTE_3]);
+
+        // Sanity check on dataSize
+        if ((dataSize < 6) || (dataSize > 0x1000000))
+            return 0;
+
+        // Maximum of 32 cameras can be connected to a system
+        if (bufPtr[PKT_DATACHANNEL] > 32)
+            return 0;
+
+        dataPtr = &bufPtr[PKT_SEPARATOR_SIZE];
+        bufferSize -= PKT_SEPARATOR_SIZE;
+
+        switch (bufPtr[PKT_DATATYPE])  {
+            case AD_DATATYPE_JPEG:
+            case AD_DATATYPE_MPEG4I:
+            case AD_DATATYPE_MPEG4P:
+            case AD_DATATYPE_H264I:
+            case AD_DATATYPE_H264P:
+                if (bufferSize >= NetVuImageDataHeaderSize) {
+                    struct NetVuImageData test;
+                    ad_network2host(&test, dataPtr);
+                    if (pic_version_valid(test.version))  {
+                        av_log(NULL, AV_LOG_DEBUG, "%s: Detected video packet\n", __func__);
+                        score += AVPROBE_SCORE_MAX;
+                    }
+                }
+                break;
+            case AD_DATATYPE_JFIF:
+                if (bufferSize >= 2)  {
+                    if ( (*dataPtr == 0xFF) && (*(dataPtr + 1) == 0xD8) )  {
+                        av_log(NULL, AV_LOG_DEBUG, "%s: Detected JFIF packet\n", __func__);
+                        score += AVPROBE_SCORE_MAX;
+                    }
+                }
+                break;
+            case AD_DATATYPE_AUDIO_ADPCM:
+                if (bufferSize >= NetVuAudioDataHeaderSize)  {
+                    struct NetVuAudioData test;
+                    audioheader_network2host(&test, dataPtr);
+                    if (test.version == AUD_VERSION)  {
+                        av_log(NULL, AV_LOG_DEBUG, "%s: Detected audio packet\n", __func__);
+                        score += AVPROBE_SCORE_MAX;
+                    }
+                }
+                break;
+            case AD_DATATYPE_AUDIO_RAW:
+                // We don't handle this format
+                av_log(NULL, AV_LOG_DEBUG, "%s: Detected raw audio packet (unsupported)\n", __func__);
+                break;
+            case AD_DATATYPE_MINIMAL_MPEG4:
+                if (bufferSize >= 10)  {
+                    uint32_t sec  = AV_RB32(dataPtr);
+                    uint32_t vos  = AV_RB32(dataPtr + 6);
+
+                    if (mpegOrH264(vos) == PIC_MODE_MPEG4_411)  {
+                        // Check for MPEG4 start code in data along with a timestamp
+                        // of 1980 or later.  Crappy test, but there isn't much data
+                        // to go on.  Should be able to use milliseconds <= 1000 but
+                        // servers often send larger values than this,
+                        // nonsensical as that is
+                        if (sec > 315532800)  {
+                            if ((vos >= 0x1B0) && (vos <= 0x1B6)) {
+                                av_log(NULL, AV_LOG_DEBUG, "%s: Detected minimal MPEG4 packet %u\n", __func__, dataSize);
+                                score += AVPROBE_SCORE_MAX / 4;
+                            }
+                        }
+                        break;
+                    }
+                }
+                // Servers can send h264 identified as MPEG4, so fall through
+                // to next case
+            //case(AD_DATATYPE_MINIMAL_H264):
+                if (bufferSize >= 10)  {
+                    uint32_t sec  = AV_RB32(dataPtr);
+                    uint32_t vos  = AV_RB32(dataPtr + 6);
+                    // Check for h264 start code in data along with a timestamp
+                    // of 1980 or later.  Crappy test, but there isn't much data
+                    // to go on.  Should be able to use milliseconds <= 1000 but
+                    // servers often send larger values than this,
+                    // nonsensical as that is
+                    if (sec > 315532800)  {
+                        if (vos == 0x01) {
+                            av_log(NULL, AV_LOG_DEBUG, "%s: Detected minimal h264 packet %u\n", __func__, dataSize);
+                            score += AVPROBE_SCORE_MAX / 4;
+                        }
+                    }
+                }
+                break;
+            case AD_DATATYPE_MINIMAL_AUDIO_ADPCM:
+                if (bufferSize >= 8)  {
+                    MinimalAudioHeader test;
+                    test.t     = AV_RB32(dataPtr);
+                    test.ms    = AV_RB16(dataPtr + 4);
+                    test.mode  = AV_RB16(dataPtr + 6);
+
+                    switch(test.mode)  {
+                        case RTP_PAYLOAD_TYPE_8000HZ_ADPCM:
+                        case RTP_PAYLOAD_TYPE_11025HZ_ADPCM:
+                        case RTP_PAYLOAD_TYPE_16000HZ_ADPCM:
+                        case RTP_PAYLOAD_TYPE_22050HZ_ADPCM:
+                        case RTP_PAYLOAD_TYPE_32000HZ_ADPCM:
+                        case RTP_PAYLOAD_TYPE_44100HZ_ADPCM:
+                        case RTP_PAYLOAD_TYPE_48000HZ_ADPCM:
+                        case RTP_PAYLOAD_TYPE_8000HZ_PCM:
+                        case RTP_PAYLOAD_TYPE_11025HZ_PCM:
+                        case RTP_PAYLOAD_TYPE_16000HZ_PCM:
+                        case RTP_PAYLOAD_TYPE_22050HZ_PCM:
+                        case RTP_PAYLOAD_TYPE_32000HZ_PCM:
+                        case RTP_PAYLOAD_TYPE_44100HZ_PCM:
+                        case RTP_PAYLOAD_TYPE_48000HZ_PCM:
+                            av_log(NULL, AV_LOG_DEBUG, "%s: Detected minimal audio packet\n", __func__);
+                            score += AVPROBE_SCORE_MAX / 4;
+                    }
+                }
+                break;
+            case AD_DATATYPE_LAYOUT:
+                av_log(NULL, AV_LOG_DEBUG, "%s: Detected layout packet\n", __func__);
+                break;
+            case AD_DATATYPE_INFO:
+                if ( (bufferSize >= 1) && ((dataPtr[0] == 0) || (dataPtr[0] == 1)) )  {
+                    av_log(NULL, AV_LOG_DEBUG, "%s: Detected info packet (%d)\n", __func__, dataPtr[0]);
+                    if ((bufferSize >= 5) && (strncmp(&dataPtr[1], "SITE", 4) == 0))
+                        score += AVPROBE_SCORE_MAX;
+                    else if ((bufferSize >= 15) && (strncmp(&dataPtr[1], "(JPEG)TARGSIZE", 14) == 0))
+                        score += AVPROBE_SCORE_MAX;
+                    else
+                        score += 5;
+                }
+                break;
+            case AD_DATATYPE_XML_INFO:
+                if (bufferSize >= dataSize)  {
+                    const char *infoString = "<infoList>";
+                    int infoStringLen = strlen(infoString);
+                    if ( (infoStringLen <= dataSize) && (av_strncasecmp(dataPtr, infoString, infoStringLen) == 0) )  {
+                        av_log(NULL, AV_LOG_DEBUG, "%s: Detected xml info packet\n", __func__);
+                        score += AVPROBE_SCORE_MAX;
+                    }
+                }
+                break;
+            case AD_DATATYPE_BMP:
+                av_log(NULL, AV_LOG_DEBUG, "%s: Detected bmp packet\n", __func__);
+                break;
+            case AD_DATATYPE_PBM:
+                if (bufferSize >= 3)  {
+                    if ((dataPtr[0] == 'P') && (dataPtr[1] >= '1') && (dataPtr[1] <= '6'))  {
+                        if (dataPtr[2] == 0x0A)  {
+                            score += AVPROBE_SCORE_MAX;
+                            av_log(NULL, AV_LOG_DEBUG, "%s: Detected pbm packet\n", __func__);
+                        }
+                    }
+                }
+                break;
+            case AD_DATATYPE_SVARS_INFO:
+                if ( (bufferSize >= 1) && ((dataPtr[0] == 0) || (dataPtr[0] == 1)) )  {
+                    av_log(NULL, AV_LOG_DEBUG, "%s: Detected svars info packet (%d)\n", __func__, dataPtr[0]);
+                    score += 5;
+                }
+                break;
+            default:
+                av_log(NULL, AV_LOG_DEBUG, "%s: Detected unknown packet type\n", __func__);
+                break;
+        }
+
+        if (dataSize <= bufferSize)  {
+            bufferSize -= dataSize;
+            bufPtr = dataPtr + dataSize;
+        }
+        else  {
+            bufferSize = 0;
+            bufPtr = p->buf;
+        }
+    }
+
+    if (score > AVPROBE_SCORE_MAX)
+        score = AVPROBE_SCORE_MAX;
+
+    av_log(NULL, AV_LOG_DEBUG, "%s: Score %d\n", __func__, score);
+
+    return score;
+}
+
+static int adbinary_read_header(AVFormatContext *s)
+{
+    AdContext *adContext = s->priv_data;
+    return ad_read_header(s, &adContext->utc_offset);
+}
+
+static int adbinary_read_packet(struct AVFormatContext *s, AVPacket *pkt)
+{
+    AVIOContext *       pb        = s->pb;
+    void *              payload   = NULL;
+    char *              txtDat    = NULL;
+    int                 errorVal  = -1;
+    unsigned char *     tempbuf   = NULL;
+    enum AVMediaType    mediaType = AVMEDIA_TYPE_UNKNOWN;
+    enum AVCodecID      codecId   = AV_CODEC_ID_NONE;
+    int                 data_type, data_channel;
+    unsigned int        size;
+    uint8_t             temp[6];
+
+    // First read the 6 byte separator
+    if (avio_read(pb, temp, 6) >= 6)  {
+        data_type    = temp[0];
+        data_channel = temp[1];
+        size         = AV_RB32(temp + 2);
+        if (data_type >= AD_DATATYPE_MAX)  {
+            av_log(s, AV_LOG_WARNING, "%s: No handler for data_type = %d", __func__, data_type);
+            return ADFFMPEG_AD_ERROR_READ_6_BYTE_SEPARATOR;
+        }
+        //if (data_channel >= 32)  {
+        //    av_log(s, AV_LOG_WARNING, "%s: Channel number %d too high", __func__, data_channel);
+        //    return ADFFMPEG_AD_ERROR_READ_6_BYTE_SEPARATOR;
+        //}
+        if (size >= 0x1000000)  {
+            av_log(s, AV_LOG_WARNING, "%s: Packet too large, %d bytes", __func__, size);
+            return ADFFMPEG_AD_ERROR_READ_6_BYTE_SEPARATOR;
+        }
+    }
+    else
+        return ADFFMPEG_AD_ERROR_READ_6_BYTE_SEPARATOR;
+
+    if (size == 0)  {
+        if(pb->eof_reached)
+            errorVal = AVERROR_EOF;
+        else {
+            av_log(s, AV_LOG_ERROR, "%s: Reading separator, error code %d\n",
+                   __func__, pb->error);
+            errorVal = ADFFMPEG_AD_ERROR_READ_6_BYTE_SEPARATOR;
+        }
+        return errorVal;
+    }
+
+    // Prepare for video or audio read
+    errorVal = initADData(data_type, &mediaType, &codecId, &payload);
+    if (errorVal >= 0)  {
+        // Proceed based on the type of data in this frame
+        switch(data_type) {
+            case AD_DATATYPE_JPEG:
+                errorVal = ad_read_jpeg(s, pkt, payload, &txtDat);
+                break;
+            case AD_DATATYPE_JFIF:
+                errorVal = ad_read_jfif(s, pkt, 0, size, payload, &txtDat);
+                break;
+            case AD_DATATYPE_MPEG4I:
+            case AD_DATATYPE_MPEG4P:
+            case AD_DATATYPE_H264I:
+            case AD_DATATYPE_H264P:
+                errorVal = adbinary_mpeg(s, pkt, payload, &txtDat);
+                break;
+            case AD_DATATYPE_MINIMAL_MPEG4:
+            //case(AD_DATATYPE_MINIMAL_H264):
+                errorVal = adbinary_mpeg_minimal(s, pkt, size, data_channel,
+                                                 payload, &txtDat, data_type);
+                break;
+            case AD_DATATYPE_MINIMAL_AUDIO_ADPCM:
+                errorVal = adbinary_audio_minimal(s, pkt, size, payload);
+                break;
+            case AD_DATATYPE_AUDIO_ADPCM:
+                errorVal = ad_read_audio(s, pkt, size, payload, AV_CODEC_ID_ADPCM_IMA_WAV);
+                break;
+            case AD_DATATYPE_INFO:
+            case AD_DATATYPE_XML_INFO:
+            case AD_DATATYPE_SVARS_INFO:
+                // May want to handle INFO, XML_INFO and SVARS_INFO separately in future
+                errorVal = ad_read_info(s, pkt, size);
+                break;
+            case AD_DATATYPE_LAYOUT:
+                errorVal = ad_read_layout(s, pkt, size);
+                break;
+            case AD_DATATYPE_BMP:
+                // av_dlog(s, "Bitmap overlay\n");
+                tempbuf = av_malloc(size);
+                if (tempbuf)  {
+                    avio_read(pb, tempbuf, size);
+                    av_free(tempbuf);
+                }
+                else
+                    return AVERROR(ENOMEM);
+                return ADFFMPEG_AD_ERROR_DEFAULT;
+            case AD_DATATYPE_PBM:
+                errorVal = ad_read_overlay(s, pkt, data_channel, size, &txtDat);
+                break;
+            default:
+                av_log(s, AV_LOG_WARNING, "%s: No handler for data_type = %d  "
+                       "Surrounding bytes = %02x%02x%08x\n",
+                       __func__, data_type, data_type, data_channel, size);
+
+                // Would like to use avio_skip, but that needs seek support,
+                // so just read the data into a buffer then throw it away
+                tempbuf = av_malloc(size);
+                avio_read(pb, tempbuf, size);
+                av_free(tempbuf);
+
+                return ADFFMPEG_AD_ERROR_DEFAULT;
+        }
+    }
+
+    if (errorVal >= 0)  {
+        errorVal = ad_read_packet(s, pkt, data_channel, mediaType, codecId, payload, txtDat);
+    }
+    else  {
+        av_log(s, AV_LOG_ERROR, "%s: Error %d creating packet\n", __func__, errorVal);
+
+#ifdef AD_SIDEDATA_IN_PRIV
+        // If there was an error, release any memory that has been allocated
+        if (payload != NULL)
+            av_freep(&payload);
+
+        if( txtDat != NULL )
+            av_freep(&txtDat);
+#endif
+    }
+
+#ifndef AD_SIDEDATA_IN_PRIV
+    if (payload != NULL)
+        av_freep(&payload);
+
+    if( txtDat != NULL )
+        av_freep(&txtDat);
+#endif
+
+    return errorVal;
+}
+
+static int adbinary_read_close(AVFormatContext *s)
+{
+    return 0;
+}
+
+
+AVInputFormat ff_adbinary_demuxer = {
+    .name           = "adbinary",
+    .long_name      = NULL_IF_CONFIG_SMALL("AD-Holdings video format (binary)"),
+    .priv_data_size = sizeof(AdContext),
+    .read_probe     = adbinary_probe,
+    .read_header    = adbinary_read_header,
+    .read_packet    = adbinary_read_packet,
+    .read_close     = adbinary_read_close,
+    .flags          = AVFMT_TS_DISCONT | AVFMT_VARIABLE_FPS | AVFMT_NO_BYTE_SEEK,
+};
diff --git a/libavformat/adcommon.c b/libavformat/adcommon.c
new file mode 100644
index 0000000000..76a97d4c56
--- /dev/null
+++ b/libavformat/adcommon.c
@@ -0,0 +1,1106 @@
+/*
+ * AD-Holdings common functions for demuxers
+ * Copyright (c) 2006-2010 AD-Holdings plc
+ * Modified for FFmpeg by Tom Needham <06needhamt at gmail.com> (2018)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <strings.h>
+
+#include "internal.h"
+#include "url.h"
+#include "libavutil/avstring.h"
+#include "libavutil/intreadwrite.h"
+
+#include "adffmpeg_errors.h"
+#include "adpic.h"
+#include "adjfif.h"
+#include "netvu.h"
+
+
+static const AVRational MilliTB = {1, 1000};
+
+
+int ad_read_header(AVFormatContext *s, int *utcOffset)
+{
+    AVIOContext*    pb          = s->pb;
+    URLContext*     urlContext  = pb->opaque;
+    NetvuContext*   nv          = NULL;
+
+    if (urlContext && urlContext->is_streamed)  {
+        if ( av_stristart(urlContext->filename, "netvu://", NULL) == 1)
+            nv = urlContext->priv_data;
+    }
+    if (nv)  {
+        int ii;
+        char temp[12];
+
+        if (utcOffset)
+            *utcOffset = nv->utc_offset;
+
+        for(ii = 0; ii < NETVU_MAX_HEADERS; ii++)  {
+            av_dict_set(&s->metadata, nv->hdrNames[ii], nv->hdrs[ii], 0);
+        }
+        if ( (nv->utc_offset >= 0) && (nv->utc_offset <= 1440) )  {
+            snprintf(temp, sizeof(temp), "%d", nv->utc_offset);
+            av_dict_set(&s->metadata, "timezone", temp, 0);
+        }
+    }
+
+    s->ctx_flags |= AVFMTCTX_NOHEADER;
+    return 0;
+}
+
+void ad_network2host(struct NetVuImageData *pic, uint8_t *data)
+{
+    pic->version                = AV_RB32(data + 0);
+    pic->mode                   = AV_RB32(data + 4);
+    pic->cam                    = AV_RB32(data + 8);
+    pic->vid_format             = AV_RB32(data + 12);
+    pic->start_offset           = AV_RB32(data + 16);
+    pic->size                   = AV_RB32(data + 20);
+    pic->max_size               = AV_RB32(data + 24);
+    pic->target_size            = AV_RB32(data + 28);
+    pic->factor                 = AV_RB32(data + 32);
+    pic->alm_bitmask_hi         = AV_RB32(data + 36);
+    pic->status                 = AV_RB32(data + 40);
+    pic->session_time           = AV_RB32(data + 44);
+    pic->milliseconds           = AV_RB32(data + 48);
+    if ((uint8_t *)pic != data)  {
+        memcpy(pic->res,    data + 52, 4);
+        memcpy(pic->title,  data + 56, 31);
+        memcpy(pic->alarm,  data + 87, 31);
+    }
+    pic->format.src_pixels      = AV_RB16(data + 118);
+    pic->format.src_lines       = AV_RB16(data + 120);
+    pic->format.target_pixels   = AV_RB16(data + 122);
+    pic->format.target_lines    = AV_RB16(data + 124);
+    pic->format.pixel_offset    = AV_RB16(data + 126);
+    pic->format.line_offset     = AV_RB16(data + 128);
+    if ((uint8_t *)pic != data)
+        memcpy(pic->locale, data + 130, 30);
+    pic->utc_offset             = AV_RB32(data + 160);
+    pic->alm_bitmask            = AV_RB32(data + 164);
+}
+
+static AVStream * netvu_get_stream(AVFormatContext *s, struct NetVuImageData *p)
+{
+    time_t dateSec;
+    char dateStr[18];
+    AVStream *stream = ad_get_vstream(s,
+                                      p->format.target_pixels,
+                                      p->format.target_lines,
+                                      p->cam,
+                                      p->vid_format,
+                                      p->title);
+    stream->start_time = p->session_time * 1000LL + p->milliseconds;
+    dateSec = p->session_time;
+    strftime(dateStr, sizeof(dateStr), "%Y-%m-%d %H:%MZ", gmtime(&dateSec));
+    av_dict_set(&stream->metadata, "date", dateStr, 0);
+    return stream;
+}
+
+int ad_adFormatToCodecId(AVFormatContext *s, int32_t adFormat)
+{
+    int codec_id = AV_CODEC_ID_NONE;
+
+    switch(adFormat) {
+        case PIC_MODE_JPEG_422:
+        case PIC_MODE_JPEG_411:
+            codec_id = AV_CODEC_ID_MJPEG;
+            break;
+
+        case PIC_MODE_MPEG4_411:
+        case PIC_MODE_MPEG4_411_I:
+        case PIC_MODE_MPEG4_411_GOV_P:
+        case PIC_MODE_MPEG4_411_GOV_I:
+            codec_id = AV_CODEC_ID_MPEG4;
+            break;
+
+        case PIC_MODE_H264I:
+        case PIC_MODE_H264P:
+        case PIC_MODE_H264J:
+            codec_id = AV_CODEC_ID_H264;
+            break;
+
+        default:
+            av_log(s, AV_LOG_WARNING,
+                   "ad_get_stream: unrecognised vid_format %d\n",
+                   adFormat);
+            codec_id = AV_CODEC_ID_NONE;
+    }
+    return codec_id;
+}
+
+AVStream * ad_get_vstream(AVFormatContext *s, uint16_t w, uint16_t h, uint8_t cam, int32_t format, const char *title)
+{
+    uint8_t codec_type = 0;
+    int codec_id, id;
+    int i, found;
+    char textbuffer[4];
+    AVStream *st;
+
+    codec_id = ad_adFormatToCodecId(s, format);
+    if (codec_id == AV_CODEC_ID_MJPEG)
+        codec_type = 0;
+    else if (codec_id == AV_CODEC_ID_MPEG4)
+        codec_type = 1;
+    else if (codec_id)
+        codec_type = 2;
+
+    id = ((codec_type & 0x0003) << 29) |
+         (((cam - 1)  & 0x001F) << 24) |
+         (((w >> 4)   & 0x0FFF) << 12) |
+         (((h >> 4)   & 0x0FFF) << 0);
+
+    found = FALSE;
+    for (i = 0; i < s->nb_streams; i++) {
+        st = s->streams[i];
+        if (st->id == id) {
+            found = TRUE;
+            break;
+        }
+    }
+    if (!found) {
+        st = avformat_new_stream(s, NULL);
+        if (st) {
+            st->id = id;
+            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
+            st->codec->codec_id = codec_id;
+            st->codec->width = w;
+            st->codec->height = h;
+            st->index = i;
+
+            // Set pixel aspect ratio, display aspect is (sar * width / height)
+            // May get overridden by codec
+            if( (st->codec->width > 360) && (st->codec->height < 480) )
+                st->sample_aspect_ratio = (AVRational) { 1, 2 };
+            else
+                st->sample_aspect_ratio = (AVRational) { 1, 1 };
+
+            // Use milliseconds as the time base
+            st->r_frame_rate = MilliTB;
+            avpriv_set_pts_info(st, 32, MilliTB.num, MilliTB.den);
+            st->codec->time_base = MilliTB;
+
+            if (title)
+                av_dict_set(&st->metadata, "title", title, 0);
+            snprintf(textbuffer, sizeof(textbuffer), "%u", cam);
+            av_dict_set(&st->metadata, "track", textbuffer, 0);
+
+            av_dict_set(&st->metadata, "type", "camera", 0);
+        }
+    }
+    return st;
+}
+
+static unsigned int RSHash(int camera, const char *name, unsigned int len)
+{
+    unsigned int b    = 378551;
+    unsigned int a    = (camera << 16) + (camera << 8) + camera;
+    unsigned int hash = 0;
+    unsigned int i    = 0;
+
+    for(i = 0; i < len; name++, i++)  {
+        hash = hash * a + (*name);
+        a    = a * b;
+    }
+    return hash;
+}
+
+static AVStream * ad_get_overlay_stream(AVFormatContext *s, int channel, const char *title)
+{
+    static const int codec_id = AV_CODEC_ID_PBM;
+    unsigned int id;
+    int i, found;
+    AVStream *st;
+
+    id = RSHash(channel+1, title, strlen(title));
+
+    found = FALSE;
+    for (i = 0; i < s->nb_streams; i++) {
+        st = s->streams[i];
+        if ((st->codec->codec_id == codec_id) && (st->id == id)) {
+            found = TRUE;
+            break;
+        }
+    }
+    if (!found) {
+        st = avformat_new_stream(s, NULL);
+        if (st) {
+            st->id = id;
+            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
+            st->codec->codec_id = codec_id;
+            st->index = i;
+
+            // Use milliseconds as the time base
+            st->r_frame_rate = MilliTB;
+            avpriv_set_pts_info(st, 32, MilliTB.num, MilliTB.den);
+            st->codec->time_base = MilliTB;
+
+            av_dict_set(&st->metadata, "title", title, 0);
+            av_dict_set(&st->metadata, "type", "mask", 0);
+        }
+    }
+    return st;
+}
+
+AVStream * ad_get_audio_stream(AVFormatContext *s, struct NetVuAudioData* audioHeader)
+{
+    int i, found;
+    AVStream *st;
+
+    found = FALSE;
+    for( i = 0; i < s->nb_streams; i++ ) {
+        st = s->streams[i];
+        if ( (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) && (st->id == audioHeader->channel) ) {
+            found = TRUE;
+            break;
+        }
+    }
+
+    // Did we find our audio stream? If not, create a new one
+    if( !found ) {
+        st = avformat_new_stream(s, NULL);
+        if (st) {
+            st->id = audioHeader->channel;
+            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
+            st->codec->channels = 1;
+            st->codec->block_align = 0;
+
+            if (audioHeader)  {
+                switch(audioHeader->mode)  {
+                    default:
+                    case(RTP_PAYLOAD_TYPE_8000HZ_ADPCM):
+                    case(RTP_PAYLOAD_TYPE_16000HZ_ADPCM):
+                    case(RTP_PAYLOAD_TYPE_44100HZ_ADPCM):
+                    case(RTP_PAYLOAD_TYPE_11025HZ_ADPCM):
+                    case(RTP_PAYLOAD_TYPE_22050HZ_ADPCM):
+                    case(RTP_PAYLOAD_TYPE_32000HZ_ADPCM):
+                    case(RTP_PAYLOAD_TYPE_48000HZ_ADPCM):
+                        st->codec->codec_id = AV_CODEC_ID_ADPCM_IMA_WAV;
+                        st->codec->bits_per_coded_sample = 4;
+                        break;
+                    case(RTP_PAYLOAD_TYPE_8000HZ_PCM):
+                    case(RTP_PAYLOAD_TYPE_16000HZ_PCM):
+                    case(RTP_PAYLOAD_TYPE_44100HZ_PCM):
+                    case(RTP_PAYLOAD_TYPE_11025HZ_PCM):
+                    case(RTP_PAYLOAD_TYPE_22050HZ_PCM):
+                    case(RTP_PAYLOAD_TYPE_32000HZ_PCM):
+                    case(RTP_PAYLOAD_TYPE_48000HZ_PCM):
+                        st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
+                        st->codec->bits_per_coded_sample = 16;
+                        break;
+                }
+                switch(audioHeader->mode)  {
+                    default:
+                    case(RTP_PAYLOAD_TYPE_8000HZ_ADPCM):
+                    case(RTP_PAYLOAD_TYPE_8000HZ_PCM):
+                        st->codec->sample_rate = 8000;
+                        break;
+                    case(RTP_PAYLOAD_TYPE_16000HZ_ADPCM):
+                    case(RTP_PAYLOAD_TYPE_16000HZ_PCM):
+                        st->codec->sample_rate = 16000;
+                        break;
+                    case(RTP_PAYLOAD_TYPE_44100HZ_ADPCM):
+                    case(RTP_PAYLOAD_TYPE_44100HZ_PCM):
+                        st->codec->sample_rate = 44100;
+                        break;
+                    case(RTP_PAYLOAD_TYPE_11025HZ_ADPCM):
+                    case(RTP_PAYLOAD_TYPE_11025HZ_PCM):
+                        st->codec->sample_rate = 11025;
+                        break;
+                    case(RTP_PAYLOAD_TYPE_22050HZ_ADPCM):
+                    case(RTP_PAYLOAD_TYPE_22050HZ_PCM):
+                        st->codec->sample_rate = 22050;
+                        break;
+                    case(RTP_PAYLOAD_TYPE_32000HZ_ADPCM):
+                    case(RTP_PAYLOAD_TYPE_32000HZ_PCM):
+                        st->codec->sample_rate = 32000;
+                        break;
+                    case(RTP_PAYLOAD_TYPE_48000HZ_ADPCM):
+                    case(RTP_PAYLOAD_TYPE_48000HZ_PCM):
+                        st->codec->sample_rate = 48000;
+                        break;
+                }
+            }
+            else  {
+                st->codec->codec_id = AV_CODEC_ID_ADPCM_IMA_WAV;
+                st->codec->bits_per_coded_sample = 4;
+                st->codec->sample_rate = 8000;
+            }
+            avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
+
+            st->index = i;
+        }
+    }
+
+    return st;
+}
+
+/**
+ * Returns the data stream associated with the current connection.
+ *
+ * If there isn't one already, a new one will be created and added to the
+ * AVFormatContext passed in.
+ *
+ * \param s Pointer to AVFormatContext
+ * \return Pointer to the data stream on success, NULL on failure
+ */
+static AVStream * ad_get_data_stream(AVFormatContext *s, enum AVCodecID codecId)
+{
+    int i, found = FALSE;
+    AVStream *st = NULL;
+
+    for (i = 0; i < s->nb_streams && !found; i++ ) {
+        st = s->streams[i];
+        if (st->id == codecId)
+            found = TRUE;
+    }
+
+    // Did we find our data stream? If not, create a new one
+    if( !found ) {
+        st = avformat_new_stream(s, NULL);
+        if (st) {
+            st->id = codecId;
+            st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
+            st->codec->codec_id = AV_CODEC_ID_TEXT;
+
+            // Use milliseconds as the time base
+            //st->r_frame_rate = MilliTB;
+            avpriv_set_pts_info(st, 32, MilliTB.num, MilliTB.den);
+            //st->codec->time_base = MilliTB;
+
+            st->index = i;
+        }
+    }
+    return st;
+}
+
+//static AVStream *ad_get_stream(AVFormatContext *s, enum AVMediaType media,
+//                               enum AVCodecID codecId, void *data)
+//{
+//    switch (media)  {
+//        case(AVMEDIA_TYPE_VIDEO):
+//            if (codecId == AV_CODEC_ID_PBM)
+//                return ad_get_overlay_stream(s, (const char *)data);
+//            else
+//                return netvu_get_stream(s, (struct NetVuImageData *)data);
+//            break;
+//        case(AVMEDIA_TYPE_AUDIO):
+//            return ad_get_audio_stream(s, (struct NetVuAudioData *)data);
+//            break;
+//        case(AVMEDIA_TYPE_DATA):
+//            return ad_get_data_stream(s);
+//            break;
+//        default:
+//            break;
+//    }
+//    return NULL;
+//}
+
+#ifdef AD_SIDEDATA_IN_PRIV
+static void ad_release_packet( AVPacket *pkt )
+{
+    if (pkt == NULL)
+        return;
+
+    if (pkt->priv)  {
+        // Have a look what type of frame we have and then free as appropriate
+        struct ADFrameData *frameData = (struct ADFrameData *)pkt->priv;
+        struct NetVuAudioData *audHeader;
+
+        switch(frameData->frameType)  {
+            case(NetVuAudio):
+                audHeader = (struct NetVuAudioData *)frameData->frameData;
+                if( audHeader->additionalData )
+                    av_free( audHeader->additionalData );
+            case(NetVuVideo):
+            case(DMVideo):
+            case(DMNudge):
+            case(NetVuDataInfo):
+            case(NetVuDataLayout):
+            case(RTPAudio):
+                av_freep(&frameData->frameData);
+                av_freep(&frameData->additionalData);
+                break;
+            default:
+                // Error, unrecognised frameType
+                break;
+        }
+        av_freep(&pkt->priv);
+    }
+
+    // Now use the default routine to release the rest of the packet's resources
+    av_destruct_packet( pkt );
+}
+#endif
+
+#ifdef AD_SIDEDATA_IN_PRIV
+int ad_new_packet(AVPacket *pkt, int size)
+{
+    int retVal = av_new_packet( pkt, size );
+    pkt->priv = NULL;
+    if( retVal >= 0 ) {
+        // Give the packet its own destruct function
+        pkt->destruct = ad_release_packet;
+    }
+
+    return retVal;
+}
+
+static void ad_keyvalsplit(const char *line, char *key, char *val)
+{
+    int ii, jj = 0;
+    int len = strlen(line);
+    int inKey, inVal;
+
+    inKey = 1;
+    inVal = 0;
+    for (ii = 0; ii < len; ii++)  {
+        if (inKey)  {
+            if (line[ii] == ':')  {
+                key[jj++] = '\0';
+                inKey = 0;
+                inVal = 1;
+                jj = 0;
+            }
+            else  {
+                key[jj++] = line[ii];
+            }
+        }
+        else if (inVal)  {
+            val[jj++] = line[ii];
+        }
+    }
+    val[jj++] = '\0';
+}
+
+static int ad_splitcsv(const char *csv, int *results, int maxElements, int base)
+{
+    int ii, jj, ee;
+    char element[8];
+    int len = strlen(csv);
+
+    for (ii = 0, jj = 0, ee = 0; ii < len; ii++)  {
+        if ((csv[ii] == ',') || (csv[ii] == ';') || (ii == (len -1)))  {
+            element[jj++] = '\0';
+            if (base == 10)
+                sscanf(element, "%d", &results[ee++]);
+            else
+                sscanf(element, "%x", &results[ee++]);
+            if (ee >= maxElements)
+                break;
+            jj = 0;
+        }
+        else
+            element[jj++] = csv[ii];
+    }
+    return ee;
+}
+
+static void ad_parseVSD(const char *vsd, struct ADFrameData *frame)
+{
+    char key[64], val[128];
+    ad_keyvalsplit(vsd, key, val);
+
+    if ((strlen(key) == 2) && (key[0] == 'M'))  {
+        switch(key[1])  {
+            case('0'):
+                ad_splitcsv(val, frame->vsd[VSD_M0], VSDARRAYLEN, 10);
+                break;
+            case('1'):
+                ad_splitcsv(val, frame->vsd[VSD_M1], VSDARRAYLEN, 10);
+                break;
+            case('2'):
+                ad_splitcsv(val, frame->vsd[VSD_M2], VSDARRAYLEN, 10);
+                break;
+            case('3'):
+                ad_splitcsv(val, frame->vsd[VSD_M3], VSDARRAYLEN, 10);
+                break;
+            case('4'):
+                ad_splitcsv(val, frame->vsd[VSD_M4], VSDARRAYLEN, 10);
+                break;
+            case('5'):
+                ad_splitcsv(val, frame->vsd[VSD_M5], VSDARRAYLEN, 10);
+                break;
+            case('6'):
+                ad_splitcsv(val, frame->vsd[VSD_M6], VSDARRAYLEN, 10);
+                break;
+        }
+    }
+    else if ((strlen(key) == 3) && (av_strncasecmp(key, "FM0", 3) == 0))  {
+        ad_splitcsv(val, frame->vsd[VSD_FM0], VSDARRAYLEN, 10);
+    }
+    else if ((strlen(key) == 1) && (key[0] == 'F')) {
+        ad_splitcsv(val, frame->vsd[VSD_F], VSDARRAYLEN, 16);
+    }
+    else if ((strlen(key) == 3) && (av_strncasecmp(key, "EM0", 3) == 0))  {
+        ad_splitcsv(val, frame->vsd[VSD_EM0], VSDARRAYLEN, 10);
+    }
+    else
+        av_log(NULL, AV_LOG_DEBUG, "Unknown VSD key: %s:  Val: %s", key, val);
+}
+
+static void ad_parseLine(AVFormatContext *s, const char *line, struct ADFrameData *frame)
+{
+    char key[32], val[128];
+    ad_keyvalsplit(line, key, val);
+
+    if (av_strncasecmp(key, "Active-zones", 12) == 0)  {
+        sscanf(val, "%d", &frame->activeZones);
+    }
+    else if (av_strncasecmp(key, "FrameNum", 8) == 0)  {
+        sscanf(val, "%u", &frame->frameNum);
+    }
+//    else if (av_strncasecmp(key, "Site-ID", 7) == 0)  {
+//    }
+    else if (av_strncasecmp(key, "ActMask", 7) == 0)  {
+        for (int ii = 0, jj = 0; (ii < ACTMASKLEN) && (jj < strlen(val)); ii++)  {
+            sscanf(&val[jj], "0x%04hx", &frame->activityMask[ii]);
+            jj += 7;
+        }
+    }
+    else if (av_strncasecmp(key, "VSD", 3) == 0)  {
+        ad_parseVSD(val, frame);
+    }
+}
+
+static void ad_parseText(AVFormatContext *s, struct ADFrameData *frameData)
+{
+    const char *src = frameData->additionalData;
+    int len = strlen(src);
+    char line[512];
+    int ii, jj = 0;
+
+    for (ii = 0; ii < len; ii++)  {
+        if ( (src[ii] == '\r') || (src[ii] == '\n') )  {
+            line[jj++] = '\0';
+            if (strlen(line) > 0)
+                ad_parseLine(s, line, frameData);
+            jj = 0;
+        }
+        else
+            line[jj++] = src[ii];
+    }
+}
+#endif
+
+int initADData(int data_type, enum AVMediaType *mediaType, enum AVCodecID *codecId, void **payload)
+{
+    switch(data_type)  {
+        case(AD_DATATYPE_JPEG):
+        case(AD_DATATYPE_JFIF):
+        case(AD_DATATYPE_MPEG4I):
+        case(AD_DATATYPE_MPEG4P):
+        case(AD_DATATYPE_H264I):
+        case(AD_DATATYPE_H264P):
+        case(AD_DATATYPE_MINIMAL_MPEG4):
+        //case(AD_DATATYPE_MINIMAL_H264):
+            *payload = av_mallocz( sizeof(struct NetVuImageData) );
+            if( *payload == NULL )
+                return AVERROR(ENOMEM);
+            *mediaType = AVMEDIA_TYPE_VIDEO;
+            switch(data_type)  {
+                case(AD_DATATYPE_JPEG):
+                case(AD_DATATYPE_JFIF):
+                    *codecId = AV_CODEC_ID_MJPEG;
+                    break;
+                case(AD_DATATYPE_MPEG4I):
+                case(AD_DATATYPE_MPEG4P):
+                case(AD_DATATYPE_MINIMAL_MPEG4):
+                    *codecId = AV_CODEC_ID_MPEG4;
+                    break;
+                case(AD_DATATYPE_H264I):
+                case(AD_DATATYPE_H264P):
+                //case(AD_DATATYPE_MINIMAL_H264):
+                    *codecId = AV_CODEC_ID_H264;
+                    break;
+            }
+            break;
+        case(AD_DATATYPE_AUDIO_ADPCM):
+        case(AD_DATATYPE_MINIMAL_AUDIO_ADPCM):
+            *payload = av_malloc( sizeof(struct NetVuAudioData) );
+            if( *payload == NULL )
+                return AVERROR(ENOMEM);
+            *mediaType = AVMEDIA_TYPE_AUDIO;
+            *codecId = AV_CODEC_ID_ADPCM_IMA_WAV;
+            break;
+        case(AD_DATATYPE_INFO):
+        case(AD_DATATYPE_XML_INFO):
+        case(AD_DATATYPE_LAYOUT):
+        case(AD_DATATYPE_SVARS_INFO):
+            *mediaType = AVMEDIA_TYPE_DATA;
+            *codecId = AV_CODEC_ID_FFMETADATA;
+            break;
+        case(AD_DATATYPE_BMP):
+            *mediaType = AVMEDIA_TYPE_VIDEO;
+            *codecId = AV_CODEC_ID_BMP;
+            break;
+        case(AD_DATATYPE_PBM):
+            *mediaType = AVMEDIA_TYPE_VIDEO;
+            *codecId = AV_CODEC_ID_PBM;
+            break;
+        default:
+            *mediaType = AVMEDIA_TYPE_UNKNOWN;
+            *codecId = AV_CODEC_ID_NONE;
+    }
+    return 0;
+}
+
+#if CONFIG_ADBINARY_DEMUXER || CONFIG_ADMIME_DEMUXER
+int ad_read_jpeg(AVFormatContext *s, AVPacket *pkt, struct NetVuImageData *video_data,
+                 char **text_data)
+{
+    static const int nviSize = NetVuImageDataHeaderSize;
+    AVIOContext *pb = s->pb;
+    int hdrSize;
+    char jfif[2048], *ptr;
+    int n, textSize, errorVal = 0;
+    int status;
+
+    // Check if we've already read a NetVuImageData header
+    // (possible if this is called by adraw demuxer)
+    if (video_data && (!pic_version_valid(video_data->version)))  {
+        // Read the pic structure
+        if ((n = avio_read(pb, (uint8_t*)video_data, nviSize)) != nviSize)  {
+            av_log(s, AV_LOG_ERROR, "%s: Short of data reading "
+                                    "struct NetVuImageData, expected %d, read %d\n",
+                                    __func__, nviSize, n);
+            return ADFFMPEG_AD_ERROR_JPEG_IMAGE_DATA_READ;
+        }
+
+        // Endian convert if necessary
+        ad_network2host(video_data, (uint8_t *)video_data);
+    }
+
+    if ((video_data==NULL) || !pic_version_valid(video_data->version))  {
+        av_log(s, AV_LOG_ERROR, "%s: invalid struct NetVuImageData version
"
+                                "0x%08X\n", __func__, video_data->version);
+        return ADFFMPEG_AD_ERROR_JPEG_PIC_VERSION;
+    }
+
+    // Get the additional text block
+    textSize = video_data->start_offset;
+    *text_data = av_malloc(textSize + 1);
+    if( *text_data == NULL )  {
+        av_log(s, AV_LOG_ERROR, "%s: text_data allocation failed "
+                                "(%d bytes)", __func__, textSize + 1);
+        return AVERROR(ENOMEM);
+    }
+
+    // Copy the additional text block
+    if( (n = avio_read( pb, *text_data, textSize )) != textSize )  {
+        av_log(s, AV_LOG_ERROR, "%s: short of data reading text block"
+                                " data, expected %d, read %d\n",
+                                __func__, textSize, n);
+        return ADFFMPEG_AD_ERROR_JPEG_READ_TEXT_BLOCK;
+    }
+
+    // Sometimes the buffer seems to end with a NULL terminator, other times it
+    // doesn't.  Add a terminator here regardless.
+    (*text_data)[textSize] = '\0';
+
+    // Use the struct NetVuImageData struct to build a JFIF header
+    if ((hdrSize = build_jpeg_header( jfif, video_data, 2048)) <= 0)  {
+        av_log(s, AV_LOG_ERROR, "%s: build_jpeg_header failed\n",
__func__);
+        return ADFFMPEG_AD_ERROR_JPEG_HEADER;
+    }
+    // We now know the packet size required for the image, allocate it.
+    if ((status = ad_new_packet(pkt, hdrSize + video_data->size + 2)) < 0)  {
+        av_log(s, AV_LOG_ERROR, "%s: ad_new_packet %d failed, "
+                                "status %d\n", __func__,
+                                hdrSize + video_data->size + 2, status);
+        return ADFFMPEG_AD_ERROR_JPEG_NEW_PACKET;
+    }
+    ptr = pkt->data;
+    // Copy the JFIF header into the packet
+    memcpy(ptr, jfif, hdrSize);
+    ptr += hdrSize;
+    // Now get the compressed JPEG data into the packet
+    if ((n = avio_read(pb, ptr, video_data->size)) != video_data->size) {
+        av_log(s, AV_LOG_ERROR, "%s: short of data reading pic body, "
+                                "expected %d, read %d\n", __func__,
+                                video_data->size, n);
+        return ADFFMPEG_AD_ERROR_JPEG_READ_BODY;
+    }
+    ptr += video_data->size;
+    // Add the EOI marker
+    *ptr++ = 0xff;
+    *ptr++ = 0xd9;
+
+    return errorVal;
+}
+
+int ad_read_jfif(AVFormatContext *s, AVPacket *pkt, int imgLoaded, int size,
+                 struct NetVuImageData *video_data, char **text_data)
+{
+    int n, status, errorVal = 0;
+    AVIOContext *pb = s->pb;
+
+    if(!imgLoaded) {
+        if ((status = ad_new_packet(pkt, size)) < 0) { // PRC 003
+            av_log(s, AV_LOG_ERROR, "ad_read_jfif: ad_new_packet %d
failed, status %d\n", size, status);
+            return ADFFMPEG_AD_ERROR_JFIF_NEW_PACKET;
+        }
+
+        if ((n = avio_read(pb, pkt->data, size)) < size) {
+            av_log(s, AV_LOG_ERROR, "ad_read_jfif: short of data reading
jfif image, expected %d, read %d\n", size, n);
+            return ADFFMPEG_AD_ERROR_JFIF_GET_BUFFER;
+        }
+    }
+    if ( parse_jfif(s, pkt->data, video_data, size, text_data ) <= 0) {
+        av_log(s, AV_LOG_ERROR, "ad_read_jfif: parse_jfif failed\n");
+        return ADFFMPEG_AD_ERROR_JFIF_MANUAL_SIZE;
+    }
+    return errorVal;
+}
+
+/**
+ * Data info extraction. A DATA_INFO frame simply contains a
+ * type byte followed by a text block. Due to its simplicity, we'll
+ * just extract the whole block into the AVPacket's data structure and
+ * let the client deal with checking its type and parsing its text
+ *
+ * \todo Parse the string and use the information to add metadata and/or update
+ *       the struct NetVuImageData struct
+ *
+ * Example strings, taken from a minimal mp4 stream: (Prefixed by a zero byte)
+ * SITE DVIP3S;CAM 1:TV;(JPEG)TARGSIZE 0:(MPEG)BITRATE 262144;IMAGESIZE 0,0:704,256;
+ * SITE DVIP3S;CAM 2:Directors office;(JPEG)TARGSIZE 0:(MPEG)BITRATE 262144;IMAGESIZE 0,0:704,256;
+ * SITE DVIP3S;CAM 3:Development;(JPEG)TARGSIZE 0:(MPEG)BITRATE 262144;IMAGESIZE 0,0:704,256;
+ * SITE DVIP3S;CAM 4:Rear road;(JPEG)TARGSIZE 0:(MPEG)BITRATE 262144;IMAGESIZE 0,0:704,256;
+ */
+int ad_read_info(AVFormatContext *s, AVPacket *pkt, int size)
+{
+    int n, status, errorVal = 0;
+    AVIOContext *pb = s->pb;
+    //uint8_t dataDatatype;
+
+    // Allocate a new packet
+    if( (status = ad_new_packet( pkt, size )) < 0 )
+        return ADFFMPEG_AD_ERROR_INFO_NEW_PACKET;
+
+    //dataDatatype = avio_r8(pb);
+    //--size;
+
+    // Get the data
+    if( (n = avio_read( pb, pkt->data, size)) != size )
+        return ADFFMPEG_AD_ERROR_INFO_GET_BUFFER;
+
+    return errorVal;
+}
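+
+/*
+ * A minimal sketch of how a client might split one of the DATA_INFO example
+ * lines documented above, assuming ';'-separated "KEY value" fields (the
+ * exact grammar is server-dependent):
+ *
+ *     char line[] = "SITE DVIP3S;CAM 1:TV;(MPEG)BITRATE 262144;";
+ *     char *saveptr = NULL;
+ *     for (char *field = av_strtok(line, ";", &saveptr); field != NULL;
+ *          field = av_strtok(NULL, ";", &saveptr)) {
+ *         // field is e.g. "SITE DVIP3S" or "CAM 1:TV"
+ *     }
+ */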
+
+int ad_read_layout(AVFormatContext *s, AVPacket *pkt, int size)
+{
+    int n, status, errorVal = 0;
+    AVIOContext *pb = s->pb;
+
+    // Allocate a new packet
+    if( (status = ad_new_packet( pkt, size )) < 0 )
+        return ADFFMPEG_AD_ERROR_LAYOUT_NEW_PACKET;
+
+    // Get the data
+    if( (n = avio_read( pb, pkt->data, size)) != size )
+        return ADFFMPEG_AD_ERROR_LAYOUT_GET_BUFFER;
+
+    return errorVal;
+}
+
+int ad_read_overlay(AVFormatContext *s, AVPacket *pkt, int channel, int insize, char **text_data)
+{
+    AdContext* adContext = s->priv_data;
+    AVIOContext *pb      = s->pb;
+    AVStream *st         = NULL;
+    uint8_t *inbuf       = NULL;
+    int n, w, h;
+    char *comment = NULL;
+
+    inbuf = av_malloc(insize);
+    n = avio_read(pb, inbuf, insize);
+    if (n != insize)  {
+        av_log(s, AV_LOG_ERROR, "%s: short of data reading pbm data body,
expected %d, read %d\n", __func__, insize, n);
+        return ADFFMPEG_AD_ERROR_OVERLAY_GET_BUFFER;
+    }
+
+    pkt->size = ad_pbmDecompress(&comment, &inbuf, insize, pkt, &w, &h);
+    if (pkt->size <= 0) {
+ av_log(s, AV_LOG_ERROR, "ADPIC: ad_pbmDecompress failed\n");
+ return ADFFMPEG_AD_ERROR_OVERLAY_PBM_READ;
+ }
+
+    if (text_data)  {
+        int len = 12 + strlen(comment);
+        *text_data = av_malloc(len);
+        snprintf(*text_data, len-1, "Camera %u: %s", channel+1, comment);
+
+        st = ad_get_overlay_stream(s, channel, *text_data);
+        st->codec->width = w;
+        st->codec->height = h;
+    }
+
+    av_free(comment);
+
+    if (adContext)
+        pkt->dts = pkt->pts = adContext->lastVideoPTS;
+
+    return 0;
+}
+#endif
+
+
+int ad_pbmDecompress(char **comment, uint8_t **src, int size, AVPacket *pkt, int *width, int *height)
+{
+    static const uint8_t pbm[3] = { 0x50, 0x34, 0x0A };
+    static const uint8_t rle[4] = { 0x52, 0x4C, 0x45, 0x20 };
+    int isrle                   = 0;
+    const uint8_t *ptr          = *src;
+    const uint8_t *endPtr       = (*src) + size;
+    uint8_t *dPtr               = NULL;
+    int strSize                 = 0;
+    const char *strPtr          = NULL;
+    const char *endStrPtr       = NULL;
+    unsigned int elementsRead;
+
+    if ((size >= sizeof(pbm)) && (ptr[0] == 'P') && (ptr[1] >= '1') && (ptr[1] <= '6') && (ptr[2] == 0x0A) )
+        ptr += sizeof(pbm);
+    else
+        return -1;
+
+    while ( (ptr < endPtr) && (*ptr == '#') )  {
+        ++ptr;
+
+        if ( ((endPtr - ptr) > sizeof(rle)) && (memcmp(ptr, rle, sizeof(rle)) == 0) )  {
+            isrle = 1;
+            ptr += sizeof(rle);
+        }
+
+        strPtr = ptr;
+        while ( (ptr < endPtr) && (*ptr != 0x0A) )  {
+            ++ptr;
+        }
+        endStrPtr = ptr;
+        strSize = endStrPtr - strPtr;
+
+        if (comment)  {
+            if (*comment)
+                av_free(*comment);
+            *comment = av_malloc(strSize + 1);
+
+            memcpy(*comment, strPtr, strSize);
+            (*comment)[strSize] = '\0';
+        }
+        ++ptr;
+    }
+
+    elementsRead = sscanf(ptr, "%d", width);
+    ptr += sizeof(*width) * elementsRead;
+    elementsRead = sscanf(ptr, "%d", height);
+    ptr += sizeof(*height) * elementsRead;
+
+    if (isrle)  {
+        // Data is Runlength Encoded, alloc a new buffer and decode into it
+        int len;
+        uint8_t val;
+        unsigned int headerSize   = (ptr - *src) - sizeof(rle);
+        unsigned int headerP1Size = sizeof(pbm) + 1;
+        unsigned int headerP2Size = headerSize - headerP1Size;
+        unsigned int dataSize = ((*width) * (*height)) / 8;
+
+        ad_new_packet(pkt, headerSize + dataSize);
+        dPtr = pkt->data;
+
+        memcpy(dPtr, *src, headerP1Size);
+        dPtr += headerP1Size;
+        if (strPtr)  {
+            memcpy(dPtr, strPtr, headerP2Size);
+            dPtr += headerP2Size;
+        }
+
+        // Decompress loop
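+        // Each RLE pair is a run-length byte followed by a value byte; the
+        // value is repeated for the length of the run (this do/while form
+        // still emits one byte when the stored length is zero).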
+        while (ptr < endPtr)  {
+            len = *ptr++;
+            val = *ptr++;
+            do  {
+                len--;
+                *dPtr++ = val;
+            } while(len>0);
+        }
+
+        // Free compressed data
+        av_freep(src);
+
+        return headerSize + dataSize;
+    }
+    else  {
+        ad_new_packet(pkt, size);
+        memcpy(pkt->data, *src, size);
+        av_freep(src);
+        return size;
+    }
+}
+
+static int addSideData(AVFormatContext *s, AVPacket *pkt,
+                       enum AVMediaType media, unsigned int size,
+                       void *data, const char *text)
+{
+#if defined(AD_SIDEDATA_IN_PRIV)
+    struct ADFrameData *frameData = av_mallocz(sizeof(*frameData));
+    if( frameData == NULL )
+        return AVERROR(ENOMEM);
+
+    if (media == AVMEDIA_TYPE_VIDEO)
+        frameData->frameType = NetVuVideo;
+    else
+        frameData->frameType = NetVuAudio;
+
+    frameData->frameData = data;
+    frameData->additionalData = (unsigned char *)text;
+
+    if (text != NULL)
+        ad_parseText(s, frameData);
+    pkt->priv = frameData;
+#elif defined(AD_SIDEDATA)
+    uint8_t *side = av_packet_new_side_data(pkt, AV_PKT_DATA_AD_FRAME, size);
+    if (side)
+        memcpy(side, data, size);
+
+    if (text)  {
+        size = strlen(text) + 1;
+        side = av_packet_new_side_data(pkt, AV_PKT_DATA_AD_TEXT, size);
+        if (side)
+            memcpy(side, text, size);
+    }
+#endif
+    return 0;
+}
+
+int ad_read_packet(AVFormatContext *s, AVPacket *pkt, int channel,
+                   enum AVMediaType media, enum AVCodecID codecId,
+                   void *data, char *text)
+{
+    AdContext *adContext          = s->priv_data;
+    AVStream *st                  = NULL;
+
+    if ((media == AVMEDIA_TYPE_VIDEO) && (codecId == AV_CODEC_ID_PBM))  {
+        // Get or create a data stream
+        if ( (st = ad_get_overlay_stream(s, channel, text)) == NULL ) {
+            av_log(s, AV_LOG_ERROR, "%s: ad_get_overlay_stream failed\n",
__func__);
+            return ADFFMPEG_AD_ERROR_GET_OVERLAY_STREAM;
+        }
+    }
+    else if (media == AVMEDIA_TYPE_VIDEO)  {
+        // At this point we have a legal NetVuImageData structure which we use
+        // to determine which codec stream to use
+        struct NetVuImageData *video_data = (struct NetVuImageData *)data;
+        if ( (st = netvu_get_stream( s, video_data)) == NULL ) {
+            av_log(s, AV_LOG_ERROR, "ad_read_packet: Failed get_stream for
video\n");
+            return ADFFMPEG_AD_ERROR_GET_STREAM;
+        }
+        else  {
+            if (video_data->session_time > 0)  {
+                pkt->pts = video_data->session_time;
+                pkt->pts *= 1000ULL;
+                pkt->pts += video_data->milliseconds % 1000;
+            }
+            else
+                pkt->pts = AV_NOPTS_VALUE;
+            if (adContext)
+                adContext->lastVideoPTS = pkt->pts;
+
+            // Servers occasionally send insane timezone data, which can screw
+            // up clients.  Check for this and set to 0
+            if (abs(video_data->utc_offset) > 1440)  {
+                av_log(s, AV_LOG_INFO,
+                       "ad_read_packet: Invalid utc_offset of %d, "
+                       "setting to zero\n", video_data->utc_offset);
+                video_data->utc_offset = 0;
+            }
+
+            if (adContext && (!adContext->metadataSet))  {
+                char utcOffsetStr[12];
+                snprintf(utcOffsetStr, sizeof(utcOffsetStr), "%d", video_data->utc_offset);
+                av_dict_set(&s->metadata, "locale", video_data->locale, 0);
+                av_dict_set(&s->metadata, "timezone", utcOffsetStr, 0);
+                adContext->metadataSet = 1;
+            }
+
+            addSideData(s, pkt, media, sizeof(struct NetVuImageData), data, text);
+        }
+    }
+    else if (media == AVMEDIA_TYPE_AUDIO) {
+        // Get the audio stream
+        struct NetVuAudioData *audHdr = (struct NetVuAudioData *)data;
+        if ( (st = ad_get_audio_stream( s, audHdr )) == NULL ) {
+            av_log(s, AV_LOG_ERROR, "ad_read_packet: ad_get_audio_stream
failed\n");
+            return ADFFMPEG_AD_ERROR_GET_AUDIO_STREAM;
+        }
+        else  {
+            if (audHdr->seconds > 0)  {
+                int64_t milliseconds = audHdr->seconds * 1000ULL + (audHdr->msecs % 1000);
+                pkt->pts = av_rescale_q(milliseconds, MilliTB, st->time_base);
+            }
+            else
+                pkt->pts = AV_NOPTS_VALUE;
+
+            addSideData(s, pkt, media, sizeof(struct NetVuAudioData), audHdr, text);
+        }
+    }
+    else if (media == AVMEDIA_TYPE_DATA) {
+        // Get or create a data stream
+        if ( (st = ad_get_data_stream(s, codecId)) == NULL ) {
+            av_log(s, AV_LOG_ERROR, "%s: ad_get_data_stream failed\n",
__func__);
+            return ADFFMPEG_AD_ERROR_GET_INFO_LAYOUT_STREAM;
+        }
+    }
+
+    if (st)  {
+        pkt->stream_index = st->index;
+
+        pkt->duration = 1;
+        pkt->pos = -1;
+    }
+
+    return 0;
+}
+
+void audiodata_network2host(uint8_t *dest, const uint8_t *src, int size)
+{
+    const uint8_t *dataEnd = src + size;
+
+    uint16_t predictor = AV_RB16(src);
+    src += 2;
+
+    AV_WL16(dest, predictor);
+    dest += 2;
+
+    *dest++ = *src++;
+    *dest++ = *src++;
+
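+    // The remaining payload is pairs of 4-bit ADPCM samples; swap the high
+    // and low nibbles of each byte to convert from network to host sample order.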
+    for (;src < dataEnd; src++, dest++)  {
+        *dest = (((*src) & 0xF0) >> 4) | (((*src) & 0x0F) << 4);
+    }
+}
+
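+/**
+ * Guess the video codec from the first four bytes of a frame.  MPEG-4 visual
+ * bitstreams begin with a 00 00 01 xx start code, so any value of the form
+ * 0x000001XX is treated as MPEG-4 and everything else is assumed to be H.264.
+ */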
+int mpegOrH264(unsigned int startCode)
+{
+    if ( (startCode & 0xFFFFFF00) == 0x00000100)
+        return PIC_MODE_MPEG4_411;
+    else
+        return PIC_MODE_H264I;
+}
diff --git a/libavformat/adffmpeg_errors.h b/libavformat/adffmpeg_errors.h
new file mode 100644
index 0000000000..97f44676d9
--- /dev/null
+++ b/libavformat/adffmpeg_errors.h
@@ -0,0 +1,94 @@
+#ifndef __ADFFMPEG_ERRORS_H__
+#define __ADFFMPEG_ERRORS_H__
+
+enum ADHErrorCode
+{
+    ADFFMPEG_ERROR_NONE                               = 0,
+
+    ADFFMPEG_DS_ERROR                                 = -99,
+    ADFFMPEG_DS_ERROR_AUTH_REQUIRED                   = ADFFMPEG_DS_ERROR -1,
+    ADFFMPEG_DS_ERROR_DNS_HOST_RESOLUTION_FAILURE     = ADFFMPEG_DS_ERROR -2,
+    ADFFMPEG_DS_ERROR_HOST_UNREACHABLE                = ADFFMPEG_DS_ERROR -3,
+    ADFFMPEG_DS_ERROR_INVALID_CREDENTIALS             = ADFFMPEG_DS_ERROR -4,
+    ADFFMPEG_DS_ERROR_SOCKET                          = ADFFMPEG_DS_ERROR -5,
+    ADFFMPEG_DS_ERROR_CREAT_CONECTION_TIMEOUT         = ADFFMPEG_DS_ERROR -6,
+
+    ADFFMPEG_AD_ERROR                                 = -200,
+
+    ADFFMPEG_AD_ERROR_UNKNOWN                         = ADFFMPEG_AD_ERROR -1,
+    ADFFMPEG_AD_ERROR_READ_6_BYTE_SEPARATOR           = ADFFMPEG_AD_ERROR -2,
+    ADFFMPEG_AD_ERROR_NEW_PACKET                      = ADFFMPEG_AD_ERROR -3,
+    ADFFMPEG_AD_ERROR_PARSE_MIME_HEADER               = ADFFMPEG_AD_ERROR -4,
+    //ADFFMPEG_AD_ERROR_NETVU_IMAGE_DATA                = ADFFMPEG_AD_ERROR -5,
+    //ADFFMPEG_AD_ERROR_NETVU_AUDIO_DATA                = ADFFMPEG_AD_ERROR -6,
+
+    ADFFMPEG_AD_ERROR_JPEG_IMAGE_DATA_READ            = ADFFMPEG_AD_ERROR -7,
+    ADFFMPEG_AD_ERROR_JPEG_PIC_VERSION                = ADFFMPEG_AD_ERROR -8,
+    //ADFFMPEG_AD_ERROR_JPEG_ALLOCATE_TEXT_BLOCK        = ADFFMPEG_AD_ERROR -9,
+    ADFFMPEG_AD_ERROR_JPEG_READ_TEXT_BLOCK            = ADFFMPEG_AD_ERROR -10,
+    ADFFMPEG_AD_ERROR_JPEG_HEADER                     = ADFFMPEG_AD_ERROR -11,
+    ADFFMPEG_AD_ERROR_JPEG_NEW_PACKET                 = ADFFMPEG_AD_ERROR -12,
+    ADFFMPEG_AD_ERROR_JPEG_READ_BODY                  = ADFFMPEG_AD_ERROR -13,
+
+    ADFFMPEG_AD_ERROR_JFIF_NEW_PACKET                 = ADFFMPEG_AD_ERROR -14,
+    ADFFMPEG_AD_ERROR_JFIF_GET_BUFFER                 = ADFFMPEG_AD_ERROR -15,
+    ADFFMPEG_AD_ERROR_JFIF_MANUAL_SIZE                = ADFFMPEG_AD_ERROR -16,
+
+    ADFFMPEG_AD_ERROR_MPEG4_MIME_NEW_PACKET           = ADFFMPEG_AD_ERROR -17,
+    ADFFMPEG_AD_ERROR_MPEG4_MIME_GET_BUFFER           = ADFFMPEG_AD_ERROR -18,
+    ADFFMPEG_AD_ERROR_MPEG4_MIME_PARSE_HEADER         = ADFFMPEG_AD_ERROR -19,
+    ADFFMPEG_AD_ERROR_MPEG4_MIME_PARSE_TEXT_DATA      = ADFFMPEG_AD_ERROR -20,
+    ADFFMPEG_AD_ERROR_MPEG4_MIME_GET_TEXT_BUFFER      = ADFFMPEG_AD_ERROR -21,
+    //ADFFMPEG_AD_ERROR_MPEG4_MIME_ALLOCATE_TEXT_BUFFER = ADFFMPEG_AD_ERROR -22,
+
+    ADFFMPEG_AD_ERROR_MPEG4_GET_BUFFER                = ADFFMPEG_AD_ERROR -23,
+    ADFFMPEG_AD_ERROR_MPEG4_PIC_VERSION_VALID         = ADFFMPEG_AD_ERROR -24,
+    //ADFFMPEG_AD_ERROR_MPEG4_ALLOCATE_TEXT_BUFFER      = ADFFMPEG_AD_ERROR -25,
+    ADFFMPEG_AD_ERROR_MPEG4_GET_TEXT_BUFFER           = ADFFMPEG_AD_ERROR -26,
+    ADFFMPEG_AD_ERROR_MPEG4_NEW_PACKET                = ADFFMPEG_AD_ERROR -27,
+    ADFFMPEG_AD_ERROR_MPEG4_PIC_BODY                  = ADFFMPEG_AD_ERROR -28,
+
+    ADFFMPEG_AD_ERROR_MPEG4_MINIMAL_GET_BUFFER        = ADFFMPEG_AD_ERROR -29,
+    ADFFMPEG_AD_ERROR_MPEG4_MINIMAL_NEW_PACKET        = ADFFMPEG_AD_ERROR -30,
+    ADFFMPEG_AD_ERROR_MPEG4_MINIMAL_NEW_PACKET2       = ADFFMPEG_AD_ERROR -31,
+
+    ADFFMPEG_AD_ERROR_MINIMAL_AUDIO_ADPCM_GET_BUFFER  = ADFFMPEG_AD_ERROR -32,
+    ADFFMPEG_AD_ERROR_MINIMAL_AUDIO_ADPCM_NEW_PACKET  = ADFFMPEG_AD_ERROR -33,
+    ADFFMPEG_AD_ERROR_MINIMAL_AUDIO_ADPCM_GET_BUFFER2 = ADFFMPEG_AD_ERROR -34,
+
+    ADFFMPEG_AD_ERROR_AUDIO_ADPCM_GET_BUFFER          = ADFFMPEG_AD_ERROR -35,
+    //ADFFMPEG_AD_ERROR_AUDIO_ADPCM_ALLOCATE_ADDITIONAL = ADFFMPEG_AD_ERROR -36,
+    ADFFMPEG_AD_ERROR_AUDIO_ADPCM_GET_BUFFER2         = ADFFMPEG_AD_ERROR -37,
+
+    ADFFMPEG_AD_ERROR_AUDIO_ADPCM_MIME_NEW_PACKET     = ADFFMPEG_AD_ERROR -38,
+    ADFFMPEG_AD_ERROR_AUDIO_ADPCM_MIME_GET_BUFFER     = ADFFMPEG_AD_ERROR -39,
+
+    ADFFMPEG_AD_ERROR_INFO_NEW_PACKET                 = ADFFMPEG_AD_ERROR -40,
+    ADFFMPEG_AD_ERROR_INFO_GET_BUFFER                 = ADFFMPEG_AD_ERROR -41,
+
+    ADFFMPEG_AD_ERROR_LAYOUT_NEW_PACKET               = ADFFMPEG_AD_ERROR -42,
+    ADFFMPEG_AD_ERROR_LAYOUT_GET_BUFFER               = ADFFMPEG_AD_ERROR -43,
+
+    ADFFMPEG_AD_ERROR_DEFAULT                         = ADFFMPEG_AD_ERROR -44,
+
+    ADFFMPEG_AD_ERROR_GET_STREAM                      = ADFFMPEG_AD_ERROR -45,
+    ADFFMPEG_AD_ERROR_GET_AUDIO_STREAM                = ADFFMPEG_AD_ERROR -46,
+    ADFFMPEG_AD_ERROR_GET_INFO_LAYOUT_STREAM          = ADFFMPEG_AD_ERROR -47,
+    //ADFFMPEG_AD_ERROR_END_OF_STREAM                   = ADFFMPEG_AD_ERROR -48,
+
+    //ADFFMPEG_AD_ERROR_FAILED_TO_PARSE_INFOLIST        = ADFFMPEG_AD_ERROR -49,
+    ADFFMPEG_AD_ERROR_OVERLAY_GET_BUFFER              = ADFFMPEG_AD_ERROR -50,
+    ADFFMPEG_AD_ERROR_OVERLAY_PBM_READ                = ADFFMPEG_AD_ERROR -51,
+    ADFFMPEG_AD_ERROR_GET_OVERLAY_STREAM              = ADFFMPEG_AD_ERROR -52,
+
+    ADFFMPEG_AD_ERROR_LAST                            = ADFFMPEG_AD_ERROR -53,
+
+    ADH_GRAPH_RELOAD_NEEDED                           = 0x8001,
+
+#ifdef __midl
+} ADHErrorCode;
+#else
+};
+#endif
+
+#endif
diff --git a/libavformat/adjfif.c b/libavformat/adjfif.c
new file mode 100644
index 0000000000..50df8257fe
--- /dev/null
+++ b/libavformat/adjfif.c
@@ -0,0 +1,551 @@
+/*
+ * Helper functions for converting between raw JPEG data with
+ * NetVuImageData header and full JFIF image (and vice-versa)
+ *
+ * Copyright (c) 2006-2010 AD-Holdings plc
+ * Modified for FFmpeg by Tom Needham <06needhamt at gmail.com> (2018)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Helper functions for converting between raw JPEG data with
+ * NetVuImageData header and full JFIF image (and vice-versa)
+ */
+
+#include <time.h>
+#include "internal.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/avstring.h"
+
+#include "adjfif.h"
+#include "adpic.h"
+
+static int find_q(unsigned char *qy);
+static void parse_comment(char *text, int text_len, struct NetVuImageData *pic,
+                          char **additionalText );
+static void calcQtabs(void);
+
+
+static const char comment_version[] = "Version: 00.02\r\n";
+static const char comment_version_0_1[] = "Version: 00.01\r\n";
+static const char camera_title[] = "Name: ";
+static const char camera_number[] = "Number: ";
+static const char image_date[] = "Date: ";
+static const char image_time[] = "Time: ";
+static const char image_ms[] = "MSec: ";
+static const char q_factor[] = "Q-Factor: ";
+static const char alarm_comment[] = "Alarm-text: ";
+static const char active_alarms[] = "Active-alarms: ";
+static const char active_detectors[] = "Active-detectors: ";
+static const char script_msg[] = "Comments: ";
+static const char time_zone[] = "Locale: ";
+static const char utc_offset[] = "UTCoffset: ";
+
+static const uint8_t jfif_header[] = {
+    0xFF, 0xD8,                     // SOI
+    0xFF, 0xE0,                     // APP0
+    0x00, 0x10,                     // APP0 header size (including
+                                    // this field, but excluding preceding)
+    0x4A, 0x46, 0x49, 0x46, 0x00,   // ID string 'JFIF\0'
+    0x01, 0x02,                     // version
+    0x02,                           // density units (0 - none, 1 - PPI, 2 - PPCM)
+    0x00, 0x19,                     // X density
+    0x00, 0x19,                     // Y density
+    0x00,                           // X thumbnail size
+    0x00,                           // Y thumbnail size
+};
+
+static const unsigned char sof_422_header[] = {
+    0xFF, 0xC0, 0x00, 0x11, 0x08, 0x01, 0x00, 0x01,
+    0x60, 0x03, 0x01, 0x21, 0x00, 0x02, 0x11, 0x01,
+    0x03, 0x11, 0x01
+};
+static const unsigned char sof_411_header[] = {
+    0xFF, 0xC0, 0x00, 0x11, 0x08, 0x01, 0x00, 0x01,
+    0x60, 0x03, 0x01, 0x22, 0x00, 0x02, 0x11, 0x01,
+    0x03, 0x11, 0x01
+};
+
+static const unsigned char huf_header[] = {
+    0xFF, 0xC4, 0x00, 0x1F, 0x00, 0x00, 0x01, 0x05,
+    0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02,
+    0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A,
+    0x0B, 0xFF, 0xC4, 0x00, 0xB5, 0x10, 0x00, 0x02,
+    0x01, 0x03, 0x03, 0x02, 0x04, 0x03, 0x05, 0x05,
+    0x04, 0x04, 0x00, 0x00, 0x01, 0x7D, 0x01, 0x02,
+    0x03, 0x00, 0x04, 0x11, 0x05, 0x12, 0x21, 0x31,
+    0x41, 0x06, 0x13, 0x51, 0x61, 0x07, 0x22, 0x71,
+    0x14, 0x32, 0x81, 0x91, 0xA1, 0x08, 0x23, 0x42,
+    0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0, 0x24, 0x33,
+    0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17, 0x18,
+    0x19, 0x1A, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A,
+    0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43,
+    0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53,
+    0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63,
+    0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x73,
+    0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x83,
+    0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92,
+    0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A,
+    0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9,
+    0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8,
+    0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+    0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
+    0xD7, 0xD8, 0xD9, 0xDA, 0xE1, 0xE2, 0xE3, 0xE4,
+    0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF1, 0xF2,
+    0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA,
+    0xFF, 0xC4, 0x00, 0x1F, 0x01, 0x00, 0x03, 0x01,
+    0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02,
+    0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A,
+    0x0B, 0xFF, 0xC4, 0x00, 0xB5, 0x11, 0x00, 0x02,
+    0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x07, 0x05,
+    0x04, 0x04, 0x00, 0x01, 0x02, 0x77, 0x00, 0x01,
+    0x02, 0x03, 0x11, 0x04, 0x05, 0x21, 0x31, 0x06,
+    0x12, 0x41, 0x51, 0x07, 0x61, 0x71, 0x13, 0x22,
+    0x32, 0x81, 0x08, 0x14, 0x42, 0x91, 0xA1, 0xB1,
+    0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0, 0x15, 0x62,
+    0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25,
+    0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26, 0x27, 0x28,
+    0x29, 0x2A, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A,
+    0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A,
+    0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A,
+    0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A,
+    0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A,
+    0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
+    0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
+    0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
+    0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6,
+    0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5,
+    0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4,
+    0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE2, 0xE3,
+    0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF2,
+    0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA
+};
+
+static const unsigned char sos_header[] = {
+    0xFF, 0xDA, 0x00, 0x0c, 0x03, 0x01, 0x00, 0x02,
+    0x11, 0x03, 0x11, 0x00, 0x3F, 0x00
+};
+
+unsigned short Yvis[64] = {
+     16,  11,  12,  14,  12,  10,  16,  14,
+     13,  14,  18,  17,  16,  19,  24,  40,
+     26,  24,  22,  22,  24,  49,  35,  37,
+     29,  40,  58,  51,  61,  60,  57,  51,
+     56,  55,  64,  72,  92,  78,  64,  68,
+     87,  69,  55,  56,  80, 109,  81,  87,
+     95,  98, 103, 104, 103,  62,  77, 113,
+    121, 112, 100, 120,  92, 101, 103,  99
+};
+
+unsigned short UVvis[64] = {
+    17, 18, 18, 24, 21, 24, 47, 26,
+    26, 47, 99, 66, 56, 66, 99, 99,
+    99, 99, 99, 99, 99, 99, 99, 99,
+    99, 99, 99, 99, 99, 99, 99, 99,
+    99, 99, 99, 99, 99, 99, 99, 99,
+    99, 99, 99, 99, 99, 99, 99, 99,
+    99, 99, 99, 99, 99, 99, 99, 99,
+    99, 99, 99, 99, 99, 99, 99, 99
+};
+
+unsigned char YQuantizationFactors[256][64], UVQuantizationFactors[256][64];
+
+static int q_init;
+
+
+/**
+ * Build the correct JFIF headers & tables for the supplied image data
+ *
+ * \param jfif Pointer to output buffer
+ * \param pic  Pointer to NetVuImageData - includes Q factors, image size,
+ *             mode etc.
+ * \param max  Maximum size of header
+ * \return Total bytes in the JFIF image
+ */
+unsigned int build_jpeg_header(void *jfif, struct NetVuImageData *pic, unsigned int max)
+{
+    volatile unsigned int count;
+    unsigned short    us1;
+    char  *bufptr = jfif;
+    char sof_copy[sizeof(sof_422_header)];
+
+
+    if (!q_init) {
+        calcQtabs();
+        q_init = 1;
+    }
+
+    // Add all the fixed length headers prior to building comment field
+    count = sizeof(jfif_header);
+    if (count > max)
+        return 0;
+
+    memcpy(bufptr, jfif_header, sizeof(jfif_header));
+
+    if( (pic->format.target_pixels > 360) && (pic->format.target_lines < 480) )
+        bufptr[17] = 0x32;
+
+    bufptr += sizeof(jfif_header);
+
+    // Q tables and markers
+    count += 138;
+    if (count > max)
+        return 0;
+    *bufptr++ = 0xff;
+    *bufptr++ = 0xdb;
+    *bufptr++ = 0x00;
+    *bufptr++ = 0x43;
+    *bufptr++ = 0x00;
+    for (us1 = 0; us1 < 64; us1++)
+        *bufptr++ = YQuantizationFactors[pic->factor][us1];
+    *bufptr++ = 0xff;
+    *bufptr++ = 0xdb;
+    *bufptr++ = 0x00;
+    *bufptr++ = 0x43;
+    *bufptr++ = 0x01;
+    for (us1 = 0; us1 < 64; us1++)
+        *bufptr++ = UVQuantizationFactors[pic->factor][us1];
+    count += (sizeof(sof_copy) + sizeof(huf_header) + sizeof(sos_header));
+    if (count > max)
+        return 0;
+    else {
+        unsigned short targ;
+        char *ptr1, *ptr2;
+        memcpy(sof_copy, (pic->vid_format == PIC_MODE_JPEG_411) ? sof_411_header : sof_422_header, sizeof( sof_copy ) );
+        targ = AV_RB16(&pic->format.target_pixels); // Byte swap from native to big-endian
+        ptr1 = (char *)&targ;
+        ptr2 = &sof_copy[7];
+        *ptr2++ = *ptr1++;
+        *ptr2 = *ptr1;
+
+        targ = AV_RB16(&pic->format.target_lines); // Byte swap from native to big-endian
+        ptr1 = (char *)&targ;
+        ptr2 = &sof_copy[5];
+        *ptr2++ = *ptr1++;
+        *ptr2 = *ptr1;
+
+        memcpy(bufptr, sof_copy, sizeof(sof_copy));
+        bufptr += sizeof(sof_copy);
+
+        memcpy(bufptr, huf_header, sizeof(huf_header));
+        bufptr += sizeof(huf_header);
+
+        memcpy(bufptr, sos_header, sizeof(sos_header));
+        bufptr += sizeof(sos_header);
+    }
+
+    return bufptr - (char*)jfif;
+}
+
+/**
+ * Calculate the Q tables
+ *
+ * Updates the module Q factor tables
+ */
+static void calcQtabs(void)
+{
+    short i;
+    int uvfactor;
+    short factor;
+    short yval, uvval;
+    for (factor = 1; factor < 256; factor++ ) {
+        uvfactor = factor * 1;
+        if (uvfactor > 255)
+            uvfactor = 255;
+        for (i = 0; i < 64; i++) {
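+            // Scale the visually-weighted base tables by the Q factor and
+            // clamp to [1, 255]; e.g. factor 100 turns Yvis[0] = 16 into
+            // (16 * 100 + 25) / 50 = 32.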
+            yval  = (short)(((Yvis[i] * factor) + 25) / 50);
+            uvval = (short)(((UVvis[i] * uvfactor) + 25) / 50);
+
+            if ( yval < 1 )
+                yval = 1;    // The DC and AC values cannot be
+            if ( uvval < 1)  //
+                uvval = 1;   // less than 1
+
+            if ( yval > 255 )
+                yval = 255;    // The DC and AC values cannot
+            if ( uvval > 255)  //
+                uvval = 255;   // be more than 255
+
+            YQuantizationFactors[factor][i]  = (uint8_t)yval;
+            UVQuantizationFactors[factor][i] = (uint8_t)uvval;
+        }
+    }
+}
+
+static int find_q(unsigned char *qy)
+{
+    int factor, smallest_err = 0, best_factor = 0;
+    unsigned char *q1, *q2, *qe;
+    unsigned char qtest[64];
+    int err_diff;
+
+    if (!q_init) {
+        calcQtabs();
+        q_init = 1;
+    }
+
+    memcpy(&qtest[0], qy, 64); // PRC 025
+
+    qe = &qtest[32];
+
+    for (factor = 1; factor < 256; factor++ ) {
+        q1 = &qtest[0];
+        q2 = &YQuantizationFactors[factor][0];
+        err_diff = 0;
+        while (q1 < qe) {
+            if (*q1 > *q2)
+                err_diff = *q1 - *q2;
+            else
+                err_diff = *q2 - *q1;
+
+            q1++;
+            q2++;
+        }
+        if (err_diff == 0) {
+            best_factor = factor;
+            smallest_err = 0;
+            break;
+        }
+        else if ( err_diff < smallest_err || best_factor == 0 ) {
+            best_factor = factor;
+            smallest_err = err_diff;
+        }
+    }
+    if (factor == 256) {
+        factor = best_factor;
+        av_log(NULL, AV_LOG_ERROR, "find_q: Unable to match Q setting
%d\n", best_factor );
+    }
+    return factor;
+}
+
+
+/**
+ * Analyses a JFIF header and fills out a NetVuImageData structure with the info
+ *
+ * \param data        Input buffer
+ * \param pic         NetVuImageData structure
+ * \param imgSize     Total length of input buffer
+ * \param text        Buffer in which is placed text from the JFIF comment
+ *                    that doesn't have a specific field in NetVuImageData
+ * \return Length of JFIF header in bytes
+ */
+int parse_jfif(AVFormatContext *s, unsigned char *data, struct NetVuImageData *pic,
+               int imgSize, char **text)
+{
+    int i, sos = FALSE;
+    unsigned short length, marker;
+    uint16_t xdensity = 0;
+    uint16_t ydensity = 0;
+    uint8_t *densityPtr = NULL;
+    unsigned int jfifMarker;
+
+    //av_log(s, AV_LOG_DEBUG, "parse_jfif: leading bytes 0x%02X, 0x%02X, "
+    //       "length=%d\n", data[0], data[1], imgSize);
+    memset(pic, 0, sizeof(*pic));
+    pic->version = PIC_VERSION;
+    pic->factor = -1;
+    pic->start_offset = 0;
+    i = 0;
+
+    if(data[i] != 0xff && data[i+1] != 0xd8) {
+        //there is a header so skip it
+        while( ((unsigned char)data[i] != 0xff) && (i < imgSize) )
+            i++;
+        //if ( i > 0 )
+        //    av_log(s, AV_LOG_DEBUG, "parse_jfif: %d leading bytes\n", i);
+
+        i++;
+        if ( (unsigned char) data[i] != 0xd8) {
+            //av_log(s, AV_LOG_ERROR, "parse_jfif: incorrect SOI
0xff%02x\n", data[i]);
+            return -1;
+        }
+        i++;
+    }
+    else {
+        //no header
+        i += 2;
+    }
+
+    while ( !sos && (i < imgSize) ) {
+        if (data[i] != 0xff) {
+            i++;
+            continue;
+        }
+        marker = AV_RB16(&data[i]);
+        i += 2;
+        length = AV_RB16(&data[i]) - 2;
+        i += 2;
+
+        switch (marker) {
+            case 0xffe0 :    // APP0
+                jfifMarker = AV_RB32(&data[i]);
+                if ( (jfifMarker==0x4A464946) && (data[i+4]==0x00) )  {
+                    xdensity = AV_RB16(&data[i+8]);
+                    ydensity = AV_RB16(&data[i+10]);
+                    densityPtr = &data[i+8];
+                }
+                break;
+            case 0xffdb :    // Q table
+                if (!data[i])
+                    pic->factor =  find_q(&data[i+1]);
+                break;
+            case 0xffc0 :    // SOF
+                pic->format.target_lines  = AV_RB16(&data[i+1]);
+                pic->format.target_pixels = AV_RB16(&data[i+3]);
+                if (data[i+7] == 0x22)
+                    pic->vid_format = PIC_MODE_JPEG_411;
+                else if (data[i+7] == 0x21)
+                    pic->vid_format = PIC_MODE_JPEG_422;
+                else
+                    av_log(s, AV_LOG_WARNING, "%s: Unknown SOF format byte 0x%02X\n", __func__, data[i+7]);
+                break;
+            case 0xffda :    // SOS
+                sos = TRUE;
+                break;
+            case 0xfffe :    // Comment
+                parse_comment((char *)&data[i], length - 2, pic, text);
+                break;
+            default :
+                break;
+        }
+        i += length;
+
+        if ( densityPtr && (pic->format.target_lines > 0) && (xdensity == (ydensity*2)) )  {
+            if( (pic->format.target_pixels > 360) && (pic->format.target_lines < 480) )  {
+                // Server is sending wrong pixel aspect ratio, reverse it
+                //av_log(s, AV_LOG_DEBUG, "%s: Server is sending wrong pixel "
+                //                        "aspect ratio. Old = %d:%d, New = %d:%d"
+                //                        " Res = %dx%d\n",
+                //       __func__, xdensity, ydensity, ydensity, xdensity,
+                //       pic->format.target_pixels, pic->format.target_lines);
+                AV_WB16(densityPtr, ydensity);
+                AV_WB16(densityPtr + 2, xdensity);
+            }
+            else  {
+                // Server is sending wrong pixel aspect ratio, set it to 1:1
+                AV_WB16(densityPtr, ydensity);
+            }
+            xdensity = AV_RB16(densityPtr);
+            ydensity = AV_RB16(densityPtr+2);
+        }
+    }
+    pic->size = imgSize - i - 2;     // 2 bytes for FFD9
+    return i;
+}
+
+static void parse_comment( char *text, int text_len, struct NetVuImageData *pic, char **additionalText )
+{
+    char            result[512];
+    int             i = 0;
+    int             j = 0;
+    struct tm       t;
+    time_t          when = 1000000000;
+
+    // Calculate timezone offset of client, needed because mktime uses local
+    // time and there is no ISO C equivalent for GMT.
+    struct tm       utc = *gmtime(&when);
+    struct tm       lcl = *localtime(&when);
+    int             delta_h = mktime(&utc) - mktime(&lcl);
+
+    memset(&t, 0, sizeof(t));
+
+    while( 1 ) {
+        j = 0;
+
+        // Check we haven't covered all the buffer already
+        if( i >= text_len || text[i] <= 0 )
+            break;
+
+        // Get the next line from the text block
+        while (text[i] && text[i] != '\n' && (i < text_len))  {
+            result[j++] = text[i++];
+            if ( j >= sizeof(result) )  {
+                --j;
+                break;
+            }
+        }
+        result[j] = '\0';
+
+        // Skip the \n
+        if( text[i] == '\n' ) {
+            if (j > 0)
+                result[j-1] = '\0';
+            else
+                result[0] = '\0';
+            i++;
+        }
+
+        // Changed this line so it doesn't include the \r\n from the end of comment_version
+        if( !memcmp( result, comment_version, strlen(comment_version) - 2 ) )
+            pic->version = 0xdecade11;
+        else if ( !memcmp( result, comment_version_0_1, strlen(comment_version_0_1) - 2 ) )
+            pic->version = 0xdecade10;
+        else if( !memcmp( result, camera_title, strlen(camera_title) ) )  {
+            av_strlcpy( pic->title, &result[strlen(camera_title)], sizeof(pic->title) );
+        }
+        else if( !memcmp( result, camera_number, strlen(camera_number) ) )
+            sscanf( &result[strlen(camera_number)], "%d", &pic->cam );
+        else if( !memcmp( result, image_date, strlen(image_date) ) ) {
+            sscanf( &result[strlen(image_date)], "%d/%d/%d", &t.tm_mday, &t.tm_mon, &t.tm_year );
+            t.tm_year -= 1900;
+            t.tm_mon--;
+        }
+        else if( !memcmp( result, image_time, strlen(image_time) ) )
+            sscanf( &result[strlen(image_time)], "%d:%d:%d", &t.tm_hour, &t.tm_min, &t.tm_sec );
+        else if( !memcmp( result, image_ms, strlen(image_ms) ) )
+            sscanf( &result[strlen(image_ms)], "%d", &pic->milliseconds );
+        else if( !memcmp( result, q_factor, strlen(q_factor) ) )
+            sscanf( &result[strlen(q_factor)], "%d", &pic->factor );
+        else if( !memcmp( result, alarm_comment, strlen(alarm_comment) ) )
+            av_strlcpy( pic->alarm, &result[strlen(alarm_comment)], sizeof(pic->alarm) );
+        else if( !memcmp( result, active_alarms, strlen(active_alarms) ) )
+            sscanf( &result[strlen(active_alarms)], "%X", &pic->alm_bitmask );
+        else if( !memcmp( result, active_detectors, strlen(active_detectors) ) )
+            sscanf( &result[strlen(active_detectors)], "%X", &pic->alm_bitmask_hi );
+        else if( !memcmp( result, script_msg, strlen(script_msg) ) ) {
+        }
+        else if( !memcmp( result, time_zone, strlen(time_zone) ) )
+            av_strlcpy( pic->locale, &result[strlen(time_zone)], sizeof(pic->locale) );
+        else if( !memcmp( result, utc_offset, strlen(utc_offset) ) )
+            sscanf( &result[strlen(utc_offset)], "%d", &pic->utc_offset );
+        else {
+            // Any line we don't explicitly detect for extraction to the pic
+            // struct, we must add to the additional text block
+            if ( (additionalText != NULL) && (strlen(result) > 0) )  {
+                int             strLen  = 0;
+                const char      lineEnd[3] = { '\r', '\n', '\0' };
+
+                // Get the length of the existing text block if it exists
+                if( *additionalText != NULL )
+                    strLen = strlen( *additionalText );
+
+                // Ok, now allocate some space to hold the new string
+                *additionalText = av_realloc( *additionalText, strLen + strlen(result) + 3 );
+
+                // Copy the line into the text block
+                memcpy( &(*additionalText)[strLen], result, strlen(result) );
+
+                // Add a \r\n and NULL termination
+                memcpy( &(*additionalText)[strLen + strlen(result)], lineEnd, 3 );
+            }
+        }
+    }
+
+    pic->session_time = mktime(&t) - delta_h;
+}
diff --git a/libavformat/adjfif.h b/libavformat/adjfif.h
new file mode 100644
index 0000000000..b59c5823c7
--- /dev/null
+++ b/libavformat/adjfif.h
@@ -0,0 +1,41 @@
+/*
+ * Helper functions for converting between raw JPEG data with
+ * NetVuImageData header and full JFIF image (and vice-versa)
+ *
+ * Copyright (c) 2006-2010 AD-Holdings plc
+ * Modified for FFmpeg by Tom Needham <06needhamt at gmail.com> (2018)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Helper functions for converting between raw JPEG data with
+ * NetVuImageData header and full JFIF image (and vice-versa)
+ */
+
+#ifndef AVFORMAT_ADJFIF_H
+#define AVFORMAT_ADJFIF_H
+
+#include "ds_exports.h"
+
+extern unsigned int build_jpeg_header(void *jfif, struct NetVuImageData *pic,
+                                      unsigned int max);
+extern int parse_jfif(AVFormatContext *s, unsigned char *data,
+                      struct NetVuImageData *pic, int imgSize, char **text);
+
+#endif
diff --git a/libavformat/admime.c b/libavformat/admime.c
new file mode 100644
index 0000000000..47d9ece2e4
--- /dev/null
+++ b/libavformat/admime.c
@@ -0,0 +1,822 @@
+/*
+ * AD-Holdings demuxer for AD stream format (MIME)
+ * Copyright (c) 2006-2010 AD-Holdings plc
+ * Modified for FFmpeg by Tom Needham <06needhamt at gmail.com> (2018)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file AD-Holdings demuxer for AD stream format (MIME)
+ */
+
+#include <strings.h>
+
+#include "avformat.h"
+#include "adffmpeg_errors.h"
+#include "adpic.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/avstring.h"
+
+
+#define TEMP_BUFFER_SIZE        1024
+#define MAX_IMAGE_SIZE          (256 * 1024)
+
+// This value is only used internally within the library; DATA_PLAINTEXT blocks
+// should not be exposed to the client
+#define DATA_PLAINTEXT          (AD_DATATYPE_MAX + 1)
+
+
+static const char *     BOUNDARY_PREFIX1 = "--0plm(";
+static const char *     BOUNDARY_PREFIX2 = "Í -0plm(";
+static const char *     BOUNDARY_PREFIX3 = "ÍÍ 0plm(";
+static const char *     BOUNDARY_PREFIX4 = "ÍÍÍ plm(";
+static const char *     BOUNDARY_PREFIX5 = "ÍÍÍÍ lm(";
+static const char *     BOUNDARY_PREFIX6 = "ÍÍÍÍÍ m(";
+static const char *     BOUNDARY_PREFIX7 = "/r--0plm(";
+static const char *     BOUNDARY_SUFFIX  = ":Server-Push:Boundary-String)1qaz";
+
+static const char *     MIME_TYPE_JPEG   = "image/jpeg";
+static const char *     MIME_TYPE_MP4    = "image/admp4";
+static const char *     MIME_TYPE_TEXT   = "text/plain";
+static const char *     MIME_TYPE_XML    = "text/xml";
+static const char *     MIME_TYPE_ADPCM  = "audio/adpcm";
+static const char *     MIME_TYPE_LAYOUT = "data/layout";
+static const char *     MIME_TYPE_PBM    = "image/pbm";
+static const char *     MIME_TYPE_H264   = "image/adh264";
+
+static const uint8_t rawJfifHeader[] = { 0xff, 0xd8, 0xff, 0xe0,
+                                         0x00, 0x10, 0x4a, 0x46,
+                                         0x49, 0x46, 0x00, 0x01 };
+
+
+
+/**
+ * Validates a multipart MIME boundary separator against the convention used by
+ * NetVu video servers
+ *
+ * \param buf Buffer containing the boundary separator
+ * \param bufLen Size of the buffer
+ * \return 1 if boundary separator is valid, 0 if not
+ */
+static int is_valid_separator( unsigned char * buf, int bufLen )
+{
+    if (buf == NULL )
+        return FALSE;
+
+    if (bufLen < strlen(BOUNDARY_PREFIX1) + strlen(BOUNDARY_SUFFIX) )
+        return FALSE;
+
+    if ((strncmp(buf, BOUNDARY_PREFIX1, strlen(BOUNDARY_PREFIX1)) == 0) ||
+        (strncmp(buf, BOUNDARY_PREFIX2, strlen(BOUNDARY_PREFIX2)) == 0) ||
+        (strncmp(buf, BOUNDARY_PREFIX3, strlen(BOUNDARY_PREFIX3)) == 0) ||
+        (strncmp(buf, BOUNDARY_PREFIX4, strlen(BOUNDARY_PREFIX4)) == 0) ||
+        (strncmp(buf, BOUNDARY_PREFIX5, strlen(BOUNDARY_PREFIX5)) == 0) ||
+        (strncmp(buf, BOUNDARY_PREFIX6, strlen(BOUNDARY_PREFIX6)) == 0) ||
+        (strncmp(buf, BOUNDARY_PREFIX7, strlen(BOUNDARY_PREFIX7)) == 0) ) {
+        unsigned char *     b = &buf[strlen(BOUNDARY_PREFIX1)];
+
+        // Now we have a server type string. We must skip past this
+        while( !av_isspace(*b) && *b != ':' && (b - buf) < bufLen ) {
+            b++;
+        }
+
+        if (*b == ':' ) {
+            if ((b - buf) + strlen(BOUNDARY_SUFFIX)  <= bufLen ) {
+                if (strncmp(b, BOUNDARY_SUFFIX, strlen(BOUNDARY_SUFFIX)) == 0 )
+                    return TRUE;
+            }
+        }
+    }
+
+    return FALSE;
+}
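+
+/*
+ * For reference, a separator this check accepts looks roughly like
+ * "--0plm(<server-type>:Server-Push:Boundary-String)1qaz", where
+ * <server-type> is an arbitrary server-dependent token; the prefix may also
+ * arrive in one of the corrupted variants listed above.
+ */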
+
+
+/**
+ * Parse a line of MIME data
+ */
+static int process_line(char *line, int *line_count, int *dataType,
+                        int *size, long *extra )
+{
+    char *tag, *p = NULL;
+    int         http_code = 0;
+
+    // end of header
+    if (line[0] == '\0')
+        return 0;
+
+    p = line;
+
+    // The boundary string is missing sometimes so check for the HTTP header
+    // and skip to line 1
+    if(*line_count == 0 && 'H' == *(p) && 'T' == *(p + 1) && 'T' == *(p + 2) && 'P' == *(p + 3))
+        *line_count = 1;
+
+    // The first valid line will be the boundary string - validate this here
+    if (*line_count == 0 ) {
+        if (is_valid_separator( p, strlen(p) ) == FALSE )
+            return -1;
+    }
+    else if (*line_count == 1 ) { // Second line will contain the HTTP status code
+        while (!av_isspace(*p) && *p != '\0')
+            p++;
+        while (av_isspace(*p))
+            p++;
+        http_code = strtol(p, NULL, 10);
+    }
+    else {
+        // Any other line we are just looking for particular headers
+        // if we find them, we fill in the appropriate output data
+        while (*p != '\0' && *p != ':')
+            p++;
+        if (*p != ':')
+            return 1;
+
+        *p = '\0';
+        tag = line;
+        p++;
+        while (av_isspace(*p))
+            p++;
+
+        if (!strcmp(tag, "Content-length")) {
+            *size = strtol(p, NULL, 10);
+
+            if (*size == 0 )
+                return -1;
+        }
+
+        if (!strcmp(tag, "Content-type")) {
+            // Work out what type we actually have
+            if (av_strcasecmp(p, MIME_TYPE_JPEG ) == 0 )
+                *dataType = AD_DATATYPE_JFIF;
+            // Or if it starts image/admp4 - this covers all the supported mp4
+            // variations (i and p frames)
+            else if(av_strncasecmp(p, MIME_TYPE_MP4, strlen(MIME_TYPE_MP4) ) == 0) {
+                // P for now - as they are both processed the same subsequently
+                *dataType = AD_DATATYPE_MPEG4P;
+            }
+            else if (av_strcasecmp(p, MIME_TYPE_TEXT ) == 0 )
+                *dataType = DATA_PLAINTEXT;
+            else if (av_strcasecmp(p, MIME_TYPE_LAYOUT ) == 0 )
+                *dataType = AD_DATATYPE_LAYOUT;
+            else if (av_strcasecmp(p, MIME_TYPE_XML ) == 0 )
+                *dataType = AD_DATATYPE_XML_INFO;
+            else if(av_strncasecmp(p, MIME_TYPE_ADPCM, strlen(MIME_TYPE_ADPCM)) == 0) {
+                *dataType = AD_DATATYPE_AUDIO_ADPCM;
+
+                // If we find audio in a mime header, we need to extract the
+                // mode out. The header takes the form,
+                // Content-Type: audio/adpcm;rate=<mode>
+                while (*p != '\0' && *p != ';')
+                    p++;
+
+                if (*p != ';' )
+                    return 1;
+
+                p++;
+                while (av_isspace(*p))
+                    p++;
+
+                // p now pointing at the rate. Look for the first '='
+                while (*p != '\0' && *p != '=')
+                    p++;
+                if (*p != '=')
+                    return 1;
+
+                p++;
+
+                tag = p;
+
+                while( *p != '\0' && !av_isspace(*p) )
+                    p++;
+
+                if (*p != '\0' )
+                    *p = '\0';
+
+                *extra = strtol(tag, NULL, 10);
+
+                // Map the rate to an RTP payload value for consistency -
+                // the other audio headers contain mode values in this format
+                if (*extra == 8000 )
+                    *extra = RTP_PAYLOAD_TYPE_8000HZ_ADPCM;
+                else if (*extra == 11025 )
+                    *extra = RTP_PAYLOAD_TYPE_11025HZ_ADPCM;
+                else if (*extra == 16000 )
+                    *extra = RTP_PAYLOAD_TYPE_16000HZ_ADPCM;
+                else if (*extra == 22050 )
+                    *extra = RTP_PAYLOAD_TYPE_22050HZ_ADPCM;
+                else if (*extra == 32000 )
+                    *extra = RTP_PAYLOAD_TYPE_32000HZ_ADPCM;
+                else if (*extra == 44100 )
+                    *extra = RTP_PAYLOAD_TYPE_44100HZ_ADPCM;
+                else if (*extra == 48000 )
+                    *extra = RTP_PAYLOAD_TYPE_48000HZ_ADPCM;
+                else
+                    *extra = RTP_PAYLOAD_TYPE_8000HZ_ADPCM; // Default
+            }
+            else if (av_strcasecmp(p, MIME_TYPE_PBM ) == 0 )  {
+                *dataType = AD_DATATYPE_PBM;
+            }
+            else if(av_strncasecmp(p, MIME_TYPE_H264, strlen(MIME_TYPE_H264) ) == 0) {
+                // P for now - as they are both processed the same subsequently
+                *dataType = AD_DATATYPE_H264P;
+            }
+            else  {
+                *dataType = AD_DATATYPE_MAX;
+            }
+        }
+    }
+    return 1;
+}
+
+/**
+ * Read and process MIME header information
+ *
+ * \return Zero on successful decode, -2 if a raw JPEG, anything else indicates
+ *         failure
+ */
+static int parse_mime_header(AVIOContext *pb, uint8_t *buffer, int *bufSize,
+                             int *dataType, int *size, long *extra)
+{
+    unsigned char ch, *q = NULL;
+    int           err, lineCount = 0;
+    const int     maxBufSize = *bufSize;
+
+    *bufSize = 0;
+
+    // Check for JPEG header first
+    do {
+        if (avio_read(pb, &ch, 1) < 0)
+            break;
+        if (buffer && (ch == rawJfifHeader[*bufSize]))  {
+            buffer[*bufSize] = ch;
+            ++(*bufSize);
+            if ((*bufSize) == sizeof(rawJfifHeader))
+                return -2;
+        }
+        else
+            break;
+    } while( (*bufSize) < sizeof(rawJfifHeader));
+
+    q = buffer + *bufSize;
+    // Try and parse the header
+    for(;;) {
+        if (ch == '\n') {
+            // process line
+            if (q > buffer && q[-1] == '\r')
+                q--;
+            *q = '\0';
+
+            err = process_line( buffer, &lineCount, dataType, size, extra );
+            // First line contains a \n
+            if (!(err == 0 && lineCount == 0) ) {
+                if (err < 0 )
+                    return err;
+
+                if (err == 0 )
+                    return 0;
+                lineCount++;
+            }
+
+            q = buffer;
+        }
+        else {
+            if ((q - buffer) < (maxBufSize - 1))
+                *q++ = ch;
+            else
+                return ADFFMPEG_AD_ERROR_PARSE_MIME_HEADER;
+        }
+
+        err = avio_read(pb, &ch, 1);
+        if (err < 0)  {
+            if (pb->eof_reached)
+                return err;
+            else
+                return ADFFMPEG_AD_ERROR_PARSE_MIME_HEADER;
+        }
+    }
+
+    return ADFFMPEG_AD_ERROR_PARSE_MIME_HEADER;
+}
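+
+/*
+ * Loosely, the kind of part header this expects (values are illustrative
+ * only and vary between servers):
+ *
+ *     --0plm(<server-type>:Server-Push:Boundary-String)1qaz
+ *     HTTP/1.1 200 OK
+ *     Content-length: 4096
+ *     Content-type: image/admp4
+ *
+ * terminated by an empty line, after which the payload bytes follow.
+ */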
+
+
+/**
+ * Parse a line of MIME data for MPEG video frames
+ */
+static int process_mp4data_line( char *line, int line_count,
+                                 struct NetVuImageData *vidDat, struct tm *tim,
+                                 char ** txtDat )
+{
+    static const int titleLen = sizeof(vidDat->title) / sizeof(vidDat->title[0]);
+    char        *tag = NULL, *p = NULL;
+    int         lineLen = 0;
+
+    // end of header
+    if (line[0] == '\0')
+        return 0;
+
+    p = line;
+
+
+    while (*p != '\0' && *p != ':')
+        p++;
+    if (*p != ':')
+        return 1;
+
+    tag = line;
+    p++;
+
+    while( *p != '\0' && *p == ' '  )  // While the current char is a space
+        p++;
+
+    if (*p == '\0')
+        return 1;
+    else {
+        char * temp = p;
+
+        // Get the length of the rest of the line
+        while( *temp != '\0' )
+            temp++;
+
+        lineLen = temp - line;
+    }
+
+    if (!memcmp( tag, "Number", strlen( "Number" ) ) )
+        vidDat->cam = strtol(p, NULL, 10);
+    else if (!memcmp( tag, "Name", strlen( "Name" ) ) )
+        memcpy( vidDat->title, p, FFMIN( titleLen, strlen(p) ) );
+    else if (!memcmp( tag, "Version", strlen( "Version" ) ) )  {
+        int verNum = strtod(p, NULL) * 100.0 - 1;
+        vidDat->version = 0xDECADE10 + verNum;
+    }
+    else if (!memcmp( tag, "Date", strlen( "Date" ) ) ) {
+        sscanf( p, "%d/%d/%d", &tim->tm_mday, &tim->tm_mon, &tim->tm_year
);
+        tim->tm_year -= 1900;
+        tim->tm_mon--;
+
+        if ((tim->tm_sec != 0) || (tim->tm_min != 0) || (tim->tm_hour != 0))
+            vidDat->session_time = mktime(tim);
+    }
+    else if (!memcmp( tag, "Time", strlen( "Time" ) ) ) {
+        sscanf( p, "%d:%d:%d", &tim->tm_hour, &tim->tm_min, &tim->tm_sec );
+
+        if (tim->tm_year != 0)
+            vidDat->session_time = mktime(tim);
+    }
+    else if (!memcmp( tag, "MSec", strlen( "MSec" ) ) )
+        vidDat->milliseconds = strtol(p, NULL, 10);
+    else if (!memcmp( tag, "Locale", strlen( "Locale" ) ) )
+        memcpy( vidDat->locale, p, FFMIN( titleLen, strlen(p) ) );
+    else if (!memcmp( tag, "UTCoffset", strlen( "UTCoffset" ) ) )
+        vidDat->utc_offset = strtol(p, NULL, 10);
+    else {
+        // Any lines that aren't part of the pic struct,
+        // tag onto the additional text block
+        // \todo Parse out some of these and put them into metadata
+        if (txtDat != NULL && lineLen > 0 ) {
+#define LINE_END_LEN        3
+            int             strLen  = 0;
+            const char      lineEnd[LINE_END_LEN] = { '\r', '\n', '\0' };
+
+            // Get the length of the existing text block if it exists
+            if (*txtDat != NULL )
+                strLen = strlen( *txtDat );
+
+            // Ok, now allocate some space to hold the new string
+            *txtDat = av_realloc(*txtDat, strLen + lineLen + LINE_END_LEN);
+
+            // Copy the line into the text block
+            memcpy( &(*txtDat)[strLen], line, lineLen );
+
+            // Add a NULL terminator
+            memcpy( &(*txtDat)[strLen + lineLen], lineEnd, LINE_END_LEN );
+        }
+    }
+
+    return 1;
+}
+
+/**
+ * Parse MIME data that is sent after each MPEG video frame
+ */
+static int parse_mp4_text_data( unsigned char *mp4TextData, int bufferSize,
+                                struct NetVuImageData *vidDat, char **txtDat )
+{
+    unsigned char               buffer[TEMP_BUFFER_SIZE];
+    int                         ch, err;
+    unsigned char *             q = NULL;
+    int                         lineCount = 0;
+    unsigned char *             currentChar = mp4TextData;
+    struct tm                   tim;
+
+    memset( &tim, 0, sizeof(struct tm) );
+
+    // Try and parse the header
+    q = buffer;
+    for(;;) {
+        ch = *currentChar++;
+
+        if (ch < 0)
+            return 1;
+
+        if (ch == '\n') {
+            // process line
+            if (q > buffer && q[-1] == '\r')
+                q--;
+            *q = '\0';
+
+            err = process_mp4data_line(buffer, lineCount, vidDat, &tim, txtDat);
+
+            if (err < 0 )
+                return err;
+
+            if (err == 0 )
+                return 0;
+
+            // Check we're not at the end of the buffer. If the following
+            // statement is true and we haven't encountered an error then we've
+            // finished parsing the buffer
+            if (err == 1 ) {
+                // Not particularly happy with this code but it seems there's
+                // little consistency in the way these buffers end. This block
+                // catches all of those variations. The variations that indicate
+                // the end of an MP4 MIME text block are:
+                //
+                // 1. The amount of buffer parsed successfully is equal to the
+                //    total buffer size
+                //
+                // 2. The buffer ends with two NULL characters
+                //
+                // 3. The buffer ends with the sequence \r\n\0
+
+                if (currentChar - mp4TextData == bufferSize )
+                    return 0;
+
+                // CS - I *think* lines should end either when we've processed
+                // all the buffer OR it's padded with 0s
+                // Is detection of a NULL character here sufficient?
+                if (*currentChar == '\0' )
+                    return 0;
+            }
+
+            lineCount++;
+            q = buffer;
+        }
+        else {
+            if ((q - buffer) < sizeof(buffer) - 1)
+                *q++ = ch;
+        }
+    }
+
+    return 1;
+}
+
+/**
+ * MPEG4 or H264 video frame with a MIME trailer
+ */
+static int admime_mpeg(AVFormatContext *s,
+                       AVPacket *pkt, int size, long *extra,
+                       struct NetVuImageData *vidDat, char **txtDat,
+                       int adDataType)
+{
+    AVIOContext *pb = s->pb;
+    AdContext* adContext = s->priv_data;
+    int errorVal = 0;
+    int mimeBlockType = 0;
+    uint8_t buf[TEMP_BUFFER_SIZE];
+    int bufSize = TEMP_BUFFER_SIZE;
+
+    // Fields are set manually from MIME data with these types so need
+    // to set everything to zero initially in case some values aren't
+    // available
+    memset(vidDat, 0, sizeof(struct NetVuImageData));
+
+    // Allocate a new packet to hold the frame's image data
+    if (ad_new_packet(pkt, size) < 0 )
+        return ADFFMPEG_AD_ERROR_MPEG4_MIME_NEW_PACKET;
+
+    // Now read the frame data into the packet
+    if (avio_read( pb, pkt->data, size ) != size )
+        return ADFFMPEG_AD_ERROR_MPEG4_MIME_GET_BUFFER;
+
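+    // Pin down the stream's codec if we haven't already: trust the AD data
+    // type for H.264, otherwise inspect the frame's start code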
+    if (adContext->streamDatatype == 0)  {
+        if (adDataType == AD_DATATYPE_H264I)
+            adContext->streamDatatype = PIC_MODE_H264I;
+        else if (adDataType == AD_DATATYPE_H264P)
+            adContext->streamDatatype = PIC_MODE_H264P;
+        else
+            adContext->streamDatatype = mpegOrH264(AV_RB32(pkt->data));
+    }
+    vidDat->vid_format = adContext->streamDatatype;
+
+    // Now we should have a text block following this which contains the
+    // frame data that we can place in a _image_data struct
+    if (parse_mime_header(pb, buf, &bufSize, &mimeBlockType, &size, extra ) != 0)
+        return ADFFMPEG_AD_ERROR_MPEG4_MIME_PARSE_HEADER;
+
+    // Validate the data type and then extract the text buffer
+    if (mimeBlockType == DATA_PLAINTEXT ) {
+        unsigned char *textBuffer = av_malloc( size );
+
+        if (textBuffer != NULL ) {
+            if (avio_read( pb, textBuffer, size ) == size ) {
+                // Now parse the text buffer and populate the
+                // _image_data struct
+                if (parse_mp4_text_data(textBuffer, size, vidDat, txtDat ) != 0) {
+                    av_free( textBuffer );
+                    return ADFFMPEG_AD_ERROR_MPEG4_MIME_PARSE_TEXT_DATA;
+                }
+            }
+            else {
+                av_free( textBuffer );
+                return ADFFMPEG_AD_ERROR_MPEG4_MIME_GET_TEXT_BUFFER;
+            }
+
+            av_free( textBuffer );
+        }
+        else {
+            return AVERROR(ENOMEM);
+        }
+    }
+    return errorVal;
+}
+
+
+/**
+ * Audio frame
+ */
+static int ad_read_audio(AVFormatContext *s,
+                         AVPacket *pkt, int size, long extra,
+                         struct NetVuAudioData *audDat,
+                         enum AVCodecID codec_id)
+{
+    AVIOContext *pb = s->pb;
+
+    // No presentation information is sent with audio frames in a mime
+    // stream so there's not a lot we can do here other than ensure the
+    // struct contains the size of the audio data
+    audDat->sizeOfAudioData = size;
+    audDat->mode = extra;
+    audDat->seconds = 0;
+    audDat->msecs = 0;
+    audDat->channel = 0;
+    audDat->sizeOfAdditionalData = 0;
+    audDat->additionalData = NULL;
+
+    if (ad_new_packet(pkt, size) < 0)
+        return ADFFMPEG_AD_ERROR_AUDIO_ADPCM_MIME_NEW_PACKET;
+
+    // Now get the actual audio data
+    if (avio_read( pb, pkt->data, size) != size)
+        return ADFFMPEG_AD_ERROR_AUDIO_ADPCM_MIME_GET_BUFFER;
+
+    if (codec_id == AV_CODEC_ID_ADPCM_IMA_WAV)
+        audiodata_network2host(pkt->data, pkt->data, size);
+
+    return 0;
+}
+
+
+/**
+ * Invalid mime header.
+ *
+ * However, sometimes the header is missing and then there is a valid image, so
+ * try and parse a frame out anyway.
+ */
+static int handleInvalidMime(AVFormatContext *s,
+                             uint8_t *preRead, int preReadSize, AVPacket *pkt,
+                             int *data_type, int *size, int *imgLoaded)
+{
+    AVIOContext *pb = s->pb;
+    int errorVal = 0;
+    unsigned char chkByte;
+    int status, read, found = FALSE;
+    uint8_t imageData[MAX_IMAGE_SIZE];
+
+    for(read = 0; read < preReadSize; read++)  {
+        imageData[read] = preRead[read];
+    }
+
+    //Set the data type
+    *data_type = AD_DATATYPE_JFIF;
+
+    // Read more data till we find end of image marker
+    for (; !found && (pb->eof_reached==0) && (pb->error==0); read++) {
+        if (read >= MAX_IMAGE_SIZE)
+            return ADFFMPEG_AD_ERROR_PARSE_MIME_HEADER;
+
+        if (avio_read(pb, &chkByte, 1) < 0)
+            break;
+        imageData[read] = chkByte;
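+        // 0xFF 0xD9 is the JPEG End Of Image (EOI) marker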
+        if(imageData[read - 1] == 0xFF && imageData[read] == 0xD9)
+            found = TRUE;
+    }
+
+    *size = read;
+    if ((status = ad_new_packet(pkt, *size)) < 0) {
+        av_log(s, AV_LOG_ERROR, "handleInvalidMime: ad_new_packet (size %d)"
+                                " failed, status %d\n", *size, status);
+        return ADFFMPEG_AD_ERROR_NEW_PACKET;
+    }
+
+    memcpy(pkt->data, imageData, *size);
+    *imgLoaded = TRUE;
+
+    return errorVal;
+}
+
+
+/**
+ * Identify if the stream is an AD MIME stream
+ */
+static int admime_probe(AVProbeData *p)
+{
+    int offset = 0;
+    int ii, matchedBytes = 0;
+
+    if (p->buf_size <= sizeof(BOUNDARY_PREFIX1))
+        return 0;
+
+    // This is nasty but it's got to go here as we don't want to try and deal
+    // with fixes for certain server nuances in the HTTP layer.
+    // DS2 servers seem to end their HTTP header section with the byte sequence,
+    // 0x0d, 0x0a, 0x0d, 0x0a, 0x0a
+    // Eco9 server ends its HTTP headers section with the sequence,
+    // 0x0d, 0x0a, 0x0d, 0x0a, 0x0d, 0x0a
+    // Both of which are incorrect. We'll try and detect these cases here and
+    // make adjustments to the buffers so that a standard validation routine
+    // can be called...
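+    // (A conforming HTTP header section ends with just 0x0d, 0x0a, 0x0d, 0x0a.)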
+
+    if (p->buf[0] == 0x0a )                             // DS2 detection
+        offset = 1;
+    else if (p->buf[0] == 0x0d &&  p->buf[1] == 0x0a )  // Eco 9 detection
+        offset = 2;
+
+    // Now check whether we have the start of a MIME boundary separator
+    if (is_valid_separator( &p->buf[offset], p->buf_size - offset ) > 0 )
+        return AVPROBE_SCORE_MAX;
+
+    // If server is only sending a single frame (i.e. fields=1) then it
+    // sometimes just sends the raw JPEG with no MIME or other header, check
+    // for this
+    if (p->buf_size >= sizeof(rawJfifHeader))  {
+        for(ii = 0; ii < sizeof(rawJfifHeader); ii++)  {
+            if (p->buf[ii] == rawJfifHeader[ii])
+                ++matchedBytes;
+        }
+        if (matchedBytes == sizeof(rawJfifHeader))
+            return AVPROBE_SCORE_MAX;
+    }
+
+    return 0;
+}
+
+static int admime_read_header(AVFormatContext *s)
+{
+    return ad_read_header(s, NULL);
+}
+
+
+static int admime_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+    //AdContext*              adContext = s->priv_data;
+    AVIOContext *           pb = s->pb;
+    void *                  payload = NULL;
+    char *                  txtDat = NULL;
+    int                     data_type = AD_DATATYPE_MAX;
+    int                     size = -1;
+    long                    extra = 0;
+    int                     errorVal = ADFFMPEG_AD_ERROR_UNKNOWN;
+    enum AVMediaType        mediaType = AVMEDIA_TYPE_UNKNOWN;
+    enum AVCodecID          codecId   = AV_CODEC_ID_NONE;
+    int                     imgLoaded = FALSE;
+    uint8_t                 buf[TEMP_BUFFER_SIZE];
+    int                     bufSize = TEMP_BUFFER_SIZE;
+    unsigned char *         tempbuf = NULL;
+
+    errorVal = parse_mime_header(pb, buf, &bufSize, &data_type, &size, &extra);
+    if(errorVal != 0 )  {
+        if (errorVal == -2)
+            errorVal = handleInvalidMime(s, buf, bufSize, pkt,
+                                         &data_type, &size, &imgLoaded);
+
+        if (errorVal < 0)  {
+            return errorVal;
+        }
+    }
+
+    // Prepare for video or audio read
+    errorVal = initADData(data_type, &mediaType, &codecId, &payload);
+    if (errorVal < 0)  {
+        if (payload != NULL )
+            av_free(payload);
+        return errorVal;
+    }
+
+    // Proceed based on the type of data in this frame
+    switch(data_type) {
+        case AD_DATATYPE_JPEG:
+            errorVal = ad_read_jpeg(s, pkt, payload, &txtDat);
+            break;
+        case AD_DATATYPE_JFIF:
+            errorVal = ad_read_jfif(s, pkt, imgLoaded, size, payload, &txtDat);
+            break;
+        case AD_DATATYPE_MPEG4I:
+        case AD_DATATYPE_MPEG4P:
+        case AD_DATATYPE_H264I:
+        case AD_DATATYPE_H264P:
+            errorVal = admime_mpeg(s, pkt, size, &extra, payload, &txtDat, data_type);
+            break;
+        case AD_DATATYPE_AUDIO_ADPCM:
+            errorVal = ad_read_audio(s, pkt, size, extra, payload, AV_CODEC_ID_ADPCM_IMA_WAV);
+            break;
+        case AD_DATATYPE_INFO:
+        case AD_DATATYPE_XML_INFO:
+        case AD_DATATYPE_SVARS_INFO:
+            // May want to handle INFO, XML_INFO and SVARS_INFO separately in future
+            errorVal = ad_read_info(s, pkt, size);
+            break;
+        case AD_DATATYPE_LAYOUT:
+            errorVal = ad_read_layout(s, pkt, size);
+            break;
+        case AD_DATATYPE_PBM:
+            errorVal = ad_read_overlay(s, pkt, 1, size, &txtDat);
+            break;
+        case AD_DATATYPE_BMP:
+        default: {
+            av_log(s, AV_LOG_WARNING, "admime_read_packet: No handler for "
+                   "data_type=%d\n", data_type);
+
+            // Would like to use avio_skip, but that needs seek support,
+            // so just read the data into a buffer then throw it away
+            tempbuf = av_malloc(size);
+            if (tempbuf)  {
+                avio_read(pb, tempbuf, size);
+                av_free(tempbuf);
+            }
+            else
+                return AVERROR(ENOMEM);
+
+            return ADFFMPEG_AD_ERROR_DEFAULT;
+        }
+        break;
+    }
+
+    if (errorVal >= 0)  {
+        errorVal = ad_read_packet(s, pkt, 1, mediaType, codecId, payload, txtDat);
+    }
+    else  {
+        // av_dlog(s, "admime_read_packet: Error %d\n", errorVal);
+
+#ifdef AD_SIDEDATA_IN_PRIV
+        // If there was an error, release any memory that has been allocated
+        if (payload != NULL)
+            av_free(payload);
+
+        if (txtDat != NULL)
+            av_free( txtDat );
+#endif
+    }
+
+#ifndef AD_SIDEDATA_IN_PRIV
+    if (payload != NULL)
+        av_freep(&payload);
+
+    if( txtDat != NULL )
+        av_freep(&txtDat);
+#endif
+
+    return errorVal;
+}
+
+static int admime_read_close(AVFormatContext *s)
+{
+    return 0;
+}
+
+
+AVInputFormat ff_admime_demuxer = {
+    .name           = "admime",
+    .long_name      = NULL_IF_CONFIG_SMALL("AD-Holdings video format (MIME)"),
+    .priv_data_size = sizeof(AdContext),
+    .read_probe     = admime_probe,
+    .read_header    = admime_read_header,
+    .read_packet    = admime_read_packet,
+    .read_close     = admime_read_close,
+    .flags          = AVFMT_TS_DISCONT | AVFMT_VARIABLE_FPS | AVFMT_NO_BYTE_SEEK,
+};
diff --git a/libavformat/adpic.h b/libavformat/adpic.h
new file mode 100644
index 0000000000..c8a27a6d29
--- /dev/null
+++ b/libavformat/adpic.h
@@ -0,0 +1,116 @@
+/*
+ * Type information and function prototypes for common AD-Holdings demuxer code
+ * Copyright (c) 2006-2010 AD-Holdings plc
+ * Modified for FFmpeg by Tom Needham <06needhamt at gmail.com> (2018)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Type information and function prototypes for common AD-Holdings demuxer code
+ */
+
+#ifndef AVFORMAT_ADPIC_H
+#define AVFORMAT_ADPIC_H
+
+#include "avformat.h"
+#include "ds_exports.h"
+
+
+#ifdef AD_SIDEDATA_IN_PRIV
+int ad_new_packet(AVPacket *pkt, int size);
+#else
+#define ad_new_packet av_new_packet
+#endif
+
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+/// These are the data types that are supported by the DS2 video servers
+enum ff_ad_data_type {  AD_DATATYPE_JPEG = 0,
+                        AD_DATATYPE_JFIF,
+                        AD_DATATYPE_MPEG4I,
+                        AD_DATATYPE_MPEG4P,
+                        AD_DATATYPE_AUDIO_ADPCM,
+                        AD_DATATYPE_AUDIO_RAW,
+                        AD_DATATYPE_MINIMAL_MPEG4,
+                        AD_DATATYPE_MINIMAL_AUDIO_ADPCM,
+                        AD_DATATYPE_LAYOUT,
+                        AD_DATATYPE_INFO,
+                        AD_DATATYPE_H264I,
+                        AD_DATATYPE_H264P,
+                        AD_DATATYPE_XML_INFO,
+                        AD_DATATYPE_BMP,
+                        AD_DATATYPE_PBM,
+                        AD_DATATYPE_SVARS_INFO,
+                        AD_DATATYPE_MAX
+                      };
+
+typedef struct {
+    int64_t lastVideoPTS;
+    int     utc_offset;     ///< Only used in minimal video case
+    int     metadataSet;
+    enum ff_ad_data_type streamDatatype;
+} AdContext;
+
+
+int ad_read_header(AVFormatContext *s, int *utcOffset);
+void ad_network2host(struct NetVuImageData *pic, uint8_t *data);
+int initADData(int data_type, enum AVMediaType *media, enum AVCodecID *codecId, void **payload);
+int ad_read_jpeg(AVFormatContext *s, AVPacket *pkt, struct NetVuImageData *vid, char **txt);
+int ad_read_jfif(AVFormatContext *s, AVPacket *pkt, int manual_size, int size,
+                 struct NetVuImageData *video_data, char **text_data);
+int ad_read_info(AVFormatContext *s, AVPacket *pkt, int size);
+int ad_read_layout(AVFormatContext *s, AVPacket *pkt, int size);
+int ad_read_overlay(AVFormatContext *s, AVPacket *pkt, int channel, int size, char **text_data);
+int ad_read_packet(AVFormatContext *s, AVPacket *pkt, int channel,
+                   enum AVMediaType mediaType, enum AVCodecID codecId,
+                   void *data, char *text_data);
+AVStream * ad_get_vstream(AVFormatContext *s, uint16_t w, uint16_t h,
+                          uint8_t cam, int format, const char *title);
+AVStream * ad_get_audio_stream(AVFormatContext *s, struct NetVuAudioData* audioHeader);
+void audiodata_network2host(uint8_t *data, const uint8_t *src, int size);
+int ad_adFormatToCodecId(AVFormatContext *s, int32_t adFormat);
+int mpegOrH264(unsigned int startCode);
+int ad_pbmDecompress(char **comment, uint8_t **src, int size, AVPacket *pkt, int *width, int *height);
+
+
+#define PIC_REVISION 1
+#define MIN_PIC_VERSION 0xDECADE10
+#define MAX_PIC_VERSION (MIN_PIC_VERSION + PIC_REVISION)
+#define PIC_VERSION (MIN_PIC_VERSION + PIC_REVISION)
+#define pic_version_valid(v) ( ( (v)>=MIN_PIC_VERSION ) && ( (v)<=MAX_PIC_VERSION ) )
+
+#define AUD_VERSION 0x00ABCDEF
+
+#define PIC_MODE_JPEG_422        0
+#define PIC_MODE_JPEG_411        1
+#define PIC_MODE_MPEG4_411       2
+#define PIC_MODE_MPEG4_411_I     3
+#define PIC_MODE_MPEG4_411_GOV_P 4
+#define PIC_MODE_MPEG4_411_GOV_I 5
+#define PIC_MODE_H264I           6
+#define PIC_MODE_H264P           7
+#define PIC_MODE_H264J           8
+
+#endif
diff --git a/libavformat/adraw.c b/libavformat/adraw.c
new file mode 100644
index 0000000000..c689ce4f56
--- /dev/null
+++ b/libavformat/adraw.c
@@ -0,0 +1,131 @@
+/*
+ * AD-Holdings demuxer for AD stream format (raw)
+ * Copyright (c) 2006-2010 AD-Holdings plc
+ * Modified for FFmpeg by Tom Needham <06needhamt at gmail.com> (2018)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * AD-Holdings demuxer for AD stream format (raw)
+ */
+
+#include "avformat.h"
+#include "adpic.h"
+
+
+/**
+ * Identify if the stream is an AD raw stream
+ */
+static int adraw_probe(AVProbeData *p)
+{
+    int bufferSize = p->buf_size;
+    uint8_t *bufPtr = p->buf;
+
+    while (bufferSize >= NetVuImageDataHeaderSize)  {
+        struct NetVuImageData test;
+        ad_network2host(&test, bufPtr);
+        if (pic_version_valid(test.version))  {
+            if (ad_adFormatToCodecId(NULL, test.vid_format) == AV_CODEC_ID_MJPEG)
+                return AVPROBE_SCORE_MAX / 2;
+        }
+        --bufferSize;
+        ++bufPtr;
+    }
+    return 0;
+}
+
+static int adraw_read_header(AVFormatContext *s)
+{
+    return ad_read_header(s, NULL);
+}
+
+static int adraw_read_packet(struct AVFormatContext *s, AVPacket *pkt)
+{
+    AVIOContext     *pb = s->pb;
+    uint8_t         *buf = NULL;
+    struct NetVuImageData  *vidDat = NULL;
+    char            *txtDat = NULL;
+    int             errVal = 0;
+    int             ii = 0;
+
+    vidDat = av_malloc(sizeof(struct NetVuImageData));
+    buf = av_malloc(sizeof(struct NetVuImageData));
+
+    if (!vidDat || !buf)
+        return AVERROR(ENOMEM);
+
+    // Scan for 0xDECADE11 marker
+    errVal = avio_read(pb, buf, sizeof(struct NetVuImageData));
+    while (errVal > 0)  {
+        ad_network2host(vidDat, buf);
+        if (pic_version_valid(vidDat->version))  {
+            break;
+        }
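+        // No valid header at this offset; slide the window on by one byte and
+        // read the next byte from the stream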
+        for(ii = 0; ii < (sizeof(struct NetVuImageData) - 1); ii++)  {
+            buf[ii] = buf[ii+1];
+        }
+        errVal = avio_read(pb, buf + sizeof(struct NetVuImageData) - 1, 1);
+    }
+    av_free(buf);
+
+    if (errVal > 0)  {
+        switch (ad_adFormatToCodecId(s, vidDat->vid_format))  {
+            case(AV_CODEC_ID_MJPEG):
+                errVal = ad_read_jpeg(s, pkt, vidDat, &txtDat);
+                break;
+            case(AV_CODEC_ID_MPEG4):
+            case(AV_CODEC_ID_H264):
+            default:
+                //errVal = adbinary_mpeg(s, pkt, vidDat, &txtDat);
+                av_log(s, AV_LOG_ERROR, "Unsupported format for adraw demuxer: "
+                        "%d\n", vidDat->vid_format);
+                break;
+        }
+    }
+    if (errVal >= 0)  {
+        errVal = ad_read_packet(s, pkt, 1, AVMEDIA_TYPE_VIDEO, AV_CODEC_ID_MJPEG,
+                                vidDat, txtDat);
+    }
+    else  {
+        // If there was an error, release any allocated memory
+        if( vidDat != NULL )
+            av_free( vidDat );
+
+        if( txtDat != NULL )
+            av_free( txtDat );
+    }
+
+    return errVal;
+}
+
+static int adraw_read_close(AVFormatContext *s)
+{
+    return 0;
+}
+
+
+AVInputFormat ff_adraw_demuxer = {
+    .name           = "adraw",
+    .long_name      = NULL_IF_CONFIG_SMALL("AD-Holdings video format (raw)"),
+    .read_probe     = adraw_probe,
+    .read_header    = adraw_read_header,
+    .read_packet    = adraw_read_packet,
+    .read_close     = adraw_read_close,
+    .flags          = AVFMT_TS_DISCONT | AVFMT_VARIABLE_FPS | AVFMT_NO_BYTE_SEEK,
+};
diff --git a/libavformat/allformats.c b/libavformat/allformats.c
index cd00834807..6988c87414 100644
--- a/libavformat/allformats.c
+++ b/libavformat/allformats.c
@@ -35,8 +35,12 @@ extern AVInputFormat  ff_ac3_demuxer;
 extern AVOutputFormat ff_ac3_muxer;
 extern AVInputFormat  ff_acm_demuxer;
 extern AVInputFormat  ff_act_demuxer;
+extern AVInputFormat  ff_adaudio_demuxer;
+extern AVInputFormat  ff_adbinary_demuxer;
 extern AVInputFormat  ff_adf_demuxer;
+extern AVInputFormat  ff_admime_demuxer;
 extern AVInputFormat  ff_adp_demuxer;
+extern AVInputFormat  ff_adraw_demuxer;
 extern AVInputFormat  ff_ads_demuxer;
 extern AVOutputFormat ff_adts_muxer;
 extern AVInputFormat  ff_adx_demuxer;
@@ -117,6 +121,7 @@ extern AVInputFormat  ff_dnxhd_demuxer;
 extern AVOutputFormat ff_dnxhd_muxer;
 extern AVInputFormat  ff_dsf_demuxer;
 extern AVInputFormat  ff_dsicin_demuxer;
+extern AVInputFormat  ff_dspic_demuxer;
 extern AVInputFormat  ff_dss_demuxer;
 extern AVInputFormat  ff_dts_demuxer;
 extern AVOutputFormat ff_dts_muxer;
@@ -494,6 +499,8 @@ extern AVOutputFormat ff_chromaprint_muxer;
 extern AVInputFormat  ff_libgme_demuxer;
 extern AVInputFormat  ff_libmodplug_demuxer;
 extern AVInputFormat  ff_libopenmpt_demuxer;
+extern AVInputFormat  ff_libparreader_demuxer;
+extern AVOutputFormat ff_libparreader_muxer;
 extern AVInputFormat  ff_vapoursynth_demuxer;

 #include "libavformat/muxer_list.c"
diff --git a/libavformat/ds.c b/libavformat/ds.c
new file mode 100644
index 0000000000..4765c51b67
--- /dev/null
+++ b/libavformat/ds.c
@@ -0,0 +1,1262 @@
+/*
+ * Protocol for AD-Holdings Digital Sprite stream format
+ * Copyright (c) 2006-2010 AD-Holdings plc
+ * Modified for FFmpeg by Tom Needham <06needhamt at gmail.com> (2018)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <strings.h>
+
+#include "avformat.h"
+#include "internal.h"
+#include "url.h"
+#include "libavutil/avstring.h"
+#include "libavutil/bswap.h"
+
+#include "adffmpeg_errors.h"
+#include "dsenc.h"
+#include "ds.h"
+
+/* -------------------------------------- Constants -------------------------------------- */
+#define DS_DEFAULT_PORT                             8234                /* Default TCP and UDP port for comms */
+#define DS_HEADER_MAGIC_NUMBER                      0xFACED0FF
+#define MAX_USER_ID_LENGTH                          30
+#define ACCESS_KEY_LENGTH                           36
+#define MAC_ADDR_LENGTH                             16
+#define UNIT_NAME_LENGTH                            32
+
+#define DS_WRITE_BUFFER_SIZE                        1024
+#define DS_READ_BUFFER_SIZE                         1024
+
+#define DS_PLAYBACK_MODE_LIVE                       0x00
+#define DS_PLAYBACK_MODE_PLAY                       0x01
+
+#define DS_RESOLUTION_HI                            2
+#define DS_RESOLUTION_MED                           1
+#define DS_RESOLUTION_LOW                           0
+
+
+#define SET_FLAG_ZONE_UNKNOWN(flags)    ((flags) |= 0x80)
+
+/* -------------------------------------- Structures/types -------------------------------------- */
+typedef struct _dsContext {
+    URLContext *        TCPContext; /* Context of the underlying TCP network connection */
+} DSContext;
+
+typedef struct _networkMessage {
+    MessageHeader       header;
+    void *              body;
+} NetworkMessage;
+
+typedef enum _imgControlMsgType {
+    IMG_LIVE,
+    IMG_PLAY,
+    IMG_GOTO,
+    IMG_DATA,
+    IMG_STOP
+} ImgControlMsgType;
+
+typedef struct _clientConnectMsg {
+    unsigned long           udpPort;
+    long                    connectType;        /* ImgControlMsgType enum. long used to avoid sizeof(enum) discrepancies */
+    char                    userID[MAX_USER_ID_LENGTH];
+    char                    accessKey[ACCESS_KEY_LENGTH];
+} ClientConnectMsg;
+#define VER_TCP_CLI_CONNECT                     0x00000003
+#define SIZEOF_TCP_CLI_CONNECT_IO               (MAX_USER_ID_LENGTH + ACCESS_KEY_LENGTH + 8)      /* Size in bytes of the ClientConnectMsg structure on the wire. Can't use sizeof to read/write one of these to network as structure packing may differ based on platform */
+
+typedef enum _netRejectReason {
+    REJECT_BUSY,
+    REJECT_INVALID_USER_ID,
+    REJECT_AUTHENTIFICATION_REQUIRED,
+    REJECT_AUTHENTIFICATION_INVALID,
+    REJECT_UNAUTHORISED,
+    REJECT_OTHER,
+    REJECT_PASSWORD_CHANGE_REQUIRED,
+    REJECT_OUT_OF_MEMORY,
+    REJECT_CORRUPT_USERS_FILES
+} NetRejectReason;
+
+typedef struct _srvConnectRejectMsg {
+    long                    reason;                /* enum NET_REJECT_REASON */
+    long                    timestamp;
+    char                    macAddr[MAC_ADDR_LENGTH];
+    unsigned long           appVersion;
+    unsigned long           minViewerVersion;
+} SrvConnectRejectMsg;
+#define VER_TCP_SRV_CONNECT_REJECT          0x00000001
+
+typedef enum _realmType {
+    REALM_LIVE,
+    REALM_PLAYBACK,
+    REALM_TELEM,
+    REALM_EVENTS,
+    REALM_ADMIN,
+    REALM_PASSWORD,
+    REALM_PW_ONCE,
+    REALM_VIEW_ALL,
+    REALM_MCI,
+    REALM_FILE_EXPORT,
+    REALM_WEB,
+    REALM_POS,
+    NUM_FIXED_REALMS,
+} RealmType;
+
+typedef struct _srvConnectReplyMsg {
+    long                    numCameras;
+    long                    viewableCamMask;
+    long                    telemetryCamMask;
+    long                    failedCamMask;
+    long                    maxMsgInterval;
+    int64_t                    timestamp;                          /* TODO: Verify - this was a 'hyper long' before. Assuming 64 bit value. Is this correct? Is solution portable? */
+    char                    cameraTitles[16][28];
+    long                    unitType;
+    unsigned long           applicationVersion;
+    long                    videoStandard;
+    char                    macAddr[MAC_ADDR_LENGTH];
+    char                    unitName[UNIT_NAME_LENGTH];
+    long                    numFixedRealms;                        /* Number of FIXED system realms */
+    unsigned long            realmFlags[NUM_FIXED_REALMS];        /* Indicates if user is in realm. */
+    unsigned long           minimumViewerVersion;
+} SrvConnectReplyMsg;
+#define VER_TCP_SRV_CONNECT_REPLY           0x00000001
+
+typedef struct _srvFeatureConnectReplyMsg {
+    long                    numCameras;
+    long                    viewableCamMask;
+    long                    telemetryCamMask;
+    long                    failedCamMask;
+    long                    maxMsgInterval;
+    int64_t                    timestamp;                          /* TODO: Verify - this was a 'hyper long' before. Assuming 64 bit value. Is this correct? Is solution portable? */
+    char                    cameraTitles[16][28];
+    long                    unitType;
+    unsigned long           applicationVersion;
+    long                    videoStandard;
+    char                    macAddr[MAC_ADDR_LENGTH];
+    char                    unitName[UNIT_NAME_LENGTH];
+
+    unsigned long           minimumViewerVersion;
+    unsigned long             unitFeature01;
+    unsigned long             unitFeature02;
+    unsigned long             unitFeature03;
+    unsigned long             unitFeature04;
+    long                    numFixedRealms;                   /* Number of FIXED system realms */
+    unsigned long            realmFlags[NUM_FIXED_REALMS];       /* Indicates if user is in realm. */
+} SrvFeatureConnectReplyMsg;
+#define VER_TCP_SRV_FEATURE_CONNECT_REPLY   0x00000002
+
+typedef struct _cliImgLiveRequestMsg {
+    long                    cameraMask;
+    long                    resolution;
+} CliImgLiveRequestMsg;
+#define VER_TCP_CLI_IMG_LIVE_REQUEST        0x00000001
+#define SIZEOF_TCP_CLI_IMG_LIVE_REQUEST_IO  8            /* Size in bytes of the CliImgLiveRequestMsg structure on the wire. Can't use sizeof to read/write one of these to network as structure packing may differ based on platform */
+
+typedef enum _vcrMode {
+    VM_PLAY,
+    VM_VIS_REW,
+    VM_VIS_FF,
+    VM_STOP,
+    VM_PLAY_SHUTTLE,
+    VM_FINISH
+} vcrMode;
+
+typedef struct _cliImgPlayRequestMsg {
+    long                    cameraMask;
+    long                    mode;                /*    (enum VCR_MODE) */
+    long                    pace;
+    int64_t                    fromTime;            /*        (time_u)        */
+    int64_t                    toTime;                /*        (time_u)        */
+} CliImgPlayRequestMsg;
+#define VER_TCP_CLI_IMG_PLAY_REQUEST        0x00000001
+#define SIZEOF_TCP_CLI_IMG_PLAY_REQUEST_IO  28            /* Size in bytes of the CliImgPlayRequestMsg structure on the wire. Can't use sizeof to read/write one of these to network as structure packing may differ based on platform */
+
+
+/* -------------------------------------- Local function declarations -------------------------------------- */
+static NetworkMessage *     CreateNetworkMessage( ControlMessageTypes messageType, long channelID );
+static void                 FreeNetworkMessage( NetworkMessage **message );
+
+static int DSOpen( URLContext *h, const char *uri, int flags );
+static int DSRead( URLContext *h, uint8_t *buf, int size );
+static int DSWrite( URLContext *h, const uint8_t *buf, int size );
+static int DSClose( URLContext *h );
+static int DSConnect( URLContext *h, const char *path, const char *hoststr, const char *auth );
+static inline int MessageSize( const NetworkMessage *message );
+
+static int ReceiveNetworkMessage( URLContext *h, NetworkMessage **message );
+static int ReadNetworkMessageHeader( URLContext *h, MessageHeader *header );
+static int ReadNetworkMessageBody( URLContext * h, NetworkMessage *message );
+
+static int SendNetworkMessage( URLContext *h, NetworkMessage *message );
+static void HToNMessageHeader( MessageHeader *header, unsigned char *buf );
+
+static int ReadConnectRejectMessage( URLContext * h, NetworkMessage *message );
+static int ReadConnectReplyMessage( URLContext * h, NetworkMessage *message );
+static int ReadFeatureConnectReplyMessage( URLContext * h, NetworkMessage *message );
+
+static int GetUserAndPassword( const char * auth, char *user, char *password  );
+static int CrackURI( const char *path, int *streamType, int *res, int *cam, time_t *from, time_t *to, int *rate, vcrMode *playMode );
+static int DSReadBuffer( URLContext *h, uint8_t *buffer, int size );
+static int64_t TimeTolong64( time_t time );
+static void NToHMessageHeader( MessageHeader *header );
+
+#if HAVE_BIGENDIAN
+#define HTON64(x)               x
+#define HTON32(x)               x
+#else
+#define HTON64(x)               (av_bswap64(x))
+#define HTON32(x)               (av_bswap32(x))
+#endif
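+/* The DS wire format uses network (big-endian) byte order, so values only need
+   byte-swapping on little-endian hosts. */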
+
+
+/****************************************************************************************************************
+ * Function: CreateNetworkMessage
+ * Desc: Allocates and initialises a NetworkMessage for sending to the server based on the given type and channel.
+ *       It is the caller's responsibility to release the NetworkMessage created by this function. This should be
+ *       done with the FreeNetworkMessage function.
+ * Params:
+ *  messageType - Type of the message to create
+ *  channelID - Channel for which the message is intended
+ * Return:
+ *   Pointer to new network message on success. NULL on failure
+ ****************************************************************************************************************/
+static NetworkMessage * CreateNetworkMessage( ControlMessageTypes messageType, long channelID )
+{
+    NetworkMessage *        newMessage = NULL;
+    int                     length = 0;
+    int                     version = 0;
+
+    /* Create a new message structure */
+    newMessage = av_mallocz( sizeof(NetworkMessage) );
+
+    if( newMessage != NULL ) {
+        /* Now allocate the body structure if we've been successful */
+        switch( messageType ) {
+                /* In normal cases, set whatever member variables we can in here also */
+            case TCP_CLI_CONNECT: {
+                if( (newMessage->body = av_mallocz( sizeof(ClientConnectMsg) )) != NULL ) {
+                    length = SIZEOF_TCP_CLI_CONNECT_IO;
+                    version = VER_TCP_CLI_CONNECT;
+                }
+                else
+                    goto fail;
+            }
+            break;
+
+            case TCP_CLI_IMG_LIVE_REQUEST: {
+                if( (newMessage->body = av_mallocz( sizeof(CliImgLiveRequestMsg) )) != NULL ) {
+                    length = SIZEOF_TCP_CLI_IMG_LIVE_REQUEST_IO;
+                    version = VER_TCP_CLI_IMG_LIVE_REQUEST;
+                }
+                else
+                    goto fail;
+            }
+            break;
+
+            case TCP_CLI_IMG_PLAY_REQUEST: {
+                if( (newMessage->body = av_mallocz( sizeof(CliImgPlayRequestMsg) )) != NULL ) {
+                    length = SIZEOF_TCP_CLI_IMG_PLAY_REQUEST_IO;
+                    version = VER_TCP_CLI_IMG_PLAY_REQUEST;
+                }
+                else
+                    goto fail;
+            }
+            break;
+
+            default: { /* Unknown (or unsupported) message type encountered */
+                goto fail;
+            }
+            break;
+        }
+
+        /* Set whatever header values we can in here */
+        newMessage->header.messageType = messageType;
+        newMessage->header.magicNumber = DS_HEADER_MAGIC_NUMBER;
+        newMessage->header.channelID = channelID;
+        newMessage->header.sequence = 0; /* Currently unsupported at server */
+        newMessage->header.checksum = 0; /* As suggested in protocol documentation */
+        newMessage->header.messageVersion = version;
+
+        /* Set the length of the remaining data (size of the message header - the magic number + size of the message body) */
+        newMessage->header.length = SIZEOF_MESSAGE_HEADER_IO - sizeof(unsigned long) + length;
+    }
+
+    return newMessage;
+
+fail:
+    /* Release whatever may have been allocated */
+    FreeNetworkMessage( &newMessage );
+
+    return NULL;
+}
+
+/****************************************************************************************************************
+ * Function: FreeNetworkMessage
+ * Desc: Releases the resources associated with a network message created with CreateNetworkMessage()
+ * Params:
+ *  message - Address of a pointer to a NetworkMessage struct allocated with CreateNetworkMessage
+ * Return:
+ ****************************************************************************************************************/
+static void FreeNetworkMessage( NetworkMessage **message )
+{
+    /* Simply cascade free all memory allocated */
+    if( *message ) {
+        if( (*message)->body ) {
+            av_free( (*message)->body );
+        }
+
+        av_free( *message );
+        *message = NULL;
+    }
+}
+
+/****************************************************************************************************************
+ * Function: DSOpen
+ * Desc: Opens a connection to a DM Digital Sprite video server and executes the initial connect transaction
+ * Params:
+ *  h - Pointer to URLContext struct used to store all connection info associated with this connection
+ *  uri - The URI indicating the server's location
+ *  flags - Flags indicating the type of connection required (URL_WRONLY, etc)
+ * Return:
+ *   0 on success, non 0 on failure
+ ****************************************************************************************************************/
+static int DSOpen( URLContext *h, const char *uri, int flags )
+{
+    char            hostname[1024], hoststr[1024];
+    char            auth[1024];
+    char            path1[1024];
+    char            buf[1024];
+    int             port, err;
+    const char *    path;
+    URLContext *    TCPContext = NULL;
+    DSContext *     s = NULL;
+
+    h->is_streamed = 1;
+
+    s = av_malloc( sizeof(DSContext) );
+    if (!s) {
+        return -ENOMEM;
+    }
+    h->priv_data = s;
+
+    /* Crack the URL */
+    av_url_split( NULL, 0, auth, sizeof(auth), hostname, sizeof(hostname), &port, path1, sizeof(path1), uri );
+
+    if (port > 0) {
+        snprintf( hoststr, sizeof(hoststr), "%s:%d", hostname, port );
+    }
+    else {
+        av_strlcpy( hoststr, hostname, sizeof(hoststr) );
+    }
+
+    /* Add the URL parameters (if any) */
+    if (path1[0] == '\0') {
+        path = "/";
+    }
+    else {
+        path = path1;
+    }
+
+    /* Assume default port for this protocol if one isn't supplied */
+    if (port < 0) {
+        port = DS_DEFAULT_PORT;
+    }
+
+    /* Form the appropriate TCP URL */
+    snprintf(buf, sizeof(buf), "tcp://%s:%d", hostname, port);
+
+    /* Now open a connection to that TCP address */
+    if( (err = ffurl_open(&TCPContext, buf, AVIO_FLAG_READ_WRITE, &h->interrupt_callback, NULL)) < 0 ) {
+        goto fail;
+    }
+
+    /* Save the TCP context */
+    s->TCPContext = TCPContext;
+
+    /* Now initiate connection using the DS protocol */
+    if( (err = DSConnect( h, path, hoststr, auth )) < 0 ) {
+        goto fail;
+    }
+
+    return 0;
+fail:
+    if( TCPContext ) {
+        ffurl_close( TCPContext );
+    }
+
+    av_free( s );
+
+    return err;
+}
+
+/****************************************************************************************************************
+ * Function: DSRead
+ * Desc: Reads data from the connection
+ * Params:
+ *  h - Pointer to URLContext struct used to store all connection info associated with this connection
+ *  buf - Buffer to which read data will be written
+ *  size - Number of bytes to read into buf
+ * Return:
+ *   0 on success, non 0 on failure
+ ****************************************************************************************************************/
+static int DSRead( URLContext *h, uint8_t *buf, int size )
+{
+    /* All we need to do in here is call the generic read function on our underlying TCP connection */
+    DSContext *         context = (DSContext *)h->priv_data;
+
+    if( context != NULL )
+        return ffurl_read( context->TCPContext, buf, size );
+
+    return AVERROR(EIO);
+}
+
+/****************************************************************************************************************
+ * Function: DSWrite
+ * Desc: Writes data to the connection
+ * Params:
+ *  h - Pointer to URLContext struct used to store all connection info associated with this connection
+ *  buf - Buffer from which data will be written
+ *  size - Number of bytes to write from buf
+ * Return:
+ *   0 on success, non 0 on failure
+ ****************************************************************************************************************/
+static int DSWrite( URLContext *h, const uint8_t *buf, int size )
+{
+    /* All we need to do in here is call the generic write function on our underlying TCP connection */
+    DSContext *     context = (DSContext *)h->priv_data;
+
+    if( context != NULL )
+        return ffurl_write( context->TCPContext, buf, size );
+
+    return AVERROR(EIO);
+}
+
+/****************************************************************************************************************
+ * Function: DSClose
+ * Desc: Closes the connection to the DM Digital Sprite video server
+ * Params:
+ *  h - Pointer to URLContext struct used to store all connection info associated with this connection
+ * Return:
+ *   0 on success, non 0 on failure
+ ****************************************************************************************************************/
+static int DSClose( URLContext *h )
+{
+    DSContext * context = (DSContext *)h->priv_data;
+
+    if( context != NULL ) {
+        ffurl_close( context->TCPContext );
+
+        av_free( context );
+    }
+
+    return 0;
+}
+
+/****************************************************************************************************************
+ * Function: MessageSize
+ * Desc: Calculates the size in bytes of the given NetworkMessage
+ * Params:
+ *  message - Pointer to the message for which the size is required
+ * Return:
+ *   The size of the message, -1 if no message passed
+ ****************************************************************************************************************/
+static inline int MessageSize( const NetworkMessage *message )
+{
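+    /* header.length covers everything after the magic number, so add the size
+       of the magic number back on to get the full message size */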
+    if( message )
+        return message->header.length + sizeof(unsigned long);
+
+    return -1;
+}
+
+/****************************************************************************************************************
+ * Function: DSConnect
+ * Desc: Attempts to connect to the DM Digital Sprite video server according to its connection protocol
+ * Params:
+ *  h - Pointer to URLContext struct used to store all connection info associated with this connection
+ *  path - TODO: Need to confirm whether these params are actually needed
+ *  hoststr -
+ *  auth - Authentication credentials
+ * Return:
+ *   0 on success, non 0 on failure
+ ****************************************************************************************************************/
+static int DSConnect( URLContext *h, const char *path, const char *hoststr, const char *auth )
+{
+    NetworkMessage *    sendMessage = NULL;
+    NetworkMessage *    recvMessage = NULL;
+    int                 retVal = 0;
+    int                 isConnecting = 1;
+    char *              encCredentials = NULL;
+    ClientConnectMsg *  connectMsg = NULL;
+    int                 channelID = -2;
+    int                 streamType = 0, res = 0, cam = 0;
+    time_t              from = 0, to = 0;
+    int                 rate = 0;
+    vcrMode             playMode = VM_PLAY;
+    char                user[MAX_USER_ID_LENGTH];
+    char                password[MAX_USER_ID_LENGTH];
+
+    /* Initialise the user and password fields */
+    memset( user, 0, sizeof(char) * MAX_USER_ID_LENGTH );
+    memset( password, 0, sizeof(char) * MAX_USER_ID_LENGTH );
+
+    /* Extract the username and password */
+    if( (retVal = GetUserAndPassword( auth, user, password )) == 0 ) {
+        /* Crack the URI to get the control parameters */
+        retVal = CrackURI( path, &streamType, &res, &cam, &from, &to, &rate, &playMode );
+    }
+
+    while( isConnecting && retVal == 0 ) {
+        if( (sendMessage = CreateNetworkMessage( TCP_CLI_CONNECT, channelID )) == NULL ) {
+            if( encCredentials != NULL )
+                av_free( encCredentials );
+
+            return AVERROR(ENOMEM);
+        }
+
+        /* Set up the connection request */
+        /* Set the message body up now */
+        connectMsg = (ClientConnectMsg *)sendMessage->body;
+
+        connectMsg->udpPort = DS_DEFAULT_PORT;
+
+        if( streamType == DS_PLAYBACK_MODE_PLAY )
+            connectMsg->connectType = IMG_PLAY;
+        else
+            connectMsg->connectType = IMG_LIVE;
+
+        if( encCredentials != NULL ) {
+            memcpy( connectMsg->userID, user, strlen(user) );
+            memcpy( connectMsg->accessKey, encCredentials, strlen(encCredentials) );
+        }
+
+        /* Send the message to the server */
+        if( (retVal = SendNetworkMessage( h, sendMessage )) >= 0 ) {
+            /* Receive the response */
+            if( (retVal = ReceiveNetworkMessage( h, &recvMessage )) >= 0 ) {
+                switch( recvMessage->header.messageType ) {
+                    case TCP_SRV_CONNECT_REJECT: { /* We expect this first time */
+                        /* Extract the info we need to encrypt */
+                        SrvConnectRejectMsg     * msg = (SrvConnectRejectMsg *)recvMessage->body;
+
+                        /* What was the reason for the failure? */
+                        if( msg->reason == REJECT_AUTHENTIFICATION_REQUIRED ) {
+                            channelID = recvMessage->header.channelID;
+
+                            /* Encrypt the username / password */
+                            if( strlen( user ) > 0 && strlen( password ) > 0 ) {
+                                if( (encCredentials = EncryptPasswordString( user, password, msg->timestamp, msg->macAddr, msg->appVersion )) == NULL )
+                                    retVal = AVERROR(ENOMEM);
+                            }
+                            else {
+                                /* If we haven't got a user and password string then we have to notify the client */
+                                retVal = ADFFMPEG_DS_ERROR_AUTH_REQUIRED;
+                            }
+                        }
+                        else if( msg->reason == REJECT_AUTHENTIFICATION_INVALID ) { /* Supplied credentials are invalid */
+                            retVal = ADFFMPEG_DS_ERROR_INVALID_CREDENTIALS;
+                        }
+                        else { /* Fail */
+                            retVal = AVERROR(EIO);
+                            isConnecting = 0;
+                        }
+                    }
+                    break;
+
+                    case TCP_SRV_FEATURE_CONNECT_REPLY:
+                    case TCP_SRV_CONNECT_REPLY: {
+                        /* Great, we're connected - we just need to send an IMG_LIVE_REQUEST to the server to start the streaming */
+                        NetworkMessage *        imgRequestMsg = NULL;
+
+                        if( streamType == DS_PLAYBACK_MODE_LIVE ) {
+                            if( (imgRequestMsg = CreateNetworkMessage( TCP_CLI_IMG_LIVE_REQUEST, channelID )) ) {
+                                CliImgLiveRequestMsg *      msgBody = (CliImgLiveRequestMsg *)imgRequestMsg->body;
+
+                                msgBody->cameraMask = cam;
+                                msgBody->resolution = res;
+                            }
+                        }
+                        else if( streamType == DS_PLAYBACK_MODE_PLAY ) {
+                            if( (imgRequestMsg = CreateNetworkMessage( TCP_CLI_IMG_PLAY_REQUEST, channelID )) ) {
+                                CliImgPlayRequestMsg *      msgBody = (CliImgPlayRequestMsg *)imgRequestMsg->body;
+
+                                msgBody->cameraMask = cam;
+                                msgBody->fromTime = TimeTolong64( from );
+                                msgBody->toTime = TimeTolong64( to );
+                                msgBody->pace = rate;
+                                msgBody->mode = playMode;
+                            }
+                        }
+                        else
+                            retVal = AVERROR(EIO);
+
+                        if( retVal == 0 ) {
+                            /* Fire the request message off */
+                            retVal = SendNetworkMessage( h, imgRequestMsg );
+                        }
+
+                        isConnecting = 0;
+                    }
+                    break;
+
+                    /* Anything other than a connect reply is failure */
+                    default: {
+                        retVal = -1;
+                        isConnecting = 0;
+                    }
+                    break;
+                }
+            }
+            else
+                isConnecting = 0;
+        }
+        else
+            isConnecting = 0;
+
+
+        /* We can release the messages now */
+        FreeNetworkMessage( &sendMessage );
+        FreeNetworkMessage( &recvMessage );
+    }
+
+    return retVal;
+}
+
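+/* Split an "auth" string of the form "user:password" into its two components */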
+static int GetUserAndPassword( const char * auth, char *user, char *password  )
+{
+    int             retVal = 0;
+    char *          authStr = NULL;
+    char *          token = NULL;
+    const char *    delim = ":";
+    int             count = 0;
+
+    if( auth != NULL ) {
+        if( strcmp( auth, "" ) != 0 ) {
+            retVal = AVERROR_INVALIDDATA;
+
+            /* Copy the string as strtok needs to modify as it goes */
+            if( (authStr = (char*)av_mallocz( strlen(auth) + 1 )) != NULL ) {
+                strcpy( authStr, auth );
+
+                /* Now split it into the user and password */
+                token = strtok( authStr, delim );
+
+                while( token != NULL ) {
+                    count++;
+
+                    if( count == 1 ) {
+                        if( strlen(token) <= MAX_USER_ID_LENGTH ) {
+                            strcpy( user, token );
+
+                            if( strlen(token) < MAX_USER_ID_LENGTH )
+                                user[strlen(token)] = '\0';
+                        }
+                    }
+                    else if( count == 2 ) {
+                        /* TODO: Verify whether checking against the length of the max user id is ok. Ultimately, the password is hashed before transmission
+                           so the length is not imperative here. Maybe server defines a maximum length though? */
+                        if( strlen(token) <= MAX_USER_ID_LENGTH ) {
+                            strcpy( password, token );
+
+                            if( strlen(token) < MAX_USER_ID_LENGTH )
+                                password[strlen(token)] = '\0';
+
+                            retVal = 0;
+                        }
+                    }
+                    else { /* There shouldn't be more than 2 tokens, better flag an error */
+                        retVal = AVERROR_INVALIDDATA;
+                    }
+
+                    token = strtok( NULL, delim );
+                }
+
+                av_free( authStr );
+                authStr = NULL;
+            }
+            else
+                retVal = AVERROR(ENOMEM);
+        }
+    }
+
+    return retVal;
+}
+
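+/* Parse the control parameters out of the URL path. Parameters are given as
+ * "name=value" pairs separated by '?' or '&', e.g. (illustrative values only):
+ *   ?stream=live&cam=1&res=hi
+ *   ?stream=play&cam=2&from=1230768000&to=1230771600&rate=100&mode=play
+ */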
+static int CrackURI( const char *path, int *streamType, int *res, int *cam, time_t *from, time_t *to, int *rate, vcrMode *playMode )
+{
+    int             retVal = AVERROR_INVALIDDATA;
+    const char *    delim = "?&";
+    char *          pathStr = NULL;
+    char *          token = NULL;
+
+    *res = DS_RESOLUTION_HI;
+    *streamType = DS_PLAYBACK_MODE_LIVE;
+    *cam = 1;
+
+    if( path != NULL ) {
+        /* Take a copy of the path string so that strtok can modify it */
+        if( (pathStr = (char*)av_mallocz( strlen(path) + 1 )) != NULL ) {
+            strcpy( pathStr, path );
+
+            retVal = 0;
+
+            token = strtok( pathStr, delim );
+
+            while( token != NULL ) {
+                char *          name = NULL;
+                char *          value = NULL;
+
+                /* Now look inside this token string for a name value pair separated by an = */
+                if( (value = strstr(token, "=")) != NULL ) {
+                    value++;
+
+                    name = token;
+                    name[(value-1) - token] = '\0';
+
+                    if( name != NULL && value != NULL ) {
+                        /* Which parameter have we got? */
+                        if( av_strcasecmp(name, "res" ) == 0 ) {
+                            if( strcmp( value, "hi" ) == 0 )
+                                *res = DS_RESOLUTION_HI;
+                            else if( strcmp( value, "med" ) == 0 )
+                                *res = DS_RESOLUTION_MED;
+                            else if( strcmp( value, "low" ) == 0 )
+                                *res = DS_RESOLUTION_LOW;
+                        }
+                        else if( av_strcasecmp(name, "stream" ) == 0 ) {
+                            if( av_strcasecmp(value, "live" ) == 0 )
+                                *streamType = DS_PLAYBACK_MODE_LIVE;
+                            else if( av_strcasecmp(value, "play" ) == 0 )
+                                *streamType = DS_PLAYBACK_MODE_PLAY;
+                        }
+                        else if( av_strcasecmp(name, "cam" ) == 0 ) {
+                            *cam = atoi( value );
+                        }
+                        else if( av_strcasecmp(name, "from" ) == 0 ) {
+                            *from = (time_t)atoi( value );
+                        }
+                        else if( av_strcasecmp(name, "to" ) == 0 ) {
+                            *to = (time_t)atoi( value );
+                        }
+                        else if( av_strcasecmp(name, "rate" ) == 0 ) {
+                            *rate = atoi( value );
+                        }
+                        else if( av_strcasecmp(name, "mode" ) == 0 ) {
+                            if( av_strcasecmp(value, "play" ) == 0 )
+                                *playMode = VM_PLAY;
+                            else if( av_strcasecmp(value, "rwd" ) == 0 )
+                                *playMode = VM_VIS_REW;
+                            else if( av_strcasecmp(value, "fwd" ) == 0 )
+                                *playMode = VM_VIS_FF;
+                            else if( av_strcasecmp(value, "stop" ) == 0 )
+                                *playMode = VM_STOP;
+                            else if( av_strcasecmp(value, "shuttle" ) == 0 )
+                                *playMode = VM_PLAY_SHUTTLE;
+                            else if( av_strcasecmp(value, "finish" ) == 0 )
+                                *playMode = VM_FINISH;
+                        }
+                    }
+                }
+
+                token = strtok( NULL, delim );
+            }
+
+            av_free( pathStr );
+            pathStr = NULL;
+        }
+        else
+            retVal = AVERROR(ENOMEM);
+    }
+
+    return retVal;
+}
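+
+/* Illustrative example only (not taken from any protocol document): a URL such as
+ *     dm://unit-address/?cam=2&res=med&stream=play&from=1230768000&to=1230771600&rate=100&mode=play
+ * would leave *cam=2, *res=DS_RESOLUTION_MED, *streamType=DS_PLAYBACK_MODE_PLAY, the given
+ * from/to epoch seconds, *rate=100 and *playMode=VM_PLAY; unrecognised parameters are ignored. */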
+
+/****************************************************************************************************************
+ * Function: ReceiveNetworkMessage
+ * Desc: Reads the next NetworkMessage from the connection. The new message is allocated and must be released
+ *       by the caller with FreeNetworkMessage()
+ * Params:
+ *  h - Pointer to URLContext struct used to store all connection info associated with this connection
+ *  message - Address of pointer to network message. This will be allocated by this function
+ * Return:
+ *   0 on success, non 0 on failure
+ ****************************************************************************************************************/
+static int ReceiveNetworkMessage( URLContext *h, NetworkMessage **message )
+{
+    int     retVal = 0;
+
+    /* Allocate a new NetworkMessage struct */
+    *message = av_mallocz( sizeof(NetworkMessage) );
+
+    if( *message == NULL )
+        return AVERROR(ENOMEM);
+
+    if( (retVal = ReadNetworkMessageHeader( h, &(*message)->header )) != 0 ) {
+        FreeNetworkMessage( message );
+        return retVal;
+    }
+
+    if( (retVal = ReadNetworkMessageBody( h, *message )) != 0 ) {
+        FreeNetworkMessage( message );
+        return retVal;
+    }
+
+    return 0;
+}
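+
+/* Sketch of the intended usage (the caller owns the returned message):
+ *     NetworkMessage *msg = NULL;
+ *     if( ReceiveNetworkMessage( h, &msg ) == 0 ) {
+ *         ...inspect msg->header / msg->body...
+ *         FreeNetworkMessage( &msg );
+ *     }
+ * FreeNetworkMessage() taking the address of the pointer matches its use in the error paths above. */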
+
+static int DSReadBuffer( URLContext *h, uint8_t *buffer, int size )
+{
+    int         ret;
+    int         totalRead = 0;
+
+    if( buffer != NULL && size > 0 ) {
+        while( size - totalRead != 0 ) {
+            ret = DSRead( h, buffer, size - totalRead );
+
+            if( ret < 0 )
+                return ret;
+            else
+                totalRead += ret;
+        }
+    }
+
+    return totalRead;
+}
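+
+/* DSReadBuffer() keeps calling DSRead() until exactly 'size' bytes have arrived, so the
+ * fixed-size field reads below either complete in full or return an error; a transport that
+ * persistently returned 0 bytes would keep this loop spinning. */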
+
+static int ReadNetworkMessageHeader( URLContext *h, MessageHeader *header )
+{
+    /* Read the header in a piece at a time... */
+    if( DSReadBuffer( h, (uint8_t *)&header->magicNumber, sizeof(unsigned long) ) != sizeof(unsigned long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&header->length, sizeof(unsigned long) ) != sizeof(unsigned long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&header->channelID, sizeof(long) ) != sizeof(long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&header->sequence, sizeof(long) ) != sizeof(long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&header->messageVersion, sizeof(unsigned long) ) != sizeof(unsigned long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&header->checksum, sizeof(long) ) != sizeof(long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&header->messageType, sizeof(long) ) != sizeof(long) )
+        return AVERROR(EIO);
+
+    /* Now adjust the endianess */
+    NToHMessageHeader( header );
+
+    return 0;
+}
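+
+/* On the wire the header is seven big-endian 32-bit words (magicNumber, length, channelID,
+ * sequence, messageVersion, checksum, messageType) - the 28 bytes declared as
+ * SIZEOF_MESSAGE_HEADER_IO - which is why it is read field by field rather than with a
+ * single sizeof(MessageHeader) read. */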
+
+static void NToHMessageHeader( MessageHeader *header )
+{
+    if( header ) {
+        header->magicNumber = av_be2ne32(header->magicNumber);
+        header->length = av_be2ne32(header->length);
+        header->channelID = av_be2ne32(header->channelID);
+        header->sequence = av_be2ne32(header->sequence);
+        header->messageVersion = av_be2ne32(header->messageVersion);
+        header->checksum = av_be2ne32(header->checksum);
+        header->messageType = av_be2ne32(header->messageType);
+    }
+}
+
+static int ReadNetworkMessageBody( URLContext * h, NetworkMessage *message )
+{
+    int         retVal = 0;
+
+    if( message != NULL && message->body == NULL ) {
+        /* Read based on the type of message we have */
+        switch( message->header.messageType ) {
+            case TCP_SRV_CONNECT_REJECT: {
+                retVal = ReadConnectRejectMessage( h, message );
+            }
+            break;
+
+            case TCP_SRV_FEATURE_CONNECT_REPLY: {
+                retVal = ReadFeatureConnectReplyMessage( h, message );
+            }
+            break;
+
+            case TCP_SRV_CONNECT_REPLY: {
+                retVal = ReadConnectReplyMessage( h, message );
+            }
+            break;
+
+            default:
+                /* We shouldn't get into this state so we'd better return an error here... */
+                retVal = AVERROR(EIO);
+                break;
+        }
+
+    }
+
+    return retVal;
+}
+
+static int ReadConnectRejectMessage( URLContext * h, NetworkMessage *message )
+{
+    SrvConnectRejectMsg *       bodyPtr = NULL;
+
+    /* Allocate the message body */
+    if( (message->body = av_malloc( sizeof(SrvConnectRejectMsg) )) == NULL )
+        return AVERROR(ENOMEM);
+
+    /* Now read from the stream into the message */
+    bodyPtr = (SrvConnectRejectMsg *)message->body;
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->reason, sizeof(long) ) != sizeof(long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->timestamp, sizeof(long) ) != sizeof(long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)bodyPtr->macAddr, MAC_ADDR_LENGTH ) != MAC_ADDR_LENGTH )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->appVersion, sizeof(unsigned long) ) != sizeof(unsigned long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->minViewerVersion, sizeof(unsigned long) ) != sizeof(unsigned long) )
+        return AVERROR(EIO);
+
+    /* Correct the byte ordering */
+    bodyPtr->reason = av_be2ne32(bodyPtr->reason);
+    bodyPtr->timestamp = av_be2ne32(bodyPtr->timestamp);
+    bodyPtr->appVersion = av_be2ne32(bodyPtr->appVersion);
+    bodyPtr->minViewerVersion = av_be2ne32(bodyPtr->minViewerVersion);
+
+    return 0;
+}
+
+static int ReadConnectReplyMessage( URLContext * h, NetworkMessage *message )
+{
+    SrvConnectReplyMsg *        bodyPtr = NULL;
+
+    /* Allocate memory in which to store the message body */
+    if( (message->body = av_malloc( sizeof(SrvConnectReplyMsg) )) == NULL )
+        return AVERROR(ENOMEM);
+
+    bodyPtr = (SrvConnectReplyMsg *)message->body;
+
+    /* Now read the message body, a field at a time */
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->numCameras, sizeof(long) ) != sizeof(long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->viewableCamMask, sizeof(long) ) != sizeof(long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->telemetryCamMask, sizeof(long) ) != sizeof(long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->failedCamMask, sizeof(long) ) != sizeof(long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->maxMsgInterval, sizeof(long) ) != sizeof(long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->timestamp, sizeof(int64_t) ) != sizeof(int64_t) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)bodyPtr->cameraTitles, (16 * 28) ) != (16 * 28) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->unitType, sizeof(long) ) != sizeof(long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->applicationVersion, sizeof(unsigned long) ) != sizeof(unsigned long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->videoStandard, sizeof(long) ) != sizeof(long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)bodyPtr->macAddr, MAC_ADDR_LENGTH ) != MAC_ADDR_LENGTH )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)bodyPtr->unitName, UNIT_NAME_LENGTH ) != UNIT_NAME_LENGTH )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->numFixedRealms, sizeof(long) ) != sizeof(long) )
+        return AVERROR(EIO);
+
+    bodyPtr->numFixedRealms = av_be2ne32(bodyPtr->numFixedRealms);
+
+    if( DSReadBuffer( h, (uint8_t *)bodyPtr->realmFlags, (sizeof(unsigned long) * bodyPtr->numFixedRealms) ) != (sizeof(unsigned long) * bodyPtr->numFixedRealms) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->minimumViewerVersion, sizeof(unsigned long) ) != sizeof(unsigned long) )
+        return AVERROR(EIO);
+
+    /* Correct the byte ordering */
+    bodyPtr->numCameras = av_be2ne32(bodyPtr->numCameras);
+    bodyPtr->viewableCamMask = av_be2ne32(bodyPtr->viewableCamMask);
+    bodyPtr->telemetryCamMask = av_be2ne32(bodyPtr->telemetryCamMask);
+    bodyPtr->failedCamMask = av_be2ne32(bodyPtr->failedCamMask);
+    bodyPtr->maxMsgInterval = av_be2ne32(bodyPtr->maxMsgInterval);
+    bodyPtr->unitType = av_be2ne32(bodyPtr->unitType);
+    bodyPtr->applicationVersion = av_be2ne32(bodyPtr->applicationVersion);
+    bodyPtr->videoStandard = av_be2ne32(bodyPtr->videoStandard);
+    bodyPtr->numFixedRealms = av_be2ne32(bodyPtr->numFixedRealms);
+    bodyPtr->minimumViewerVersion = av_be2ne32(bodyPtr->minimumViewerVersion);
+
+    return 0;
+}
+
+static int ReadFeatureConnectReplyMessage( URLContext * h, NetworkMessage *message )
+{
+    SrvFeatureConnectReplyMsg *        bodyPtr = NULL;
+
+    if( (message->body = av_malloc( sizeof(SrvFeatureConnectReplyMsg) )) == NULL )
+        return AVERROR(ENOMEM);
+
+    bodyPtr = (SrvFeatureConnectReplyMsg *)message->body;
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->numCameras, sizeof(long) ) != sizeof(long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->viewableCamMask, sizeof(long) ) != sizeof(long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->telemetryCamMask, sizeof(long) ) != sizeof(long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->failedCamMask, sizeof(long) ) != sizeof(long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->maxMsgInterval, sizeof(long) ) != sizeof(long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->timestamp, sizeof(int64_t) ) != sizeof(int64_t) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)bodyPtr->cameraTitles, (16 * 28) ) != (16 * 28) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->unitType, sizeof(long) ) != sizeof(long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->applicationVersion, sizeof(unsigned long) ) != sizeof(unsigned long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->videoStandard, sizeof(long) ) != sizeof(long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)bodyPtr->macAddr, MAC_ADDR_LENGTH ) != MAC_ADDR_LENGTH )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)bodyPtr->unitName, UNIT_NAME_LENGTH ) != UNIT_NAME_LENGTH )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->minimumViewerVersion, sizeof(unsigned long) ) != sizeof(unsigned long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->unitFeature01, sizeof(unsigned long) ) != sizeof(unsigned long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->unitFeature02, sizeof(unsigned long) ) != sizeof(unsigned long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->unitFeature03, sizeof(unsigned long) ) != sizeof(unsigned long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->unitFeature04, sizeof(unsigned long) ) != sizeof(unsigned long) )
+        return AVERROR(EIO);
+
+    if( DSReadBuffer( h, (uint8_t *)&bodyPtr->numFixedRealms, sizeof(long) ) != sizeof(long) )
+        return AVERROR(EIO);
+
+    bodyPtr->numFixedRealms = av_be2ne32(bodyPtr->numFixedRealms);
+
+    if( DSReadBuffer( h, (uint8_t *)bodyPtr->realmFlags, (sizeof(unsigned long) * bodyPtr->numFixedRealms) ) != (sizeof(unsigned long) * bodyPtr->numFixedRealms) )
+        return AVERROR(EIO);
+
+    /* Correct the byte ordering */
+    bodyPtr->numCameras = av_be2ne32(bodyPtr->numCameras);
+    bodyPtr->viewableCamMask = av_be2ne32(bodyPtr->viewableCamMask);
+    bodyPtr->telemetryCamMask = av_be2ne32(bodyPtr->telemetryCamMask);
+    bodyPtr->failedCamMask = av_be2ne32(bodyPtr->failedCamMask);
+    bodyPtr->maxMsgInterval = av_be2ne32(bodyPtr->maxMsgInterval);
+    bodyPtr->unitType = av_be2ne32(bodyPtr->unitType);
+    bodyPtr->applicationVersion = av_be2ne32(bodyPtr->applicationVersion);
+    bodyPtr->videoStandard = av_be2ne32(bodyPtr->videoStandard);
+    bodyPtr->minimumViewerVersion = av_be2ne32(bodyPtr->minimumViewerVersion);
+    bodyPtr->unitFeature01 = av_be2ne32(bodyPtr->unitFeature01);
+    bodyPtr->unitFeature02 = av_be2ne32(bodyPtr->unitFeature02);
+    bodyPtr->unitFeature03 = av_be2ne32(bodyPtr->unitFeature03);
+    bodyPtr->unitFeature04 = av_be2ne32(bodyPtr->unitFeature04);
+
+    return 0;
+}
+
+static void HToNMessageHeader( MessageHeader *header, unsigned char *buf )
+{
+    MessageHeader           tempHeader;
+    int                     bufIdx = 0;
+
+    if( header != NULL && buf != NULL ) {
+        /* Set whatever header values we can in here */
+        tempHeader.magicNumber = av_be2ne32(header->magicNumber);
+        memcpy( &buf[bufIdx], &tempHeader.magicNumber, sizeof(unsigned long) );
+        bufIdx += sizeof(unsigned long);
+
+        tempHeader.length = av_be2ne32(header->length);
+        memcpy( &buf[bufIdx], &tempHeader.length, sizeof(unsigned long) );
+        bufIdx += sizeof(unsigned long);
+
+        tempHeader.channelID = av_be2ne32(header->channelID);
+        memcpy( &buf[bufIdx], &tempHeader.channelID, sizeof(long) );
+        bufIdx += sizeof(long);
+
+        tempHeader.sequence = av_be2ne32(header->sequence); /* Currently unsupported at server */
+        memcpy( &buf[bufIdx], &tempHeader.sequence, sizeof(long) );
+        bufIdx += sizeof(long);
+
+        tempHeader.messageVersion = av_be2ne32(header->messageVersion);
+        memcpy( &buf[bufIdx], &tempHeader.messageVersion, sizeof(unsigned long) );
+        bufIdx += sizeof(unsigned long);
+
+        tempHeader.checksum = av_be2ne32(header->checksum); /* As suggested in protocol documentation */
+        memcpy( &buf[bufIdx], &tempHeader.checksum, sizeof(long) );
+        bufIdx += sizeof(long);
+
+        tempHeader.messageType = av_be2ne32(header->messageType);
+        memcpy( &buf[bufIdx], &tempHeader.messageType, sizeof(long) );
+        bufIdx += sizeof(long);
+    }
+}
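+
+/* av_be2ne32() is a byte swap on little-endian hosts and a no-op on big-endian ones, so
+ * applying it here serialises every header field as big-endian for the wire, mirroring
+ * NToHMessageHeader() on the receive path. */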
+
+/****************************************************************************************************************
+ * Function: SendNetworkMessage
+ * Desc: Sends the given message over the connection.
+ * Params:
+ *  h - Pointer to URLContext struct used to store all connection info associated with this connection
+ *  message - Pointer to the message to be sent
+ * Return:
+ *   0 on success, non 0 on failure
+ ****************************************************************************************************************/
+static int SendNetworkMessage( URLContext *h, NetworkMessage *message )
+{
+    unsigned char       messageBuffer[DS_WRITE_BUFFER_SIZE];
+    int                 bufIdx = SIZEOF_MESSAGE_HEADER_IO;
+
+    /* 0 the buffer */
+    memset( messageBuffer, 0, DS_WRITE_BUFFER_SIZE );
+
+    /* Write the header into the buffer */
+    HToNMessageHeader( &message->header, messageBuffer );
+
+    /* Now write the rest of the message to the buffer based on its type */
+    switch( message->header.messageType ) {
+        case TCP_CLI_CONNECT: {
+            ClientConnectMsg        tempMsg;
+
+            tempMsg.udpPort = HTON32(((ClientConnectMsg *)message->body)->udpPort);
+            memcpy( &messageBuffer[bufIdx], &tempMsg.udpPort, sizeof(unsigned long) );
+            bufIdx += sizeof(unsigned long);
+
+            tempMsg.connectType = HTON32(((ClientConnectMsg *)message->body)->connectType);
+            memcpy( &messageBuffer[bufIdx], &tempMsg.connectType, sizeof(long) );
+            bufIdx += sizeof(long);
+
+            memcpy( &messageBuffer[bufIdx], ((ClientConnectMsg *)message->body)->userID, MAX_USER_ID_LENGTH );
+            bufIdx += MAX_USER_ID_LENGTH;
+
+            memcpy( &messageBuffer[bufIdx], ((ClientConnectMsg *)message->body)->accessKey, ACCESS_KEY_LENGTH );
+            bufIdx += ACCESS_KEY_LENGTH;
+        }
+        break;
+
+        case TCP_CLI_IMG_LIVE_REQUEST: {
+            long            temp;
+
+            temp = HTON32( ((CliImgLiveRequestMsg*)message->body)->cameraMask );
+            memcpy( &messageBuffer[bufIdx], &temp, sizeof(long) );
+            bufIdx += sizeof(long);
+
+            temp = HTON32( ((CliImgLiveRequestMsg*)message->body)->resolution );
+            memcpy( &messageBuffer[bufIdx], &temp, sizeof(long) );
+            bufIdx += sizeof(long);
+        }
+        break;
+
+        case TCP_CLI_IMG_PLAY_REQUEST: {
+            long            temp;
+            int64_t          tempBig;
+
+            temp = HTON32( ((CliImgPlayRequestMsg*)message->body)->cameraMask );
+            memcpy( &messageBuffer[bufIdx], &temp, sizeof(long) );
+            bufIdx += sizeof(long);
+
+            temp = HTON32( ((CliImgPlayRequestMsg*)message->body)->mode );
+            memcpy( &messageBuffer[bufIdx], &temp, sizeof(long) );
+            bufIdx += sizeof(long);
+
+            temp = HTON32( ((CliImgPlayRequestMsg*)message->body)->pace );
+            memcpy( &messageBuffer[bufIdx], &temp, sizeof(long) );
+            bufIdx += sizeof(long);
+
+            tempBig = HTON64( ((CliImgPlayRequestMsg*)message->body)->fromTime );
+            memcpy( &messageBuffer[bufIdx], &tempBig, sizeof(int64_t) );
+            bufIdx += sizeof(int64_t);
+
+            tempBig = HTON64( ((CliImgPlayRequestMsg*)message->body)->toTime );
+            memcpy( &messageBuffer[bufIdx], &tempBig, sizeof(int64_t) );
+            bufIdx += sizeof(int64_t);
+        }
+        break;
+    }
+
+    /* Write to output stream - remember to add on the 4 bytes for the magic number which precedes the length */
+    return DSWrite( h, messageBuffer, message->header.length + sizeof(unsigned long) );
+}
+
+static int64_t TimeTolong64( time_t time )
+{
+    int64_t          timeOut = 0;
+    unsigned short  flags = 0;
+    unsigned short  ms = 0;
+    uint8_t *       bufPtr = NULL;
+
+    /* For now, we're saying we don't know the time zone */
+    SET_FLAG_ZONE_UNKNOWN(flags);
+
+    bufPtr = (uint8_t*)&timeOut;
+
+    memcpy( bufPtr, &flags, sizeof(unsigned short) );
+    bufPtr += sizeof(unsigned short);
+
+    memcpy( bufPtr, &ms, sizeof(unsigned short) );
+    bufPtr += sizeof(unsigned short);
+
+    memcpy( bufPtr, &time, sizeof(time_t) );
+
+    return timeOut;
+}
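+
+/* The resulting 64-bit value is a byte-wise packing: bytes 0-1 carry the flags word (zone
+ * unknown here), bytes 2-3 the millisecond count and the remaining bytes the time_t seconds;
+ * note the memcpy of sizeof(time_t) assumes a 32-bit time_t. */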
+
+
+URLProtocol ff_dm_protocol = {
+    .name                = "dm",
+    .url_open            = DSOpen,
+    .url_read            = DSRead,
+    .url_write           = DSWrite,
+    .url_close           = DSClose,
+};
diff --git a/libavformat/ds.h b/libavformat/ds.h
new file mode 100644
index 0000000000..c152d1a061
--- /dev/null
+++ b/libavformat/ds.h
@@ -0,0 +1,135 @@
+/*
+ * Protocol for AD-Holdings Digital Sprite stream format
+ * Copyright (c) 2006-2010 AD-Holdings plc
+ * Modified for FFmpeg by Tom Needham <06needhamt at gmail.com> (2018)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFORMAT_DS_H
+#define AVFORMAT_DS_H
+
+#include "ds_exports.h"
+
+/* -------------------------------------- Constants -------------------------------------- */
+#define DS_HEADER_MAGIC_NUMBER                      0xFACED0FF
+
+
+/* -------------------------------------- Structures/types -------------------------------------- */
+typedef enum {
+    INVALID_MESSAGE_TYPE,                    /* 0 */
+    TCP_CLI_CONNECT,                         /* 1 */     /* Initial connection */
+    TCP_SRV_CONNECT_REJECT,                  /* 2 */     /* Connection rejected */
+    TCP_SRV_CONNECT_REPLY,                   /* 3 */     /* Connect accepted - returns DSL configuration */
+    TCP_CLI_IMG_LIVE_REQUEST,                /* 4 */     /* Request an image stream */
+    TCP_CLI_IMG_PLAY_REQUEST,                /* 5 */     /* Request a playback stream */
+    TCP_SRV_IMG_DATA,                        /* 6 */     /* Image stream data */
+    TCP_SRV_NUDGE,                           /* 7 */     /* Nudge */
+    TCP_SRV_DISCONNECT,                      /* 8 */     /* Server dropping connection - ie: unit shutting down etc. */
+    UDP_CLI_IMG_CONTROL_REQUEST,             /* 9 */     /* Request to change image stream ie: camera change */
+    UDP_SRV_IMG_CONTROL_REPLY,               /* A */     /* Acknowledgement of request */
+    UDP_CLI_SYSTEM_STATUS_REQUEST,           /* B */     /* Request for update of DSL configuration */
+    UDP_SRV_SYSTEM_STATUS,                   /* C */     /* New DSL configuration - may be sent by server if menus change */
+    UDP_CLI_TELEMETRY_REQUEST,               /* D */     /* Telemetry start request */
+    UDP_CLI_TELEMETRY_COMMAND,               /* E */     /* Telemetry command request */
+    UDP_CLI_TELEMETRY_STATUS_REQUEST,        /* F */     /* Request for telemetry status update */
+    UDP_SRV_TELEMETRY_STATUS,                /* 10 */    /* Ack with new telemetry status */
+    UDP_CLI_DISCONNECT,                      /* 11 */    /* Polite message notifying of client disconnect */
+    UDP_CLI_DISCOVER,                        /* 12 */    /* Broadcast to identify units on subnet */
+    UDP_SRV_HELLO,                           /* 13 */    /* Reply containing basic unit information */
+    UDP_CLI_MCI_COMMAND,                     /* 14 */    /* Send MCI command */
+    UDP_SRV_MCI_REPLY,                       /* 15 */    /* Reply from MCI command */
+    UDP_SRV_MSG_FAIL,                        /* 16 */    /* UDP message recognition failure - returns status code */
+    UDP_CLI_EVENT_FILTER_REQUEST,            /* 17 */    /* Request to initialise an event filter */
+    UDP_SRV_EVENT_FILTER_REPLY,              /* 18 */    /* Filter request either accepted, or not accepted */
+    UDP_CLI_EVENTS_REQUEST,                  /* 19 */    /* Request to apply filter and retrieve events */
+    UDP_SRV_EVENTS_REPLY,                    /* 1A */    /* Reply containing filtered event */
+    UDP_CLI_ADMIN_REQUEST,                   /* 1B */    /* Request from user for admin */
+    UDP_SRV_ADMIN_REPLY,                     /* 1C */    /* Reply - contains success or fail reason */
+    UDP_CLI_USER_INFO_REQUEST,               /* 1D */    /* Request for information on a specific user */
+    UDP_SRV_USER_INFO_REPLY,                 /* 1E */    /* Information on the requested user */
+    UDP_CLI_ADD_USER_REQUEST,                /* 1F */    /* Request to add a user to the system */
+    UDP_SRV_ADD_USER_REPLY,                  /* 20 */    /* Reply to indicate success or failure */
+    UDP_CLI_DELETE_USER_REQUEST,             /* 21 */    /* Request to delete a user from the system */
+    UDP_SRV_DELETE_USER_REPLY,               /* 22 */    /* Reply to delete user request */
+    UDP_CLI_CHANGE_PASSWORD_REQUEST,         /* 23 */    /* Change password */
+    UDP_SRV_CHANGE_PASSWORD_REPLY,           /* 24 */    /* Reply to request */
+    UDP_CLI_UPDATE_ACCESS_RIGHTS_REQUEST,    /* 25 */    /* Request to change users realms */
+    UDP_SRV_UPDATE_ACCESS_RIGHTS_REPLY,      /* 26 */    /* Reply to request */
+    TCP_CLI_CHANGE_PASSWORD,                 /* 27 */    /* Send password change */
+    UDP_CLI_CHANGE_OWN_PASSWORD_REQUEST,     /* 28 */    /* Change own password */
+    UDP_SRV_CHANGE_OWN_PASSWORD_REPLY,       /* 29 */    /* Reply to request */
+    UDP_CLI_EMAIL_REQUEST,                   /* 2A */    /* Request for email data */
+    UDP_SRV_EMAIL_REPLY,                     /* 2B */    /* Reply with email data */
+    UDP_CLI_CHANGE_EMAIL_REQUEST,            /* 2C */    /* Request to set new email data */
+    UDP_SRV_CHANGE_EMAIL_REPLY,              /* 2D */    /* Reply to setting new email data */
+    UDP_CLI_CHANGE_SESSION_REQUEST,          /* 2E */    /* Request to logon to different unit */
+    TCP_CLI_IMG_DATA_REQUEST,                /* 2F */    /* Request from remote to grab image data */
+    TCP_SRV_DATA_DATA,                       /* 30 */    /* Us sending requested images as data */
+    TCP_SRV_NO_DATA,                         /* 31 */    /* Sent when finished sending data */
+    UDP_CLI_ABORT_DATA_REQUEST,              /* 32 */    /* Cancel data transfer */
+    UDP_CLI_EVENT_DATA_REQUEST,              /* 33 */    /* Request to obtain end time of an event */
+    UDP_SRV_EVENT_DATA_REPLY,                /* 34 */    /* Reply */
+    TCP_CLI_CONTROL_CONNECT,                 /* 35 */    /* Initial connection for TCP control link */
+    TCP_SRV_CONTROL_CONNECT_REJECT,          /* 36 */
+    TCP_SRV_CONTROL_CONNECT_REPLY,           /* 37 */
+    UDP_CLI_SERVICE_CHECK,                   /* 38 */    /* Sent to check if UDP service working */
+    UDP_SRV_SERVICE_CHECK_REPLY,             /* 39 */
+    UDP_CLI_DRIVE_DETAIL_REQUEST,            /* 3A */    /* Request for hard drive details */
+    UDP_SRV_DRIVE_DETAIL_REPLY,              /* 3B */    /* Reply with data */
+    UDP_CLI_DRIVE_SMART_REQUEST,             /* 3C */    /* Request for hard drive S.M.A.R.T. details */
+    UDP_SRV_DRIVE_SMART_REPLY,               /* 3D */    /* Reply with S.M.A.R.T. data */
+    UDP_CLI_DRIVE_LOG_REQUEST,               /* 3E */    /* Request for hard drive log details */
+    UDP_SRV_DRIVE_LOG_REPLY,                 /* 3F */    /* Reply with log data */
+    UDP_CLI_DRIVE_TEST_REQUEST,              /* 40 */    /* Request for hard drive offline test */
+    UDP_SRV_DRIVE_TEST_REPLY,                /* 41 */    /* Reply with confirmation of test */
+    TCP_SRV_FEATURE_CONNECT_REPLY,           /* 42 */    /* Connect accepted - returns DSL configuration */
+    TCP_CLI_ALM_CONNECT,                     /* 43 */    /* Initial alarm connection to PC */
+    TCP_SRV_ALM_REJECT,                      /* 44 */    /* Reject message with reasons: not registered, auth required, invalid password, busy etc. */
+    TCP_SRV_ALM_ACCEPT,                      /* 45 */    /* Client connection accepted - send an alarm msg */
+    TCP_CLI_ALM_MSG,                         /* 46 */    /* Alarm details */
+    TCP_SRV_ALM_ACK,                         /* 47 */    /* Server ack of an alarm message */
+    UDP_CLI_CONFIG_REQUEST,                  /* 48 */    /* Request name/value pairs from unit Get/Send */
+    UDP_SRV_CONFIG_REJECT,                   /* 49 */    /* Server denied access to config */
+    UDP_CLI_CONFIG_ITEM,                     /* 4A */    /* Has item x of y to determine missing and last item */
+    UDP_SRV_CONFIG_ITEM,                     /* 4B */
+    UDP_CLI_RELAY_REQUEST,                   /* 4C */
+    UDP_SRV_RELAY_REPLY,                     /* 4D */
+    UDP_CLI_CHANGE_ACTIVE_FEATURES,          /* 4E */
+    UDP_SRV_ACTIVE_FEATURES_REPLY,           /* 4F */
+    UDP_CLI_POS_FILTER_REQUEST,              /* 50 */    /* POS keyword search filter */
+    UDP_SRV_POS_TICK,                        /* 51 */    /* Sent during search to say still searching */
+    UDP_SRV_POS_FILTER_REPLY,                /* 52 */    /* Reply to filter */
+    UDP_CLI_POS_LINES_REQUEST,               /* 53 */    /* Request for POS lines */
+    UDP_SRV_POS_LINES_REPLY,                 /* 54 */    /* Reply with POS matches */
+    UDP_CLI_POS_DATA_REQUEST,                /* 55 */    /* Request for info on a specific POS event */
+    UDP_SRV_POS_DATA_REPLY,                  /* 56 */    /* Replies with start & end time etc */
+    NET_NUMBER_OF_MESSAGE_TYPES              /* 57 */    /* ALWAYS KEEP AS LAST ITEM */
+} ControlMessageTypes;
+
+typedef struct _messageHeader {
+    unsigned long       magicNumber;
+    unsigned long       length;
+    long                channelID;
+    long                sequence;
+    unsigned long       messageVersion;
+    long                checksum;
+    long                messageType;
+} MessageHeader;
+#define SIZEOF_MESSAGE_HEADER_IO                28      /* Size in bytes of the MessageHeader structure. Can't use sizeof to read/write one of these to network as structure packing may differ based on platform */
+
+#endif /* AVFORMAT_DS_H */
diff --git a/libavformat/ds_exports.h b/libavformat/ds_exports.h
new file mode 100644
index 0000000000..7cb845898b
--- /dev/null
+++ b/libavformat/ds_exports.h
@@ -0,0 +1,173 @@
+/*
+ * Data exportable to clients for AD-Holdings data types
+ * Copyright (c) 2006-2010 AD-Holdings plc
+ * Modified for FFmpeg by Tom Needham <06needhamt at gmail.com> (2018)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFORMAT_DS_EXPORTS_H
+#define AVFORMAT_DS_EXPORTS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+
+struct NetVuPicture {
+    uint16_t src_pixels;        ///< Input image size (horizontal)
+    uint16_t src_lines;         ///< Input image size (vertical)
+    uint16_t target_pixels;     ///< Output image size (horizontal)
+    uint16_t target_lines;      ///< Output image size (vertical)
+    uint16_t pixel_offset;      ///< Image start offset (horizontal)
+    uint16_t line_offset;       ///< Image start offset (vertical)
+};
+
+struct NetVuImageData {
+    uint32_t version;           ///< structure version number
+
+    /** mode: in PIC_REVISION 0 this was the DFT style FULL_HI etc
+     *        in PIC_REVISION 1 this is used to specify AD or JFIF format image
+     */
+    int32_t mode;
+    int32_t cam;                ///< camera number
+    int32_t vid_format;         ///< 422 or 411
+    uint32_t start_offset;      ///< start of picture
+    int32_t size;               ///< size of image
+    int32_t max_size;           ///< maximum size allowed
+    int32_t target_size;        ///< size wanted for compression
+    int32_t factor;             ///< Q factor
+    uint32_t alm_bitmask_hi;    ///< High 32 bits of the alarm bitmask
+    int32_t status;             ///< status of last action performed on picture
+    uint32_t session_time;      ///< playback time of image
+    uint32_t milliseconds;      ///< sub-second count for playback speed control
+    char res[4];                ///< picture size
+    char title[31];             ///< camera title
+    char alarm[31];             ///< alarm text - title, comment etc
+    struct NetVuPicture format; ///< NOTE: Do not assign to a pointer due to CW-alignment
+    char locale[30];            ///< Timezone name
+    int32_t utc_offset;         ///< Timezone difference in minutes
+    uint32_t alm_bitmask;
+};
+#define NetVuImageDataHeaderSize 168
+
+struct NetVuAudioData {
+    uint32_t            version;
+    int32_t             mode;
+    int32_t             channel;
+    int32_t             sizeOfAdditionalData;
+    int32_t             sizeOfAudioData;
+    uint32_t            seconds;
+    uint32_t            msecs;
+    unsigned char *     additionalData;
+};
+#define NetVuAudioDataHeaderSize (28 + sizeof(unsigned char *))
+
+#define ID_LENGTH                       8
+#define NUM_ACTIVITIES                  8
+#define CAM_TITLE_LENGTH                24
+#define ALARM_TEXT_LENGTH               24
+
+struct DMImageData {
+    char            identifier[ID_LENGTH];
+    unsigned long   jpegLength;
+    int64_t         imgSeq;
+    int64_t         imgTime;
+    unsigned char   camera;
+    unsigned char   status;
+    unsigned short  activity[NUM_ACTIVITIES];
+    unsigned short  QFactor;
+    unsigned short  height;
+    unsigned short  width;
+    unsigned short  resolution;
+    unsigned short  interlace;
+    unsigned short  subHeaderMask;
+    char            camTitle[CAM_TITLE_LENGTH];
+    char            alarmText[ALARM_TEXT_LENGTH];
+};
+
+
+enum ADFrameType {
+    FrameTypeUnknown = 0,
+    NetVuVideo,
+    NetVuAudio,
+    DMVideo,
+    DMNudge,
+    NetVuDataInfo,
+    NetVuDataLayout,
+    RTPAudio
+};
+
+#define VSD_M0      0
+#define VSD_M1      1
+#define VSD_M2      2
+#define VSD_M3      3
+#define VSD_M4      4
+#define VSD_M5      5
+#define VSD_M6      6
+#define VSD_FM0     7
+#define VSD_F       8
+#define VSD_EM0     9
+#define VSD_COUNT  10
+
+#define ACTMASKLEN  16
+#define VSDARRAYLEN 16
+
+/** This is the data structure that the ffmpeg parser fills in as part of the
+ * parsing routines. It will be shared between adpic and dspic so that our
+ * clients can be compatible with either stream more easily
+ */
+struct ADFrameData {
+    /// Type of frame we have. See ADFrameType enum for supported types
+    enum ADFrameType    frameType;
+    /// Pointer to structure holding the information for the frame.
+    void *              frameData;
+    /// Pointer to text block
+    void *              additionalData;
+    /// Data parsed out of text (if exists)
+    int                 activeZones;
+    /// Data parsed out of text (if exists)
+    uint32_t            frameNum;
+    /// Data parsed out of text (if exists)
+    uint16_t            activityMask[ACTMASKLEN];
+    /// Data parsed out of text (if exists)
+    int                 vsd[VSD_COUNT][VSDARRAYLEN];
+};
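+
+/* Depending on frameType above, frameData is expected to point at a struct NetVuImageData,
+ * struct NetVuAudioData or struct DMImageData, while additionalData carries any trailing
+ * text block that accompanied the frame. */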
+
+#define RTP_PAYLOAD_TYPE_8000HZ_ADPCM                       5
+#define RTP_PAYLOAD_TYPE_11025HZ_ADPCM                      16
+#define RTP_PAYLOAD_TYPE_16000HZ_ADPCM                      6
+#define RTP_PAYLOAD_TYPE_22050HZ_ADPCM                      17
+#define RTP_PAYLOAD_TYPE_32000HZ_ADPCM                      96
+#define RTP_PAYLOAD_TYPE_44100HZ_ADPCM                      97
+#define RTP_PAYLOAD_TYPE_48000HZ_ADPCM                      98
+#define RTP_PAYLOAD_TYPE_8000HZ_PCM                         100
+#define RTP_PAYLOAD_TYPE_11025HZ_PCM                        101
+#define RTP_PAYLOAD_TYPE_16000HZ_PCM                        102
+#define RTP_PAYLOAD_TYPE_22050HZ_PCM                        103
+#define RTP_PAYLOAD_TYPE_32000HZ_PCM                        104
+#define RTP_PAYLOAD_TYPE_44100HZ_PCM                        11
+#define RTP_PAYLOAD_TYPE_48000HZ_PCM                        105
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
diff --git a/libavformat/dsenc.c b/libavformat/dsenc.c
new file mode 100644
index 0000000000..250d107c9f
--- /dev/null
+++ b/libavformat/dsenc.c
@@ -0,0 +1,488 @@
+/*
+ * Protocol for AD-Holdings Digital Sprite stream format
+ * Copyright (c) 2006-2010 AD-Holdings plc
+ * Modified for FFmpeg by Tom Needham <06needhamt at gmail.com> (2018)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include "avformat.h"
+#include "libavutil/avstring.h"
+#include "dsenc.h"
+
+/* Constants for MD5Transform routine.
+ */
+#define S11 7
+#define S12 12
+#define S13 17
+#define S14 22
+#define S21 5
+#define S22 9
+#define S23 14
+#define S24 20
+#define S31 4
+#define S32 11
+#define S33 16
+#define S34 23
+#define S41 6
+#define S42 10
+#define S43 15
+#define S44 21
+
+
+/* POINTER defines a generic pointer type */
+typedef unsigned char *       POINTER;
+typedef const unsigned char * CPOINTER;
+
+/* UINT2 defines a two byte word */
+typedef unsigned short int UINT2;
+
+/* UINT4 defines a four byte word */
+typedef unsigned long int UINT4;
+
+
+/* MD5 context. */
+typedef struct {
+    UINT4 state[4];                   /* state (ABCD) */
+    UINT4 count[2];                   /* number of bits, modulo 2^64 (lsb first) */
+    unsigned char buffer[64];         /* input buffer */
+} MD5_CTX;
+
+
+/* Length of test block, number of test blocks.
+ */
+#define TEST_BLOCK_LEN 1000
+#define TEST_BLOCK_COUNT 1000
+
+
+// Macro Definitions
+
+/* F, G, H and I are basic MD5 functions.
+ */
+#define F(x, y, z) (((x) & (y)) | ((~x) & (z)))
+#define G(x, y, z) (((x) & (z)) | ((y) & (~z)))
+#define H(x, y, z) ((x) ^ (y) ^ (z))
+#define I(x, y, z) ((y) ^ ((x) | (~z)))
+
+/* ROTATE_LEFT rotates x left n bits.
+ */
+#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
+
+/* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4.
+Rotation is separate from addition to prevent recomputation.
+ */
+#define FF(a, b, c, d, x, s, ac) { \
+        (a) += F ((b), (c), (d)) + (x) + (UINT4)(ac); \
+        (a) = ROTATE_LEFT ((a), (s)); \
+        (a) += (b); \
+    }
+#define GG(a, b, c, d, x, s, ac) { \
+        (a) += G ((b), (c), (d)) + (x) + (UINT4)(ac); \
+        (a) = ROTATE_LEFT ((a), (s)); \
+        (a) += (b); \
+    }
+#define HH(a, b, c, d, x, s, ac) { \
+        (a) += H ((b), (c), (d)) + (x) + (UINT4)(ac); \
+        (a) = ROTATE_LEFT ((a), (s)); \
+        (a) += (b); \
+    }
+#define II(a, b, c, d, x, s, ac) { \
+        (a) += I ((b), (c), (d)) + (x) + (UINT4)(ac); \
+        (a) = ROTATE_LEFT ((a), (s)); \
+        (a) += (b); \
+    }
+
+#define VIEWER_VERSION_104_001                      0x00104001
+
+
+static void MD5Init(MD5_CTX * context, const unsigned char *pub_key);
+static void MD5Update(MD5_CTX* context, const unsigned char* input, unsigned int inputLen);
+static void MD5Final(unsigned char * digest, MD5_CTX * context);
+static void MD5Transform(UINT4 * state, const unsigned char * block);
+static void Encode(unsigned char * output, const UINT4 * input, unsigned int len);
+static void Decode(UINT4 * output, const unsigned char * input, unsigned int len);
+static unsigned char hex_val (const char *char2);
+static void MD5Print(char* FingerPrint, int size, const unsigned char * digest);
+static void GetFingerPrint(char* FingerPrint, const char* Source, unsigned int Length, const char *cpublic_key);
+
+#if !defined(_WIN32)
+static char* strupr(char *theString)
+{
+    int ii;
+
+    for(ii = 0; ii < strlen(theString); ii++)  {
+        theString[ii] = toupper(theString[ii]);
+    }
+    return theString;
+}
+#endif
+
+//---------------------------------------------------------------------------
+
+#pragma pack(1)
+//#pragma package(smart_init)
+
+static unsigned char PADDING[64] = {
+    0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+
+/* MD5 initialization. Begins an MD5 operation, writing a new context. */
+static void MD5Init(MD5_CTX * context, const unsigned char *pub_key)
+{
+    context->count[0] = context->count[1] = 0;
+    /* Load magic initialization constants.*/
+    context->state[0] = 0x67452301;
+    context->state[1] = 0xefcdab89;
+    context->state[2] = 0x98badcfe;
+    context->state[3] = 0x10325476;
+    if (pub_key != NULL) {
+        context->state[0] &= 0xFF0000FF ;
+        context->state[0] |= ((pub_key[4] << 8) | (pub_key[1] << 16)) ;
+        context->state[1] &= 0xFFFFFF00 ;
+        context->state[1] |= (pub_key[5]) ;
+        context->state[2] &= 0x00FF00FF ;
+        context->state[2] |= ((pub_key[0] << 8) | (pub_key[2] << 24)) ;
+        context->state[3] &= 0xFF00FFFF ;
+        context->state[3] |= (pub_key[3] << 16) ;
+    }
+}
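+
+/* When a public key is supplied (the callers pass the six bytes decoded from the unit's MAC
+ * address string), selected bytes of the standard MD5 initial state are replaced with key
+ * bytes, so the resulting digest is a keyed fingerprint rather than a plain MD5 of the input. */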
+
+/* MD5 block update operation. Continues an MD5 message-digest
+  operation, processing another message block, and updating the
+  context.
+ */
+static void MD5Update(MD5_CTX* context, const unsigned char* input, unsigned int inputLen)
+{
+    unsigned int i, index, partLen;
+
+    /* Compute number of bytes mod 64 */
+    index = (unsigned int)((context->count[0] >> 3) & 0x3F);
+
+    /* Update number of bits */
+    if ((context->count[0] += ((UINT4)inputLen << 3)) < ((UINT4)inputLen << 3))
+        context->count[1]++;
+
+    context->count[1] += ((UINT4)inputLen >> 29);
+
+    partLen = 64 - index;
+
+    /* Transform as many times as possible */
+    if (inputLen >= partLen) {
+        memcpy((POINTER)&context->buffer[index], (CPOINTER)input, partLen);
+        MD5Transform (context->state, context->buffer);
+
+        for (i = partLen; i + 63 < inputLen; i += 64)
+            MD5Transform (context->state, &input[i]);
+
+        index = 0;
+    }
+    else {
+        i = 0;
+    }
+
+    /* Buffer remaining input */
+    if (inputLen > i)
+        memcpy((POINTER)&context->buffer[index], (CPOINTER)&input[i], inputLen - i);
+}
+
+/* MD5 finalization. Ends an MD5 message-digest operation, writing the
+   message digest and zeroizing the context.
+ */
+static void MD5Final(unsigned char * digest, MD5_CTX * context)
+{
+    unsigned char bits[8];
+    unsigned int index, padLen;
+
+    /* Save number of bits */
+    Encode (bits, context->count, 8);
+
+    /* Pad out to 56 mod 64 */
+    index = (unsigned int)((context->count[0] >> 3) & 0x3f);
+    padLen = (index < 56) ? (56 - index) : (120 - index);
+    MD5Update (context, PADDING, padLen);
+
+    /* Append length (before padding) */
+    MD5Update (context, bits, 8);
+    /* Store state in digest */
+    Encode (digest, context->state, 16);
+
+    /* Zeroize sensitive information */
+    memset ((POINTER)context, 0, sizeof (*context));
+}
+
+
+
+/* MD5 basic transformation. Transforms state based on block */
+static void MD5Transform(UINT4 * state, const unsigned char * block)
+{
+    UINT4 a = state[0], b = state[1], c = state[2], d = state[3], x[16];
+
+    Decode (x, block, 64);
+
+    /* Round 1 */
+    FF (a, b, c, d, x[ 0], S11, 0xd76aa478); /* 1 */
+    FF (d, a, b, c, x[ 1], S12, 0xe8c7b756); /* 2 */
+    FF (c, d, a, b, x[ 2], S13, 0x242070db); /* 3 */
+    FF (b, c, d, a, x[ 3], S14, 0xc1bdceee); /* 4 */
+    FF (a, b, c, d, x[ 4], S11, 0xf57c0faf); /* 5 */
+    FF (d, a, b, c, x[ 5], S12, 0x4787c62a); /* 6 */
+    FF (c, d, a, b, x[ 6], S13, 0xa8304613); /* 7 */
+    FF (b, c, d, a, x[ 7], S14, 0xfd469501); /* 8 */
+    FF (a, b, c, d, x[ 8], S11, 0x698098d8); /* 9 */
+    FF (d, a, b, c, x[ 9], S12, 0x8b44f7af); /* 10 */
+    FF (c, d, a, b, x[10], S13, 0xffff5bb1); /* 11 */
+    FF (b, c, d, a, x[11], S14, 0x895cd7be); /* 12 */
+    FF (a, b, c, d, x[12], S11, 0x6b901122); /* 13 */
+    FF (d, a, b, c, x[13], S12, 0xfd987193); /* 14 */
+    FF (c, d, a, b, x[14], S13, 0xa679438e); /* 15 */
+    FF (b, c, d, a, x[15], S14, 0x49b40821); /* 16 */
+
+    /* Round 2 */
+    GG (a, b, c, d, x[ 1], S21, 0xf61e2562); /* 17 */
+    GG (d, a, b, c, x[ 6], S22, 0xc040b340); /* 18 */
+    GG (c, d, a, b, x[11], S23, 0x265e5a51); /* 19 */
+    GG (b, c, d, a, x[ 0], S24, 0xe9b6c7aa); /* 20 */
+    GG (a, b, c, d, x[ 5], S21, 0xd62f105d); /* 21 */
+    GG (d, a, b, c, x[10], S22,  0x2441453); /* 22 */
+    GG (c, d, a, b, x[15], S23, 0xd8a1e681); /* 23 */
+    GG (b, c, d, a, x[ 4], S24, 0xe7d3fbc8); /* 24 */
+    GG (a, b, c, d, x[ 9], S21, 0x21e1cde6); /* 25 */
+    GG (d, a, b, c, x[14], S22, 0xc33707d6); /* 26 */
+    GG (c, d, a, b, x[ 3], S23, 0xf4d50d87); /* 27 */
+    GG (b, c, d, a, x[ 8], S24, 0x455a14ed); /* 28 */
+    GG (a, b, c, d, x[13], S21, 0xa9e3e905); /* 29 */
+    GG (d, a, b, c, x[ 2], S22, 0xfcefa3f8); /* 30 */
+    GG (c, d, a, b, x[ 7], S23, 0x676f02d9); /* 31 */
+    GG (b, c, d, a, x[12], S24, 0x8d2a4c8a); /* 32 */
+
+    /* Round 3 */
+    HH (a, b, c, d, x[ 5], S31, 0xfffa3942); /* 33 */
+    HH (d, a, b, c, x[ 8], S32, 0x8771f681); /* 34 */
+    HH (c, d, a, b, x[11], S33, 0x6d9d6122); /* 35 */
+    HH (b, c, d, a, x[14], S34, 0xfde5380c); /* 36 */
+    HH (a, b, c, d, x[ 1], S31, 0xa4beea44); /* 37 */
+    HH (d, a, b, c, x[ 4], S32, 0x4bdecfa9); /* 38 */
+    HH (c, d, a, b, x[ 7], S33, 0xf6bb4b60); /* 39 */
+    HH (b, c, d, a, x[10], S34, 0xbebfbc70); /* 40 */
+    HH (a, b, c, d, x[13], S31, 0x289b7ec6); /* 41 */
+    HH (d, a, b, c, x[ 0], S32, 0xeaa127fa); /* 42 */
+    HH (c, d, a, b, x[ 3], S33, 0xd4ef3085); /* 43 */
+    HH (b, c, d, a, x[ 6], S34,  0x4881d05); /* 44 */
+    HH (a, b, c, d, x[ 9], S31, 0xd9d4d039); /* 45 */
+    HH (d, a, b, c, x[12], S32, 0xe6db99e5); /* 46 */
+    HH (c, d, a, b, x[15], S33, 0x1fa27cf8); /* 47 */
+    HH (b, c, d, a, x[ 2], S34, 0xc4ac5665); /* 48 */
+
+    /* Round 4 */
+    II (a, b, c, d, x[ 0], S41, 0xf4292244); /* 49 */
+    II (d, a, b, c, x[ 7], S42, 0x432aff97); /* 50 */
+    II (c, d, a, b, x[14], S43, 0xab9423a7); /* 51 */
+    II (b, c, d, a, x[ 5], S44, 0xfc93a039); /* 52 */
+    II (a, b, c, d, x[12], S41, 0x655b59c3); /* 53 */
+    II (d, a, b, c, x[ 3], S42, 0x8f0ccc92); /* 54 */
+    II (c, d, a, b, x[10], S43, 0xffeff47d); /* 55 */
+    II (b, c, d, a, x[ 1], S44, 0x85845dd1); /* 56 */
+    II (a, b, c, d, x[ 8], S41, 0x6fa87e4f); /* 57 */
+    II (d, a, b, c, x[15], S42, 0xfe2ce6e0); /* 58 */
+    II (c, d, a, b, x[ 6], S43, 0xa3014314); /* 59 */
+    II (b, c, d, a, x[13], S44, 0x4e0811a1); /* 60 */
+    II (a, b, c, d, x[ 4], S41, 0xf7537e82); /* 61 */
+    II (d, a, b, c, x[11], S42, 0xbd3af235); /* 62 */
+    II (c, d, a, b, x[ 2], S43, 0x2ad7d2bb); /* 63 */
+    II (b, c, d, a, x[ 9], S44, 0xeb86d391); /* 64 */
+
+    state[0] += a;
+    state[1] += b;
+    state[2] += c;
+    state[3] += d;
+
+    /* Zeroize sensitive information */
+    memset ((POINTER)x, 0, sizeof (x));
+}
+
+/* Encodes input (UINT4) into output (unsigned char). Assumes len is a multiple of 4 */
+static void Encode(unsigned char * output, const UINT4 *input, unsigned int len)
+{
+    unsigned int i, j;
+
+    for (i = 0, j = 0; j < len; i++, j += 4) {
+        output[j] = (unsigned char)(input[i] & 0xff);
+        output[j+1] = (unsigned char)((input[i] >> 8) & 0xff);
+        output[j+2] = (unsigned char)((input[i] >> 16) & 0xff);
+        output[j+3] = (unsigned char)((input[i] >> 24) & 0xff);
+    }
+}
+
+
+/* Decodes input (unsigned char) into output (UINT4). Assumes len is
+  a multiple of 4.
+ */
+static void Decode(UINT4 * output, const unsigned char *input, unsigned int len)
+{
+    unsigned int i, j;
+
+    for (i = 0, j = 0; j < len; i++, j += 4) {
+        output[i] = ((UINT4)input[j]) |
+                    (((UINT4)input[j+1]) << 8) |
+                    (((UINT4)input[j+2]) << 16) |
+                    (((UINT4)input[j+3]) << 24);
+    }
+}
+
+static unsigned char hex_val (const char *char2)
+{
+    unsigned long    ret_val ;
+    if (char2[0] > '9')
+        ret_val = (char2[0] - 'A' + 10) * 0x10 ;
+    else
+        ret_val = (char2[0] - '0') * 0x10 ;
+
+    if (char2[1] > '9')
+        ret_val += (char2[1] - 'A' + 10) ;
+    else
+        ret_val += (char2[1] - '0') ;
+
+    return (unsigned char)(ret_val & 0x000000FF) ;
+}
+
+// Main Function to get a finger print
+static void GetFingerPrint(char* FingerPrint, const char* Source, unsigned int Length, const char *cpublic_key)
+{
+    MD5_CTX context;
+    unsigned char digest[16];
+
+    short j ;
+    unsigned char public_key[6] ;
+
+    char    local_fp[32+1] ;
+    //unsigned int len = strlen(Source);
+
+    if (cpublic_key != NULL) {
+        for (j = 0; j < 6; j++)
+            public_key[j] = hex_val (&(cpublic_key[j*2])) ;
+        MD5Init (&context, public_key);
+    }
+    else
+        MD5Init (&context, NULL);
+
+    MD5Update (&context, (const unsigned char*)Source, Length);
+    MD5Final (digest, &context);
+
+
+    MD5Print (local_fp, 32 + 1, digest);
+
+    av_strlcpy (FingerPrint, local_fp, 32 + 1) ;  /* copy all 32 hex digits plus the NUL terminator */
+
+}
+
+
+/* Prints a message digest in hexadecimal */
+static void MD5Print(char* FingerPrint, int size, const unsigned char * digest)
+{
+    unsigned int i;
+
+    char    temp[20];
+
+    strcpy(FingerPrint, "");
+
+    for (i = 0; i < 16; i++) {
+        snprintf (temp, 20, "%02x", digest[i]);
+
+        av_strlcat(FingerPrint, temp, size);
+    }
+
+}
+
+static char *CreateUserPassword( const char *Username, const char *Password )
+{
+    char *      userPassword = NULL;
+
+    if( Username != NULL && Password != NULL ) {
+        /* Allocate space for both strings */
+        if( (userPassword = (char*) av_malloc( strlen( Username ) + strlen( Password ) + 1 )) != NULL ) {
+            /* Copy the username into the output string */
+            strcpy( userPassword, Username );
+
+            userPassword[strlen(Username)] = '\0';
+
+            /* Now add the password */
+            av_strlcat( userPassword, Password, strlen( Username ) + strlen( Password ) + 1 );
+
+            /* NULL terminate */
+            userPassword[strlen(Username) + strlen(Password)] = '\0';
+
+            /* Now convert it to uppercase */
+            strupr( userPassword );
+        }
+    }
+
+    return userPassword;
+}
+
+char * EncryptPasswordString(const char * Username,
+                             const char * Password,
+                             long Timestamp,
+                             const char * MacAddress,
+                             long RemoteApplicationVersion )
+{
+    // Encrypt Password
+    char        EncPassword[33];
+    char        TransmittedData[33];
+    char        Source[128];
+    char *      UserPassword = NULL;
+    char *      EncryptedPassword = NULL;
+    int         canContinue = 1;
+
+    // If connected to a new unit send concat username too
+    if( RemoteApplicationVersion >= VIEWER_VERSION_104_001 ) {     // version 1.4(001) First version with password handling
+        if( (UserPassword = CreateUserPassword( Username, Password )) != NULL ) {
+
+            GetFingerPrint( EncPassword, UserPassword, (unsigned int)strlen(UserPassword), MacAddress );
+            EncPassword[32] = '\0';
+
+            av_free( UserPassword );
+        }
+        else
+            canContinue = 0;
+    }
+    else {
+        GetFingerPrint( EncPassword, Password, (unsigned int)(strlen(Password)), MacAddress );
+        EncPassword[32] = '\0';
+    }
+
+    if( canContinue ) {
+        snprintf(Source, 128, "%08X", (int)Timestamp);
+        av_strlcat(Source, strupr(EncPassword), 128);
+
+        GetFingerPrint( TransmittedData, Source, (unsigned int)strlen(Source), MacAddress );
+        TransmittedData[32] = '\0';
+
+        /* Take a copy of this and return it */
+        if( (EncryptedPassword = (char *) av_malloc( strlen(TransmittedData) + 1 )) != NULL ) {
+            strcpy( EncryptedPassword, TransmittedData );
+            EncryptedPassword[strlen(TransmittedData)] = '\0';
+        }
+    }
+
+    return EncryptedPassword;
+}
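+
+/* Overall scheme as implemented above (a description of this code, not of any external spec):
+ * for units at or above viewer version 1.4(001) the client computes
+ *     enc  = keyed-MD5( toupper(username + password) )
+ *     sent = keyed-MD5( "%08X"-formatted timestamp + toupper(enc) )
+ * with the MAC address as the key in both steps, and transmits 'sent'; older units skip the
+ * username concatenation. The timestamp is presumably the one returned by the server in its
+ * connect reply/reject, which is what makes the transmitted digest per-session. */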
diff --git a/libavformat/dsenc.h b/libavformat/dsenc.h
new file mode 100644
index 0000000000..fe6372efbe
--- /dev/null
+++ b/libavformat/dsenc.h
@@ -0,0 +1,35 @@
+/*
+ * Protocol for AD-Holdings Digital Sprite stream format
+ * Copyright (c) 2006-2010 AD-Holdings plc
+ * Modified for FFmpeg by Tom Needham <06needhamt at gmail.com> (2018)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+//---------------------------------------------------------------------------
+
+#ifndef AVFORMAT_DSENC_H
+#define AVFORMAT_DSENC_H
+//---------------------------------------------------------------------------
+
+char * EncryptPasswordString(const char * Username,
+                             const char * Password,
+                             long Timestamp,
+                             const char * MacAddress,
+                             long RemoteApplicationVersion );
+
+#endif /* AVFORMAT_DSENC_H */
diff --git a/libavformat/dspic.c b/libavformat/dspic.c
new file mode 100644
index 0000000000..51c216da30
--- /dev/null
+++ b/libavformat/dspic.c
@@ -0,0 +1,317 @@
+/*
+ * Demuxer for AD-Holdings Digital Sprite stream format
+ * Copyright (c) 2006-2010 AD-Holdings plc
+ * Modified for FFmpeg by Tom Needham <06needhamt at gmail.com> (2018)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avformat.h"
+#include "libavcodec/avcodec.h"
+#include "libavutil/bswap.h"
+#include "ds.h"
+#include "adpic.h"
+
+static int dspicProbe( AVProbeData *p );
+static int dspicReadHeader( AVFormatContext *s );
+static int dspicReadPacket( AVFormatContext *s, AVPacket *pkt );
+static int dspicReadClose( AVFormatContext *s );
+static int ReadNetworkMessageHeader( AVIOContext *context, MessageHeader *header );
+static struct DMImageData * parseDSJFIFHeader( uint8_t *data, int dataSize );
+static int ExtractDSFrameData( uint8_t * buffer, struct DMImageData *frameData );
+
+
+static const long       DSPacketHeaderMagicNumber = DS_HEADER_MAGIC_NUMBER;
+static const char *     DSApp0Identifier = "DigiSpr";
+
+
+#define TRUE    1
+#define FALSE   0
+
+
+static int dspicProbe( AVProbeData *p )
+{
+    long        magicNumber = 0;
+
+    if( p->buf_size <= sizeof(long) )
+        return 0;
+
+    /* Get what should be the magic number field of the first header */
+    memcpy( &magicNumber, p->buf, sizeof(long) );
+    /* Adjust the byte ordering */
+    magicNumber = av_be2ne32(magicNumber);
+
+    if( magicNumber == DSPacketHeaderMagicNumber )
+        return AVPROBE_SCORE_MAX;
+
+    return 0;
+}
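+
+/* The probe only checks that the stream starts with the big-endian magic number
+ * DS_HEADER_MAGIC_NUMBER (0xFACED0FF) that precedes every Digital Sprite message header. */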
+
+static int dspicReadHeader( AVFormatContext *s )
+{
+    return 0;
+}
+
+static int dspicReadPacket( AVFormatContext *s, AVPacket *pkt )
+{
+    AVIOContext *         ioContext = s->pb;
+    int                     retVal = 0;
+    MessageHeader           header;
+    int                     dataSize = 0;
+    struct DMImageData *           videoFrameData = NULL;
+    AVStream *              stream = NULL;
+    struct ADFrameData *           frameData = NULL;
+    enum ADFrameType             frameType = FrameTypeUnknown;
+
+    /* Attempt to read in a network message header */
+    if( (retVal = ReadNetworkMessageHeader( ioContext, &header )) != 0 )
+        return retVal;
+
+    /* Validate the header */
+    if( header.magicNumber == DSPacketHeaderMagicNumber ) {
+        if( header.messageType == TCP_SRV_NUDGE ) {
+            frameType = DMNudge;
+
+            /* Read any extra bytes then try again */
+            dataSize = header.length - (SIZEOF_MESSAGE_HEADER_IO - sizeof(unsigned long));
+
+            if( (retVal = ad_new_packet( pkt, dataSize )) < 0 )
+                return retVal;
+
+            if( avio_read( ioContext, pkt->data, dataSize ) != dataSize )
+                return AVERROR(EIO);
+        }
+        else if( header.messageType == TCP_SRV_IMG_DATA ) {
+            frameType = DMVideo;
+
+            /* This should be followed by a jfif image */
+            dataSize = header.length - (SIZEOF_MESSAGE_HEADER_IO - sizeof(unsigned long));
+
+            /* Allocate packet data large enough for what we have */
+            if( (retVal = ad_new_packet( pkt, dataSize )) < 0 )
+                return retVal;
+
+            /* Read the jfif data out of the buffer */
+            if( avio_read( ioContext, pkt->data, dataSize ) != dataSize )
+                return AVERROR(EIO);
+
+            /* Now extract the frame info that's in there */
+            if( (videoFrameData = parseDSJFIFHeader( pkt->data, dataSize )) == NULL )
+                return AVERROR(EIO);
+
+            /* if( audioFrameData != NULL ) frameType |= DS1_PACKET_TYPE_AUDIO; */
+
+            if ( (stream = ad_get_vstream(s, 0, 0, 1, PIC_MODE_JPEG_422, NULL)) == NULL )
+                return AVERROR(EIO);
+        }
+    }
+    else
+        return AVERROR(EIO);
+
+    /* Now create a wrapper to hold this frame's data which we'll store in the packet's private member field */
+    if( (frameData = av_malloc( sizeof(*frameData) )) != NULL ) {
+        frameData->frameType = frameType;
+        frameData->frameData = videoFrameData;
+        frameData->additionalData = NULL;
+
+        pkt->data = frameData;
+    }
+    else
+        goto fail_mem;
+
+    pkt->stream_index = ( stream != NULL ) ? stream->index : 0;
+    pkt->duration =  ((int)(AV_TIME_BASE * 1.0));
+
+    return retVal;
+
+fail_mem:
+    /* Make sure everything that might have been allocated is released before we return... */
+    av_free( frameData );
+    av_free( videoFrameData );
+    return AVERROR(ENOMEM);
+}
+
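+/* Walk the JFIF markers of the frame just read (SOI, APP0, quantisation and
+ * Huffman tables, SOF, DRI, comments, ...) until the start-of-scan marker.
+ * If a "DigiSpr" APP0 segment is found, the DM frame metadata embedded in it
+ * is unpacked into a freshly allocated DMImageData that the caller owns. */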
+static struct DMImageData * parseDSJFIFHeader( uint8_t *data, int dataSize )
+{
+    struct DMImageData *            frameData = NULL;
+    int                         i;
+    unsigned short              length, marker;
+    int                         sos = FALSE;
+
+    i = 0;
+    while( (i < dataSize) && ((unsigned char)data[i] != 0xff) )
+        i++;
+
+    if( (i + 1 >= dataSize) || ((unsigned char)data[++i] != 0xd8) )
+        return NULL;  /* Bad SOI */
+
+    i++;
+
+    while( !sos && (i < dataSize) ) {
+        memcpy(&marker, &data[i], 2 );
+        i += 2;
+        memcpy(&length, &data[i], 2 );
+        i += 2;
+        marker = av_be2ne16(marker);
+        length = av_be2ne16(length);
+
+        switch (marker) {
+            case 0xffe0 : {    // APP0
+                /* Have a little look at the data in this block, see if it's what we're looking for */
+                if( memcmp( &data[i], DSApp0Identifier, strlen(DSApp0Identifier) ) == 0 ) {
+                    int         offset = i;
+
+                    if( (frameData = av_mallocz( sizeof(struct DMImageData) )) != NULL ) {
+                        /* Extract the values into a data structure */
+                        if( ExtractDSFrameData( &data[offset], frameData ) < 0 ) {
+                            av_free( frameData );
+                            return NULL;
+                        }
+                    }
+                }
+
+                i += length - 2;
+            }
+            break;
+
+            case 0xffdb :    // Q table
+                i += length - 2;
+                break;
+
+            case 0xffc0 :    // SOF
+                i += length - 2;
+                break;
+
+            case 0xffc4 :    // Huffman table
+                i += length - 2;
+                break;
+
+            case 0xffda :    // SOS
+                i += length - 2;
+                sos = TRUE;
+                break;
+
+            case 0xffdd :    // DRI
+                i += length - 2;
+                break;
+
+            case 0xfffe :    // Comment
+                i += length - 2;
+                break;
+
+            default :
+                /* Unknown marker encountered, better just skip past it */
+                i += length - 2;    // JCB 026 skip past the unknown field
+                break;
+        }
+    }
+
+    return frameData;
+}
+
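+/* The "DigiSpr" APP0 payload is a packed, big-endian structure holding, in
+ * order: identifier, JPEG length, image sequence number, image time, camera,
+ * status, activity[NUM_ACTIVITIES], Q factor, height, width, resolution,
+ * interlace flag, sub-header mask, camera title and alarm text.  The copies
+ * below mirror that layout field by field. */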
+static int ExtractDSFrameData( uint8_t * buffer, struct DMImageData *frameData )
+{
+    int         retVal = AVERROR(EIO);
+    int         bufIdx = 0;
+
+    if( buffer != NULL ) {
+        memcpy( frameData->identifier, &buffer[bufIdx], ID_LENGTH );
+        bufIdx += ID_LENGTH;
+
+        memcpy( &frameData->jpegLength, &buffer[bufIdx], sizeof(unsigned long) );
+        bufIdx += sizeof(unsigned long);
+        frameData->jpegLength = av_be2ne32(frameData->jpegLength);
+
+        memcpy( &frameData->imgSeq, &buffer[bufIdx], sizeof(int64_t) );
+        bufIdx += sizeof(int64_t);
+        frameData->imgSeq = av_be2ne64(frameData->imgSeq);
+
+        memcpy( &frameData->imgTime, &buffer[bufIdx], sizeof(int64_t) );
+        bufIdx += sizeof(int64_t);
+
+        memcpy( &frameData->camera, &buffer[bufIdx], sizeof(unsigned char) );
+        bufIdx += sizeof(unsigned char);
+
+        memcpy( &frameData->status, &buffer[bufIdx], sizeof(unsigned char) );
+        bufIdx += sizeof(unsigned char);
+
+        memcpy( &frameData->activity, &buffer[bufIdx], sizeof(unsigned short) * NUM_ACTIVITIES );
+        bufIdx += sizeof(unsigned short) * NUM_ACTIVITIES;
+
+        memcpy( &frameData->QFactor, &buffer[bufIdx], sizeof(unsigned short) );
+        bufIdx += sizeof(unsigned short);
+        frameData->QFactor = av_be2ne16(frameData->QFactor);
+
+        memcpy( &frameData->height, &buffer[bufIdx], sizeof(unsigned short) );
+        bufIdx += sizeof(unsigned short);
+        frameData->height = av_be2ne16(frameData->height);
+
+        memcpy( &frameData->width, &buffer[bufIdx], sizeof(unsigned short) );
+        bufIdx += sizeof(unsigned short);
+        frameData->width = av_be2ne16(frameData->width);
+
+        memcpy( &frameData->resolution, &buffer[bufIdx], sizeof(unsigned short) );
+        bufIdx += sizeof(unsigned short);
+        frameData->resolution = av_be2ne16(frameData->resolution);
+
+        memcpy( &frameData->interlace, &buffer[bufIdx], sizeof(unsigned short) );
+        bufIdx += sizeof(unsigned short);
+        frameData->interlace = av_be2ne16(frameData->interlace);
+
+        memcpy( &frameData->subHeaderMask, &buffer[bufIdx], sizeof(unsigned short) );
+        bufIdx += sizeof(unsigned short);
+        frameData->subHeaderMask = av_be2ne16(frameData->subHeaderMask);
+
+        memcpy( frameData->camTitle, &buffer[bufIdx], sizeof(char) * CAM_TITLE_LENGTH );
+        bufIdx += sizeof(char) * CAM_TITLE_LENGTH;
+
+        memcpy( frameData->alarmText, &buffer[bufIdx], sizeof(char) * ALARM_TEXT_LENGTH );
+        bufIdx += sizeof(char) * ALARM_TEXT_LENGTH;
+
+        retVal = 0;
+    }
+
+    return retVal;
+}
+
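+/* A network MessageHeader is seven big-endian 32-bit words: magic number,
+ * length, channel ID, sequence, message version, checksum and message type. */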
+static int ReadNetworkMessageHeader( AVIOContext *context, MessageHeader *header )
+{
+    // Read the header in a piece at a time...
+    header->magicNumber    = avio_rb32(context);
+    header->length         = avio_rb32(context);
+    header->channelID      = avio_rb32(context);
+    header->sequence       = avio_rb32(context);
+    header->messageVersion = avio_rb32(context);
+    header->checksum       = avio_rb32(context);
+    header->messageType    = avio_rb32(context);
+    return 0;
+}
+
+static int dspicReadClose( AVFormatContext *s )
+{
+    return 0;
+}
+
+
+AVInputFormat ff_dspic_demuxer = {
+    .name           = "dspic",
+    .long_name      = NULL_IF_CONFIG_SMALL("AD-Holdings Digital-Sprite
format"),
+    .read_probe     = dspicProbe,
+    .read_header    = dspicReadHeader,
+    .read_packet    = dspicReadPacket,
+    .read_close     = dspicReadClose,
+};
diff --git a/libavformat/libpar.c b/libavformat/libpar.c
new file mode 100644
index 0000000000..6393b3e011
--- /dev/null
+++ b/libavformat/libpar.c
@@ -0,0 +1,1030 @@
+/*
+ * AD-Holdings PAR file demuxer
+ * Copyright (c) 2010 AD-Holdings plc
+ * Modified for FFmpeg by Tom Needham <06needhamt at gmail.com> (2018)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * AD-Holdings PAR file demuxer
+ */
+
+#include <strings.h>
+#include <parreader.h>
+
+#include "avformat.h"
+#include "internal.h"
+#include "libavutil/avstring.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/time.h"
+#include "libpar.h"
+#include "adpic.h"
+
+
+typedef struct {
+    ParDisplaySettings dispSet;
+    ParFrameInfo frameInfo;
+    int fileChanged;
+    int frameCached;
+    unsigned long seqStartAdded;
+} PARDecContext;
+
+struct PAREncStreamContext {
+    int index;
+    char name[64];
+    int camera;
+    int64_t startTime;
+    int utc_offset;
+    //struct PAREncStreamContext *next;
+};
+
+typedef struct {
+    ParFrameInfo frameInfo;
+    struct PAREncStreamContext master;
+    struct PAREncStreamContext stream[20];
+    //struct PAREncStreamContext *firstStream;
+    int picHeaderSize;
+} PAREncContext;
+
+#ifdef AD_SIDEDATA_IN_PRIV
+void libpar_packet_destroy(struct AVPacket *packet);
+#endif
+
+
+const unsigned int MAX_FRAMEBUFFER_SIZE = 512 * 1024;
+
+
+static void importMetadata(const AVDictionaryEntry *tag, struct PAREncStreamContext *ps)
+{
+    if (av_strcasecmp(tag->key, "title") == 0)
+        av_strlcpy(ps->name, tag->value, sizeof(ps->name));
+    else if (av_strcasecmp(tag->key, "date") == 0)  {
+        av_parse_time(&ps->startTime, tag->value, 0);
+        ps->startTime *= 1000;
+    }
+    else if (av_strcasecmp(tag->key, "track") == 0)
+        sscanf(tag->value, "%d", &(ps->camera));
+    else if (av_strcasecmp(tag->key, "timezone") == 0)
+        sscanf(tag->value, "%d", &(ps->utc_offset));
+}
+
+static void parreaderLogger(int level, const char *format, va_list args)
+{
+    int av_log_level = -1;
+    switch(level)  {
+        case (PARREADER_LOG_CRITICAL):
+            av_log_level = AV_LOG_FATAL;
+            break;
+        case(PARREADER_LOG_ERROR):
+            av_log_level = AV_LOG_ERROR;
+            break;
+        case(PARREADER_LOG_WARNING):
+            av_log_level = AV_LOG_WARNING;
+            break;
+        case(PARREADER_LOG_INFO):
+            av_log_level = AV_LOG_INFO;
+            break;
+        case(PARREADER_LOG_DEBUG):
+            av_log_level = AV_LOG_DEBUG;
+            break;
+    }
+    av_vlog(NULL, av_log_level, format, args);
+}
+
+
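+/* Muxer: par_write_header opens a write partition (stripping any trailing
+ * ".par", which the library appends itself); par_write_packet then prefixes
+ * each frame with a picture header built by parReader_generatePicHeader,
+ * unless the packet already carries AD side data, in which case the side
+ * data and payload are written out as-is. */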
+static int par_write_header(AVFormatContext *avf)
+{
+    PAREncContext *p = avf->priv_data;
+    AVDictionaryEntry *tag = NULL;
+    int ii, result;
+    char *fnameNoExt;
+
+    p->picHeaderSize = parReader_getPicStructSize();
+    do  {
+        tag = av_dict_get(avf->metadata, "", tag, AV_DICT_IGNORE_SUFFIX);
+        if (tag)
+            importMetadata(tag, &(p->master));
+    }
+    while (tag);
+
+    for (ii = 0; ii < avf->nb_streams; ii++)  {
+        AVStream *st = avf->streams[ii];
+
+        // Set timebase to 1 millisecond, and min frame rate to 1 / timebase
+        avpriv_set_pts_info(st, 32, 1, 1000);
+        st->r_frame_rate = (AVRational) { 1, 1 };
+    }
+
+    // parReader_initWritePartition will automatically append .par to the filename,
+    // so strip it from the name passed in to prevent the file being called file.par.par
+    ii = strlen(avf->filename);
+    fnameNoExt = av_malloc(ii + 1);
+    if (fnameNoExt == NULL)
+        return AVERROR(ENOMEM);
+    strcpy(fnameNoExt, avf->filename);
+    if ((ii >= 4) && (av_strcasecmp(avf->filename + ii - 4, ".par") == 0))
+        fnameNoExt[ii - 4] = '\0';
+    result = parReader_initWritePartition(&p->frameInfo, fnameNoExt, -1);
+    av_free(fnameNoExt);
+
+    if (result == 1)
+        return 0;
+    else
+        return AVERROR(EIO);
+}
+
+static int par_write_packet(AVFormatContext *avf, AVPacket * pkt)
+{
+    PAREncContext *p = avf->priv_data;
+    //struct PAREncStreamContext *ps = p->firstStream;
+    struct PAREncStreamContext *ps = &(p->stream[pkt->stream_index]);
+    AVDictionaryEntry *tag = NULL;
+    int64_t parTime;
+    AVStream *stream = avf->streams[pkt->stream_index];
+    void *hdr;
+    uint8_t *ptr;
+    int parFrameFormat;
+    int64_t srcTime = pkt->pts;
+    //uint32_t pktTypeCheck;
+    int written = 0;
+    int isADformat = 0;
+
+    // Metadata
+    if (ps->camera < 1)  {
+        // Copy over the values from the file data first
+        *ps = p->master;
+
+        // Now check if there are stream-specific values
+        do  {
+            tag = av_dict_get(stream->metadata, "", tag, AV_DICT_IGNORE_SUFFIX);
+            if (tag)
+                importMetadata(tag, ps);
+        }
+        while (tag);
+
+        if (ps->camera < 1)
+            ps->camera = pkt->stream_index + 1;
+        if (strlen(ps->name) == 0)
+            snprintf(ps->name, sizeof(ps->name), "Camera %d", ps->camera);
+    }
+
+    isADformat = 0;
+#ifdef AD_SIDEDATA
+    av_packet_split_side_data(pkt);
+    for (int ii = 0; ii < pkt->side_data_elems; ii++)  {
+        if (pkt->side_data[ii].type == AV_PKT_DATA_AD_FRAME)
+            isADformat = 1;
+    }
+#endif
+
+    if (isADformat)  {
+        uint8_t *combBuf = NULL, *combPtr = NULL;
+        int combSize = 0;
+        for (int ii = 0; ii < pkt->side_data_elems; ii++)  {
+#ifdef AD_SIDEDATA
+            if ( (pkt->side_data[ii].type == AV_PKT_DATA_AD_FRAME) || (pkt->side_data[ii].type == AV_PKT_DATA_AD_TEXT) )  {
+                combBuf = av_realloc(combBuf, combSize + pkt->side_data[ii].size);
+                combPtr = combBuf + combSize;
+                memcpy(combPtr, pkt->side_data[ii].data, pkt->side_data[ii].size);
+                combSize += pkt->side_data[ii].size;
+            }
+#endif
+        }
+
+        combBuf = av_realloc(combBuf, combSize + pkt->size);
+        combPtr = combBuf + combSize;
+        memcpy(combPtr, pkt->data, pkt->size);
+        combSize += pkt->size;
+
+        p->frameInfo.frameBuffer = combBuf;
+        p->frameInfo.frameBufferSize = combSize;
+        written = parReader_writePartition(&p->frameInfo);
+        return written;
+    }
+    else  {
+        // PAR files have timestamps that are in UTC, not elapsed time
+        // So if the pts we have been given is the latter we need to
+        // add an offset to convert it to the former
+
+        if (srcTime < 0)
+            srcTime = pkt->dts;
+        if ( (srcTime == 0) && (ps->startTime == 0) )  {
+            AVDictionaryEntry *ffstarttime = av_dict_get(avf->metadata, "creation_time", NULL, 0);
+            int64_t ffstarttimeint = 0;
+            if (ffstarttime)
+                sscanf(ffstarttime->value, "%"PRId64"", &ffstarttimeint);
+
+            if (avf->start_time_realtime > 0)
+                ps->startTime = avf->start_time_realtime / 1000;
+            else if (ffstarttimeint > 0)
+                ps->startTime = ffstarttimeint * 1000;
+            else
+                ps->startTime = av_gettime() / 1000;
+        }
+        parTime = srcTime;
+        if (parTime < ps->startTime)
+            parTime = parTime + ps->startTime;
+
+        if (stream->codec->codec_id == AV_CODEC_ID_MJPEG)  {
+            //p->frameInfo.frameBuffer = parReader_jpegToIMAGE(pkt->data, parTime, pkt->stream_index);
+            if ((stream->codec->pix_fmt == AV_PIX_FMT_YUV422P) || (stream->codec->pix_fmt == AV_PIX_FMT_YUVJ422P) )
+                parFrameFormat = FRAME_FORMAT_JPEG_422;
+            else
+                parFrameFormat = FRAME_FORMAT_JPEG_411;
+        }
+        else if (stream->codec->codec_id == AV_CODEC_ID_MPEG4) {
+            if (pkt->flags & AV_PKT_FLAG_KEY)
+                parFrameFormat = FRAME_FORMAT_MPEG4_411_GOV_I;
+            else
+                parFrameFormat = FRAME_FORMAT_MPEG4_411_GOV_P;
+        }
+        else  {
+            if (pkt->flags & AV_PKT_FLAG_KEY)
+                parFrameFormat = FRAME_FORMAT_H264_I;
+            else
+                parFrameFormat = FRAME_FORMAT_H264_P;
+        }
+
+        hdr = parReader_generatePicHeader(ps->camera,
+                                          parFrameFormat,
+                                          pkt->size,
+                                          parTime,
+                                          ps->name,
+                                          stream->codec->width,
+                                          stream->codec->height,
+                                          ps->utc_offset
+                                         );
+        p->frameInfo.frameBufferSize = pkt->size + p->picHeaderSize;
+        ptr = av_malloc(p->frameInfo.frameBufferSize);
+        if (ptr == NULL)
+            return AVERROR(ENOMEM);
+        p->frameInfo.frameBuffer = ptr;
+        memcpy(ptr, hdr, p->picHeaderSize);
+        memcpy(ptr + p->picHeaderSize, pkt->data, pkt->size);
+        written = parReader_writePartition(&p->frameInfo);
+        av_free(ptr);
+        parReader_freePicHeader(hdr);
+
+        return written;
+    }
+}
+
+static int par_write_trailer(AVFormatContext *avf)
+{
+    PAREncContext *p = avf->priv_data;
+    parReader_closeWritePartition(&p->frameInfo);
+    return 0;
+}
+
+#ifdef AD_SIDEDATA_IN_PRIV
+void libpar_packet_destroy(struct AVPacket *packet)
+{
+    LibparFrameExtra *fed = (LibparFrameExtra*)packet->priv;
+
+    if (packet->data == NULL) {
+        return;
+    }
+
+    if (fed)  {
+        if (fed->frameInfo)  {
+            if (fed->frameInfo->frameBuffer)
+                av_free(fed->frameInfo->frameBuffer);
+            av_free(fed->frameInfo);
+        }
+
+        if (fed->indexInfo)
+            parReader_freeIndexInfo(fed->indexInfo);
+
+        av_free(fed);
+    }
+
+    av_destruct_packet(packet);
+}
+#endif
+
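+/* The ADPCM audio stored in PAR files keeps its 16-bit predictor big-endian
+ * and packs its two 4-bit samples per byte in the opposite nibble order to
+ * the IMA WAV layout used for decoding, so byte-swap the predictor and swap
+ * the nibbles of every sample byte; the rest of the 4-byte block header is
+ * left untouched. */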
+static void endianSwapAudioData(uint8_t *data, int size)
+{
+    const uint8_t *dataEnd = data + size;
+    uint8_t upper, lower;
+    uint16_t predictor = AV_RB16(data);
+
+    AV_WL16(data, predictor);
+    data += 4;
+
+    for (;data < dataEnd; data++)  {
+        upper = ((*data) & 0xF0) >> 4;
+        lower = ((*data) & 0x0F) << 4;
+        *data = upper | lower;
+    }
+}
+
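+/* Work out the timestamp of the last frame in the file.  If the library's
+ * end-time index is flagged as unreliable, step backwards frame by frame
+ * (locked to the current file) until a frame on the same channel is found,
+ * then restore the reader to where it was. */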
+static int64_t getLastFrameTime(int fc, ParFrameInfo *fi, ParDisplaySettings *disp)
+{
+    int64_t lastFrame = 0;
+    int isReliable;
+
+
+    lastFrame = parReader_getEndTime(fi, &isReliable) * 1000LL;
+
+    if (isReliable == 0)  {
+        long startFrame = fi->frameNumber;
+        int streamId = fi->channel;
+        int ii = 1;
+
+        disp->cameraNum = fi->channel & 0xFFFF;
+        disp->fileSeqNo = -1;
+        disp->fileLock = 1;    // Don't seek beyond the file
+        do  {
+            disp->frameNumber = fc - ii++;
+            if (disp->frameNumber <= startFrame)
+                break;
+            parReader_loadFrame(fi, disp, NULL);
+        } while (streamId != fi->channel);
+
+        lastFrame = fi->imageTime * 1000LL + fi->imageMS;
+
+        // Now go back to where we were and reset the state
+        disp->playMode = RWND;
+        disp->frameNumber = startFrame;
+        parReader_loadFrame(fi, disp, NULL);
+        disp->playMode = PLAY;
+        disp->fileLock = 0;
+    }
+
+    return lastFrame;
+}
+
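+/* Streams are created lazily from the frame currently held in frameInfo:
+ * the PAR index carries no global stream table, so the demuxer advertises
+ * AVFMTCTX_NOHEADER and adds an AVStream the first time each channel is
+ * seen, deriving codec, dimensions, sample rate and metadata from the
+ * frame's sub-type. */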
+static AVStream* createStream(AVFormatContext * avf)
+{
+    PARDecContext *p = avf->priv_data;
+    ParFrameInfo *fi = (ParFrameInfo *)&p->frameInfo;
+    char textbuf[128];
+    int w, h;
+    int fc = 0;
+
+    unsigned long startT, endT;
+    AVStream * st = NULL;
+
+    if ((NULL==avf) || (NULL==fi) || (NULL==fi->frameBuffer))
+        return NULL;
+
+    st = avformat_new_stream(avf, NULL);
+    st->id = fi->channel;
+
+    parReader_getIndexData(fi, NULL, &fc, &startT, &endT);
+
+    if (parReader_frameIsVideo(fi))  {
+        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
+
+        switch(parReader_getFrameSubType(fi))  {
+            case(FRAME_FORMAT_JPEG_422):
+            case(FRAME_FORMAT_JPEG_411):
+                st->codec->codec_id = AV_CODEC_ID_MJPEG;
+                st->codec->has_b_frames = 0;
+                break;
+            case(FRAME_FORMAT_MPEG4_411):
+            case(FRAME_FORMAT_MPEG4_411_I):
+            case(FRAME_FORMAT_MPEG4_411_GOV_P):
+            case(FRAME_FORMAT_MPEG4_411_GOV_I):
+                st->codec->codec_id = AV_CODEC_ID_MPEG4;
+                break;
+            case(FRAME_FORMAT_RAW_422I):
+            case(FRAME_FORMAT_H264_I):
+            case(FRAME_FORMAT_H264_P):
+                st->codec->codec_id = AV_CODEC_ID_H264;
+                break;
+            case(FRAME_FORMAT_PBM):
+                st->codec->codec_id = AV_CODEC_ID_PBM;
+                break;
+            default:
+                // Set unknown types to data so we don't try and play them
+                st->codec->codec_type = AVMEDIA_TYPE_DATA;
+                st->codec->codec_id = AV_CODEC_ID_NONE;
+                break;
+        }
+
+        if (AVMEDIA_TYPE_VIDEO == st->codec->codec_type)  {
+            if (AV_CODEC_ID_PBM != st->codec->codec_id)  {
+                parReader_getFrameSize(fi, &w, &h);
+                st->codec->width = w;
+                st->codec->height = h;
+
+                // Set pixel aspect ratio, display aspect is (sar * width / height)
+                /// \todo Could set better values here by checking resolutions
+                /// and assuming PAL/NTSC aspect
+                if( (w > 360) && (h < 480) )
+                    st->sample_aspect_ratio = (AVRational) { 1, 2 };
+                else
+                    st->sample_aspect_ratio = (AVRational) { 1, 1 };
+
+                parReader_getStreamName(fi->frameBuffer,
+                                        fi->frameBufferSize,
+                                        textbuf,
+                                        sizeof(textbuf));
+                av_dict_set(&st->metadata, "title", textbuf, 0);
+
+                parReader_getStreamDate(fi, textbuf, sizeof(textbuf));
+                av_dict_set(&st->metadata, "date", textbuf, 0);
+
+                snprintf(textbuf, sizeof(textbuf), "%d", fi->channel & 0xFFFF);
+                av_dict_set(&st->metadata, "track", textbuf, 0);
+
+                av_dict_set(&st->metadata, "type", "camera", 0);
+            }
+            else  {
+                av_dict_set(&st->metadata, "type", "mask", 0);
+            }
+        }
+
+        // Set timebase to 1 millisecond, and min frame rate to 1 / timebase
+        avpriv_set_pts_info(st, 32, 1, 1000);
+        st->r_frame_rate = (AVRational) { 1, 1 };
+        st->start_time   = fi->imageTime * 1000LL + fi->imageMS;
+        st->duration     = getLastFrameTime(fc, fi, &p->dispSet) - st->start_time;
+    }
+    else if (parReader_frameIsAudio(fi))  {
+        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
+        st->codec->channels = 1;
+        st->codec->block_align = 0;
+        st->start_time = fi->imageTime * 1000LL + fi->imageMS;
+        st->duration = getLastFrameTime(fc, fi, &p->dispSet) - st->start_time;
+
+        switch(parReader_getFrameSubType(fi))  {
+            case(FRAME_FORMAT_AUD_ADPCM_8000):
+                st->codec->codec_id = AV_CODEC_ID_ADPCM_IMA_WAV;
+                st->codec->bits_per_coded_sample = 4;
+                st->codec->sample_rate = 8000;
+                break;
+            case(FRAME_FORMAT_AUD_ADPCM_16000):
+                st->codec->codec_id = AV_CODEC_ID_ADPCM_IMA_WAV;
+                st->codec->bits_per_coded_sample = 4;
+                st->codec->sample_rate = 16000;
+                break;
+            case(FRAME_FORMAT_AUD_L16_44100):
+                st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
+                st->codec->bits_per_coded_sample = 16;
+                st->codec->sample_rate = 44100;
+                break;
+            case(FRAME_FORMAT_AUD_ADPCM_11025):
+                st->codec->codec_id = AV_CODEC_ID_ADPCM_IMA_WAV;
+                st->codec->bits_per_coded_sample = 4;
+                st->codec->sample_rate = 11025;
+                break;
+            case(FRAME_FORMAT_AUD_ADPCM_22050):
+                st->codec->codec_id = AV_CODEC_ID_ADPCM_IMA_WAV;
+                st->codec->bits_per_coded_sample = 4;
+                st->codec->sample_rate = 22050;
+                break;
+            case(FRAME_FORMAT_AUD_ADPCM_32000):
+                st->codec->codec_id = AV_CODEC_ID_ADPCM_IMA_WAV;
+                st->codec->bits_per_coded_sample = 4;
+                st->codec->sample_rate = 32000;
+                break;
+            case(FRAME_FORMAT_AUD_ADPCM_44100):
+                st->codec->codec_id = AV_CODEC_ID_ADPCM_IMA_WAV;
+                st->codec->bits_per_coded_sample = 4;
+                st->codec->sample_rate = 44100;
+                break;
+            case(FRAME_FORMAT_AUD_ADPCM_48000):
+                st->codec->codec_id = AV_CODEC_ID_ADPCM_IMA_WAV;
+                st->codec->bits_per_coded_sample = 4;
+                st->codec->sample_rate = 48000;
+                break;
+            case(FRAME_FORMAT_AUD_L16_8000):
+                st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
+                st->codec->bits_per_coded_sample = 16;
+                st->codec->sample_rate = 8000;
+                break;
+            case(FRAME_FORMAT_AUD_L16_11025):
+                st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
+                st->codec->bits_per_coded_sample = 16;
+                st->codec->sample_rate = 11025;
+                break;
+            case(FRAME_FORMAT_AUD_L16_16000):
+                st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
+                st->codec->bits_per_coded_sample = 16;
+                st->codec->sample_rate = 16000;
+                break;
+            case(FRAME_FORMAT_AUD_L16_22050):
+                st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
+                st->codec->bits_per_coded_sample = 16;
+                st->codec->sample_rate = 22050;
+                break;
+            case(FRAME_FORMAT_AUD_L16_32000):
+                st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
+                st->codec->bits_per_coded_sample = 16;
+                st->codec->sample_rate = 32000;
+                break;
+            case(FRAME_FORMAT_AUD_L16_48000):
+                st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
+                st->codec->bits_per_coded_sample = 16;
+                st->codec->sample_rate = 48000;
+                break;
+            case(FRAME_FORMAT_AUD_L16_12000):
+                st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
+                st->codec->bits_per_coded_sample = 16;
+                st->codec->sample_rate = 12000;
+                break;
+            case(FRAME_FORMAT_AUD_L16_24000):
+                st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
+                st->codec->bits_per_coded_sample = 16;
+                st->codec->sample_rate = 24000;
+                break;
+            default:
+                st->codec->codec_id = AV_CODEC_ID_ADPCM_IMA_WAV;
+                st->codec->sample_rate = 8000;
+                break;
+        }
+        avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
+
+        if (fi->channel & 0x8000)  {
+            // Camera associated audio
+            snprintf(textbuf, sizeof(textbuf), "%d", fi->channel & ~0x8000);
+            av_dict_set(&st->metadata, "track", textbuf, 0);
+            av_dict_set(&st->metadata, "associated", "1", 0);
+        }
+        else  {
+            snprintf(textbuf, sizeof(textbuf), "%d", fi->channel);
+            av_dict_set(&st->metadata, "track", textbuf, 0);
+            av_dict_set(&st->metadata, "associated", "0", 0);
+        }
+    }
+    else  {
+        st->codec->codec_type = AVMEDIA_TYPE_DATA;
+
+        // Set timebase to 1 millisecond, and min frame rate to 1 / timebase
+        avpriv_set_pts_info(st, 32, 1, 1000);
+        st->r_frame_rate = (AVRational) { 1, 1 };
+
+        st->start_time = startT * 1000LL;
+        st->duration   = getLastFrameTime(fc, fi, &p->dispSet) - st->start_time;
+    }
+
+    return st;
+}
+
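+/* Fill an AVPacket from the frame in frameInfo: copy the payload (PBM masks
+ * are decompressed first), pick the stream by channel id, mark video
+ * I-frames as key frames, and derive pts/dts in milliseconds from
+ * imageTime/imageMS, falling back to the index time.  When side-data
+ * support is compiled in, the AD frame header and frame text are attached
+ * to the packet as well. */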
+static int createPacket(AVFormatContext *avf, AVPacket *pkt, int siz)
+{
+    PARDecContext *ctxt = avf->priv_data;
+    ParFrameInfo *fi = &ctxt->frameInfo;
+    int id = fi->channel;
+    int ii;
+    AVStream *st = NULL;
+
+#if defined(AD_SIDEDATA_IN_PRIV)
+    LibparFrameExtra *pktExt = NULL;
+    ParFrameInfo *pktFI = NULL;
+#elif defined(AD_SIDEDATA)
+    int adDataSize = 0;
+    uint8_t *sideData = NULL;
+    int textBufSize = 0;
+#endif
+
+    for(ii = 0; ii < avf->nb_streams; ii++)  {
+        if ( (NULL != avf->streams[ii]) && (avf->streams[ii]->id == id) )  {
+            st = avf->streams[ii];
+            break;
+        }
+    }
+    if (NULL == st)  {
+        st = createStream(avf);
+        if (st == NULL)
+            return -1;
+    }
+
+
+    // If frame is MPEG4 video and is the first sent then add a VOL header to it
+    if (st->codec->codec_id == AV_CODEC_ID_MPEG4)  {
+        if ((ctxt->seqStartAdded & (1<<st->index)) == 0)  {
+            if (parReader_isIFrame(fi)) {
+                if (parReader_add_start_of_sequence(fi->frameBuffer))
+                    ctxt->seqStartAdded |= 1 << st->index;
+            }
+        }
+    }
+
+    if (st->codec->codec_id == AV_CODEC_ID_PBM)  {
+        char *comment = NULL;
+        int w, h;
+        uint8_t *pbm = av_malloc(fi->size);
+        memcpy(pbm, fi->frameData, fi->size);
+        pkt->size = ad_pbmDecompress(&comment, &pbm, fi->size, pkt, &w, &h);
+        if (pkt->size > 0)  {
+            st->codec->width = w;
+            st->codec->height = h;
+        }
+        if (comment)  {
+            int camera = parReader_getCamera(fi, NULL, 0);
+            char name[128];
+            snprintf(name, sizeof(name), "Camera %u: %s", camera, comment);
+            av_dict_set(&st->metadata, "title", name, 0);
+            av_free(comment);
+        }
+    }
+    else  {
+        if ( ((uint8_t*)fi->frameData + siz) < ((uint8_t*)fi->frameBuffer + MAX_FRAMEBUFFER_SIZE) )  {
+            av_new_packet(pkt, siz);
+
+            if (NULL == pkt->data)  {
+                pkt->size = 0;
+                return AVERROR(ENOMEM);
+            }
+            memcpy(pkt->data, fi->frameData, siz);
+        }
+        else  {
+            av_log(avf, AV_LOG_ERROR, "Copying %d bytes would read beyond framebuffer end", siz);
+            return AVERROR(ENOMEM);
+        }
+    }
+    pkt->stream_index = st->index;
+
+    if (parReader_frameIsAudio(fi))  {
+        if (st->codec->codec_id == AV_CODEC_ID_ADPCM_IMA_WAV)
+            endianSwapAudioData(pkt->data, siz);
+    }
+    else if (parReader_frameIsVideo(fi))  {
+        if (parReader_isIFrame(fi))
+            pkt->flags |= AV_PKT_FLAG_KEY;
+    }
+
+    if (fi->imageTime > 0)  {
+        pkt->pts = fi->imageTime;
+        pkt->pts *= 1000ULL;
+        pkt->pts += fi->imageMS;
+    }
+    else if (fi->indexTime > 0)  {
+        pkt->pts = fi->indexTime;
+        pkt->pts *= 1000ULL;
+        pkt->pts += fi->indexMS;
+    }
+    else  {
+        pkt->pts = AV_NOPTS_VALUE;
+    }
+    pkt->dts = pkt->pts;
+    pkt->duration = 1;
+
+#if defined(AD_SIDEDATA_IN_PRIV)
+    pkt->destruct = libpar_packet_destroy;
+    pktExt = av_malloc(sizeof(LibparFrameExtra));
+    if (NULL == pktExt)  {
+        pkt->size = 0;
+        return AVERROR(ENOMEM);
+    }
+    pktExt->fileChanged = ctxt->fileChanged;
+    pktExt->indexInfoCount = parReader_getIndexInfo(fi, &pktExt->indexInfo);
+
+    pktExt->frameInfo = av_mallocz(sizeof(ParFrameInfo));
+    if (NULL == pktExt->frameInfo)  {
+        pkt->size = 0;
+        return AVERROR(ENOMEM);
+    }
+    pktFI = pktExt->frameInfo;
+    if (parReader_frameIsVideo(fi))
+        pktFI->frameBufferSize = parReader_getPicStructSize();
+    else if (parReader_frameIsAudio(fi))
+        pktFI->frameBufferSize = parReader_getAudStructSize();
+
+    if (pktFI->frameBufferSize > 0)  {
+        // Make a copy of the ParFrameInfo struct and the frame header
+        // for use by client code that knows this is here
+
+        // Save frameBufferSize as it's about to be overwritten by memcpy
+        int fbs = pktFI->frameBufferSize;
+        memcpy(pktFI, fi, sizeof(ParFrameInfo));
+        pktFI->frameBufferSize = fbs;
+        pktFI->frameBuffer = av_malloc(fbs);
+        if (NULL == pktFI->frameBuffer)  {
+            pkt->size = 0;
+            return AVERROR(ENOMEM);
+        }
+        memcpy(pktFI->frameBuffer, fi->frameBuffer, fbs);
+        pktFI->frameData = NULL;
+    }
+
+    pkt->priv = pktExt;
+#elif defined(AD_SIDEDATA)
+    if (parReader_frameIsVideo(fi))
+        adDataSize = parReader_getPicStructSize();
+    else if (parReader_frameIsAudio(fi))
+        adDataSize = parReader_getAudStructSize();
+    if (adDataSize > 0)  {
+        sideData = av_packet_new_side_data(pkt, AV_PKT_DATA_AD_FRAME, adDataSize);
+        if (sideData)
+            memcpy(sideData, fi->frameBuffer, adDataSize);
+    }
+
+    if (fi->frameText)  {
+        textBufSize = strlen(fi->frameText) + 1;
+        sideData = av_packet_new_side_data(pkt, AV_PKT_DATA_AD_TEXT, textBufSize);
+        if (sideData)
+            memcpy(sideData, fi->frameText, textBufSize);
+    }
+
+    if (ctxt->fileChanged)  {
+        int fc = 0;
+        unsigned long startT, endT, lastFT;
+
+        parReader_getIndexData(fi, NULL, &fc, &startT, &endT);
+        lastFT = getLastFrameTime(fc, fi, &ctxt->dispSet);
+        for (ii = 0; ii < avf->nb_streams; ii++)  {
+            avf->streams[ii]->start_time = fi->imageTime * 1000LL + fi->imageMS;
+            avf->streams[ii]->duration = lastFT - avf->streams[ii]->start_time;
+        }
+
+        sideData = av_packet_new_side_data(pkt, AV_PKT_DATA_AD_PARINF, sizeof(ctxt->frameInfo));
+        if (sideData)
+            memcpy(sideData, &(ctxt->frameInfo), sizeof(ctxt->frameInfo));
+        ctxt->fileChanged = 0;
+    }
+#endif
+
+    ctxt->frameCached = 0;
+
+    return 0;
+}
+
+
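+/* A PAR file begins with the little-endian 32-bit tag 0x00524150, i.e. the
+ * ASCII bytes 'P' 'A' 'R' followed by a NUL. */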
+static int par_probe(AVProbeData *p)
+{
+    uint32_t first4;
+    if (p->buf_size < 4)
+        return 0;
+
+    first4 = AV_RL32(p->buf);
+    if (first4 == 0x00524150)
+        return AVPROBE_SCORE_MAX;
+    else
+        return 0;
+}
+
+static int par_read_header(AVFormatContext * avf)
+{
+    int res, siz;
+    PARDecContext *p = avf->priv_data;
+    char **filelist;
+    int seqLen;
+    int64_t seconds = 0;
+    AVStream *strm = NULL;
+    char textbuf[128];
+
+
+    if (parReader_version(textbuf, sizeof(textbuf)) > 0)  {
+        av_log(avf, AV_LOG_INFO, "ParReader library version: %s\n", textbuf);
+        av_dict_set(&avf->metadata, "ParReader", textbuf, 0);
+    }
+
+    parReader_initFrameInfo(&p->frameInfo, MAX_FRAMEBUFFER_SIZE, av_malloc(MAX_FRAMEBUFFER_SIZE));
+    if (p->frameInfo.frameBuffer == NULL)
+        return AVERROR(ENOMEM);
+
+    switch(av_log_get_level())  {
+        case(AV_LOG_QUIET):
+            parReader_setLogLevel(-1);
+            break;
+        case(AV_LOG_PANIC):
+            parReader_setLogLevel(PARREADER_LOG_CRITICAL);
+            break;
+        case(AV_LOG_FATAL):
+            parReader_setLogLevel(PARREADER_LOG_CRITICAL);
+            break;
+        case(AV_LOG_ERROR):
+            parReader_setLogLevel(PARREADER_LOG_ERROR);
+            break;
+        case(AV_LOG_WARNING):
+            parReader_setLogLevel(PARREADER_LOG_WARNING);
+            break;
+        case(AV_LOG_INFO):
+            parReader_setLogLevel(PARREADER_LOG_INFO);
+            break;
+        case(AV_LOG_VERBOSE):
+            parReader_setLogLevel(PARREADER_LOG_INFO);
+            break;
+        case(AV_LOG_DEBUG):
+            parReader_setLogLevel(PARREADER_LOG_DEBUG);
+            break;
+    }
+    parReader_setLogCallback(parreaderLogger);
+
+    parReader_setDisplaySettingsDefaults(&p->dispSet);
+
+    res = parReader_loadParFile(NULL, avf->filename, -1, &p->frameInfo, 0);
+    if (0 == res)
+        return AVERROR(EIO);
+
+    seqLen = parReader_getFilelist(&p->frameInfo, &filelist);
+    if (1 == seqLen)  {
+        int frameNumber, frameCount;
+        unsigned long start, end;
+        if (parReader_getIndexData(&p->frameInfo, &frameNumber, &frameCount, &start, &end))
+            seconds = end - start;
+    }
+    else  {
+        int res, frameNumber, frameCount;
+        unsigned long start, end, realStart, realEnd;
+        if (parReader_getIndexData(&p->frameInfo, &frameNumber, &frameCount, &start, &end))  {
+            realStart = start;
+            av_log(avf, AV_LOG_DEBUG, "par_read_header:  %s (%d)\n", filelist[0], seqLen - 1);
+            res = parReader_loadParFile(NULL, filelist[0], seqLen - 1, &p->frameInfo, 0);
+            if (res && parReader_getIndexData(&p->frameInfo, &frameNumber, &frameCount, &start, &end))  {
+                realEnd = end;
+                seconds = realEnd - realStart;
+            }
+            av_log(avf, AV_LOG_DEBUG, "par_read_header:  %s (%d)\n", filelist[0], res);
+            res = parReader_loadParFile(NULL, filelist[0], -1, &p->frameInfo, 0);
+        }
+    }
+
+    siz = parReader_loadFrame(&p->frameInfo, &p->dispSet, &p->fileChanged);
+
+    p->frameCached = siz;
+    p->fileChanged = 1;
+    p->seqStartAdded = 0;
+
+    snprintf(textbuf, sizeof(textbuf), "%d", parReader_getUTCOffset(&p->frameInfo));
+    av_dict_set(&avf->metadata, "timezone", textbuf, 0);
+
+    strm = createStream(avf);
+//    if (strm)  {
+//        // Note: Do not set avf->start_time, ffmpeg computes it from AVStream values
+//        avf->duration = av_rescale_q(seconds, secondsTB, strm->time_base);
+//    }
+
+    avf->ctx_flags |= AVFMTCTX_NOHEADER;
+
+    return 0;
+}
+
+static int par_read_packet(AVFormatContext * avf, AVPacket * pkt)
+{
+    PARDecContext *p = avf->priv_data;
+    int siz = 0;
+
+    if (p->frameCached)  {
+        siz = p->frameCached;
+    }
+    else
+        siz = parReader_loadFrame(&p->frameInfo, &p->dispSet, &p->fileChanged);
+
+    if (siz < 0)  {
+        p->frameCached = 0;
+        p->fileChanged = 0;
+        pkt->size = 0;
+        return AVERROR_EOF;
+    }
+
+    if (p->fileChanged)
+        parReader_getFilename(&p->frameInfo, avf->filename, sizeof(avf->filename));
+
+    if ( (siz == 0) || (NULL == p->frameInfo.frameData) )  {
+        p->frameCached = 0;
+        return AVERROR(EAGAIN);
+    }
+
+    return createPacket(avf, pkt, siz);
+}
+
+static int par_read_seek(AVFormatContext *avf, int stream,
+                         int64_t target, int flags)
+{
+    PARDecContext *p = avf->priv_data;
+    int siz = 0;
+    int streamId = 0;
+    int isKeyFrame = 0;
+    int step;
+    int anyStreamWillDo = 0;
+    int prevPlayMode, prevLock;
+
+    av_log(avf, AV_LOG_DEBUG, "par_read_seek target    = %"PRId64"\n",
target);
+
+    if ((stream < 0) || (stream >= avf->nb_streams))  {
+        anyStreamWillDo = 1;
+        streamId = avf->streams[0]->id;
+    }
+    else
+        streamId = avf->streams[stream]->id;
+
+    prevPlayMode = p->dispSet.playMode;
+    prevLock = p->dispSet.fileLock;
+
+    p->seqStartAdded = 0;
+    p->dispSet.cameraNum = streamId;
+    if (flags & AVSEEK_FLAG_BACKWARD)
+        p->dispSet.playMode = RWND;
+
+    if ( (flags & AVSEEK_FLAG_FRAME) && (target < 0) )   {
+        p->dispSet.fileSeqNo = (-target) - 1;
+        p->dispSet.frameNumber = 0;
+    }
+    else  {
+        p->dispSet.fileSeqNo = -1;
+
+        if (flags & AVSEEK_FLAG_FRAME)  {
+            // Don't seek beyond the file
+            p->dispSet.fileLock = 1;
+            p->dispSet.frameNumber = target;
+        }
+        else  {
+            p->dispSet.timestamp = target / 1000LL;
+            p->dispSet.millisecs = target % 1000;
+        }
+    }
+
+    do  {
+        siz = parReader_loadFrame(&p->frameInfo, &p->dispSet, &p->fileChanged);
+
+        // If this frame is not acceptable we want to just iterate through
+        // until we find one that is, not seek again, so reset targets
+        p->dispSet.frameNumber = -1;
+        p->dispSet.timestamp = 0;
+        p->dispSet.millisecs = 0;
+
+        if (siz < 0)
+            break;
+
+        if (parReader_frameIsVideo(&p->frameInfo))  {
+            if (flags & AVSEEK_FLAG_ANY)
+                isKeyFrame = 1;
+            else
+                isKeyFrame = parReader_isIFrame(&p->frameInfo);
+        }
+        else  {
+            // Always seek to a video frame
+            isKeyFrame = 0;
+        }
+
+        // If we don't care which stream then force the streamId to match
+        if (anyStreamWillDo)
+            streamId = p->frameInfo.channel;
+    }
+    while ( (streamId != p->frameInfo.channel) || (0 == isKeyFrame) );
+
+    p->dispSet.fileLock = prevLock;
+    p->dispSet.playMode = prevPlayMode;
+
+    if (siz > 0)  {
+        p->frameCached = siz;
+        for(step = 0; step < avf->nb_streams; step++)  {
+            if ( (NULL != avf->streams[step]) && (avf->streams[step]->id == streamId) )  {
+                avf->streams[step]->codec->frame_number = p->frameInfo.frameNumber;
+                break;
+            }
+        }
+        av_log(avf, AV_LOG_DEBUG, "par_read_seek seek done = %lu\n",
p->frameInfo.imageTime);
+        return p->frameInfo.imageTime;
+    }
+    else  {
+        av_log(avf, AV_LOG_DEBUG, "par_read_seek seek failed\n");
+        return -1;
+    }
+}
+
+static int par_read_close(AVFormatContext * avf)
+{
+    PARDecContext *p = avf->priv_data;
+    av_log(avf, AV_LOG_DEBUG, "par_read_close");
+    av_free(p->frameInfo.frameBuffer);
+    parReader_closeParFile(&p->frameInfo);
+    return 0;
+}
+
+
+AVOutputFormat ff_libparreader_muxer = {
+    .name           = "libpar",
+    .long_name      = NULL_IF_CONFIG_SMALL("AD-Holdings PAR format"),
+    .mime_type      = "video/adhbinary",
+    .extensions     = "par",
+    .priv_data_size = sizeof(PAREncContext),
+    .audio_codec    = AV_CODEC_ID_ADPCM_IMA_WAV,
+    .video_codec    = AV_CODEC_ID_MJPEG,
+    .write_header   = par_write_header,
+    .write_packet   = par_write_packet,
+    .write_trailer  = par_write_trailer,
+    .flags          = AVFMT_GLOBALHEADER,
+};
+
+AVInputFormat ff_libparreader_demuxer = {
+    .name           = "libpar",
+    .long_name      = NULL_IF_CONFIG_SMALL("AD-Holdings PAR format"),
+    .priv_data_size = sizeof(PARDecContext),
+    .read_probe     = par_probe,
+    .read_header    = par_read_header,
+    .read_packet    = par_read_packet,
+    .read_close     = par_read_close,
+    .read_seek      = par_read_seek,
+    .flags          = AVFMT_TS_DISCONT | AVFMT_VARIABLE_FPS | AVFMT_NO_BYTE_SEEK,
+};
diff --git a/libavformat/libpar.h b/libavformat/libpar.h
new file mode 100644
index 0000000000..2d958727ba
--- /dev/null
+++ b/libavformat/libpar.h
@@ -0,0 +1,40 @@
+/*
+ * copyright (c) 2010 AD-Holdings plc
+ * Modified for FFmpeg by Tom Needham <06needhamt at gmail.com> (2018)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFORMAT_LIBPAR_H
+#define AVFORMAT_LIBPAR_H
+
+#include "avformat.h"
+
+
+#ifdef AD_SIDEDATA_IN_PRIV
+#include <parreader_types.h>
+
+typedef struct {
+    int indexInfoCount;
+    ParFrameInfo *frameInfo;
+    ParKeyValuePair *indexInfo;
+    int fileChanged;
+} LibparFrameExtra;
+
+#endif // AD_SIDEDATA_IN_PRIV
+
+#endif /* AVFORMAT_LIBPAR_H */
diff --git a/libavformat/netvu.c b/libavformat/netvu.c
new file mode 100644
index 0000000000..fdac338f21
--- /dev/null
+++ b/libavformat/netvu.c
@@ -0,0 +1,214 @@
+/*
+ * Netvu protocol for ffmpeg client
+ * Copyright (c) 2010 AD-Holdings plc
+ * Modified for FFmpeg by Tom Needham <06needhamt at gmail.com> (2018)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <ctype.h>
+#include <string.h>
+#include <strings.h>
+
+#include "avformat.h"
+#include "internal.h"
+#include "http.h"
+#include "netvu.h"
+#include "libavutil/avstring.h"
+
+
+static void copy_value_to_field(const char *value, char **dest)
+{
+    int len = strlen(value) + 1;
+    if (*dest != NULL)
+        av_free(*dest);
+
+    *dest = av_malloc(len);
+    if (*dest)  {
+        av_strlcpy(*dest, value, len);
+    }
+}
+
+static void netvu_parse_content_type_header(char * p, NetvuContext *nv)
+{
+    int finishedContentHeader = 0;
+    char *  name  = NULL;
+    char *  value = NULL;
+
+    // Strip the content-type from the header
+    value = p;
+    while((*p != ';') && (*p != '\0'))
+        p++;
+
+    if(*p == '\0')
+        finishedContentHeader = 1;
+
+    *p = '\0';
+    p++;
+    copy_value_to_field( value, &nv->hdrs[NETVU_CONTENT] );
+
+    while( *p != '\0' && finishedContentHeader != 1)  {
+        while(isspace(*p))
+            p++; // Skip whitespace
+        name = p;
+
+        // Now we get attributes in <name>=<value> pairs
+        while (*p != '\0' && *p != '=')
+            p++;
+
+        if (*p != '=')
+            return;
+
+        *p = '\0';
+        p++;
+
+        value = p;
+
+        while (*p != '\0' && *p != ';')
+            p++;
+
+        if (*p == ';')  {
+            *p = '\0';
+            p++;
+        }
+
+        // Strip any surrounding quotes off the value
+        if( strlen(value) > 0 )  {
+            int ii;
+
+            if( *value == '"' && *(value + strlen(value) - 1) == '"' )  {
+                *(value + strlen(value) - 1) = '\0';
+                value += 1;
+            }
+
+            // Copy the attribute into the relevant field
+            for(ii = 0; ii < NETVU_MAX_HEADERS; ii++)  {
+                if( av_strcasecmp(name, nv->hdrNames[ii] ) == 0 )
+                    copy_value_to_field(value, &nv->hdrs[ii]);
+            }
+            if(av_strcasecmp(name, "utc_offset") == 0)
+                nv->utc_offset = atoi(value);
+        }
+    }
+}
+
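+/* Each HTTP response header line is split at the first ':'; the Server and
+ * Content-type headers are stored, and Content-type is parsed further for
+ * ';'-separated name=value attributes (resolution, compression, rate, pps,
+ * site_id, boundary, utc_offset), which are copied into the matching
+ * NetvuContext fields. */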
+static void processLine(char *line, NetvuContext *nv)
+{
+    char *p = line;
+    char *tag;
+
+    while (*p != '\0' && *p != ':')
+        p++;
+    if (*p != ':')
+        return;
+
+    *p = '\0';
+    tag = line;
+    p++;
+    while (isspace(*p))
+        p++;
+
+    if (!av_strcasecmp (tag, nv->hdrNames[NETVU_CONTENT]))
+        netvu_parse_content_type_header(p, nv);
+    else if(!av_strcasecmp(tag, nv->hdrNames[NETVU_SERVER]))
+        copy_value_to_field( p, &nv->hdrs[NETVU_SERVER]);
+}
+
+static int netvu_open(URLContext *h, const char *uri, int flags)
+{
+    char hostname[1024], auth[1024], path[1024], http[1024];
+    int port, err;
+    NetvuContext *nv = h->priv_data;
+
+    nv->hdrNames[NETVU_SERVER]      = "Server";
+    nv->hdrNames[NETVU_CONTENT]     = "Content-type";
+    nv->hdrNames[NETVU_RESOLUTION]  = "resolution";
+    nv->hdrNames[NETVU_COMPRESSION] = "compression";
+    nv->hdrNames[NETVU_RATE]        = "rate";
+    nv->hdrNames[NETVU_PPS]         = "pps";
+    nv->hdrNames[NETVU_SITE_ID]     = "site_id";
+    nv->hdrNames[NETVU_BOUNDARY]    = "boundary";
+    // Set utc_offset to an invalid value so that if the server doesn't set it we can ignore it
+    nv->utc_offset                  = 1441;
+
+    h->is_streamed = 1;
+
+    av_url_split(NULL, 0, auth, sizeof(auth), hostname, sizeof(hostname), &port,
+                 path, sizeof(path), uri);
+    if (port < 0)
+        port = 80;
+    ff_url_join(http, sizeof(http), "http", auth, hostname, port, "%s", path);
+
+    err = ffurl_open(&nv->hd, http, AVIO_FLAG_READ, &h->interrupt_callback, NULL);
+    if (err >= 0)  {
+        char headers[1024];
+        char *startOfLine = &headers[0];
+        int ii;
+
+        size_t hdrSize = ff_http_get_headers(nv->hd, headers, sizeof(headers));
+        if (hdrSize > 0)  {
+            for (ii = 0; ii < hdrSize; ii++)  {
+                if (headers[ii] == '\n')  {
+                    headers[ii] = '\0';
+                    processLine(startOfLine, nv);
+                    startOfLine = &headers[ii+1];
+                }
+            }
+        }
+        return 0;
+    }
+    else  {
+        if (nv->hd)
+            ffurl_close(nv->hd);
+        nv->hd = NULL;
+        return AVERROR(EIO);
+    }
+}
+
+static int netvu_read(URLContext *h, uint8_t *buf, int size)
+{
+    NetvuContext *nv = h->priv_data;
+    if (nv->hd)
+        return ffurl_read(nv->hd, buf, size);
+    return AVERROR_PROTOCOL_NOT_FOUND;
+}
+
+static int netvu_close(URLContext *h)
+{
+    NetvuContext *nv = h->priv_data;
+    int i, ret = 0;
+
+    if (nv->hd)
+        ret = ffurl_close(nv->hd);
+
+    for (i = 0; i < NETVU_MAX_HEADERS; i++)  {
+        if (nv->hdrs[i])
+            av_free(nv->hdrs[i]);
+    }
+
+    return ret;
+}
+
+
+URLProtocol ff_netvu_protocol = {
+    .name               = "netvu",
+    .url_open            = netvu_open,
+    .url_read            = netvu_read,
+    .url_close           = netvu_close,
+    .priv_data_size      = sizeof(NetvuContext),
+    .flags               = URL_PROTOCOL_FLAG_NETWORK,
+};
diff --git a/libavformat/netvu.h b/libavformat/netvu.h
new file mode 100644
index 0000000000..4dd72a6e19
--- /dev/null
+++ b/libavformat/netvu.h
@@ -0,0 +1,21 @@
+#include "avformat.h"
+
+enum NetvuHeaders { NETVU_SERVER = 0,
+                    NETVU_CONTENT,
+                    NETVU_RESOLUTION,
+                    NETVU_COMPRESSION,
+                    NETVU_RATE,
+                    NETVU_PPS,
+                    NETVU_SITE_ID,
+                    NETVU_BOUNDARY,
+                    NETVU_MAX_HEADERS
+                    };
+
+typedef struct {
+    const AVClass *class;
+    URLContext *hd;
+
+    char* hdrs[NETVU_MAX_HEADERS];
+    const char* hdrNames[NETVU_MAX_HEADERS];
+    int utc_offset;
+} NetvuContext;
+
+#endif /* AVFORMAT_NETVU_H */
diff --git a/libavformat/version.h b/libavformat/version.h
index 39b00f62ab..22ed534bfb 100644
--- a/libavformat/version.h
+++ b/libavformat/version.h
@@ -32,8 +32,8 @@
 // Major bumping may affect Ticket5467, 5421, 5451(compatibility with Chromium)
 // Also please add any ticket numbers that you believe might be affected here
 #define LIBAVFORMAT_VERSION_MAJOR  58
-#define LIBAVFORMAT_VERSION_MINOR  28
-#define LIBAVFORMAT_VERSION_MICRO 101
+#define LIBAVFORMAT_VERSION_MINOR  29
+#define LIBAVFORMAT_VERSION_MICRO 100

 #define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \
                                                LIBAVFORMAT_VERSION_MINOR, \
-- 
2.21.0.windows.1




