[FFmpeg-devel] Output libavdevice for Blackmagic DeckLink card
Amnon Israely
amnonbb at gmail.com
Mon Dec 9 13:23:46 CET 2013
Hi.
This patch was posted three years ago and it still works.
However, I need more buffering before playback starts.
If I increase the NUM_PREROLL value above 20, A/V sync is lost.
Can someone please take a look at what needs to be changed or added to solve this problem?
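
For reference, here is a minimal sketch of how the preroll depth could be inspected before playback starts, assuming the SDK's IDeckLinkOutput::GetBufferedVideoFrameCount() and IDeckLinkOutput::GetBufferedAudioSampleFrameCount() methods (please verify them against your SDK version). The helper name is hypothetical and not part of the patch below:

    /* Hypothetical helper: log how far the video and audio queues have been
     * prerolled, so a mismatch between the two (a likely suspect when A/V
     * sync is lost) becomes visible. */
    static void log_preroll_depth(AVFormatContext *s, IDeckLinkOutput *out,
                                  BMDTimeValue frame_duration,
                                  BMDTimeScale frame_timescale)
    {
        uint32_t video_frames = 0, audio_samples = 0;

        if (out->GetBufferedVideoFrameCount(&video_frames) == S_OK &&
            out->GetBufferedAudioSampleFrameCount(&audio_samples) == S_OK) {
            double video_sec = video_frames * (double)frame_duration / frame_timescale;
            double audio_sec = audio_samples / 48000.0;
            av_log(s, AV_LOG_INFO,
                   "prerolled %u video frames (%.3fs) and %u audio samples (%.3fs)\n",
                   video_frames, video_sec, audio_samples, audio_sec);
        }
    }

Calling something like this right before StartScheduledPlayback() in decklink_write_packet() should show whether the audio preroll keeps pace with the video when NUM_PREROLL is raised.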
-------------- next part --------------
From fa094a3a47c52d022be14aaf544ae9aa509cc42c Mon Sep 17 00:00:00 2001
From: Deron Kazmaier <deron at pagestream.org>
Date: Fri, 24 Feb 2012 10:37:23 -0700
Subject: [PATCH] Add output libavdevice for Blackmagic DeckLink card.
---
configure | 7 +
libavdevice/Makefile | 1 +
libavdevice/alldevices.c | 1 +
libavdevice/decklink_enc.cpp | 720 ++++++++++++++++++++++++++++++++++++++++++
4 files changed, 729 insertions(+), 0 deletions(-)
create mode 100644 libavdevice/decklink_enc.cpp
diff --git a/configure b/configure
index 7fdf0c0..4bcd07a 100755
--- a/configure
+++ b/configure
@@ -167,6 +167,7 @@ Configuration options:
External library support:
--enable-avisynth enable reading of AVISynth script files [no]
--enable-bzlib enable bzlib [autodetect]
+ --enable-decklink enable DeckLink output [no]
--enable-frei0r enable frei0r video filtering
--enable-gnutls enable gnutls [no]
--enable-libaacplus enable AAC+ encoding via libaacplus [no]
@@ -1012,6 +1013,7 @@ CONFIG_LIST="
bzlib
crystalhd
dct
+ decklink
doc
dwt
dxva2
@@ -1609,6 +1611,8 @@ w64_demuxer_deps="wav_demuxer"
alsa_indev_deps="alsa_asoundlib_h snd_pcm_htimestamp"
alsa_outdev_deps="alsa_asoundlib_h"
bktr_indev_deps_any="dev_bktr_ioctl_bt848_h machine_ioctl_bt848_h dev_video_bktr_ioctl_bt848_h dev_ic_bt8xx_h"
+decklink_outdev_deps="decklink"
+decklink_outdev_extralibs="-lstdc++"
dshow_indev_deps="IBaseFilter"
dshow_indev_extralibs="-lpsapi -lole32 -lstrmiids -luuid"
dv1394_indev_deps="dv1394 dv_demuxer"
@@ -3188,6 +3192,8 @@ enabled openssl && { check_lib openssl/ssl.h SSL_library_init -lssl -lcrypto
check_lib openssl/ssl.h SSL_library_init -lssl32 -leay32 ||
check_lib openssl/ssl.h SSL_library_init -lssl -lcrypto -lws2_32 -lgdi32 ||
die "ERROR: openssl not found"; }
+#enabled decklink && { check_header DeckLinkAPI.h && check_header DeckLinkAPIDispatch.cpp || die "ERROR: No version of DeckLinkAPI.h found."; }
+
# libdc1394 check
if enabled libdc1394; then
@@ -3508,6 +3514,7 @@ echo "new filter support ${avfilter-no}"
echo "network support ${network-no}"
echo "threading support ${thread_type-no}"
echo "safe bitstream reader ${safe_bitstream_reader-no}"
+echo "DeckLink support ${decklink-no}"
echo "SDL support ${sdl-no}"
echo "libdxva2 enabled ${dxva2-no}"
echo "libva enabled ${vaapi-no}"
diff --git a/libavdevice/Makefile b/libavdevice/Makefile
index d7806ea..8131684 100644
--- a/libavdevice/Makefile
+++ b/libavdevice/Makefile
@@ -14,6 +14,7 @@ OBJS-$(CONFIG_ALSA_INDEV) += alsa-audio-common.o \
OBJS-$(CONFIG_ALSA_OUTDEV) += alsa-audio-common.o \
alsa-audio-enc.o
OBJS-$(CONFIG_BKTR_INDEV) += bktr.o
+OBJS-$(CONFIG_DECKLINK_OUTDEV) += decklink_enc.o
OBJS-$(CONFIG_DSHOW_INDEV) += dshow.o dshow_enummediatypes.o \
dshow_enumpins.o dshow_filter.o \
dshow_pin.o dshow_common.o
diff --git a/libavdevice/alldevices.c b/libavdevice/alldevices.c
index 86ebfee..a75d8e5 100644
--- a/libavdevice/alldevices.c
+++ b/libavdevice/alldevices.c
@@ -41,6 +41,7 @@ void avdevice_register_all(void)
REGISTER_INOUTDEV (ALSA, alsa);
REGISTER_INDEV (BKTR, bktr);
REGISTER_INDEV (DSHOW, dshow);
+ REGISTER_OUTDEV (DECKLINK, decklink);
REGISTER_INDEV (DV1394, dv1394);
REGISTER_INDEV (FBDEV, fbdev);
REGISTER_INDEV (JACK, jack);
diff --git a/libavdevice/decklink_enc.cpp b/libavdevice/decklink_enc.cpp
new file mode 100644
index 0000000..034d3cf
--- /dev/null
+++ b/libavdevice/decklink_enc.cpp
@@ -0,0 +1,720 @@
+/*
+ * Blackmagic DeckLink output device
+ * Copyright (c) 2012 Deron Kazmaier
+ * This code was created with help from the sdl and alsa devices,
+ * DeckLink examples, and a generic player written by Georg Lippitsch.
+ * Credit where credit due.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Blackmagic DeckLink output device
+ */
+
+/*
+ * Examples:
+ * ffmpeg -i INPUT -f decklink -
+ * ffmpeg -i INPUT -vcodec rawvideo -pix_fmt uyvy422 -f decklink -device 0 -
+ * ffmpeg -i "BLUE LAGOON PROMO ProRes Lite 71 Mbps.mov" -f lavfi
+ *   -i "amovie=BLUE LAGOON PROMO ProRes Lite 71 Mbps.mov, volume=-12dB"
+ *   -vcodec rawvideo -pix_fmt uyvy422 -ac 2 -ar 48000 -f decklink -device 0 -
+ * Ugh. What needs to be done to apply audio filters without this mess?
+ *
+ */
+
+#include "DeckLinkAPI.h"
+#include "DeckLinkAPIDispatch.cpp"
+#include "pthread.h"
+
+extern "C" {
+#include "libavutil/avstring.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+#include "libswscale/swscale.h"
+#include "avdevice.h"
+}
+
+/*
+ * This needs to be the exact string the device uses to name the display
+ * mode. It would be better to select based on width/height/fps; a sketch
+ * of that follows the defines below.
+ */
+#define DISPLAY_MODE "HD 720p 59.94" //"HD 1080i 59.94"
+#define NUM_PREROLL 20
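+
+/*
+ * Illustrative sketch (not part of the original patch) of matching a display
+ * mode by width, height and frame rate instead of by name.  "width", "height"
+ * and "fps" stand for the video stream's dimensions and frame rate (e.g.
+ * vst->avg_frame_rate); the loop would replace the
+ * strcmp(mode_name, DISPLAY_MODE) test in decklink_write_header():
+ *
+ * while (dldmi->Next(&dldm) == S_OK) {
+ *     BMDTimeValue dur;
+ *     BMDTimeScale scale;
+ *     dldm->GetFrameRate(&dur, &scale);
+ *     if (dldm->GetWidth()  == width &&
+ *         dldm->GetHeight() == height &&
+ *         (int64_t)scale * fps.den == (int64_t)dur * fps.num)
+ *         decklink->display_mode = dldm->GetDisplayMode();
+ *     dldm->Release();
+ * }
+ */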
+
+typedef struct {
+ AVClass *avclass;
+ AVFormatContext *s;
+ SwsContext* sws;
+ int video_stream_index;
+ int audio_stream_index;
+ int audio_channels;
+
+ IDeckLinkOutput *out;
+ class DeckLinkVideoOutputCallback *outputcallback;
+ int device;
+ BMDDisplayMode display_mode;
+ enum PixelFormat frame_pixfmt;
+ BMDPixelFormat frame_bmdpixfmt;
+ int frame_width;
+ int frame_height;
+ BMDTimeValue frame_duration;
+ BMDTimeScale frame_timescale;
+ bool playing_started;
+
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ IDeckLinkMutableVideoFrame *frame[NUM_PREROLL];
+ int frame_number;
+ int frames_buffered;
+} DeckLinkContext;
+
+/*
+ * This is not currently used. Right now, all output is done in
+ * bmdFormat8BitYUV. It would be nice to either auto-select the closest
+ * format or allow the user to select the intended format, e.g.
+ * -pix_fmt auto or -pix_fmt uyvy422.
+ */
+static const struct decklink_bmd_pix_fmt_entry {
+ enum PixelFormat pix_fmt; BMDPixelFormat bmd_fmt;
+} decklink_bmd_pix_fmt_map[] = {
+ { PIX_FMT_UYVY422, bmdFormat8BitYUV }, /* 'UYVY' 4:2:2 representation */
+ { PIX_FMT_ARGB, bmdFormat8BitARGB }, /* ARGB (or RGB32) 4:4:4:x raw */
+ { PIX_FMT_BGRA, bmdFormat8BitBGRA }, /* BGRA 4:4:4:x raw */
+ { PIX_FMT_NONE, bmdFormat10BitYUV }, /* 'v210' 4:2:2 representation */
+ { PIX_FMT_NONE, bmdFormat10BitRGB }, /* 'r210' 4:4:4 raw */
+ { PIX_FMT_NONE, 0 },
+};
+
+
+class DeckLinkVideoOutputCallback : public IDeckLinkVideoOutputCallback
+{
+private:
+ DeckLinkContext *m_decklink;
+
+public:
+ DeckLinkVideoOutputCallback(DeckLinkContext *decklink)
+ {
+ m_decklink = decklink;
+ }
+
+ HRESULT ScheduledFrameCompleted(IDeckLinkVideoFrame *frame,
+ BMDOutputFrameCompletionResult result)
+ {
+ //av_log(m_decklink->s, AV_LOG_DEBUG, "Frame completed.\n");
+
+ switch (result)
+ {
+ case bmdOutputFrameCompleted:
+ case bmdOutputFrameFlushed:
+ break;
+ case bmdOutputFrameDropped:
+ av_log(m_decklink->s, AV_LOG_ERROR, "Frame dropped.\n");
+ break;
+ case bmdOutputFrameDisplayedLate:
+ av_log(m_decklink->s, AV_LOG_ERROR, "Frame late.\n");
+ break;
+ }
+
+ pthread_mutex_lock(&m_decklink->mutex);
+ (m_decklink->frames_buffered)--;
+ pthread_cond_signal(&m_decklink->cond);
+ pthread_mutex_unlock(&m_decklink->mutex);
+
+ return S_OK;
+ }
+
+ HRESULT ScheduledPlaybackHasStopped()
+ {
+ return S_OK;
+ }
+
+ HRESULT QueryInterface(REFIID iid, LPVOID *ppv)
+ {
+ return E_NOINTERFACE;
+ }
+
+ // AddRef,Release not needed
+ ULONG AddRef()
+ {
+ return 1;
+ }
+
+ ULONG Release()
+ {
+ return 0;
+ }
+};
+
+
+static int decklink_write_trailer(AVFormatContext *s)
+{
+ DeckLinkContext *decklink = (DeckLinkContext *)s->priv_data;
+ int i;
+
+ if (decklink->sws) sws_freeContext(decklink->sws);
+
+ // Stop playing, wait for scheduled frames
+ if (decklink->playing_started)
+ decklink->out->StopScheduledPlayback(
+ decklink->frame_number * decklink->frame_duration,
+ NULL,
+ decklink->frame_timescale);
+
+ if (decklink->frames_buffered)
+ {
+ pthread_mutex_lock(&decklink->mutex);
+ while (decklink->frames_buffered) {
+ pthread_cond_wait(&decklink->cond, &decklink->mutex);
+ }
+ pthread_mutex_unlock(&decklink->mutex);
+ }
+
+ // Release the cache frames.
+ for (i = 0; i < NUM_PREROLL; i++) {
+ if (decklink->frame[i]) decklink->frame[i]->Release();
+ decklink->frame[i] = NULL;
+ }
+
+ if (decklink->out) {
+ decklink->out->DisableAudioOutput();
+ decklink->out->DisableVideoOutput();
+ decklink->out->Release();
+ }
+
+ /*
+ * Does decklink->outputcallback need to be released?
+ * Not a big C++ fan... (a cleanup sketch follows below)
+ */
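+
+ /*
+ * Illustrative sketch (not part of the original patch): since the callback
+ * stubs out AddRef()/Release(), one way to clean up is to delete it here
+ * together with the pthread objects created in decklink_write_header():
+ *
+ * delete decklink->outputcallback;
+ * decklink->outputcallback = NULL;
+ * pthread_cond_destroy(&decklink->cond);
+ * pthread_mutex_destroy(&decklink->mutex);
+ */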
+
+ return 0;
+}
+
+
+static int decklink_write_header(AVFormatContext *s)
+{
+ IDeckLinkDisplayModeIterator *dldmi;
+ IDeckLinkDisplayMode *dldm;
+ IDeckLinkIterator *dli;
+ IDeckLink *dl;
+ DeckLinkContext *decklink = (DeckLinkContext *)s->priv_data;
+ AVStream *vst;
+ AVCodecContext *vencctx, *aencctx;
+ const char *mode_name;
+ float sar, dar;
+ int i, ret;
+
+ decklink->s = s;
+ pthread_mutex_init(&decklink->mutex, NULL);
+ pthread_cond_init(&decklink->cond, NULL);
+ decklink->outputcallback = new DeckLinkVideoOutputCallback(decklink);
+
+ /*
+ * Find the best video and audio stream.
+ * It is possible to output just video, so maybe
+ * future revision will allow that if anyone
+ * can see a reason why that should be allowed...
+ */
+ ret = av_find_best_stream(s, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input.\n");
+ goto fail;
+ }
+ decklink->video_stream_index = ret;
+ vst = s->streams[decklink->video_stream_index];
+ vencctx = vst->codec;
+
+ ret = av_find_best_stream(s, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot find an audio stream in the input.\n");
+ goto fail;
+ }
+ decklink->audio_stream_index = ret;
+
+ /*
+ * Is it even possible to get something besides RAWVIDEO, since that
+ * is what is requested in the AVOutputFormat structure?
+ */
+ if (vencctx->codec_id != CODEC_ID_RAWVIDEO) {
+ av_log(s, AV_LOG_ERROR, "Only supports rawvideo stream\n");
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ aencctx = s->streams[decklink->audio_stream_index]->codec;
+
+ /*
+ * This part should be replaced with code that forces the audio stream
+ * into 48 kHz, 2/8/16 channel output. It is a pain to always have to
+ * force streams on the command line to match the required output.
+ */
+
+ /*
+ * Is it even possible to get something besides PCM_S16LE, since that
+ * is what is requested in the AVOutputFormat structure?
+ */
+ if (aencctx->codec_id != CODEC_ID_PCM_S16LE) {
+ av_log(s, AV_LOG_ERROR, "Only supports s16le audio stream\n");
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ if (aencctx->sample_rate != 48000) {
+ av_log(s, AV_LOG_ERROR, "Only supports 48kHz audio stream\n");
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ if ((aencctx->channels != 2) &&
+ (aencctx->channels != 8) &&
+ (aencctx->channels != 16)) {
+ av_log(s, AV_LOG_ERROR,
+ "Only supports 2, 8, or 16 channel audio stream\n");
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ decklink->audio_channels = aencctx->channels;
+
+/*
+ This code should either allow the user to select a specific bmdFormat, or
+ "auto", where the best/closest output bmdFormat is picked based on
+ vencctx->pix_fmt.
+
+ for (i = 0; decklink_bmd_pix_fmt_map[i].pix_fmt != PIX_FMT_NONE; i++) {
+     if (decklink_bmd_pix_fmt_map[i].pix_fmt == vencctx->pix_fmt) {
+         decklink->frame_bmdpixfmt = decklink_bmd_pix_fmt_map[i].bmd_fmt;
+         decklink->frame_pixfmt    = decklink_bmd_pix_fmt_map[i].pix_fmt;
+         break;
+     }
+ }
+
+ if (!decklink->frame_bmdpixfmt) {
+     av_log(s, AV_LOG_ERROR,
+            "Unsupported pixel format '%s', choose one of [enum list?].\n",
+            av_get_pix_fmt_name(vencctx->pix_fmt));
+     ret = AVERROR(EINVAL);
+     goto fail;
+ }
+*/
+
+ decklink->frame_bmdpixfmt = bmdFormat8BitYUV;
+ decklink->frame_pixfmt = PIX_FMT_UYVY422;
+
+ /*
+ * Initializes the DeckLink card: loads the library and creates the iterator
+ * used to walk through the list of attached DeckLink "devices".
+ * "Port" might be a better word, but "devices" is what is used in
+ * many DeckLink examples.
+ */
+ dli = CreateDeckLinkIteratorInstance();
+ if (!dli)
+ {
+ av_log(s, AV_LOG_ERROR,
+ "Error opening Decklink driver.\n");
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ /*
+ * Select the output device based on the command-line option
+ * -device (0...n).
+ * Some DeckLink cards have as many as 4 outputs, and outputs are
+ * numbered starting from the lowest PCIe slot.
+ */
+ dl = NULL;
+ i = 0;
+ while (dli->Next(&dl) == S_OK)
+ {
+ if (i == decklink->device)
+ {
+ if (dl->QueryInterface(
+ IID_IDeckLinkOutput,
+ (void**)&decklink->out)
+ != S_OK)
+ {
+ dl->Release();
+ dli->Release();
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ }
+
+ dl->Release();
+ i++;
+ }
+ dli->Release();
+
+ if (!decklink->out)
+ {
+ av_log(s, AV_LOG_ERROR,
+ "Unable to initialize Decklink device %d\n", decklink->device);
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ /*
+ * Get the display mode iterator to find the list of modes this device
+ * supports. Not every device will support all display modes.
+ */
+
+ if (decklink->out->GetDisplayModeIterator(&dldmi) != S_OK)
+ {
+ av_log(s, AV_LOG_ERROR, "Error retrieving display mode iterator\n");
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ dldm = NULL;
+ while (dldmi->Next(&dldm) == S_OK)
+ {
+ dldm->GetName(&mode_name);
+ /* DISPLAY_MODE needs to be configurable */
+ if (strcmp(mode_name, DISPLAY_MODE) == 0)
+ {
+ decklink->display_mode = dldm->GetDisplayMode();
+ dldm->GetFrameRate(
+ &decklink->frame_duration,
+ &decklink->frame_timescale);
+ decklink->frame_width = dldm->GetWidth();
+ decklink->frame_height = dldm->GetHeight();
+ }
+ av_free((void *)mode_name); /* this was free, but free is !defined! */
+ dldm->Release();
+ }
+ dldmi->Release();
+ if (!decklink->display_mode)
+ {
+ av_log(s, AV_LOG_ERROR, "Error retrieving display mode\n");
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ /*
+ * Create an array of frames. For now, using the standard decklink frame.
+ * It may be necessary to create a buffer of our own for unscheduled frames
+ * depending on potential timing problems.
+ */
+ for (i = 0; i < NUM_PREROLL; i++)
+ {
+ if ((ret = decklink->out->CreateVideoFrame(
+ decklink->frame_width,
+ decklink->frame_height,
+ decklink->frame_width * 2,
+ decklink->frame_bmdpixfmt,
+ bmdFrameFlagDefault,
+ &decklink->frame[i])) != S_OK)
+ {
+ av_log(s, AV_LOG_ERROR, "Error %X creating video frame %d\n", ret, i);
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ }
+
+ /*
+ * Compute the overlay width and height from the codec context information.
+ * This is unused at this point. The code should scale the frames up or
+ * down as necessary so that the picture fits with no loss and the aspect
+ * ratio is maintained. How is this best done?
+ */
+ sar = vst->sample_aspect_ratio.num ? av_q2d(vst->sample_aspect_ratio) : 1;
+ dar = sar * (float)vencctx->width / (float)vencctx->height;
+
+/*
+ decklink->overlay_height = vencctx->height;
+ decklink->overlay_width  = (int)rint(decklink->overlay_height * dar);
+ if (decklink->overlay_width > vencctx->width) {
+     decklink->overlay_width  = vencctx->width;
+     decklink->overlay_height = (int)rint(decklink->overlay_width / dar);
+ }
+*/
+
+ /*
+ * It seems that this private data is zeroed on creation. If
+ * that is true, then these are obviously not necessary.
+ */
+ decklink->sws = NULL;
+ decklink->frame_number = 0;
+ decklink->frames_buffered = 0;
+ decklink->playing_started = false;
+
+ if (decklink->out->SetScheduledFrameCompletionCallback(
+ decklink->outputcallback) != S_OK)
+ {
+ av_log(s, AV_LOG_ERROR, "Error failed to set frame completion callback\n");
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ if ((ret = decklink->out->EnableVideoOutput(decklink->display_mode,
+ bmdVideoOutputFlagDefault)) != S_OK)
+ {
+ av_log(s, AV_LOG_ERROR,
+ "Error (%X) could not enable video output display mode:0x%X\n",
+ ret, decklink->display_mode);
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ if (decklink->out->EnableAudioOutput(
+ bmdAudioSampleRate48kHz,
+ bmdAudioSampleType16bitInteger,
+ aencctx->channels,
+ bmdAudioOutputStreamContinuous) != S_OK) {
+ av_log(s, AV_LOG_ERROR, "Error could not enable audio output\n");
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ decklink->out->BeginAudioPreroll();
+
+ /*
+ * At this point, any audio or video submitted is held until playback
+ * is started. That happens once enough video frames have been submitted.
+ */
+
+ av_log(s, AV_LOG_INFO,
+ "w:%d h:%d fmt:%s sar:%f -> device:%d w:%d h:%d fmt:%s dar:%f\n",
+ vencctx->width, vencctx->height,
+ av_get_pix_fmt_name(vencctx->pix_fmt), sar,
+ decklink->device, decklink->frame_width, decklink->frame_height,
+ av_get_pix_fmt_name(decklink->frame_pixfmt), dar);
+ return 0;
+
+
+fail:
+ decklink_write_trailer(s);
+ return ret;
+}
+
+static int decklink_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ DeckLinkContext *decklink = (DeckLinkContext *)s->priv_data;
+ AVCodecContext *encctx = s->streams[decklink->video_stream_index]->codec;
+ AVPicture pic;
+ uint32_t nb_samples, samplesWritten;
+ uint8_t *buf;
+ int i;
+
+ if (pkt->stream_index == decklink->video_stream_index)
+ {
+ pthread_mutex_lock(&decklink->mutex);
+
+ while (decklink->frames_buffered >= NUM_PREROLL)
+ {
+ if (!decklink->playing_started)
+ {
+ av_log(s, AV_LOG_INFO,
+ "starting play. stream timescale %d/%d or %d/%d\n",
+ encctx->time_base.num,
+ encctx->time_base.den,
+ s->streams[0]->time_base.num,
+ s->streams[0]->time_base.den);
+
+ decklink->out->EndAudioPreroll();
+ if (pkt->pts == (int64_t)AV_NOPTS_VALUE)
+ {
+ decklink->out->StartScheduledPlayback(
+ 0,
+ decklink->frame_timescale,
+ 1.0);
+ }
+ else
+ {
+ decklink->out->StartScheduledPlayback(
+ 0,
+ (BMDTimeScale)s->streams[0]->time_base.den,
+ 1.0);
+ }
+ decklink->playing_started = true;
+ }
+
+ pthread_cond_wait(&decklink->cond, &decklink->mutex);
+ }
+
+ decklink->frame[decklink->frame_number % NUM_PREROLL]->GetBytes((void**)&buf);
+
+ avpicture_fill(
+ &pic, pkt->data, encctx->pix_fmt, encctx->width, encctx->height);
+
+/*
+ const AVPixFmtDescriptor *desc;
+ desc = &av_pix_fmt_descriptors[encctx->pix_fmt];
+*/
+
+/*
+ * This is modeled after code by Georg Lippitsch. I can see where the planes
+ * are used, but it is unclear to me exactly what needs to be done here.
+ * Tests so far only exercise the non-interlaced branch below, perhaps
+ * because of the command-line use of -pix_fmt uyvy422?
+ */
+ if (false) //pic.interlaced_frame
+ {
+ decklink->sws = sws_getCachedContext(decklink->sws, encctx->width,
+ encctx->height / 2, encctx->pix_fmt, decklink->frame_width,
+ decklink->frame_height / 2, decklink->frame_pixfmt,
+ SWS_BILINEAR, NULL, NULL, NULL);
+
+ uint8_t *src[] = {pic.data[0], pic.data[1], pic.data[2]};
+ int srcStride[] = {pic.linesize[0] * 2,
+ pic.linesize[1] * 2,
+ pic.linesize[2] * 2};
+ int dstStride[] = {decklink->frame_width * 4};
+
+ sws_scale(decklink->sws, src, srcStride, 0, encctx->height / 2,
+ &buf, dstStride);
+ for (i = 0; i < 3; i++)
+ {
+ src[i] += pic.linesize[i];
+ }
+ buf += decklink->frame_width * 2;
+ sws_scale(decklink->sws, src, srcStride, 0, encctx->height / 2,
+ &buf, dstStride);
+ }
+ else
+ {
+ decklink->sws = sws_getCachedContext(decklink->sws, encctx->width,
+ encctx->height, encctx->pix_fmt, decklink->frame_width,
+ decklink->frame_height, decklink->frame_pixfmt,
+ SWS_BILINEAR, NULL, NULL, NULL);
+
+ int dstStride[] = {decklink->frame_width * 2};
+
+ sws_scale(decklink->sws, pic.data, pic.linesize, 0, encctx->height,
+ &buf, dstStride);
+ }
+
+
+ av_log(s, AV_LOG_DEBUG,
+ "schedule video frame #%d pts:%ld duration:%d (%ld %ld %ld)\n",
+ decklink->frame_number, pkt->pts, pkt->duration,
+ (BMDTimeValue)pkt->pts * (BMDTimeScale)s->streams[0]->time_base.num,
+ (BMDTimeValue)pkt->duration * (BMDTimeScale)encctx->time_base.num,
+ (BMDTimeScale)s->streams[0]->time_base.num * (BMDTimeScale)s->streams[0]->time_base.den);
+
+ if (pkt->pts == (int64_t)AV_NOPTS_VALUE)
+ {
+ decklink->out->ScheduleVideoFrame(
+ decklink->frame[decklink->frame_number % NUM_PREROLL],
+ decklink->frame_number * decklink->frame_duration,
+ decklink->frame_duration, decklink->frame_timescale);
+ }
+ else
+ {
+ decklink->out->ScheduleVideoFrame(
+ decklink->frame[decklink->frame_number % NUM_PREROLL],
+ (BMDTimeValue)decklink->frame_number * (BMDTimeScale)pkt->duration,
+ (BMDTimeValue)pkt->duration,
+ (BMDTimeScale)s->streams[0]->time_base.den);
+
+ /*
+ * This would be the correct code to use, as far as I understand,
+ * but pkt->pts + pkt->duration can be greater than nextpkt->pts and
+ * this makes DeckLink drop frames and otherwise perform erratically.
+ * Probably the best solution/hack is to keep the expected pts and,
+ * if it is over by 1, adjust the pts/duration for this frame by 1.
+
+ decklink->out->ScheduleVideoFrame(
+ decklink->frame[decklink->frame_number % NUM_PREROLL],
+ (BMDTimeValue)pkt->pts * (BMDTimeScale)s->streams[0]->time_base.num,
+ (BMDTimeValue)pkt->duration * (BMDTimeScale)encctx->time_base.num,
+ (BMDTimeScale)s->streams[0]->time_base.num * (BMDTimeScale)s->streams[0]->time_base.den);
+
+ */
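+
+ /*
+ * Illustrative sketch (not part of the original patch) of that hack. It
+ * assumes a hypothetical int64_t expected_pts field added to DeckLinkContext
+ * and initialised to AV_NOPTS_VALUE in decklink_write_header():
+ *
+ * int64_t pts = pkt->pts;
+ * if (decklink->expected_pts != (int64_t)AV_NOPTS_VALUE &&
+ *     pts > decklink->expected_pts)
+ *     pts = decklink->expected_pts;    // pull an over-long frame back in line
+ * decklink->expected_pts = pts + pkt->duration;
+ * // ...then schedule using pts instead of pkt->pts in the call above.
+ */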
+ }
+
+ decklink->frames_buffered++;
+ decklink->frame_number++;
+
+ pthread_mutex_unlock(&decklink->mutex);
+ }
+ else if (pkt->stream_index == decklink->audio_stream_index)
+ {
+ pthread_mutex_lock(&decklink->mutex);
+
+ nb_samples = pkt->size / (decklink->audio_channels * 2);
+ samplesWritten = 0;
+
+ av_log(s, AV_LOG_DEBUG,
+ "schedule audio data nb_samples:%d channels:%d pts:%ld duration:%d\n",
+ nb_samples, decklink->audio_channels, pkt->pts, pkt->duration);
+
+ decklink->out->ScheduleAudioSamples(pkt->data, nb_samples, 0,
+ bmdAudioSampleRate48kHz,
+ &samplesWritten);
+
+ /*
+ * This may also require that audio be buffered. Simple testing of
+ * this code has not shown buffering to be necessary but previous
+ * work has required this.
+ */
+ if (samplesWritten != nb_samples)
+ {
+ av_log(s, AV_LOG_ERROR,
+ "Audio samples not all written! (%d of %d accepted)\n",
+ samplesWritten, nb_samples);
+ }
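+
+ /*
+ * Illustrative sketch (not part of the original patch) of retrying the
+ * remainder instead of only logging it, assuming the 16-bit interleaved
+ * samples enforced in decklink_write_header().  A real implementation
+ * would also need to back off or give up if nothing is accepted:
+ *
+ * uint8_t *data = pkt->data;
+ * while (samplesWritten < nb_samples) {
+ *     data       += samplesWritten * decklink->audio_channels * 2;
+ *     nb_samples -= samplesWritten;
+ *     decklink->out->ScheduleAudioSamples(data, nb_samples, 0,
+ *                                         bmdAudioSampleRate48kHz,
+ *                                         &samplesWritten);
+ * }
+ */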
+
+ pthread_mutex_unlock(&decklink->mutex);
+ }
+
+ return 0;
+}
+
+#define OFFSET(x) offsetof(DeckLinkContext,x)
+
+static const AVOption options[] = {
+ { "device", "Decklink device", OFFSET(device), AV_OPT_TYPE_INT, { 0 }, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
+ { NULL },
+};
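+
+/*
+ * Illustrative sketch (not part of the original patch): exposing the preroll
+ * depth as an option so different buffer sizes can be tested without
+ * recompiling.  A hypothetical "int preroll;" field would replace NUM_PREROLL
+ * in DeckLinkContext, and the frame[] array would then have to be allocated
+ * at runtime:
+ *
+ * { "preroll", "number of video frames to buffer before starting playback",
+ *   OFFSET(preroll), AV_OPT_TYPE_INT, { 20 }, 2, 60, AV_OPT_FLAG_ENCODING_PARAM },
+ */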
+
+static const AVClass decklink_class = {
+ /* .class_name = */ "decklink output device",
+ /* .item_name = */ av_default_item_name,
+ /* .option = */ options,
+ /* .version = */ LIBAVUTIL_VERSION_INT,
+ /* log_level_offset_offset = */ 0,
+ /* parent_log_context_offset = */ 0,
+ /* child_next = */ NULL,
+ /* child_class_next = */ NULL
+};
+
+AVOutputFormat ff_decklink_muxer = {
+ /* .name = */ "decklink",
+ /* .long_name = */ NULL_IF_CONFIG_SMALL("Blackmagic DeckLink output"),
+ /* .mime_type = */ NULL,
+ /* .extensions = */ NULL,
+ /* .audio_codec = */ CODEC_ID_PCM_S16LE,
+ /* .video_codec = */ CODEC_ID_RAWVIDEO,
+ /* .subtitle_codec = */ CODEC_ID_NONE,
+ /* .flags = */ AVFMT_NOFILE,
+ /* .codec_tag = */ NULL,
+ /* .priv_class = */ &decklink_class,
+ /* .next = */ NULL,
+ /* .priv_data_size = */ sizeof(DeckLinkContext),
+ /* .write_header = */ decklink_write_header,
+ /* .write_packet = */ decklink_write_packet,
+ /* .write_trailer = */ decklink_write_trailer,
+ /* .interleave_packet = */ NULL,
+ /* .query_codec = */ NULL,
+ /* .get_output_timestamp = */ NULL
+};
+
--
1.7.2