[FFmpeg-devel] [PATCH 10/10] lavfi/dnn_backend_tf.c: Documentation for tf_infer_request functions
Shubhanshu Saxena
shubhanshu.e01 at gmail.com
Fri May 28 12:24:54 EEST 2021
This commit adds documentation for the functions related to tf_infer_request.
Signed-off-by: Shubhanshu Saxena <shubhanshu.e01 at gmail.com>
---
libavfilter/dnn/dnn_backend_tf.c | 41 ++++++++++++++++++++++++++++++++
1 file changed, 41 insertions(+)
diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index 296604461b..8a74b11cf5 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -95,6 +95,13 @@ static void free_buffer(void *data, size_t length)
av_freep(&data);
}
+/**
+ * Free the contents of the TensorFlow inference request.
+ * It does not free the tf_infer_request instance itself.
+ *
+ * @param request pointer to the tf_infer_request instance.
+ * A NULL pointer is allowed.
+ */
static void tf_free_request(tf_infer_request *request)
{
if (!request)
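A note on the ownership split documented above: tf_free_request() releases the
tensors held by the request, while the struct itself stays alive and may be
reused or freed by the caller. A minimal sketch of that pattern, assuming the
internal types of this file (the refill step is hypothetical):

    tf_infer_request *req = tf_create_inference_request();
    if (req) {
        /* ... fill req and run one inference ... */
        tf_free_request(req); /* frees the contents, req stays valid */
        /* ... req may now be refilled for another inference ... */
        av_freep(&req);       /* the caller frees the instance itself */
    }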
@@ -108,6 +115,12 @@ static void tf_free_request(tf_infer_request *request)
av_freep(&request->output_tensors);
}
+/**
+ * Create a TensorFlow inference request. All members
+ * are initially unallocated and set to NULL.
+ *
+ * @return pointer to the allocated tf_infer_request instance.
+ */
static tf_infer_request* tf_create_inference_request(void)
{
tf_infer_request* infer_request = av_malloc(sizeof(tf_infer_request));
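"Initially unallocated and set to NULL" refers to the member initialization in
the constructor. Roughly, as a sketch (only output_tensors is visible in this
diff; the other member names are assumptions, and the OOM check is added here
for illustration):

    tf_infer_request *infer_request = av_malloc(sizeof(*infer_request));
    if (!infer_request)                   /* assumption: report OOM as NULL */
        return NULL;
    infer_request->tf_outputs     = NULL; /* assumed member */
    infer_request->output_tensors = NULL; /* seen in tf_free_request above */
    infer_request->input_tensor   = NULL; /* assumed member */
    return infer_request;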
@@ -118,8 +131,17 @@ static tf_infer_request* tf_create_inference_request(void)
return infer_request;
}
+/**
+ * Start synchronous inference for the TensorFlow model.
+ * This function does not check the status of the operation;
+ * the caller should inspect tf_model->status afterwards.
+ *
+ * @param request pointer to the RequestItem for inference.
+ */
static void tf_start_inference(RequestItem *request)
{
+ if (!request)
+ return;
tf_infer_request *infer_request = request->infer_request;
InferenceItem *inference = request->inference;
TaskItem *task = inference->task;
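Because tf_start_inference() itself reports nothing, a caller checks the stored
TF_Status afterwards. A sketch using the standard TensorFlow C API accessors
TF_GetCode()/TF_Message(); the ctx logging handle is assumed from the
surrounding backend code:

    tf_start_inference(request);
    if (TF_GetCode(tf_model->status) != TF_OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to run session: %s\n",
               TF_Message(tf_model->status));
        return DNN_ERROR;
    }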
@@ -132,6 +154,12 @@ static void tf_start_inference(RequestItem *request)
tf_model->status);
}
+/**
+ * Thread routine for asynchronous inference. It calls the
+ * completion callback once the inference finishes.
+ *
+ * @param arg pointer to the RequestItem instance for inference.
+ */
static void *tf_thread_routine(void *arg)
{
RequestItem *request = arg;
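The routine documented above is the glue between pthread_create() and the
synchronous path: it runs the blocking inference, then fires the completion
callback. As a sketch (the callback name here is an assumption; the actual
hook lives elsewhere in this file):

    static void *thread_routine_sketch(void *arg)
    {
        RequestItem *request = arg;
        tf_start_inference(request);        /* blocking inference on this thread */
        infer_completion_callback(request); /* assumed completion hook */
        return NULL;
    }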
@@ -142,8 +170,21 @@ static void *tf_thread_routine(void *arg)
#endif
}
+/**
+ * Start asynchronous inference for the TensorFlow model on a
+ * detached thread. It calls the completion callback after the
+ * inference completes.
+ * If pthreads are not supported, execution falls back to
+ * synchronous mode, calling the completion callback after inference.
+ *
+ * @param request pointer to the RequestItem for inference.
+ * @retval DNN_SUCCESS if async inference was started.
+ * @retval DNN_ERROR if async inference could not be started.
+ */
static DNNReturnType tf_start_inference_async(RequestItem *request)
{
+ if (!request)
+ return DNN_ERROR;
InferenceItem *inference = request->inference;
TaskItem *task = inference->task;
TFModel *tf_model = task->model;
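The detached-thread behaviour with a synchronous fallback described in the
comment corresponds to a pattern like the following sketch. It assumes
HAVE_PTHREAD_CANCEL guards pthread availability, as it does elsewhere in
libavfilter/dnn, and reuses the assumed completion hook from the previous
sketch:

    #if HAVE_PTHREAD_CANCEL
        pthread_t thread;
        pthread_attr_t attr;
        pthread_attr_init(&attr);
        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        if (pthread_create(&thread, &attr, tf_thread_routine, request)) {
            pthread_attr_destroy(&attr);
            return DNN_ERROR;               /* thread could not be started */
        }
        pthread_attr_destroy(&attr);
        return DNN_SUCCESS;
    #else
        tf_start_inference(request);        /* synchronous fallback */
        infer_completion_callback(request); /* assumed completion hook */
        return DNN_SUCCESS;
    #endif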
--
2.25.1