[FFmpeg-devel] [PATCH 17/42] avcodec/refstruct: Add RefStruct pool API

Andreas Rheinhardt andreas.rheinhardt at outlook.com
Tue Sep 19 22:57:09 EEST 2023


Very similar to the AVBufferPool API, but with some differences:
1. Reusing an already existing entry no longer incurs any allocation
(the AVBufferPool API needs to allocate an AVBufferRef).
2. The tasks done while holding the lock are smaller; e.g.
allocating new entries is now performed without holding the lock.
The same goes for freeing.
3. The entries are freed as soon as possible (the AVBufferPool API
frees them in two batches: The first in av_buffer_pool_uninit() and
the second immediately before the pool is freed when the last
outstanding entry is returned to the pool).
4. The API is designed for objects and not naked buffers and
therefore has a reset callback. This is called whenever an object
is returned to the pool (see the usage sketch after this list).
5. Just like with the RefStruct API, custom allocators are not
supported.
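
For illustration only (not part of the patch), here is a minimal usage
sketch of the new calls; MyEntry and my_entry_reset are hypothetical
names. It exercises point 1 (reuse without allocation) and point 4
(reset callback on return to the pool):

    #include <stddef.h>
    #include <stdint.h>

    #include "libavcodec/refstruct.h"
    #include "libavutil/error.h"
    #include "libavutil/mem.h"

    /* Hypothetical pooled object type. */
    typedef struct MyEntry {
        uint8_t *data;
        size_t   data_size;
    } MyEntry;

    /* Hypothetical reset callback: runs whenever an entry is returned
     * to the pool. */
    static void my_entry_reset(FFRefStructOpaque opaque, void *obj)
    {
        MyEntry *entry = obj;
        av_freep(&entry->data);
        entry->data_size = 0;
    }

    static int pool_usage_sketch(void)
    {
        FFRefStructPool *pool =
            ff_refstruct_pool_alloc_ext(sizeof(MyEntry), 0, NULL,
                                        NULL, my_entry_reset, NULL, NULL);
        MyEntry *entry;

        if (!pool)
            return AVERROR(ENOMEM);

        entry = ff_refstruct_pool_get(pool);  /* fresh entry, zeroed */
        if (!entry) {
            ff_refstruct_pool_uninit(&pool);
            return AVERROR(ENOMEM);
        }
        ff_refstruct_unref(&entry);           /* reset and returned to the pool */

        entry = ff_refstruct_pool_get(pool);  /* reuses the entry above: no allocation */
        ff_refstruct_unref(&entry);

        ff_refstruct_pool_uninit(&pool);      /* pool freed once the last entry is back */
        return 0;
    }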

(If desired, the FFRefStructPool struct itself could be made
reference counted via the RefStruct API; an FFRefStructPool
would then be freed via ff_refstruct_unref().)
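
The header documentation below motivates the pool with the cost of
repeatedly allocating large or complicated objects. As another hedged
sketch (BigEntry and the big_entry_* helpers are hypothetical), an
init_cb/free_entry_cb pair can confine such an expensive allocation to
once per pool entry; FF_REFSTRUCT_POOL_FLAG_FREE_ON_INIT_ERROR makes
the pool call free_entry_cb should init_cb fail:

    #include <stdint.h>

    #include "libavcodec/refstruct.h"
    #include "libavutil/error.h"
    #include "libavutil/mem.h"

    /* Hypothetical entry with an expensive buffer allocated once per entry. */
    typedef struct BigEntry {
        uint8_t *workspace;
    } BigEntry;

    static int big_entry_init(FFRefStructOpaque opaque, void *obj)
    {
        BigEntry *entry = obj;
        entry->workspace = av_malloc(1 << 20);
        return entry->workspace ? 0 : AVERROR(ENOMEM);
    }

    static void big_entry_free(FFRefStructOpaque opaque, void *obj)
    {
        BigEntry *entry = obj;
        av_freep(&entry->workspace);
    }

    /* The workspace survives round trips through the pool; it is only
     * freed together with the entry after ff_refstruct_pool_uninit(). */
    static FFRefStructPool *big_entry_pool_alloc(void)
    {
        return ff_refstruct_pool_alloc_ext(sizeof(BigEntry),
                                           FF_REFSTRUCT_POOL_FLAG_FREE_ON_INIT_ERROR,
                                           NULL, big_entry_init, NULL,
                                           big_entry_free, NULL);
    }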

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt at outlook.com>
---
 libavcodec/refstruct.c | 194 ++++++++++++++++++++++++++++++++++++++++-
 libavcodec/refstruct.h | 128 +++++++++++++++++++++++++++
 2 files changed, 321 insertions(+), 1 deletion(-)

diff --git a/libavcodec/refstruct.c b/libavcodec/refstruct.c
index 604938922a..7539b7942e 100644
--- a/libavcodec/refstruct.c
+++ b/libavcodec/refstruct.c
@@ -23,8 +23,11 @@
 #include "internal.h"
 #include "refstruct.h"
 
+#include "libavutil/avassert.h"
+#include "libavutil/error.h"
 #include "libavutil/macros.h"
 #include "libavutil/mem.h"
+#include "libavutil/thread.h"
 
 typedef struct RefCount {
     /**
@@ -35,6 +38,7 @@ typedef struct RefCount {
     atomic_uintptr_t  refcount;
     FFRefStructOpaque opaque;
     void (*free_cb)(FFRefStructOpaque opaque, void *obj);
+    void (*free)(void *ref);
 } RefCount;
 
 #if __STDC_VERSION__ >= 201112L
@@ -64,6 +68,7 @@ static void refcount_init(RefCount *ref, FFRefStructOpaque opaque,
     atomic_init(&ref->refcount, 1);
     ref->opaque  = opaque;
     ref->free_cb = free_cb;
+    ref->free    = av_free;
 }
 
 void *ff_refstruct_alloc_ext_c(size_t size, unsigned flags, FFRefStructOpaque opaque,
@@ -103,7 +108,7 @@ void ff_refstruct_unref(void *objp)
     if (atomic_fetch_sub_explicit(&ref->refcount, 1, memory_order_acq_rel) == 1) {
         if (ref->free_cb)
             ref->free_cb(ref->opaque, obj);
-        av_free(ref);
+        ref->free(ref);
     }
 
     return;
@@ -151,3 +156,190 @@ int ff_refstruct_exclusive(const void *data)
      * accept const atomics in C11 (see also N1807). */
     return atomic_load_explicit((atomic_uintptr_t*)&ref->refcount, memory_order_acquire) == 1;
 }
+
+struct FFRefStructPool {
+    size_t size;
+    FFRefStructOpaque opaque;
+    int  (*init_cb)(FFRefStructOpaque opaque, void *obj);
+    void (*reset_cb)(FFRefStructOpaque opaque, void *obj);
+    void (*free_entry_cb)(FFRefStructOpaque opaque, void *obj);
+    void (*free_cb)(FFRefStructOpaque opaque);
+
+    int uninited;
+    unsigned entry_flags;
+    unsigned pool_flags;
+
+    /** The number of outstanding entries not in available_entries. */
+    atomic_uintptr_t refcount;
+    /**
+     * This is a linked list of available entries;
+     * the RefCount's opaque pointer is used as next pointer
+     * for available entries.
+     * While the entries are in use, the opaque is a pointer
+     * to the corresponding FFRefStructPool.
+     */
+    RefCount *available_entries;
+    pthread_mutex_t mutex;
+};
+
+static void pool_free(FFRefStructPool *pool)
+{
+    pthread_mutex_destroy(&pool->mutex);
+    if (pool->free_cb)
+        pool->free_cb(pool->opaque);
+    av_free(pool);
+}
+
+static void pool_free_entry(FFRefStructPool *pool, RefCount *ref)
+{
+    if (pool->free_entry_cb)
+        pool->free_entry_cb(pool->opaque, get_userdata(ref));
+    av_free(ref);
+}
+
+static void pool_return_entry(void *ref_)
+{
+    RefCount *ref = ref_;
+    FFRefStructPool *pool = ref->opaque.nc;
+
+    pthread_mutex_lock(&pool->mutex);
+    if (!pool->uninited) {
+        ref->opaque.nc = pool->available_entries;
+        pool->available_entries = ref;
+        ref = NULL;
+    }
+    pthread_mutex_unlock(&pool->mutex);
+
+    if (ref)
+        pool_free_entry(pool, ref);
+
+    if (atomic_fetch_sub_explicit(&pool->refcount, 1, memory_order_acq_rel) == 1)
+        pool_free(pool);
+}
+
+static void pool_reset_entry(FFRefStructOpaque opaque, void *entry)
+{
+    FFRefStructPool *pool = opaque.nc;
+
+    pool->reset_cb(pool->opaque, entry);
+}
+
+static int refstruct_pool_get_ext(void *datap, FFRefStructPool *pool)
+{
+    void *ret = NULL;
+
+    memcpy(datap, &(void *){ NULL }, sizeof(void*));
+
+    pthread_mutex_lock(&pool->mutex);
+    av_assert1(!pool->uninited);
+    if (pool->available_entries) {
+        RefCount *ref = pool->available_entries;
+        ret = get_userdata(ref);
+        pool->available_entries = ref->opaque.nc;
+        ref->opaque.nc = pool;
+        atomic_init(&ref->refcount, 1);
+    }
+    pthread_mutex_unlock(&pool->mutex);
+
+    if (!ret) {
+        RefCount *ref;
+        ret = ff_refstruct_alloc_ext(pool->size, pool->entry_flags, pool,
+                                     pool->reset_cb ? pool_reset_entry : NULL);
+        if (!ret)
+            return AVERROR(ENOMEM);
+        ref = get_refcount(ret);
+        ref->free = pool_return_entry;
+        if (pool->init_cb) {
+            int err = pool->init_cb(pool->opaque, ret);
+            if (err < 0) {
+                if (pool->pool_flags & FF_REFSTRUCT_POOL_FLAG_RESET_ON_INIT_ERROR)
+                    pool->reset_cb(pool->opaque, ret);
+                if (pool->pool_flags & FF_REFSTRUCT_POOL_FLAG_FREE_ON_INIT_ERROR)
+                    pool->free_entry_cb(pool->opaque, ret);
+                av_free(ref);
+                return err;
+            }
+        }
+    }
+    atomic_fetch_add_explicit(&pool->refcount, 1, memory_order_relaxed);
+    memcpy(datap, &ret, sizeof(ret));
+
+    return 0;
+}
+
+void *ff_refstruct_pool_get(FFRefStructPool *pool)
+{
+    void *ret;
+    refstruct_pool_get_ext(&ret, pool);
+    return ret;
+}
+
+void ff_refstruct_pool_uninit(FFRefStructPool **poolp)
+{
+    FFRefStructPool *pool = *poolp;
+    RefCount *entry;
+
+    if (!pool)
+        return;
+
+    pthread_mutex_lock(&pool->mutex);
+    av_assert1(!pool->uninited);
+    pool->uninited = 1;
+    entry = pool->available_entries;
+    pool->available_entries = NULL;
+    pthread_mutex_unlock(&pool->mutex);
+
+    while (entry) {
+        void *next = entry->opaque.nc;
+        pool_free_entry(pool, entry);
+        entry = next;
+    }
+
+    if (atomic_fetch_sub_explicit(&pool->refcount, 1, memory_order_acq_rel) == 1)
+        pool_free(pool);
+
+    *poolp = NULL;
+}
+
+FFRefStructPool *ff_refstruct_pool_alloc(size_t size, unsigned flags)
+{
+    return ff_refstruct_pool_alloc_ext(size, flags, NULL, NULL, NULL, NULL, NULL);
+}
+
+FFRefStructPool *ff_refstruct_pool_alloc_ext_c(size_t size, unsigned flags,
+                                               FFRefStructOpaque opaque,
+                                               int  (*init_cb)(FFRefStructOpaque opaque, void *obj),
+                                               void (*reset_cb)(FFRefStructOpaque opaque, void *obj),
+                                               void (*free_entry_cb)(FFRefStructOpaque opaque, void *obj),
+                                               void (*free_cb)(FFRefStructOpaque opaque))
+{
+    FFRefStructPool *pool = av_mallocz(sizeof(*pool));
+    int err;
+
+    if (!pool)
+        return NULL;
+
+    pool->size          = size;
+    pool->opaque        = opaque;
+    pool->init_cb       = init_cb;
+    pool->reset_cb      = reset_cb;
+    pool->free_entry_cb = free_entry_cb;
+    pool->free_cb       = free_cb;
+#define COMMON_FLAGS FF_REFSTRUCT_POOL_FLAG_NO_ZEROING
+    pool->entry_flags   = flags & COMMON_FLAGS;
+    // Filter out nonsense combinations to avoid checks later.
+    if (!pool->reset_cb)
+        flags &= ~FF_REFSTRUCT_POOL_FLAG_RESET_ON_INIT_ERROR;
+    if (!pool->free_entry_cb)
+        flags &= ~FF_REFSTRUCT_POOL_FLAG_FREE_ON_INIT_ERROR;
+    pool->pool_flags    = flags;
+
+    atomic_init(&pool->refcount, 1);
+
+    err = pthread_mutex_init(&pool->mutex, NULL);
+    if (err) {
+        av_free(pool);
+        return NULL;
+    }
+    return pool;
+}
diff --git a/libavcodec/refstruct.h b/libavcodec/refstruct.h
index ee6936d77a..e2fe45e77d 100644
--- a/libavcodec/refstruct.h
+++ b/libavcodec/refstruct.h
@@ -151,4 +151,132 @@ void ff_refstruct_replace(void *dstp, const void *src);
  */
 int ff_refstruct_exclusive(const void *obj);
 
+/**
+ * FFRefStructPool is an API for a thread-safe pool of objects managed
+ * via the RefStruct API.
+ *
+ * Frequently allocating and freeing large or complicated objects may be slow
+ * and wasteful. This API is meant to solve this in cases where the caller
+ * needs a set of interchangeable objects.
+ *
+ * At the beginning, the user must allocate the pool via
+ * ff_refstruct_pool_alloc() or its analogue ff_refstruct_pool_alloc_ext().
+ * Then whenever an object is needed, call ff_refstruct_pool_get() to
+ * get a new or reused object from the pool. This new object works in all
+ * aspects the same way as the ones created by ff_refstruct_alloc_ext().
+ * However, when the last reference to this object is unreferenced, it is
+ * (optionally) reset and returned to the pool instead of being freed and
+ * will be reused for subsequent ff_refstruct_pool_get() calls.
+ *
+ * When the caller is done with the pool and no longer needs to create any new
+ * objects, ff_refstruct_pool_uninit() must be called to mark the pool as
+ * freeable. Entries returned to the pool will then be freed.
+ * Once all the entries are freed, the pool will automatically be freed.
+ *
+ * Allocating and releasing objects with this API is thread-safe as long as
+ * the user-supplied callbacks (if provided) are thread-safe.
+ */
+
+/**
+ * The object pool. This structure is opaque and not meant to be accessed
+ * directly. It is allocated with the allocators below and freed with
+ * ff_refstruct_pool_uninit().
+ */
+typedef struct FFRefStructPool FFRefStructPool;
+
+/**
+ * If this flag is not set, every object in the pool will be zeroed before
+ * the init callback is called or before it is turned over to the user
+ * for the first time if no init callback has been provided.
+ */
+#define FF_REFSTRUCT_POOL_FLAG_NO_ZEROING         FF_REFSTRUCT_FLAG_NO_ZEROING
+/**
+ * If this flag is set and both init_cb and reset_cb callbacks are provided,
+ * then reset_cb will be called if init_cb fails.
+ * The object passed to reset_cb will be in the state left by init_cb.
+ */
+#define FF_REFSTRUCT_POOL_FLAG_RESET_ON_INIT_ERROR                   (1 << 16)
+/**
+ * If this flag is set and both init_cb and free_entry_cb callbacks are
+ * provided, then free_entry_cb will be called if init_cb fails.
+ *
+ * It will be called after reset_cb in case reset_cb and the
+ * FF_REFSTRUCT_POOL_FLAG_RESET_ON_INIT_ERROR flag are also set.
+ *
+ * The object passed to free_entry_cb will be in the state left by
+ * the callbacks applied earlier (init_cb potentially followed by reset_cb).
+ */
+#define FF_REFSTRUCT_POOL_FLAG_FREE_ON_INIT_ERROR                    (1 << 17)
+
+/**
+ * Equivalent to ff_refstruct_pool_alloc_ext(size, flags, NULL, NULL, NULL, NULL, NULL)
+ */
+FFRefStructPool *ff_refstruct_pool_alloc(size_t size, unsigned flags);
+
+/**
+ * Allocate an FFRefStructPool, potentially using complex callbacks.
+ *
+ * @param size size of the entries of the pool
+ * @param flags a bitwise combination of FF_REFSTRUCT_POOL_FLAG_* flags
+ * @param opaque A pointer that will be passed to the callbacks below.
+ * @param init_cb  A callback that will be called directly after a new entry
+ *              has been allocated. obj has already been zeroed unless
+ *              the FF_REFSTRUCT_POOL_FLAG_NO_ZEROING flag is in use.
+ * @param reset_cb A callback that will be called after an entry has been
+ *              returned to the pool and before it is reused.
+ * @param free_entry_cb A callback that will be called when an entry is freed
+ *                   after the pool has been marked as uninitialized.
+ * @param free_cb    A callback that will be called when the pool itself is
+ *                   freed (after the last entry has been returned and freed).
+ */
+FFRefStructPool *ff_refstruct_pool_alloc_ext_c(size_t size, unsigned flags,
+                                               FFRefStructOpaque opaque,
+                                               int  (*init_cb)(FFRefStructOpaque opaque, void *obj),
+                                               void (*reset_cb)(FFRefStructOpaque opaque, void *obj),
+                                               void (*free_entry_cb)(FFRefStructOpaque opaque, void *obj),
+                                               void (*free_cb)(FFRefStructOpaque opaque));
+
+/**
+ * A wrapper around ff_refstruct_pool_alloc_ext_c() for the common case
+ * of a non-const qualified opaque.
+ *
+ * @see ff_refstruct_pool_alloc_ext_c()
+ */
+static inline
+FFRefStructPool *ff_refstruct_pool_alloc_ext(size_t size, unsigned flags,
+                                             void *opaque,
+                                             int  (*init_cb)(FFRefStructOpaque opaque, void *obj),
+                                             void (*reset_cb)(FFRefStructOpaque opaque, void *obj),
+                                             void (*free_entry_cb)(FFRefStructOpaque opaque, void *obj),
+                                             void (*free_cb)(FFRefStructOpaque opaque))
+{
+    return ff_refstruct_pool_alloc_ext_c(size, flags, (FFRefStructOpaque){.nc = opaque},
+                                         init_cb, reset_cb, free_entry_cb, free_cb);
+}
+
+/**
+ * Get an object from the pool, reusing an old one from the pool when
+ * available.
+ *
+ * Every call to this function must happen before ff_refstruct_pool_uninit().
+ * Otherwise undefined behaviour may occur.
+ *
+ * @param pool the pool from which to get the object
+ * @return a reference to the object on success, NULL on error.
+ */
+void *ff_refstruct_pool_get(FFRefStructPool *pool);
+
+/**
+ * Mark the pool as being available for freeing. It will actually be freed
+ * only once all the entries allocated from the pool have been released.
+ * Thus it is safe to call this function while some of the entries
+ * are still in use.
+ *
+ * It is illegal to try to get a new entry after this function has been called.
+ *
+ * @param poolp pointer to a pointer to either NULL or a pool to be freed.
+ *              `*poolp` will be set to NULL.
+ */
+void ff_refstruct_pool_uninit(FFRefStructPool **poolp);
+
 #endif /* AVCODEC_REFSTRUCT_H */
-- 
2.34.1


