lavu: fix memory leaks by using a mutex instead of atomics

The buffer pool has to atomically add and remove entries from the linked
list of available buffers. This was done by removing the entire list
with a CAS operation, working on it, and then setting it back again
(using a retry-loop in case another thread was doing the same thing).

This could effectively cause memory leaks: while a thread was working on
the buffer list, other threads would allocate new buffers, increasing
the pool's total size. There was no real leak, but since these extra
buffers were not needed, but not free'd either (except when the buffer
pool was destroyed), this had the same effects as a real leak. For some
reason, growth was exponential, and could easily kill the process due
to OOM in real-world uses.

Fix this by using a mutex to protect the list operations. The fancy
way atomics remove the whole list to work on it is not needed anymore,
which also avoids the situation which was causing the leak.

Signed-off-by: Anton Khirnov <anton@khirnov.net>
pull/100/head
wm4 10 years ago committed by Anton Khirnov
parent 2443e522f0
commit fbd6c97f9c
  1. 79
      libavutil/buffer.c
  2. 6
      libavutil/buffer_internal.h

@@ -23,6 +23,7 @@
#include "buffer_internal.h" #include "buffer_internal.h"
#include "common.h" #include "common.h"
#include "mem.h" #include "mem.h"
#include "thread.h"
AVBufferRef *av_buffer_create(uint8_t *data, int size, AVBufferRef *av_buffer_create(uint8_t *data, int size,
void (*free)(void *opaque, uint8_t *data), void (*free)(void *opaque, uint8_t *data),
@@ -199,6 +200,8 @@ AVBufferPool *av_buffer_pool_init(int size, AVBufferRef* (*alloc)(int size))
if (!pool) if (!pool)
return NULL; return NULL;
ff_mutex_init(&pool->mutex, NULL);
pool->size = size; pool->size = size;
pool->alloc = alloc ? alloc : av_buffer_alloc; pool->alloc = alloc ? alloc : av_buffer_alloc;
@@ -220,6 +223,7 @@ static void buffer_pool_free(AVBufferPool *pool)
buf->free(buf->opaque, buf->data); buf->free(buf->opaque, buf->data);
av_freep(&buf); av_freep(&buf);
} }
ff_mutex_destroy(&pool->mutex);
av_freep(&pool); av_freep(&pool);
} }
@@ -236,47 +240,16 @@ void av_buffer_pool_uninit(AVBufferPool **ppool)
buffer_pool_free(pool); buffer_pool_free(pool);
} }
/* remove the whole buffer list from the pool and return it */
/*
 * Pre-patch lock-free helper (removed by this commit in favour of a mutex).
 * Atomically detaches the entire free-buffer list by CAS-ing the list head
 * to NULL, retrying until the head it swapped out matches the head it last
 * observed. Returns the detached list, or NULL if the pool was empty.
 */
static BufferPoolEntry *get_pool(AVBufferPool *pool)
{
BufferPoolEntry *cur = NULL, *last = NULL;
do {
/* 'last' becomes the head observed on the previous iteration */
FFSWAP(BufferPoolEntry*, cur, last);
/* try to replace head 'last' with NULL; CAS returns the head it found */
cur = avpriv_atomic_ptr_cas((void * volatile *)&pool->pool, last, NULL);
if (!cur)
return NULL;
/* retry until the head we swapped out is the one we expected */
} while (cur != last);
return cur;
}
/*
 * Pre-patch lock-free helper (removed by this commit in favour of a mutex).
 * Prepends the list 'buf' back onto the pool's free list. If another thread
 * installed entries concurrently, detaches that list with get_pool() and
 * appends it to our own tail, then retries the CAS. NULL 'buf' is a no-op.
 */
static void add_to_pool(BufferPoolEntry *buf)
{
AVBufferPool *pool;
BufferPoolEntry *cur, *end = buf;
if (!buf)
return;
pool = buf->pool;
/* find the tail of the list we are returning */
while (end->next)
end = end->next;
/* CAS succeeds (returns NULL) only when the pool list was empty */
while ((cur = avpriv_atomic_ptr_cas((void * volatile *)&pool->pool, NULL, buf))) {
/* pool is not empty, retrieve it and append it to our list */
cur = get_pool(pool);
end->next = cur;
while (end->next)
end = end->next;
}
}
static void pool_release_buffer(void *opaque, uint8_t *data) static void pool_release_buffer(void *opaque, uint8_t *data)
{ {
BufferPoolEntry *buf = opaque; BufferPoolEntry *buf = opaque;
AVBufferPool *pool = buf->pool; AVBufferPool *pool = buf->pool;
add_to_pool(buf);
ff_mutex_lock(&pool->mutex);
buf->next = pool->pool;
pool->pool = buf;
ff_mutex_unlock(&pool->mutex);
if (!avpriv_atomic_int_add_and_fetch(&pool->refcount, -1)) if (!avpriv_atomic_int_add_and_fetch(&pool->refcount, -1))
buffer_pool_free(pool); buffer_pool_free(pool);
} }
@@ -306,8 +279,6 @@ static AVBufferRef *pool_alloc_buffer(AVBufferPool *pool)
ret->buffer->opaque = buf; ret->buffer->opaque = buf;
ret->buffer->free = pool_release_buffer; ret->buffer->free = pool_release_buffer;
avpriv_atomic_int_add_and_fetch(&pool->refcount, 1);
return ret; return ret;
} }
@ -316,22 +287,22 @@ AVBufferRef *av_buffer_pool_get(AVBufferPool *pool)
AVBufferRef *ret; AVBufferRef *ret;
BufferPoolEntry *buf; BufferPoolEntry *buf;
/* check whether the pool is empty */ ff_mutex_lock(&pool->mutex);
buf = get_pool(pool); buf = pool->pool;
if (!buf) if (buf) {
return pool_alloc_buffer(pool); ret = av_buffer_create(buf->data, pool->size, pool_release_buffer,
buf, 0);
/* keep the first entry, return the rest of the list to the pool */ if (ret) {
add_to_pool(buf->next); pool->pool = buf->next;
buf->next = NULL; buf->next = NULL;
}
ret = av_buffer_create(buf->data, pool->size, pool_release_buffer, } else {
buf, 0); ret = pool_alloc_buffer(pool);
if (!ret) {
add_to_pool(buf);
return NULL;
} }
avpriv_atomic_int_add_and_fetch(&pool->refcount, 1); ff_mutex_unlock(&pool->mutex);
if (ret)
avpriv_atomic_int_add_and_fetch(&pool->refcount, 1);
return ret; return ret;
} }

@@ -22,6 +22,7 @@
#include <stdint.h> #include <stdint.h>
#include "buffer.h" #include "buffer.h"
#include "thread.h"
/** /**
* The buffer is always treated as read-only. * The buffer is always treated as read-only.
@@ -68,11 +69,12 @@ typedef struct BufferPoolEntry {
void (*free)(void *opaque, uint8_t *data); void (*free)(void *opaque, uint8_t *data);
AVBufferPool *pool; AVBufferPool *pool;
struct BufferPoolEntry * volatile next; struct BufferPoolEntry *next;
} BufferPoolEntry; } BufferPoolEntry;
struct AVBufferPool { struct AVBufferPool {
BufferPoolEntry * volatile pool; AVMutex mutex;
BufferPoolEntry *pool;
/* /*
* This is used to track when the pool is to be freed. * This is used to track when the pool is to be freed.

Loading…
Cancel
Save