Diffstat (limited to 'src/gallium/auxiliary/pipebuffer')
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c  |  24
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c   |  38
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c   |  77
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c      |  20
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c    |  24
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c    | 124
6 files changed, 202 insertions(+), 105 deletions(-)
diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
index 410d336fef..17b2781052 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
@@ -69,7 +69,7 @@
struct fenced_buffer_list
{
- _glthread_Mutex mutex;
+ pipe_mutex mutex;
struct pipe_winsys *winsys;
@@ -240,7 +240,7 @@ fenced_buffer_destroy(struct pb_buffer *buf)
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_buffer_list *fenced_list = fenced_buf->list;
- _glthread_LOCK_MUTEX(fenced_list->mutex);
+ pipe_mutex_lock(fenced_list->mutex);
assert(fenced_buf->base.base.refcount == 0);
if (fenced_buf->fence) {
struct pipe_winsys *winsys = fenced_list->winsys;
@@ -263,7 +263,7 @@ fenced_buffer_destroy(struct pb_buffer *buf)
else {
_fenced_buffer_destroy(fenced_buf);
}
- _glthread_UNLOCK_MUTEX(fenced_list->mutex);
+ pipe_mutex_unlock(fenced_list->mutex);
}
@@ -396,7 +396,7 @@ buffer_fence(struct pb_buffer *buf,
return;
}
- _glthread_LOCK_MUTEX(fenced_list->mutex);
+ pipe_mutex_lock(fenced_list->mutex);
if (fenced_buf->fence)
_fenced_buffer_remove(fenced_list, fenced_buf);
if (fence) {
@@ -404,7 +404,7 @@ buffer_fence(struct pb_buffer *buf,
fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE;
_fenced_buffer_add(fenced_buf);
}
- _glthread_UNLOCK_MUTEX(fenced_list->mutex);
+ pipe_mutex_unlock(fenced_list->mutex);
}
@@ -423,7 +423,7 @@ fenced_buffer_list_create(struct pipe_winsys *winsys)
fenced_list->numDelayed = 0;
- _glthread_INIT_MUTEX(fenced_list->mutex);
+ pipe_mutex_init(fenced_list->mutex);
return fenced_list;
}
@@ -433,28 +433,28 @@ void
fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
int wait)
{
- _glthread_LOCK_MUTEX(fenced_list->mutex);
+ pipe_mutex_lock(fenced_list->mutex);
_fenced_buffer_list_check_free(fenced_list, wait);
- _glthread_UNLOCK_MUTEX(fenced_list->mutex);
+ pipe_mutex_unlock(fenced_list->mutex);
}
void
fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list)
{
- _glthread_LOCK_MUTEX(fenced_list->mutex);
+ pipe_mutex_lock(fenced_list->mutex);
/* Wait on outstanding fences */
while (fenced_list->numDelayed) {
- _glthread_UNLOCK_MUTEX(fenced_list->mutex);
+ pipe_mutex_unlock(fenced_list->mutex);
#if defined(PIPE_OS_LINUX)
sched_yield();
#endif
_fenced_buffer_list_check_free(fenced_list, 1);
- _glthread_LOCK_MUTEX(fenced_list->mutex);
+ pipe_mutex_lock(fenced_list->mutex);
}
- _glthread_UNLOCK_MUTEX(fenced_list->mutex);
+ pipe_mutex_unlock(fenced_list->mutex);
FREE(fenced_list);
}
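
Throughout this series the _glthread_* calls are replaced by the pipe_mutex/pipe_condvar wrappers from Gallium's p_thread.h. A minimal sketch of how those wrappers map onto pthreads (illustrative; the real header also covers non-POSIX platforms):

#include <pthread.h>

typedef pthread_mutex_t pipe_mutex;
typedef pthread_cond_t pipe_condvar;

/* The variable is passed by name; the macro takes its address. */
#define pipe_mutex_init(mutex)       pthread_mutex_init(&(mutex), NULL)
#define pipe_mutex_lock(mutex)       pthread_mutex_lock(&(mutex))
#define pipe_mutex_unlock(mutex)     pthread_mutex_unlock(&(mutex))
#define pipe_condvar_init(cond)      pthread_cond_init(&(cond), NULL)
#define pipe_condvar_broadcast(cond) pthread_cond_broadcast(&(cond))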
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c
index b914c2d0fe..1ec422fb19 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c
@@ -79,7 +79,7 @@ struct pb_cache_manager
struct pb_manager *provider;
unsigned usecs;
- _glthread_Mutex mutex;
+ pipe_mutex mutex;
struct list_head delayed;
size_t numDelayed;
@@ -153,7 +153,7 @@ pb_cache_buffer_destroy(struct pb_buffer *_buf)
struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
struct pb_cache_manager *mgr = buf->mgr;
- _glthread_LOCK_MUTEX(mgr->mutex);
+ pipe_mutex_lock(mgr->mutex);
assert(buf->base.base.refcount == 0);
_pb_cache_buffer_list_check_free(mgr);
@@ -162,7 +162,7 @@ pb_cache_buffer_destroy(struct pb_buffer *_buf)
util_time_add(&buf->start, mgr->usecs, &buf->end);
LIST_ADDTAIL(&buf->head, &mgr->delayed);
++mgr->numDelayed;
- _glthread_UNLOCK_MUTEX(mgr->mutex);
+ pipe_mutex_unlock(mgr->mutex);
}
@@ -235,7 +235,7 @@ pb_cache_manager_create_buffer(struct pb_manager *_mgr,
struct list_head *curr, *next;
struct util_time now;
- _glthread_LOCK_MUTEX(mgr->mutex);
+ pipe_mutex_lock(mgr->mutex);
buf = NULL;
curr = mgr->delayed.next;
@@ -249,27 +249,35 @@ pb_cache_manager_create_buffer(struct pb_manager *_mgr,
buf = curr_buf;
else if(util_time_timeout(&curr_buf->start, &curr_buf->end, &now))
_pb_cache_buffer_destroy(curr_buf);
+ else
+ /* This buffer and all after it are still hot in the cache */
+ break;
curr = next;
next = curr->next;
}
/* keep searching in the hot buffers */
- while(!buf && curr != &mgr->delayed) {
- curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
- if(pb_cache_is_buffer_compat(curr_buf, size, desc))
- buf = curr_buf;
- curr = next;
- next = curr->next;
+ if(!buf) {
+ while(curr != &mgr->delayed) {
+ curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
+ if(pb_cache_is_buffer_compat(curr_buf, size, desc)) {
+ buf = curr_buf;
+ break;
+ }
+ /* no need to check the timeout here */
+ curr = next;
+ next = curr->next;
+ }
}
if(buf) {
LIST_DEL(&buf->head);
- _glthread_UNLOCK_MUTEX(mgr->mutex);
+ pipe_mutex_unlock(mgr->mutex);
++buf->base.base.refcount;
return &buf->base;
}
- _glthread_UNLOCK_MUTEX(mgr->mutex);
+ pipe_mutex_unlock(mgr->mutex);
buf = CALLOC_STRUCT(pb_cache_buffer);
if(!buf)
@@ -305,7 +313,7 @@ pb_cache_flush(struct pb_manager *_mgr)
struct list_head *curr, *next;
struct pb_cache_buffer *buf;
- _glthread_LOCK_MUTEX(mgr->mutex);
+ pipe_mutex_lock(mgr->mutex);
curr = mgr->delayed.next;
next = curr->next;
while(curr != &mgr->delayed) {
@@ -314,7 +322,7 @@ pb_cache_flush(struct pb_manager *_mgr)
curr = next;
next = curr->next;
}
- _glthread_UNLOCK_MUTEX(mgr->mutex);
+ pipe_mutex_unlock(mgr->mutex);
}
@@ -345,7 +353,7 @@ pb_cache_manager_create(struct pb_manager *provider,
mgr->usecs = usecs;
LIST_INITHEAD(&mgr->delayed);
mgr->numDelayed = 0;
- _glthread_INIT_MUTEX(mgr->mutex);
+ pipe_mutex_init(mgr->mutex);
return &mgr->base;
}
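
The reworked scan above walks the delayed list in release order: expired, incompatible buffers are destroyed, the first still-hot buffer ends the first phase, and the hot tail is then searched for a compatible match only. A self-contained sketch of that two-phase pattern, with 'expired' and 'compatible' as simplified stand-ins for util_time_timeout() and pb_cache_is_buffer_compat():

#include <stdbool.h>
#include <stddef.h>

struct entry {
   struct entry *prev, *next;   /* circular list with a sentinel head */
   bool expired;
   bool compatible;
};

static void
destroy_entry(struct entry *e)
{
   /* unlink; the real code also releases the underlying buffer */
   e->prev->next = e->next;
   e->next->prev = e->prev;
}

static struct entry *
find_reusable(struct entry *head)
{
   struct entry *curr = head->next;
   struct entry *next;

   /* Phase 1: the cold (expired) front of the list. The list is ordered
    * by release time, so the first non-expired entry starts the hot tail. */
   while (curr != head) {
      next = curr->next;
      if (curr->compatible)
         return curr;           /* reuse, expired or not */
      if (curr->expired)
         destroy_entry(curr);   /* evict stale, incompatible buffers */
      else
         break;                 /* this and everything after is still hot */
      curr = next;
   }

   /* Phase 2: the hot tail -- only look for a compatible match;
    * no timeout checks are needed here. */
   while (curr != head) {
      if (curr->compatible)
         return curr;
      curr = curr->next;
   }
   return NULL;
}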
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c
index 4cb8c3bb55..5f1ed3e5a8 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c
@@ -129,7 +129,7 @@ check_random_pattern(const uint8_t *dst, size_t size,
for(i = 0; i < size; ++i) {
if(*dst++ != random_pattern[i % sizeof(random_pattern)]) {
*min_ofs = MIN2(*min_ofs, i);
- *max_ofs = MIN2(*max_ofs, i);
+ *max_ofs = MAX2(*max_ofs, i);
result = FALSE;
}
}
@@ -138,12 +138,30 @@ check_random_pattern(const uint8_t *dst, size_t size,
static void
-pb_debug_buffer_destroy(struct pb_buffer *_buf)
+pb_debug_buffer_fill(struct pb_debug_buffer *buf)
{
- struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
uint8_t *map;
- assert(!buf->base.base.refcount);
+ map = pb_map(buf->buffer, PIPE_BUFFER_USAGE_CPU_WRITE);
+ assert(map);
+ if(map) {
+ fill_random_pattern(map, buf->underflow_size);
+ fill_random_pattern(map + buf->underflow_size + buf->base.base.size,
+ buf->overflow_size);
+ pb_unmap(buf->buffer);
+ }
+}
+
+
+/**
+ * Check for under/over flows.
+ *
+ * Should be called with the buffer unmapped.
+ */
+static void
+pb_debug_buffer_check(struct pb_debug_buffer *buf)
+{
+ uint8_t *map;
map = pb_map(buf->buffer, PIPE_BUFFER_USAGE_CPU_READ);
assert(map);
@@ -154,24 +172,45 @@ pb_debug_buffer_destroy(struct pb_buffer *_buf)
underflow = !check_random_pattern(map, buf->underflow_size,
&min_ofs, &max_ofs);
if(underflow) {
- debug_printf("buffer underflow (%u of %u bytes) detected\n",
- buf->underflow_size - min_ofs,
- buf->underflow_size);
+ debug_printf("buffer underflow (offset -%u%s to -%u bytes) detected\n",
+ buf->underflow_size - min_ofs,
+ min_ofs == 0 ? "+" : "",
+ buf->underflow_size - max_ofs);
}
overflow = !check_random_pattern(map + buf->underflow_size + buf->base.base.size,
buf->overflow_size,
&min_ofs, &max_ofs);
if(overflow) {
- debug_printf("buffer overflow (%u of %u bytes) detected\n",
+ debug_printf("buffer overflow (size %u plus offset %u to %u%s bytes) detected\n",
+ buf->base.base.size,
+ min_ofs,
max_ofs,
- buf->overflow_size);
+ max_ofs == buf->overflow_size - 1 ? "+" : "");
}
debug_assert(!underflow && !overflow);
-
+
+ /* re-fill if not aborted */
+ if(underflow)
+ fill_random_pattern(map, buf->underflow_size);
+ if(overflow)
+ fill_random_pattern(map + buf->underflow_size + buf->base.base.size,
+ buf->overflow_size);
+
pb_unmap(buf->buffer);
}
+}
+
+
+static void
+pb_debug_buffer_destroy(struct pb_buffer *_buf)
+{
+ struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
+
+ assert(!buf->base.base.refcount);
+
+ pb_debug_buffer_check(buf);
pb_reference(&buf->buffer, NULL);
FREE(buf);
@@ -183,9 +222,14 @@ pb_debug_buffer_map(struct pb_buffer *_buf,
unsigned flags)
{
struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
- void *map = pb_map(buf->buffer, flags);
+ void *map;
+
+ pb_debug_buffer_check(buf);
+
+ map = pb_map(buf->buffer, flags);
if(!map)
return NULL;
+
return (uint8_t *)map + buf->underflow_size;
}
@@ -195,6 +239,8 @@ pb_debug_buffer_unmap(struct pb_buffer *_buf)
{
struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
pb_unmap(buf->buffer);
+
+ pb_debug_buffer_check(buf);
}
@@ -227,7 +273,6 @@ pb_debug_manager_create_buffer(struct pb_manager *_mgr,
struct pb_debug_buffer *buf;
struct pb_desc real_desc;
size_t real_size;
- uint8_t *map;
buf = CALLOC_STRUCT(pb_debug_buffer);
if(!buf)
@@ -262,13 +307,7 @@ pb_debug_manager_create_buffer(struct pb_manager *_mgr,
buf->underflow_size = mgr->band_size;
buf->overflow_size = buf->buffer->base.size - buf->underflow_size - size;
- map = pb_map(buf->buffer, PIPE_BUFFER_USAGE_CPU_WRITE);
- assert(map);
- if(map) {
- fill_random_pattern(map, buf->underflow_size);
- fill_random_pattern(map + buf->underflow_size + size, buf->overflow_size);
- pb_unmap(buf->buffer);
- }
+ pb_debug_buffer_fill(buf);
return &buf->base;
}
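
With these changes the guard-band logic is shared: pb_debug_buffer_fill() seeds the bands once at creation, and pb_debug_buffer_check() re-verifies them on every map, unmap and destroy rather than only at destruction. A sketch of the layout as read from the hunks above (band widths illustrative):

/*
 *   |<- underflow_size ->|<-- base.base.size -->|<- overflow_size ->|
 *   +--------------------+----------------------+-------------------+
 *   |   random pattern   |      user data       |  random pattern   |
 *   +--------------------+----------------------+-------------------+
 *   ^                    ^
 *   start of buf->buffer pointer returned by pb_debug_buffer_map()
 *
 * Any write that strays outside the user region disturbs a band and is
 * reported by the next pb_debug_buffer_check().
 */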
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c
index b40eb6cc90..e8c7f8e1f8 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c
@@ -53,7 +53,7 @@ struct mm_pb_manager
{
struct pb_manager base;
- _glthread_Mutex mutex;
+ pipe_mutex mutex;
size_t size;
struct mem_block *heap;
@@ -99,10 +99,10 @@ mm_buffer_destroy(struct pb_buffer *buf)
assert(buf->base.refcount == 0);
- _glthread_LOCK_MUTEX(mm->mutex);
+ pipe_mutex_lock(mm->mutex);
mmFreeMem(mm_buf->block);
FREE(buf);
- _glthread_UNLOCK_MUTEX(mm->mutex);
+ pipe_mutex_unlock(mm->mutex);
}
@@ -158,11 +158,11 @@ mm_bufmgr_create_buffer(struct pb_manager *mgr,
if(desc->alignment % (1 << mm->align2))
return NULL;
- _glthread_LOCK_MUTEX(mm->mutex);
+ pipe_mutex_lock(mm->mutex);
mm_buf = CALLOC_STRUCT(mm_buffer);
if (!mm_buf) {
- _glthread_UNLOCK_MUTEX(mm->mutex);
+ pipe_mutex_unlock(mm->mutex);
return NULL;
}
@@ -185,7 +185,7 @@ mm_bufmgr_create_buffer(struct pb_manager *mgr,
mm_buf->block = mmAllocMem(mm->heap, size, mm->align2, 0);
if(!mm_buf->block) {
FREE(mm_buf);
- _glthread_UNLOCK_MUTEX(mm->mutex);
+ pipe_mutex_unlock(mm->mutex);
return NULL;
}
}
@@ -194,7 +194,7 @@ mm_bufmgr_create_buffer(struct pb_manager *mgr,
assert(0 <= (unsigned)mm_buf->block->ofs && (unsigned)mm_buf->block->ofs < mm->size);
assert(size <= (unsigned)mm_buf->block->size && (unsigned)mm_buf->block->ofs + (unsigned)mm_buf->block->size <= mm->size);
- _glthread_UNLOCK_MUTEX(mm->mutex);
+ pipe_mutex_unlock(mm->mutex);
return SUPER(mm_buf);
}
@@ -204,14 +204,14 @@ mm_bufmgr_destroy(struct pb_manager *mgr)
{
struct mm_pb_manager *mm = mm_pb_manager(mgr);
- _glthread_LOCK_MUTEX(mm->mutex);
+ pipe_mutex_lock(mm->mutex);
mmDestroy(mm->heap);
pb_unmap(mm->buffer);
pb_reference(&mm->buffer, NULL);
- _glthread_UNLOCK_MUTEX(mm->mutex);
+ pipe_mutex_unlock(mm->mutex);
FREE(mgr);
}
@@ -236,7 +236,7 @@ mm_bufmgr_create_from_buffer(struct pb_buffer *buffer,
mm->size = size;
mm->align2 = align2; /* 64-byte alignment */
- _glthread_INIT_MUTEX(mm->mutex);
+ pipe_mutex_init(mm->mutex);
mm->buffer = buffer;
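
Locking aside, the mm manager suballocates a single pinned provider buffer through Mesa's memory-range allocator; mmAllocMem(), mmFreeMem() and mmDestroy() all appear in the hunks above. A hedged usage sketch with illustrative sizes (the mmInit() signature and header location are assumed from Mesa's mm code):

#include "util/u_mm.h"   /* header location assumed */

static void
mm_heap_demo(void)
{
   /* Manage a 1 MiB range starting at offset 0. */
   struct mem_block *heap = mmInit(0, 1024 * 1024);
   struct mem_block *block;

   /* 4 KiB block, 2^6 = 64-byte aligned -- the same call shape as
    * mm_bufmgr_create_buffer() above. */
   block = mmAllocMem(heap, 4096, 6, 0);
   if (block) {
      /* The suballocation lives at block->ofs inside the provider
       * buffer, i.e. its CPU address is mm->map + block->ofs. */
      mmFreeMem(block);
   }
   mmDestroy(heap);
}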
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c
index 93d2cc9635..3ef72c5bbb 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c
@@ -56,7 +56,7 @@ struct pool_pb_manager
{
struct pb_manager base;
- _glthread_Mutex mutex;
+ pipe_mutex mutex;
size_t bufSize;
size_t bufAlign;
@@ -110,10 +110,10 @@ pool_buffer_destroy(struct pb_buffer *buf)
assert(pool_buf->base.base.refcount == 0);
- _glthread_LOCK_MUTEX(pool->mutex);
+ pipe_mutex_lock(pool->mutex);
LIST_ADD(&pool_buf->head, &pool->free);
pool->numFree++;
- _glthread_UNLOCK_MUTEX(pool->mutex);
+ pipe_mutex_unlock(pool->mutex);
}
@@ -124,9 +124,9 @@ pool_buffer_map(struct pb_buffer *buf, unsigned flags)
struct pool_pb_manager *pool = pool_buf->mgr;
void *map;
- _glthread_LOCK_MUTEX(pool->mutex);
+ pipe_mutex_lock(pool->mutex);
map = (unsigned char *) pool->map + pool_buf->start;
- _glthread_UNLOCK_MUTEX(pool->mutex);
+ pipe_mutex_unlock(pool->mutex);
return map;
}
@@ -171,10 +171,10 @@ pool_bufmgr_create_buffer(struct pb_manager *mgr,
assert(size == pool->bufSize);
assert(pool->bufAlign % desc->alignment == 0);
- _glthread_LOCK_MUTEX(pool->mutex);
+ pipe_mutex_lock(pool->mutex);
if (pool->numFree == 0) {
- _glthread_UNLOCK_MUTEX(pool->mutex);
+ pipe_mutex_unlock(pool->mutex);
debug_printf("warning: out of fixed size buffer objects\n");
return NULL;
}
@@ -182,7 +182,7 @@ pool_bufmgr_create_buffer(struct pb_manager *mgr,
item = pool->free.next;
if (item == &pool->free) {
- _glthread_UNLOCK_MUTEX(pool->mutex);
+ pipe_mutex_unlock(pool->mutex);
debug_printf("error: fixed size buffer pool corruption\n");
return NULL;
}
@@ -190,7 +190,7 @@ pool_bufmgr_create_buffer(struct pb_manager *mgr,
LIST_DEL(item);
--pool->numFree;
- _glthread_UNLOCK_MUTEX(pool->mutex);
+ pipe_mutex_unlock(pool->mutex);
pool_buf = LIST_ENTRY(struct pool_buffer, item, head);
assert(pool_buf->base.base.refcount == 0);
@@ -206,14 +206,14 @@ static void
pool_bufmgr_destroy(struct pb_manager *mgr)
{
struct pool_pb_manager *pool = pool_pb_manager(mgr);
- _glthread_LOCK_MUTEX(pool->mutex);
+ pipe_mutex_lock(pool->mutex);
FREE(pool->bufs);
pb_unmap(pool->buffer);
pb_reference(&pool->buffer, NULL);
- _glthread_UNLOCK_MUTEX(pool->mutex);
+ pipe_mutex_unlock(pool->mutex);
FREE(mgr);
}
@@ -246,7 +246,7 @@ pool_bufmgr_create(struct pb_manager *provider,
pool->bufSize = bufSize;
pool->bufAlign = desc->alignment;
- _glthread_INIT_MUTEX(pool->mutex);
+ pipe_mutex_init(pool->mutex);
pool->buffer = provider->create_buffer(provider, numBufs*bufSize, desc);
if (!pool->buffer)
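
Because every pool buffer is a fixed-size slot in one mapped provider buffer, allocation is a free-list pop and mapping is pointer arithmetic. A simplified, self-contained sketch of those invariants (plain pointers instead of Mesa's LIST macros; the names are illustrative):

#include <stddef.h>

struct slot {
   struct slot *next_free;
};

struct pool {
   unsigned char *map;   /* CPU mapping of the provider buffer */
   size_t bufSize;       /* size of every slot */
   struct slot *free;    /* head of the free list */
};

/* O(1) allocate: pop a slot; returning NULL mirrors the
 * "out of fixed size buffer objects" path above. */
static struct slot *
pool_alloc(struct pool *p)
{
   struct slot *s = p->free;
   if (s)
      p->free = s->next_free;
   return s;
}

/* O(1) free: push the slot back, as pool_buffer_destroy() does
 * with LIST_ADD. */
static void
pool_free(struct pool *p, struct slot *s)
{
   s->next_free = p->free;
   p->free = s;
}

/* Mapping is pure pointer arithmetic, as in pool_buffer_map(). */
static void *
pool_map(struct pool *p, size_t slot_index)
{
   return p->map + slot_index * p->bufSize;
}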
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
index af307e265a..8698c4cff6 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
@@ -30,6 +30,8 @@
* @file
* S-lab pool implementation.
*
+ * @sa http://en.wikipedia.org/wiki/Slab_allocation
+ *
* @author Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
* @author Jose Fonseca <jrfonseca@tungstengraphics.com>
*/
@@ -49,46 +51,96 @@
struct pb_slab;
+
+/**
+ * Buffer in a slab.
+ *
+ * Sub-allocation of a contiguous buffer.
+ */
struct pb_slab_buffer
{
struct pb_buffer base;
struct pb_slab *slab;
+
struct list_head head;
+
unsigned mapCount;
+
+ /** Offset relative to the start of the slab buffer. */
size_t start;
- _glthread_Cond event;
+
+ /** Used when validating, to signal that all mappings are finished */
+ /* TODO: validation does not actually reach this stage yet */
+ pipe_condvar event;
};
+
+/**
+ * Slab -- a contiguous piece of memory.
+ */
struct pb_slab
{
struct list_head head;
struct list_head freeBuffers;
size_t numBuffers;
size_t numFree;
+
struct pb_slab_buffer *buffers;
struct pb_slab_manager *mgr;
+ /** Buffer from the provider */
struct pb_buffer *bo;
+
void *virtual;
};
+
+/**
+ * Adds/removes slabs as needed to satisfy the allocation and destruction
+ * of individual buffers.
+ */
struct pb_slab_manager
{
struct pb_manager base;
+ /** Where we get our buffers from */
struct pb_manager *provider;
+
+ /** Size of the buffers we hand out downstream */
size_t bufSize;
+
+ /** Size of the buffers we request upstream */
size_t slabSize;
+
+ /**
+ * Alignment and usage with which the slab buffers are allocated.
+ *
+ * We can only provide buffers that are consistent (in alignment and usage)
+ * with this description.
+ */
struct pb_desc desc;
+ /**
+ * Partial slabs
+ *
+ * Full slabs are not stored in any list. Empty slabs are destroyed
+ * immediately.
+ */
struct list_head slabs;
- struct list_head freeSlabs;
- _glthread_Mutex mutex;
+ pipe_mutex mutex;
};
+
/**
+ * Wrapper around several slabs, therefore capable of handling buffers of
+ * multiple sizes.
+ *
+ * This buffer manager just dispatches buffer allocations to the appropriate slab
+ * manager, according to the requested buffer size, or bypasses the slab
+ * managers altogether for even larger sizes.
+ *
* The data of this structure remains constant after
* initialization and thus needs no mutex protection.
*/
@@ -97,12 +149,17 @@ struct pb_slab_range_manager
struct pb_manager base;
struct pb_manager *provider;
+
size_t minBufSize;
size_t maxBufSize;
+
+ /** @sa pb_slab_manager::desc */
struct pb_desc desc;
unsigned numBuckets;
size_t *bucketSizes;
+
+ /** Array of pb_slab_manager, one for each bucket size */
struct pb_manager **buckets;
};
@@ -143,7 +200,7 @@ pb_slab_buffer_destroy(struct pb_buffer *_buf)
struct pb_slab_manager *mgr = slab->mgr;
struct list_head *list = &buf->head;
- _glthread_LOCK_MUTEX(mgr->mutex);
+ pipe_mutex_lock(mgr->mutex);
assert(buf->base.base.refcount == 0);
@@ -156,30 +213,16 @@ pb_slab_buffer_destroy(struct pb_buffer *_buf)
if (slab->head.next == &slab->head)
LIST_ADDTAIL(&slab->head, &mgr->slabs);
+ /* If the slab becomes totally empty, free it */
if (slab->numFree == slab->numBuffers) {
list = &slab->head;
- LIST_DEL(list);
- LIST_ADDTAIL(list, &mgr->freeSlabs);
+ LIST_DELINIT(list);
+ pb_reference(&slab->bo, NULL);
+ FREE(slab->buffers);
+ FREE(slab);
}
- if (mgr->slabs.next == &mgr->slabs || slab->numFree
- != slab->numBuffers) {
-
- struct list_head *next;
-
- for (list = mgr->freeSlabs.next, next = list->next; list
- != &mgr->freeSlabs; list = next, next = list->next) {
-
- slab = LIST_ENTRY(struct pb_slab, list, head);
-
- LIST_DELINIT(list);
- pb_reference(&slab->bo, NULL);
- FREE(slab->buffers);
- FREE(slab);
- }
- }
-
- _glthread_UNLOCK_MUTEX(mgr->mutex);
+ pipe_mutex_unlock(mgr->mutex);
}
@@ -201,7 +244,7 @@ pb_slab_buffer_unmap(struct pb_buffer *_buf)
--buf->mapCount;
if (buf->mapCount == 0)
- _glthread_COND_BROADCAST(buf->event);
+ pipe_condvar_broadcast(buf->event);
}
@@ -225,6 +268,11 @@ pb_slab_buffer_vtbl = {
};
+/**
+ * Create a new slab.
+ *
+ * Called when we run out of free slabs.
+ */
static enum pipe_error
pb_slab_create(struct pb_slab_manager *mgr)
{
@@ -238,17 +286,14 @@ pb_slab_create(struct pb_slab_manager *mgr)
if (!slab)
return PIPE_ERROR_OUT_OF_MEMORY;
- /*
- * FIXME: We should perhaps allow some variation in slabsize in order
- * to efficiently reuse slabs.
- */
-
slab->bo = mgr->provider->create_buffer(mgr->provider, mgr->slabSize, &mgr->desc);
if(!slab->bo) {
ret = PIPE_ERROR_OUT_OF_MEMORY;
goto out_err0;
}
+ /* Record the slab's virtual address. All mappings are accessed directly
+ * through this address, so the buffer must remain pinned. */
slab->virtual = pb_map(slab->bo,
PIPE_BUFFER_USAGE_CPU_READ |
PIPE_BUFFER_USAGE_CPU_WRITE);
@@ -256,7 +301,6 @@ pb_slab_create(struct pb_slab_manager *mgr)
ret = PIPE_ERROR_OUT_OF_MEMORY;
goto out_err1;
}
-
pb_unmap(slab->bo);
numBuffers = slab->bo->base.size / mgr->bufSize;
@@ -283,12 +327,13 @@ pb_slab_create(struct pb_slab_manager *mgr)
buf->slab = slab;
buf->start = i* mgr->bufSize;
buf->mapCount = 0;
- _glthread_INIT_COND(buf->event);
+ pipe_condvar_init(buf->event);
LIST_ADDTAIL(&buf->head, &slab->freeBuffers);
slab->numFree++;
buf++;
}
+ /* Add this slab to the list of partial slabs */
LIST_ADDTAIL(&slab->head, &mgr->slabs);
return PIPE_OK;
@@ -328,23 +373,29 @@ pb_slab_manager_create_buffer(struct pb_manager *_mgr,
if(!pb_check_usage(desc->usage, mgr->desc.usage))
return NULL;
- _glthread_LOCK_MUTEX(mgr->mutex);
+ pipe_mutex_lock(mgr->mutex);
+
+ /* Create a new slab if we have run out of partial slabs */
if (mgr->slabs.next == &mgr->slabs) {
(void) pb_slab_create(mgr);
if (mgr->slabs.next == &mgr->slabs) {
- _glthread_UNLOCK_MUTEX(mgr->mutex);
+ pipe_mutex_unlock(mgr->mutex);
return NULL;
}
}
+
+ /* Allocate the buffer from a partial (or just created) slab */
list = mgr->slabs.next;
slab = LIST_ENTRY(struct pb_slab, list, head);
+
+ /* If now totally full, remove it from the partial slab list */
if (--slab->numFree == 0)
LIST_DELINIT(list);
list = slab->freeBuffers.next;
LIST_DELINIT(list);
- _glthread_UNLOCK_MUTEX(mgr->mutex);
+ pipe_mutex_unlock(mgr->mutex);
buf = LIST_ENTRY(struct pb_slab_buffer, list, head);
++buf->base.base.refcount;
@@ -386,9 +437,8 @@ pb_slab_manager_create(struct pb_manager *provider,
mgr->desc = *desc;
LIST_INITHEAD(&mgr->slabs);
- LIST_INITHEAD(&mgr->freeSlabs);
- _glthread_INIT_MUTEX(mgr->mutex);
+ pipe_mutex_init(mgr->mutex);
return &mgr->base;
}
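
Finally, the dispatch described in pb_slab_range_manager's new comment reduces to a first-fit search over the bucket sizes, with a provider fallback for oversized requests. A hedged sketch (field names follow the struct above; the lookup in the tree may differ in detail):

static struct pb_manager *
choose_bucket(struct pb_slab_range_manager *mgr, size_t size)
{
   unsigned i;

   /* Buckets are ordered by increasing size; take the first that fits. */
   for (i = 0; i < mgr->numBuckets; ++i)
      if (size <= mgr->bucketSizes[i])
         return mgr->buckets[i];     /* slab manager for this size class */

   /* Bypass the slab managers altogether for larger sizes. */
   return mgr->provider;
}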