Diffstat (limited to 'src/gallium/auxiliary/pipebuffer')
-rw-r--r--   src/gallium/auxiliary/pipebuffer/Makefile            |   2
-rw-r--r--   src/gallium/auxiliary/pipebuffer/SConscript          |   2
-rw-r--r--   src/gallium/auxiliary/pipebuffer/pb_buffer.h         |  11
-rw-r--r--   src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c  | 136
-rw-r--r--   src/gallium/auxiliary/pipebuffer/pb_bufmgr.h         |  38
-rw-r--r--   src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c   | 344
-rw-r--r--   src/gallium/auxiliary/pipebuffer/pb_bufmgr_fenced.c  |   8
-rw-r--r--   src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c    | 495
8 files changed, 1017 insertions, 19 deletions
diff --git a/src/gallium/auxiliary/pipebuffer/Makefile b/src/gallium/auxiliary/pipebuffer/Makefile
index a9fa518c67..d654dbcc91 100644
--- a/src/gallium/auxiliary/pipebuffer/Makefile
+++ b/src/gallium/auxiliary/pipebuffer/Makefile
@@ -6,9 +6,11 @@ LIBNAME = pipebuffer
C_SOURCES = \
pb_buffer_fenced.c \
pb_buffer_malloc.c \
+ pb_bufmgr_cache.c \
pb_bufmgr_fenced.c \
pb_bufmgr_mm.c \
pb_bufmgr_pool.c \
+ pb_bufmgr_slab.c \
pb_winsys.c
include ../../Makefile.template
diff --git a/src/gallium/auxiliary/pipebuffer/SConscript b/src/gallium/auxiliary/pipebuffer/SConscript
index 3d41fd84a6..604a217982 100644
--- a/src/gallium/auxiliary/pipebuffer/SConscript
+++ b/src/gallium/auxiliary/pipebuffer/SConscript
@@ -5,9 +5,11 @@ pipebuffer = env.ConvenienceLibrary(
source = [
'pb_buffer_fenced.c',
'pb_buffer_malloc.c',
+ 'pb_bufmgr_cache.c',
'pb_bufmgr_fenced.c',
'pb_bufmgr_mm.c',
'pb_bufmgr_pool.c',
+ 'pb_bufmgr_slab.c',
'pb_winsys.c',
])
diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer.h b/src/gallium/auxiliary/pipebuffer/pb_buffer.h
index 4b09c80b2a..49705cb862 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_buffer.h
+++ b/src/gallium/auxiliary/pipebuffer/pb_buffer.h
@@ -193,6 +193,17 @@ pb_reference(struct pb_buffer **dst,
/**
+ * Utility function to check whether a requested alignment is consistent with
+ * the provided alignment.
+ */
+static INLINE int
+pb_check_alignment(size_t requested, size_t provided)
+{
+ return requested <= provided && (provided % requested) == 0;
+}
+
+
+/**
* Malloc-based buffer to store data that can't be used by the graphics
* hardware.
*/
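
The new pb_check_alignment() helper encodes the rule that a provided alignment satisfies a requested one only when it is at least as large and an exact multiple of it. A minimal standalone sketch of the same predicate, outside the pb_buffer.h INLINE machinery (the check_alignment name here is a local stand-in):

#include <assert.h>
#include <stddef.h>

/* Same rule as pb_check_alignment(): the provided alignment must be
 * at least the requested one and an exact multiple of it. */
static int
check_alignment(size_t requested, size_t provided)
{
   return requested <= provided && (provided % requested) == 0;
}

int main(void)
{
   assert(check_alignment(16, 64));    /* 64 >= 16 and 64 % 16 == 0 */
   assert(!check_alignment(64, 16));   /* provided is too weak */
   assert(!check_alignment(24, 64));   /* 64 is not a multiple of 24 */
   return 0;
}
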
diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
index 24ba61a0b7..2fa0842971 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
@@ -34,7 +34,14 @@
*/
+#include "pipe/p_config.h"
+
+#if defined(PIPE_OS_LINUX)
+#include <unistd.h>
+#endif
+
#include "pipe/p_compiler.h"
+#include "pipe/p_error.h"
#include "pipe/p_debug.h"
#include "pipe/p_winsys.h"
#include "pipe/p_thread.h"
@@ -44,9 +51,6 @@
#include "pb_buffer.h"
#include "pb_buffer_fenced.h"
-#ifndef WIN32
-#include <unistd.h>
-#endif
/**
@@ -54,6 +58,13 @@
*/
#define SUPER(__derived) (&(__derived)->base)
+#define PIPE_BUFFER_USAGE_CPU_READ_WRITE \
+ ( PIPE_BUFFER_USAGE_CPU_READ | PIPE_BUFFER_USAGE_CPU_WRITE )
+#define PIPE_BUFFER_USAGE_GPU_READ_WRITE \
+ ( PIPE_BUFFER_USAGE_GPU_READ | PIPE_BUFFER_USAGE_GPU_WRITE )
+#define PIPE_BUFFER_USAGE_WRITE \
+ ( PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_GPU_WRITE )
+
struct fenced_buffer_list
{
@@ -76,6 +87,15 @@ struct fenced_buffer
struct pb_buffer *buffer;
+ /* FIXME: protect access with mutex */
+
+ /**
+ * A bitmask of PIPE_BUFFER_USAGE_CPU/GPU_READ/WRITE describing the current
+ * buffer usage.
+ */
+ unsigned flags;
+
+ unsigned mapcount;
struct pipe_fence_handle *fence;
struct list_head head;
@@ -97,7 +117,10 @@ _fenced_buffer_add(struct fenced_buffer *fenced_buf)
{
struct fenced_buffer_list *fenced_list = fenced_buf->list;
+ assert(fenced_buf->base.base.refcount);
+ assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
assert(fenced_buf->fence);
+
assert(!fenced_buf->head.prev);
assert(!fenced_buf->head.next);
LIST_ADDTAIL(&fenced_buf->head, &fenced_list->delayed);
@@ -111,8 +134,6 @@ _fenced_buffer_add(struct fenced_buffer *fenced_buf)
static INLINE void
_fenced_buffer_destroy(struct fenced_buffer *fenced_buf)
{
- struct fenced_buffer_list *fenced_list = fenced_buf->list;
-
assert(!fenced_buf->base.base.refcount);
assert(!fenced_buf->fence);
pb_reference(&fenced_buf->buffer, NULL);
@@ -128,8 +149,8 @@ _fenced_buffer_remove(struct fenced_buffer *fenced_buf)
assert(fenced_buf->fence);
- assert(winsys->fence_signalled(winsys, fenced_buf->fence, 0) == 0);
winsys->fence_reference(winsys, &fenced_buf->fence, NULL);
+ fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;
assert(fenced_buf->head.prev);
assert(fenced_buf->head.next);
@@ -174,6 +195,9 @@ _fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
break;
prev_fence = fenced_buf->fence;
}
+ else {
+ assert(winsys->fence_signalled(winsys, fenced_buf->fence, 0) == 0);
+ }
_fenced_buffer_remove(fenced_buf);
@@ -183,6 +207,40 @@ _fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
}
+/**
+ * Serialize writes, but allow concurrent reads.
+ */
+static INLINE enum pipe_error
+fenced_buffer_serialize(struct fenced_buffer *fenced_buf, unsigned flags)
+{
+ struct fenced_buffer_list *fenced_list = fenced_buf->list;
+ struct pipe_winsys *winsys = fenced_list->winsys;
+
+ /* Allow concurrent reads */
+ if(((fenced_buf->flags | flags) & PIPE_BUFFER_USAGE_WRITE) == 0)
+ return PIPE_OK;
+
+ /* Wait for the CPU to finish */
+ if(fenced_buf->mapcount) {
+ /* FIXME: Use thread condition variables to signal when mapcount
+ * reaches zero */
+ debug_warning("attempt to write concurrently to buffer");
+ /* XXX: we must not fail here in order to support texture mipmap generation
+ return PIPE_ERROR_RETRY;
+ */
+ }
+
+ /* Wait for the GPU to finish */
+ if(fenced_buf->fence) {
+ if(winsys->fence_finish(winsys, fenced_buf->fence, 0) != 0)
+ return PIPE_ERROR_RETRY;
+ _fenced_buffer_remove(fenced_buf);
+ }
+
+ return PIPE_OK;
+}
+
+
static void
fenced_buffer_destroy(struct pb_buffer *buf)
{
@@ -199,6 +257,7 @@ fenced_buffer_destroy(struct pb_buffer *buf)
prev = curr->prev;
do {
fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
+ assert(winsys->fence_signalled(winsys, fenced_buf->fence, 0) == 0);
_fenced_buffer_remove(fenced_buf);
curr = prev;
prev = curr->prev;
@@ -219,16 +278,30 @@ static void *
fenced_buffer_map(struct pb_buffer *buf,
unsigned flags)
{
- struct fenced_buffer *fenced_buf = fenced_buffer(buf);
- return pb_map(fenced_buf->buffer, flags);
+ struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+ void *map;
+ assert((flags & ~PIPE_BUFFER_USAGE_CPU_READ_WRITE) == 0);
+
+ if(fenced_buffer_serialize(fenced_buf, flags) != PIPE_OK)
+ return NULL;
+
+ map = pb_map(fenced_buf->buffer, flags);
+ if(map)
+ ++fenced_buf->mapcount;
+ fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE;
+ return map;
}
static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
- struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+ struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+ assert(fenced_buf->mapcount);
pb_unmap(fenced_buf->buffer);
+ --fenced_buf->mapcount;
+ if(!fenced_buf->mapcount)
+ fenced_buf->flags &= ~PIPE_BUFFER_USAGE_CPU_READ_WRITE;
}
@@ -281,16 +354,49 @@ void
buffer_fence(struct pb_buffer *buf,
struct pipe_fence_handle *fence)
{
- struct fenced_buffer *fenced_buf = fenced_buffer(buf);
- struct fenced_buffer_list *fenced_list = fenced_buf->list;
- struct pipe_winsys *winsys = fenced_list->winsys;
+ struct fenced_buffer *fenced_buf;
+ struct fenced_buffer_list *fenced_list;
+ struct pipe_winsys *winsys;
+ /* FIXME: receive this as a parameter */
+ unsigned flags = fence ? PIPE_BUFFER_USAGE_GPU_READ_WRITE : 0;
+
+ /* This is a public function, so be extra cautious with the buffer passed
+ * in: it frequently receives null buffers, or pointers to buffers other
+ * than fenced buffers. */
+ assert(buf);
+ if(!buf)
+ return;
+ assert(buf->vtbl == &fenced_buffer_vtbl);
+ if(buf->vtbl != &fenced_buffer_vtbl)
+ return;
+
+ fenced_buf = fenced_buffer(buf);
+ fenced_list = fenced_buf->list;
+ winsys = fenced_list->winsys;
+
+ if(fence == fenced_buf->fence) {
+ /* Handle the same fence case specially, not only because it is a fast
+ * path, but mostly to avoid serializing two writes with the same fence,
+ * as that would bring the hardware down to synchronous operation without
+ * any benefit.
+ */
+ fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE;
+ return;
+ }
+
+ if(fenced_buffer_serialize(fenced_buf, flags) != PIPE_OK) {
+ /* FIXME: propagate error */
+ (void)0;
+ }
_glthread_LOCK_MUTEX(fenced_list->mutex);
if (fenced_buf->fence)
_fenced_buffer_remove(fenced_buf);
- winsys->fence_reference(winsys, &fenced_buf->fence, fence);
- if (fenced_buf->fence)
+ if (fence) {
+ winsys->fence_reference(winsys, &fenced_buf->fence, fence);
+ fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE;
_fenced_buffer_add(fenced_buf);
+ }
_glthread_UNLOCK_MUTEX(fenced_list->mutex);
}
@@ -334,7 +440,7 @@ fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list)
/* Wait on outstanding fences */
while (fenced_list->numDelayed) {
_glthread_UNLOCK_MUTEX(fenced_list->mutex);
-#ifndef WIN32
+#if defined(PIPE_OS_LINUX)
sched_yield();
#endif
_fenced_buffer_list_check_free(fenced_list, 1);
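
With the usage tracking added to pb_buffer_fenced.c, whether an access must wait reduces to one bitmask test: accesses may overlap unless the combined old and new flags contain a write bit. A small self-contained sketch of that predicate (the enum values below are hypothetical placeholders, not the real PIPE_BUFFER_USAGE_* bits):

#include <stdio.h>

/* Hypothetical stand-ins for the PIPE_BUFFER_USAGE_* flags. */
enum {
   USAGE_CPU_READ  = 1 << 0,
   USAGE_CPU_WRITE = 1 << 1,
   USAGE_GPU_READ  = 1 << 2,
   USAGE_GPU_WRITE = 1 << 3,
   USAGE_WRITE     = USAGE_CPU_WRITE | USAGE_GPU_WRITE
};

/* Mirrors the test at the top of fenced_buffer_serialize(): reads may
 * run concurrently; any write forces serialization. */
static int
needs_serialization(unsigned current, unsigned requested)
{
   return ((current | requested) & USAGE_WRITE) != 0;
}

int main(void)
{
   printf("%d\n", needs_serialization(USAGE_GPU_READ, USAGE_CPU_READ));  /* 0 */
   printf("%d\n", needs_serialization(USAGE_GPU_READ, USAGE_CPU_WRITE)); /* 1 */
   return 0;
}
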
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h b/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h
index 0cf8e92e37..8de286e3f9 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h
@@ -80,7 +80,7 @@ struct pb_manager
/**
- * Static buffer pool manager.
+ * Static buffer pool sub-allocator.
*
* Manages the allocation of equally sized buffers. It does so by allocating
* a single big buffer and dividing it into equally sized buffers.
@@ -94,7 +94,7 @@ pool_bufmgr_create(struct pb_manager *provider,
/**
- * Wraper around the old memory manager.
+ * Static sub-allocator based on the old memory manager.
*
* It manages buffers of different sizes. It does so by allocating a buffer
* with the size of the heap, and then using the old mm memory manager to manage
@@ -114,6 +114,40 @@ mm_bufmgr_create_from_buffer(struct pb_buffer *buffer,
size_t size, size_t align2);
+/**
+ * Slab sub-allocator.
+ */
+struct pb_manager *
+pb_slab_manager_create(struct pb_manager *provider,
+ size_t bufSize,
+ size_t slabSize,
+ const struct pb_desc *desc);
+
+/**
+ * Allows a range of buffer sizes by aggregating multiple slab sub-allocators
+ * with different bucket sizes.
+ */
+struct pb_manager *
+pb_slab_range_manager_create(struct pb_manager *provider,
+ size_t minBufSize,
+ size_t maxBufSize,
+ size_t slabSize,
+ const struct pb_desc *desc);
+
+
+/**
+ * Time-based buffer cache.
+ *
+ * This manager keeps recently destroyed buffers in a cache for a time
+ * interval so they can be reused by compatible allocation requests.
+ */
+struct pb_manager *
+pb_cache_manager_create(struct pb_manager *provider,
+ unsigned usecs);
+
+void
+pb_cache_flush(struct pb_manager *mgr);
+
+
/**
* Fenced buffer manager.
*
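
The managers declared above are designed to stack: each one consumes buffers from a provider and exposes the same pb_manager interface. A hypothetical setup sketch (winsys_mgr stands in for whatever pb_manager a winsys supplies, and the pb_desc alignment/usage fields shown are the ones these managers consult; none of this is mandated by the header):

#include "pb_buffer.h"
#include "pb_bufmgr.h"

static struct pb_manager *
create_buffer_managers(struct pb_manager *winsys_mgr)
{
   struct pb_desc desc;
   struct pb_manager *slabs;

   desc.alignment = 64;
   desc.usage = 0;

   /* Sub-allocate 4 KiB - 64 KiB buffers out of 1 MiB slabs. */
   slabs = pb_slab_range_manager_create(winsys_mgr,
                                        4 * 1024,    /* minBufSize */
                                        64 * 1024,   /* maxBufSize */
                                        1024 * 1024, /* slabSize */
                                        &desc);
   if (!slabs)
      return NULL;

   /* Keep destroyed buffers around for one second for quick reuse. */
   return pb_cache_manager_create(slabs, 1000000 /* usecs */);
}
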
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c
new file mode 100644
index 0000000000..4bd3f94a6c
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c
@@ -0,0 +1,344 @@
+/**************************************************************************
+ *
+ * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Buffer cache.
+ *
+ * \author José Fonseca <jrfonseca-at-tungstengraphics-dot-com>
+ * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_debug.h"
+#include "pipe/p_winsys.h"
+#include "pipe/p_thread.h"
+#include "pipe/p_util.h"
+#include "util/u_double_list.h"
+#include "util/u_time.h"
+
+#include "pb_buffer.h"
+#include "pb_bufmgr.h"
+
+
+/**
+ * Convenience macro (type safe).
+ */
+#define SUPER(__derived) (&(__derived)->base)
+
+
+struct pb_cache_manager;
+
+
+/**
+ * Wrapper around a pipe buffer which adds delayed destruction.
+ */
+struct pb_cache_buffer
+{
+ struct pb_buffer base;
+
+ struct pb_buffer *buffer;
+ struct pb_cache_manager *mgr;
+
+ /** Caching time interval */
+ struct util_time start, end;
+
+ struct list_head head;
+};
+
+
+struct pb_cache_manager
+{
+ struct pb_manager base;
+
+ struct pb_manager *provider;
+ unsigned usecs;
+
+ _glthread_Mutex mutex;
+
+ struct list_head delayed;
+ size_t numDelayed;
+};
+
+
+static INLINE struct pb_cache_buffer *
+pb_cache_buffer(struct pb_buffer *buf)
+{
+ assert(buf);
+ return (struct pb_cache_buffer *)buf;
+}
+
+
+static INLINE struct pb_cache_manager *
+pb_cache_manager(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct pb_cache_manager *)mgr;
+}
+
+
+/**
+ * Actually destroy the buffer.
+ */
+static INLINE void
+_pb_cache_buffer_destroy(struct pb_cache_buffer *buf)
+{
+ struct pb_cache_manager *mgr = buf->mgr;
+
+ LIST_DEL(&buf->head);
+ assert(mgr->numDelayed);
+ --mgr->numDelayed;
+ assert(!buf->base.base.refcount);
+ pb_reference(&buf->buffer, NULL);
+ FREE(buf);
+}
+
+
+/**
+ * Free as many cache buffers from the list head as possible.
+ */
+static void
+_pb_cache_buffer_list_check_free(struct pb_cache_manager *mgr)
+{
+ struct list_head *curr, *next;
+ struct pb_cache_buffer *buf;
+ struct util_time now;
+
+ util_time_get(&now);
+
+ curr = mgr->delayed.next;
+ next = curr->next;
+ while(curr != &mgr->delayed) {
+ buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
+
+ if(!util_time_timeout(&buf->start, &buf->end, &now))
+ break;
+
+ _pb_cache_buffer_destroy(buf);
+
+ curr = next;
+ next = curr->next;
+ }
+}
+
+
+static void
+pb_cache_buffer_destroy(struct pb_buffer *_buf)
+{
+ struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
+ struct pb_cache_manager *mgr = buf->mgr;
+
+ _glthread_LOCK_MUTEX(mgr->mutex);
+ assert(buf->base.base.refcount == 0);
+
+ _pb_cache_buffer_list_check_free(mgr);
+
+ util_time_get(&buf->start);
+ util_time_add(&buf->start, mgr->usecs, &buf->end);
+ LIST_ADDTAIL(&buf->head, &mgr->delayed);
+ ++mgr->numDelayed;
+ _glthread_UNLOCK_MUTEX(mgr->mutex);
+}
+
+
+static void *
+pb_cache_buffer_map(struct pb_buffer *_buf,
+ unsigned flags)
+{
+ struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
+ return pb_map(buf->buffer, flags);
+}
+
+
+static void
+pb_cache_buffer_unmap(struct pb_buffer *_buf)
+{
+ struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
+ pb_unmap(buf->buffer);
+}
+
+
+static void
+pb_cache_buffer_get_base_buffer(struct pb_buffer *_buf,
+ struct pb_buffer **base_buf,
+ unsigned *offset)
+{
+ struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
+ pb_get_base_buffer(buf->buffer, base_buf, offset);
+}
+
+
+const struct pb_vtbl
+pb_cache_buffer_vtbl = {
+ pb_cache_buffer_destroy,
+ pb_cache_buffer_map,
+ pb_cache_buffer_unmap,
+ pb_cache_buffer_get_base_buffer
+};
+
+
+static INLINE boolean
+pb_cache_is_buffer_compat(struct pb_cache_buffer *buf,
+ size_t size,
+ const struct pb_desc *desc)
+{
+ /* TODO: be more lenient with size */
+ if(buf->base.base.size != size)
+ return FALSE;
+
+ if(!pb_check_alignment(desc->alignment, buf->base.base.alignment))
+ return FALSE;
+
+ /* XXX: check usage too? */
+
+ return TRUE;
+}
+
+
+static struct pb_buffer *
+pb_cache_manager_create_buffer(struct pb_manager *_mgr,
+ size_t size,
+ const struct pb_desc *desc)
+{
+ struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
+ struct pb_cache_buffer *buf;
+ struct pb_cache_buffer *curr_buf;
+ struct list_head *curr, *next;
+ struct util_time now;
+
+ _glthread_LOCK_MUTEX(mgr->mutex);
+
+ buf = NULL;
+ curr = mgr->delayed.next;
+ next = curr->next;
+
+ /* search in the expired buffers, freeing them in the process */
+ util_time_get(&now);
+ while(curr != &mgr->delayed) {
+ curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
+ if(!buf && pb_cache_is_buffer_compat(curr_buf, size, desc))
+ buf = curr_buf;
+ else if(util_time_timeout(&curr_buf->start, &curr_buf->end, &now))
+ _pb_cache_buffer_destroy(curr_buf);
+ else
+ /* the rest of the list is not expired yet; search it below */
+ break;
+ curr = next;
+ next = curr->next;
+ }
+
+ /* keep searching in the hot buffers */
+ while(!buf && curr != &mgr->delayed) {
+ curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
+ if(pb_cache_is_buffer_compat(curr_buf, size, desc))
+ buf = curr_buf;
+ curr = next;
+ next = curr->next;
+ }
+
+ if(buf) {
+ LIST_DEL(&buf->head);
+ _glthread_UNLOCK_MUTEX(mgr->mutex);
+ ++buf->base.base.refcount;
+ return &buf->base;
+ }
+
+ _glthread_UNLOCK_MUTEX(mgr->mutex);
+
+ buf = CALLOC_STRUCT(pb_cache_buffer);
+ if(!buf)
+ return NULL;
+
+ buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);
+ if(!buf->buffer) {
+ FREE(buf);
+ return NULL;
+ }
+
+ assert(buf->buffer->base.refcount >= 1);
+ assert(pb_check_alignment(desc->alignment, buf->buffer->base.alignment));
+ assert((buf->buffer->base.usage & desc->usage) == desc->usage);
+ assert(buf->buffer->base.size >= size);
+
+ buf->base.base.refcount = 1;
+ buf->base.base.alignment = buf->buffer->base.alignment;
+ buf->base.base.usage = buf->buffer->base.usage;
+ buf->base.base.size = buf->buffer->base.size;
+
+ buf->base.vtbl = &pb_cache_buffer_vtbl;
+ buf->mgr = mgr;
+
+ return &buf->base;
+}
+
+
+void
+pb_cache_flush(struct pb_manager *_mgr)
+{
+ struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
+ struct list_head *curr, *next;
+ struct pb_cache_buffer *buf;
+
+ _glthread_LOCK_MUTEX(mgr->mutex);
+ curr = mgr->delayed.next;
+ next = curr->next;
+ while(curr != &mgr->delayed) {
+ buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
+ _pb_cache_buffer_destroy(buf);
+ curr = next;
+ next = curr->next;
+ }
+ _glthread_UNLOCK_MUTEX(mgr->mutex);
+}
+
+
+static void
+pb_cache_manager_destroy(struct pb_manager *mgr)
+{
+ pb_cache_flush(mgr);
+ FREE(mgr);
+}
+
+
+struct pb_manager *
+pb_cache_manager_create(struct pb_manager *provider,
+ unsigned usecs)
+{
+ struct pb_cache_manager *mgr;
+
+ mgr = (struct pb_cache_manager *)CALLOC(1, sizeof(*mgr));
+ if (!mgr)
+ return NULL;
+
+ mgr->base.destroy = pb_cache_manager_destroy;
+ mgr->base.create_buffer = pb_cache_manager_create_buffer;
+ mgr->provider = provider;
+ mgr->usecs = usecs;
+ LIST_INITHEAD(&mgr->delayed);
+ mgr->numDelayed = 0;
+ _glthread_INIT_MUTEX(mgr->mutex);
+
+ return &mgr->base;
+}
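
Because cached buffers are only reclaimed lazily, a provider can run out of space while the cache still holds reusable memory. A hypothetical recovery pattern using the pb_cache_flush() entry point added above (the alloc_with_flush helper is illustrative, not part of this patch):

#include "pb_buffer.h"
#include "pb_bufmgr.h"

/* Hypothetical allocation path with a cache-flush fallback. */
static struct pb_buffer *
alloc_with_flush(struct pb_manager *mgr, size_t size,
                 const struct pb_desc *desc)
{
   struct pb_buffer *buf = mgr->create_buffer(mgr, size, desc);
   if (!buf) {
      /* Return all delayed buffers to the provider, then retry once. */
      pb_cache_flush(mgr);
      buf = mgr->create_buffer(mgr, size, desc);
   }
   return buf;
}
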
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_fenced.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_fenced.c
index bffca5b244..9d809e2f9b 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_fenced.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_fenced.c
@@ -30,7 +30,7 @@
* \file
* A buffer manager that wraps buffers in fenced buffers.
*
- * \author José Fonseca <jrfonseca@tungstengraphics.dot.com>
+ * \author José Fonseca <jrfonseca@tungstengraphics.dot.com>
*/
@@ -101,7 +101,8 @@ fenced_bufmgr_destroy(struct pb_manager *mgr)
fenced_buffer_list_destroy(fenced_mgr->fenced_list);
- fenced_mgr->provider->destroy(fenced_mgr->provider);
+ if(fenced_mgr->provider)
+ fenced_mgr->provider->destroy(fenced_mgr->provider);
FREE(fenced_mgr);
}
@@ -113,6 +114,9 @@ fenced_bufmgr_create(struct pb_manager *provider,
{
struct fenced_pb_manager *fenced_mgr;
+ if(!provider)
+ return NULL;
+
fenced_mgr = (struct fenced_pb_manager *)CALLOC(1, sizeof(*fenced_mgr));
if (!fenced_mgr)
return NULL;
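
With these checks, fenced_bufmgr_create() rejects a NULL provider and fenced_bufmgr_destroy() no longer dereferences one, so manager constructors can be chained without testing each intermediate result. A hypothetical example (assuming the usual provider/winsys argument pair; winsys_mgr and winsys are placeholders supplied by the window system layer):

#include "pb_bufmgr.h"

/* If pb_cache_manager_create() fails and returns NULL, the failure now
 * propagates cleanly out of fenced_bufmgr_create() instead of crashing. */
static struct pb_manager *
create_fenced_cached(struct pb_manager *winsys_mgr,
                     struct pipe_winsys *winsys)
{
   return fenced_bufmgr_create(pb_cache_manager_create(winsys_mgr, 1000000),
                               winsys);
}
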
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
new file mode 100644
index 0000000000..b931455056
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
@@ -0,0 +1,495 @@
+/**************************************************************************
+ *
+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * S-lab pool implementation.
+ *
+ * @author Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_error.h"
+#include "pipe/p_debug.h"
+#include "pipe/p_thread.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_util.h"
+#include "util/u_double_list.h"
+#include "util/u_time.h"
+
+#include "pb_buffer.h"
+#include "pb_bufmgr.h"
+
+
+#define DRI_SLABPOOL_ALLOC_RETRIES 100
+
+
+struct pb_slab;
+
+struct pb_slab_buffer
+{
+ struct pb_buffer base;
+
+ struct pb_slab *slab;
+ struct list_head head;
+ unsigned mapCount;
+ size_t start;
+ _glthread_Cond event;
+};
+
+struct pb_slab
+{
+ struct list_head head;
+ struct list_head freeBuffers;
+ size_t numBuffers;
+ size_t numFree;
+ struct pb_slab_buffer *buffers;
+ struct pb_slab_manager *mgr;
+
+ struct pb_buffer *bo;
+ void *virtual;
+};
+
+struct pb_slab_manager
+{
+ struct pb_manager base;
+
+ struct pb_manager *provider;
+ size_t bufSize;
+ size_t slabSize;
+ struct pb_desc desc;
+
+ struct list_head slabs;
+ struct list_head freeSlabs;
+
+ _glthread_Mutex mutex;
+};
+
+/**
+ * The data of this structure remains constant after
+ * initialization and thus needs no mutex protection.
+ */
+struct pb_slab_range_manager
+{
+ struct pb_manager base;
+
+ struct pb_manager *provider;
+ size_t minBufSize;
+ size_t maxBufSize;
+ struct pb_desc desc;
+
+ unsigned numBuckets;
+ size_t *bucketSizes;
+ struct pb_manager **buckets;
+};
+
+
+static INLINE struct pb_slab_buffer *
+pb_slab_buffer(struct pb_buffer *buf)
+{
+ assert(buf);
+ return (struct pb_slab_buffer *)buf;
+}
+
+
+static INLINE struct pb_slab_manager *
+pb_slab_manager(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct pb_slab_manager *)mgr;
+}
+
+
+static INLINE struct pb_slab_range_manager *
+pb_slab_range_manager(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct pb_slab_range_manager *)mgr;
+}
+
+
+/**
+ * Delete a buffer from the slab delayed list and put
+ * it on the slab FREE list.
+ */
+static void
+pb_slab_buffer_destroy(struct pb_buffer *_buf)
+{
+ struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
+ struct pb_slab *slab = buf->slab;
+ struct pb_slab_manager *mgr = slab->mgr;
+ struct list_head *list = &buf->head;
+
+ _glthread_LOCK_MUTEX(mgr->mutex);
+
+ assert(buf->base.base.refcount == 0);
+
+ buf->mapCount = 0;
+
+ LIST_DEL(list);
+ LIST_ADDTAIL(list, &slab->freeBuffers);
+ slab->numFree++;
+
+ if (slab->head.next == &slab->head)
+ LIST_ADDTAIL(&slab->head, &mgr->slabs);
+
+ if (slab->numFree == slab->numBuffers) {
+ list = &slab->head;
+ LIST_DEL(list);
+ LIST_ADDTAIL(list, &mgr->freeSlabs);
+ }
+
+ if (mgr->slabs.next == &mgr->slabs || slab->numFree
+ != slab->numBuffers) {
+
+ struct list_head *next;
+
+ for (list = mgr->freeSlabs.next, next = list->next; list
+ != &mgr->freeSlabs; list = next, next = list->next) {
+
+ slab = LIST_ENTRY(struct pb_slab, list, head);
+
+ LIST_DELINIT(list);
+ pb_reference(&slab->bo, NULL);
+ FREE(slab->buffers);
+ FREE(slab);
+ }
+ }
+
+ _glthread_UNLOCK_MUTEX(mgr->mutex);
+}
+
+
+static void *
+pb_slab_buffer_map(struct pb_buffer *_buf,
+ unsigned flags)
+{
+ struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
+
+ ++buf->mapCount;
+ return (void *) ((uint8_t *) buf->slab->virtual + buf->start);
+}
+
+
+static void
+pb_slab_buffer_unmap(struct pb_buffer *_buf)
+{
+ struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
+
+ --buf->mapCount;
+ if (buf->mapCount == 0)
+ _glthread_COND_BROADCAST(buf->event);
+}
+
+
+static void
+pb_slab_buffer_get_base_buffer(struct pb_buffer *_buf,
+ struct pb_buffer **base_buf,
+ unsigned *offset)
+{
+ struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
+ pb_get_base_buffer(buf->slab->bo, base_buf, offset);
+ *offset += buf->start;
+}
+
+
+static const struct pb_vtbl
+pb_slab_buffer_vtbl = {
+ pb_slab_buffer_destroy,
+ pb_slab_buffer_map,
+ pb_slab_buffer_unmap,
+ pb_slab_buffer_get_base_buffer
+};
+
+
+static enum pipe_error
+pb_slab_create(struct pb_slab_manager *mgr)
+{
+ struct pb_slab *slab;
+ struct pb_slab_buffer *buf;
+ unsigned numBuffers;
+ unsigned i;
+ enum pipe_error ret;
+
+ slab = CALLOC_STRUCT(pb_slab);
+ if (!slab)
+ return PIPE_ERROR_OUT_OF_MEMORY;
+
+ /*
+ * FIXME: We should perhaps allow some variation in slabsize in order
+ * to efficiently reuse slabs.
+ */
+
+ slab->bo = mgr->provider->create_buffer(mgr->provider, mgr->slabSize, &mgr->desc);
+ if(!slab->bo) {
+ ret = PIPE_ERROR_OUT_OF_MEMORY;
+ goto out_err0;
+ }
+
+ slab->virtual = pb_map(slab->bo,
+ PIPE_BUFFER_USAGE_CPU_READ |
+ PIPE_BUFFER_USAGE_CPU_WRITE);
+ if(!slab->virtual) {
+ ret = PIPE_ERROR_OUT_OF_MEMORY;
+ goto out_err1;
+ }
+
+ pb_unmap(slab->bo);
+
+ numBuffers = slab->bo->base.size / mgr->bufSize;
+
+ slab->buffers = CALLOC(numBuffers, sizeof(*slab->buffers));
+ if (!slab->buffers) {
+ ret = PIPE_ERROR_OUT_OF_MEMORY;
+ goto out_err1;
+ }
+
+ LIST_INITHEAD(&slab->head);
+ LIST_INITHEAD(&slab->freeBuffers);
+ slab->numBuffers = numBuffers;
+ slab->numFree = 0;
+ slab->mgr = mgr;
+
+ buf = slab->buffers;
+ for (i = 0; i < numBuffers; ++i) {
+ buf->base.base.refcount = 0;
+ buf->base.base.size = mgr->bufSize;
+ buf->base.base.alignment = 0;
+ buf->base.base.usage = 0;
+ buf->base.vtbl = &pb_slab_buffer_vtbl;
+ buf->slab = slab;
+ buf->start = i * mgr->bufSize;
+ buf->mapCount = 0;
+ _glthread_INIT_COND(buf->event);
+ LIST_ADDTAIL(&buf->head, &slab->freeBuffers);
+ slab->numFree++;
+ buf++;
+ }
+
+ LIST_ADDTAIL(&slab->head, &mgr->slabs);
+
+ return PIPE_OK;
+
+out_err1:
+ pb_reference(&slab->bo, NULL);
+out_err0:
+ FREE(slab);
+ return ret;
+}
+
+
+static struct pb_buffer *
+pb_slab_manager_create_buffer(struct pb_manager *_mgr,
+ size_t size,
+ const struct pb_desc *desc)
+{
+ struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
+ struct pb_slab_buffer *buf;
+ struct pb_slab *slab;
+ struct list_head *list;
+ int count = DRI_SLABPOOL_ALLOC_RETRIES;
+
+ /* check size */
+ assert(size == mgr->bufSize);
+ if(size != mgr->bufSize)
+ return NULL;
+
+ /* check if we can provide the requested alignment */
+ assert(pb_check_alignment(desc->alignment, mgr->desc.alignment));
+ if(!pb_check_alignment(desc->alignment, mgr->desc.alignment))
+ return NULL;
+ assert(pb_check_alignment(desc->alignment, mgr->bufSize));
+ if(!pb_check_alignment(desc->alignment, mgr->bufSize))
+ return NULL;
+
+ /* XXX: check for compatible buffer usage too? */
+
+ _glthread_LOCK_MUTEX(mgr->mutex);
+ while (mgr->slabs.next == &mgr->slabs && count > 0) {
+
+ _glthread_UNLOCK_MUTEX(mgr->mutex);
+ if (count != DRI_SLABPOOL_ALLOC_RETRIES)
+ util_time_sleep(1);
+ _glthread_LOCK_MUTEX(mgr->mutex);
+ (void) pb_slab_create(mgr);
+ count--;
+ }
+
+ list = mgr->slabs.next;
+ if (list == &mgr->slabs) {
+ _glthread_UNLOCK_MUTEX(mgr->mutex);
+ return NULL;
+ }
+ slab = LIST_ENTRY(struct pb_slab, list, head);
+ if (--slab->numFree == 0)
+ LIST_DELINIT(list);
+
+ list = slab->freeBuffers.next;
+ LIST_DELINIT(list);
+
+ _glthread_UNLOCK_MUTEX(mgr->mutex);
+ buf = LIST_ENTRY(struct pb_slab_buffer, list, head);
+
+ ++buf->base.base.refcount;
+ buf->base.base.alignment = desc->alignment;
+ buf->base.base.usage = desc->usage;
+
+ return &buf->base;
+}
+
+
+static void
+pb_slab_manager_destroy(struct pb_manager *_mgr)
+{
+ struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
+
+ /* TODO: cleanup all allocated buffers */
+ FREE(mgr);
+}
+
+
+struct pb_manager *
+pb_slab_manager_create(struct pb_manager *provider,
+ size_t bufSize,
+ size_t slabSize,
+ const struct pb_desc *desc)
+{
+ struct pb_slab_manager *mgr;
+
+ mgr = CALLOC_STRUCT(pb_slab_manager);
+ if (!mgr)
+ return NULL;
+
+ mgr->base.destroy = pb_slab_manager_destroy;
+ mgr->base.create_buffer = pb_slab_manager_create_buffer;
+
+ mgr->provider = provider;
+ mgr->bufSize = bufSize;
+ mgr->slabSize = slabSize;
+ mgr->desc = *desc;
+
+ LIST_INITHEAD(&mgr->slabs);
+ LIST_INITHEAD(&mgr->freeSlabs);
+
+ _glthread_INIT_MUTEX(mgr->mutex);
+
+ return &mgr->base;
+}
+
+
+static struct pb_buffer *
+pb_slab_range_manager_create_buffer(struct pb_manager *_mgr,
+ size_t size,
+ const struct pb_desc *desc)
+{
+ struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
+ size_t bufSize;
+ unsigned i;
+
+ bufSize = mgr->minBufSize;
+ for (i = 0; i < mgr->numBuckets; ++i) {
+ if(bufSize >= size)
+ return mgr->buckets[i]->create_buffer(mgr->buckets[i], size, desc);
+ bufSize *= 2;
+ }
+
+ /* Fall back to allocate a buffer object directly from the provider. */
+ return mgr->provider->create_buffer(mgr->provider, size, desc);
+}
+
+
+static void
+pb_slab_range_manager_destroy(struct pb_manager *_mgr)
+{
+ struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
+ unsigned i;
+
+ for (i = 0; i < mgr->numBuckets; ++i)
+ mgr->buckets[i]->destroy(mgr->buckets[i]);
+ FREE(mgr->buckets);
+ FREE(mgr->bucketSizes);
+ FREE(mgr);
+}
+
+
+struct pb_manager *
+pb_slab_range_manager_create(struct pb_manager *provider,
+ size_t minBufSize,
+ size_t maxBufSize,
+ size_t slabSize,
+ const struct pb_desc *desc)
+{
+ struct pb_slab_range_manager *mgr;
+ size_t bufSize;
+ unsigned i;
+
+ mgr = CALLOC_STRUCT(pb_slab_range_manager);
+ if (!mgr)
+ goto out_err0;
+
+ mgr->base.destroy = pb_slab_range_manager_destroy;
+ mgr->base.create_buffer = pb_slab_range_manager_create_buffer;
+
+ mgr->provider = provider;
+ mgr->minBufSize = minBufSize;
+ mgr->maxBufSize = maxBufSize;
+
+ mgr->numBuckets = 1;
+ bufSize = minBufSize;
+ while(bufSize < maxBufSize) {
+ bufSize *= 2;
+ ++mgr->numBuckets;
+ }
+
+ mgr->buckets = CALLOC(mgr->numBuckets, sizeof(*mgr->buckets));
+ if (!mgr->buckets)
+ goto out_err1;
+
+ bufSize = minBufSize;
+ for (i = 0; i < mgr->numBuckets; ++i) {
+ mgr->buckets[i] = pb_slab_manager_create(provider, bufSize, slabSize, desc);
+ if(!mgr->buckets[i])
+ goto out_err2;
+ bufSize *= 2;
+ }
+
+ return &mgr->base;
+
+out_err2:
+ for (i = 0; i < mgr->numBuckets; ++i)
+ if(mgr->buckets[i])
+ mgr->buckets[i]->destroy(mgr->buckets[i]);
+ FREE(mgr->buckets);
+out_err1:
+ FREE(mgr);
+out_err0:
+ return NULL;
+}
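
The bucket count in pb_slab_range_manager_create() grows by doubling from minBufSize until maxBufSize is covered, i.e. numBuckets = 1 + ceil(log2(maxBufSize / minBufSize)). A standalone check of that arithmetic, mirroring the loop above with hypothetical sizes:

#include <stdio.h>

int main(void)
{
   size_t minBufSize = 4096, maxBufSize = 65536;   /* 4 KiB .. 64 KiB */
   size_t bufSize = minBufSize;
   unsigned numBuckets = 1;

   /* Same doubling loop as pb_slab_range_manager_create(). */
   while (bufSize < maxBufSize) {
      bufSize *= 2;
      ++numBuckets;
   }

   /* Buckets: 4 KiB, 8 KiB, 16 KiB, 32 KiB and 64 KiB. */
   printf("%u buckets\n", numBuckets);   /* prints "5 buckets" */
   return 0;
}
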