Diffstat (limited to 'src/gallium/auxiliary/pipebuffer')
-rw-r--r--  src/gallium/auxiliary/pipebuffer/Makefile              |  22
-rw-r--r--  src/gallium/auxiliary/pipebuffer/SConscript            |  19
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_buffer.h           | 292
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c    | 500
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.h    | 107
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_buffer_malloc.c    | 186
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_bufmgr.h           | 212
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_bufmgr_alt.c       | 120
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c     | 384
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c     | 390
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_bufmgr_fenced.c    | 148
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c        | 322
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_bufmgr_ondemand.c  | 303
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c      | 321
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c      | 584
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_validate.c         | 193
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_validate.h         |  97
17 files changed, 4200 insertions, 0 deletions
diff --git a/src/gallium/auxiliary/pipebuffer/Makefile b/src/gallium/auxiliary/pipebuffer/Makefile
new file mode 100644
index 0000000000..3b501c51df
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/Makefile
@@ -0,0 +1,22 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = pipebuffer
+
+C_SOURCES = \
+ pb_buffer_fenced.c \
+ pb_buffer_malloc.c \
+ pb_bufmgr_alt.c \
+ pb_bufmgr_cache.c \
+ pb_bufmgr_debug.c \
+ pb_bufmgr_fenced.c \
+ pb_bufmgr_mm.c \
+ pb_bufmgr_ondemand.c \
+ pb_bufmgr_pool.c \
+ pb_bufmgr_slab.c \
+ pb_validate.c
+
+include ../../Makefile.template
+
+symlinks:
+
diff --git a/src/gallium/auxiliary/pipebuffer/SConscript b/src/gallium/auxiliary/pipebuffer/SConscript
new file mode 100644
index 0000000000..8e9f06abe4
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/SConscript
@@ -0,0 +1,19 @@
+Import('*')
+
+pipebuffer = env.ConvenienceLibrary(
+ target = 'pipebuffer',
+ source = [
+ 'pb_buffer_fenced.c',
+ 'pb_buffer_malloc.c',
+ 'pb_bufmgr_alt.c',
+ 'pb_bufmgr_cache.c',
+ 'pb_bufmgr_debug.c',
+ 'pb_bufmgr_fenced.c',
+ 'pb_bufmgr_mm.c',
+ 'pb_bufmgr_ondemand.c',
+ 'pb_bufmgr_pool.c',
+ 'pb_bufmgr_slab.c',
+ 'pb_validate.c',
+ ])
+
+auxiliaries.insert(0, pipebuffer)
diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer.h b/src/gallium/auxiliary/pipebuffer/pb_buffer.h
new file mode 100644
index 0000000000..d8f1f02d68
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_buffer.h
@@ -0,0 +1,292 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Generic code for buffers.
+ *
+ * Behind a pipe buffer handle there can be DMA buffers, client (or user)
+ * buffers, regular malloced buffers, etc. This file provides an abstract base
+ * buffer handle that allows the driver to cope with all those kinds of buffers
+ * in a more flexible way.
+ *
+ * A winsys driver is under no obligation to use this library, and a pipe
+ * driver should be completely agnostic about it.
+ *
+ * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+#ifndef PB_BUFFER_H_
+#define PB_BUFFER_H_
+
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_debug.h"
+#include "pipe/p_error.h"
+#include "pipe/p_state.h"
+#include "pipe/p_inlines.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+struct pb_vtbl;
+struct pb_validate;
+
+
+/**
+ * Buffer description.
+ *
+ * Used when allocating the buffer.
+ */
+struct pb_desc
+{
+ unsigned alignment;
+ unsigned usage;
+};
+
+
+/**
+ * Base class for all pb_* buffers.
+ */
+struct pb_buffer
+{
+ struct pipe_buffer base;
+
+ /**
+ * Pointer to the virtual function table.
+ *
+ * Avoid accessing this table directly. Use the inline functions below
+ * instead to avoid mistakes.
+ */
+ const struct pb_vtbl *vtbl;
+};
+
+
+/**
+ * Virtual function table for the buffer storage operations.
+ *
+ * Note that creation is not done through this table.
+ */
+struct pb_vtbl
+{
+ void (*destroy)( struct pb_buffer *buf );
+
+ /**
+ * Map the entire data store of a buffer object into the client's address
+ * space. flags is a bitmask of PIPE_BUFFER_USAGE_CPU_READ/WRITE.
+ */
+ void *(*map)( struct pb_buffer *buf,
+ unsigned flags );
+
+ void (*unmap)( struct pb_buffer *buf );
+
+ enum pipe_error (*validate)( struct pb_buffer *buf,
+ struct pb_validate *vl,
+ unsigned flags );
+
+ void (*fence)( struct pb_buffer *buf,
+ struct pipe_fence_handle *fence );
+
+ /**
+ * Get the base buffer and the offset.
+ *
+ * A buffer can be subdivided into smaller buffers. This method should return
+ * the underlying buffer, and the relative offset.
+ *
+ * Buffers without an underlying base buffer should return themselves, with
+ * a zero offset.
+ *
+ * Note that this will increase the reference count of the base buffer.
+ */
+ void (*get_base_buffer)( struct pb_buffer *buf,
+ struct pb_buffer **base_buf,
+ unsigned *offset );
+
+};
+
+
+static INLINE struct pipe_buffer *
+pb_pipe_buffer( struct pb_buffer *pbuf )
+{
+ assert(pbuf);
+ return &pbuf->base;
+}
+
+
+static INLINE struct pb_buffer *
+pb_buffer( struct pipe_buffer *buf )
+{
+ assert(buf);
+ /* Could add a magic cookie check on debug builds.
+ */
+ return (struct pb_buffer *)buf;
+}
+
+
+/* Accessor functions for pb->vtbl:
+ */
+static INLINE void *
+pb_map(struct pb_buffer *buf,
+ unsigned flags)
+{
+ assert(buf);
+ if(!buf)
+ return NULL;
+ assert(buf->base.refcount > 0);
+ return buf->vtbl->map(buf, flags);
+}
+
+
+static INLINE void
+pb_unmap(struct pb_buffer *buf)
+{
+ assert(buf);
+ if(!buf)
+ return;
+ assert(buf->base.refcount > 0);
+ buf->vtbl->unmap(buf);
+}
+
+
+static INLINE void
+pb_get_base_buffer( struct pb_buffer *buf,
+ struct pb_buffer **base_buf,
+ unsigned *offset )
+{
+ assert(buf);
+ if(!buf) {
+ *base_buf = NULL;
+ *offset = 0;
+ return;
+ }
+ }
+ assert(buf->base.refcount > 0);
+ assert(buf->vtbl->get_base_buffer);
+ buf->vtbl->get_base_buffer(buf, base_buf, offset);
+ assert(*base_buf);
+ assert(*offset < (*base_buf)->base.size);
+}
+
+
+static INLINE enum pipe_error
+pb_validate(struct pb_buffer *buf, struct pb_validate *vl, unsigned flags)
+{
+ assert(buf);
+ if(!buf)
+ return PIPE_ERROR;
+ assert(buf->vtbl->validate);
+ return buf->vtbl->validate(buf, vl, flags);
+}
+
+
+static INLINE void
+pb_fence(struct pb_buffer *buf, struct pipe_fence_handle *fence)
+{
+ assert(buf);
+ if(!buf)
+ return;
+ assert(buf->vtbl->fence);
+ buf->vtbl->fence(buf, fence);
+}
+
+
+static INLINE void
+pb_destroy(struct pb_buffer *buf)
+{
+ assert(buf);
+ if(!buf)
+ return;
+ assert(buf->base.refcount == 0);
+ buf->vtbl->destroy(buf);
+}
+
+
+/* XXX: thread safety issues!
+ */
+static INLINE void
+pb_reference(struct pb_buffer **dst,
+ struct pb_buffer *src)
+{
+ if (src) {
+ assert(src->base.refcount);
+ src->base.refcount++;
+ }
+
+ if (*dst) {
+ assert((*dst)->base.refcount);
+ if(--(*dst)->base.refcount == 0)
+ pb_destroy( *dst );
+ }
+
+ *dst = src;
+}
+
+
+/**
+ * Utility function to check whether the provided alignment is consistent with
+ * the requested one.
+ */
+static INLINE boolean
+pb_check_alignment(size_t requested, size_t provided)
+{
+ if(!requested)
+ return TRUE;
+ if(requested > provided)
+ return FALSE;
+ if(provided % requested != 0)
+ return FALSE;
+ return TRUE;
+}
+
+
+/**
+ * Utility function to check whether the provided usage flags are consistent
+ * with the requested ones.
+ */
+static INLINE boolean
+pb_check_usage(unsigned requested, unsigned provided)
+{
+ return (requested & provided) == requested ? TRUE : FALSE;
+}
+
+
+/**
+ * Malloc-based buffer to store data that can't be used by the graphics
+ * hardware.
+ */
+struct pb_buffer *
+pb_malloc_buffer_create(size_t size,
+ const struct pb_desc *desc);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*PB_BUFFER_H_*/
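
[Editorial note: a minimal sketch, not part of the commit, of how the accessor
functions above are meant to be used, backed by the malloc implementation
declared at the end of the header. The size and alignment values are
arbitrary, and the PIPE_BUFFER_USAGE_* flags are assumed to come in via
p_state.h/p_defines.h.]

    #include <string.h>
    #include "pb_buffer.h"

    static void
    example(void)
    {
       struct pb_desc desc;
       struct pb_buffer *buf;
       void *map;

       desc.alignment = 16;
       desc.usage = PIPE_BUFFER_USAGE_CPU_READ | PIPE_BUFFER_USAGE_CPU_WRITE;

       buf = pb_malloc_buffer_create(1024, &desc);
       if (!buf)
          return;

       map = pb_map(buf, PIPE_BUFFER_USAGE_CPU_WRITE);
       if (map) {
          memset(map, 0, 1024);
          pb_unmap(buf);
       }

       /* Releasing the last reference destroys the buffer. */
       pb_reference(&buf, NULL);
    }
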
diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
new file mode 100644
index 0000000000..61afdfe82a
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
@@ -0,0 +1,500 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Implementation of fenced buffers.
+ *
+ * \author Jose Fonseca <jrfonseca-at-tungstengraphics-dot-com>
+ * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+
+#include "pipe/p_config.h"
+
+#if defined(PIPE_OS_LINUX)
+#include <unistd.h>
+#include <sched.h>
+#endif
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_error.h"
+#include "pipe/p_debug.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_thread.h"
+#include "util/u_memory.h"
+#include "util/u_double_list.h"
+
+#include "pb_buffer.h"
+#include "pb_buffer_fenced.h"
+
+
+
+/**
+ * Convenience macro (type safe).
+ */
+#define SUPER(__derived) (&(__derived)->base)
+
+
+struct fenced_buffer_list
+{
+ pipe_mutex mutex;
+
+ struct pipe_winsys *winsys;
+
+ size_t numDelayed;
+
+ struct list_head delayed;
+};
+
+
+/**
+ * Wrapper around a pipe buffer which adds fencing and reference counting.
+ */
+struct fenced_buffer
+{
+ struct pb_buffer base;
+
+ struct pb_buffer *buffer;
+
+ /* FIXME: protect access with mutex */
+
+ /**
+ * A bitmask of PIPE_BUFFER_USAGE_CPU/GPU_READ/WRITE describing the current
+ * buffer usage.
+ */
+ unsigned flags;
+
+ unsigned mapcount;
+ struct pb_validate *vl;
+ unsigned validation_flags;
+ struct pipe_fence_handle *fence;
+
+ struct list_head head;
+ struct fenced_buffer_list *list;
+};
+
+
+static INLINE struct fenced_buffer *
+fenced_buffer(struct pb_buffer *buf)
+{
+ assert(buf);
+ return (struct fenced_buffer *)buf;
+}
+
+
+static INLINE void
+_fenced_buffer_add(struct fenced_buffer *fenced_buf)
+{
+ struct fenced_buffer_list *fenced_list = fenced_buf->list;
+
+ assert(fenced_buf->base.base.refcount);
+ assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
+ assert(fenced_buf->fence);
+
+ assert(!fenced_buf->head.prev);
+ assert(!fenced_buf->head.next);
+ LIST_ADDTAIL(&fenced_buf->head, &fenced_list->delayed);
+ ++fenced_list->numDelayed;
+}
+
+
+/**
+ * Actually destroy the buffer.
+ */
+static INLINE void
+_fenced_buffer_destroy(struct fenced_buffer *fenced_buf)
+{
+ assert(!fenced_buf->base.base.refcount);
+ assert(!fenced_buf->fence);
+ pb_reference(&fenced_buf->buffer, NULL);
+ FREE(fenced_buf);
+}
+
+
+static INLINE void
+_fenced_buffer_remove(struct fenced_buffer_list *fenced_list,
+ struct fenced_buffer *fenced_buf)
+{
+ struct pipe_winsys *winsys = fenced_list->winsys;
+
+ assert(fenced_buf->fence);
+ assert(fenced_buf->list == fenced_list);
+
+ winsys->fence_reference(winsys, &fenced_buf->fence, NULL);
+ fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;
+
+ assert(fenced_buf->head.prev);
+ assert(fenced_buf->head.next);
+ LIST_DEL(&fenced_buf->head);
+#ifdef DEBUG
+ fenced_buf->head.prev = NULL;
+ fenced_buf->head.next = NULL;
+#endif
+
+ assert(fenced_list->numDelayed);
+ --fenced_list->numDelayed;
+
+ if(!fenced_buf->base.base.refcount)
+ _fenced_buffer_destroy(fenced_buf);
+}
+
+
+static INLINE enum pipe_error
+_fenced_buffer_finish(struct fenced_buffer *fenced_buf)
+{
+ struct fenced_buffer_list *fenced_list = fenced_buf->list;
+ struct pipe_winsys *winsys = fenced_list->winsys;
+
+#if 0
+ debug_warning("waiting for GPU");
+#endif
+
+ assert(fenced_buf->fence);
+ if(fenced_buf->fence) {
+ if(winsys->fence_finish(winsys, fenced_buf->fence, 0) != 0) {
+ return PIPE_ERROR;
+ }
+ /* Remove from the fenced list */
+ /* TODO: remove consequents */
+ _fenced_buffer_remove(fenced_list, fenced_buf);
+ }
+
+ fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;
+ return PIPE_OK;
+}
+
+
+/**
+ * Free as many fenced buffers from the list head as possible.
+ */
+static void
+_fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
+ int wait)
+{
+ struct pipe_winsys *winsys = fenced_list->winsys;
+ struct list_head *curr, *next;
+ struct fenced_buffer *fenced_buf;
+ struct pipe_fence_handle *prev_fence = NULL;
+
+ curr = fenced_list->delayed.next;
+ next = curr->next;
+ while(curr != &fenced_list->delayed) {
+ fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
+
+ if(fenced_buf->fence != prev_fence) {
+ int signaled;
+ if (wait)
+ signaled = winsys->fence_finish(winsys, fenced_buf->fence, 0);
+ else
+ signaled = winsys->fence_signalled(winsys, fenced_buf->fence, 0);
+ if (signaled != 0)
+ break;
+ prev_fence = fenced_buf->fence;
+ }
+ else {
+ assert(winsys->fence_signalled(winsys, fenced_buf->fence, 0) == 0);
+ }
+
+ _fenced_buffer_remove(fenced_list, fenced_buf);
+
+ curr = next;
+ next = curr->next;
+ }
+}
+
+
+static void
+fenced_buffer_destroy(struct pb_buffer *buf)
+{
+ struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+ struct fenced_buffer_list *fenced_list = fenced_buf->list;
+
+ pipe_mutex_lock(fenced_list->mutex);
+ assert(fenced_buf->base.base.refcount == 0);
+ if (fenced_buf->fence) {
+ struct pipe_winsys *winsys = fenced_list->winsys;
+ if(winsys->fence_signalled(winsys, fenced_buf->fence, 0) == 0) {
+ struct list_head *curr, *prev;
+ curr = &fenced_buf->head;
+ prev = curr->prev;
+ do {
+ fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
+ assert(winsys->fence_signalled(winsys, fenced_buf->fence, 0) == 0);
+ _fenced_buffer_remove(fenced_list, fenced_buf);
+ curr = prev;
+ prev = curr->prev;
+ } while (curr != &fenced_list->delayed);
+ }
+ else {
+ /* delay destruction */
+ }
+ }
+ else {
+ _fenced_buffer_destroy(fenced_buf);
+ }
+ pipe_mutex_unlock(fenced_list->mutex);
+}
+
+
+static void *
+fenced_buffer_map(struct pb_buffer *buf,
+ unsigned flags)
+{
+ struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+ void *map;
+
+ assert(flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE);
+ assert(!(flags & ~PIPE_BUFFER_USAGE_CPU_READ_WRITE));
+ flags &= PIPE_BUFFER_USAGE_CPU_READ_WRITE;
+
+ /* Check for GPU read/write access */
+ if(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) {
+ /* Wait for the GPU to finish writing */
+ _fenced_buffer_finish(fenced_buf);
+ }
+
+#if 0
+ /* Check for CPU write access (read is OK) */
+ if(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE) {
+ /* this is legal -- just for debugging */
+ debug_warning("concurrent CPU writes");
+ }
+#endif
+
+ map = pb_map(fenced_buf->buffer, flags);
+ if(map) {
+ ++fenced_buf->mapcount;
+ fenced_buf->flags |= flags;
+ }
+
+ return map;
+}
+
+
+static void
+fenced_buffer_unmap(struct pb_buffer *buf)
+{
+ struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+ assert(fenced_buf->mapcount);
+ if(fenced_buf->mapcount) {
+ pb_unmap(fenced_buf->buffer);
+ --fenced_buf->mapcount;
+ if(!fenced_buf->mapcount)
+ fenced_buf->flags &= ~PIPE_BUFFER_USAGE_CPU_READ_WRITE;
+ }
+}
+
+
+static enum pipe_error
+fenced_buffer_validate(struct pb_buffer *buf,
+ struct pb_validate *vl,
+ unsigned flags)
+{
+ struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+ enum pipe_error ret;
+
+ if(!vl) {
+ /* invalidate */
+ fenced_buf->vl = NULL;
+ fenced_buf->validation_flags = 0;
+ return PIPE_OK;
+ }
+
+ assert(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
+ assert(!(flags & ~PIPE_BUFFER_USAGE_GPU_READ_WRITE));
+ flags &= PIPE_BUFFER_USAGE_GPU_READ_WRITE;
+
+ /* Buffer cannot be validated in two different lists */
+ if(fenced_buf->vl && fenced_buf->vl != vl)
+ return PIPE_ERROR_RETRY;
+
+ /* Do not validate if buffer is still mapped */
+ if(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE) {
+ /* TODO: wait for the thread that mapped the buffer to unmap it */
+ return PIPE_ERROR_RETRY;
+ }
+
+ if(fenced_buf->vl == vl &&
+ (fenced_buf->validation_flags & flags) == flags) {
+ /* Nothing to do -- buffer already validated */
+ return PIPE_OK;
+ }
+
+ /* Final sanity checking */
+ assert(!(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE));
+ assert(!fenced_buf->mapcount);
+
+ ret = pb_validate(fenced_buf->buffer, vl, flags);
+ if (ret != PIPE_OK)
+ return ret;
+
+ fenced_buf->vl = vl;
+ fenced_buf->validation_flags |= flags;
+
+ return PIPE_OK;
+}
+
+
+static void
+fenced_buffer_fence(struct pb_buffer *buf,
+ struct pipe_fence_handle *fence)
+{
+ struct fenced_buffer *fenced_buf;
+ struct fenced_buffer_list *fenced_list;
+ struct pipe_winsys *winsys;
+
+ fenced_buf = fenced_buffer(buf);
+ fenced_list = fenced_buf->list;
+ winsys = fenced_list->winsys;
+
+ if(fence == fenced_buf->fence) {
+ /* Nothing to do */
+ return;
+ }
+
+ assert(fenced_buf->vl);
+ assert(fenced_buf->validation_flags);
+
+ pipe_mutex_lock(fenced_list->mutex);
+ if (fenced_buf->fence)
+ _fenced_buffer_remove(fenced_list, fenced_buf);
+ if (fence) {
+ winsys->fence_reference(winsys, &fenced_buf->fence, fence);
+ fenced_buf->flags |= fenced_buf->validation_flags;
+ _fenced_buffer_add(fenced_buf);
+ }
+ pipe_mutex_unlock(fenced_list->mutex);
+
+ pb_fence(fenced_buf->buffer, fence);
+
+ fenced_buf->vl = NULL;
+ fenced_buf->validation_flags = 0;
+}
+
+
+static void
+fenced_buffer_get_base_buffer(struct pb_buffer *buf,
+ struct pb_buffer **base_buf,
+ unsigned *offset)
+{
+ struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+ pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
+}
+
+
+static const struct pb_vtbl
+fenced_buffer_vtbl = {
+ fenced_buffer_destroy,
+ fenced_buffer_map,
+ fenced_buffer_unmap,
+ fenced_buffer_validate,
+ fenced_buffer_fence,
+ fenced_buffer_get_base_buffer
+};
+
+
+struct pb_buffer *
+fenced_buffer_create(struct fenced_buffer_list *fenced_list,
+ struct pb_buffer *buffer)
+{
+ struct fenced_buffer *buf;
+
+ if(!buffer)
+ return NULL;
+
+ buf = CALLOC_STRUCT(fenced_buffer);
+ if(!buf) {
+ pb_reference(&buffer, NULL);
+ return NULL;
+ }
+
+ buf->base.base.refcount = 1;
+ buf->base.base.alignment = buffer->base.alignment;
+ buf->base.base.usage = buffer->base.usage;
+ buf->base.base.size = buffer->base.size;
+
+ buf->base.vtbl = &fenced_buffer_vtbl;
+ buf->buffer = buffer;
+ buf->list = fenced_list;
+
+ return &buf->base;
+}
+
+
+struct fenced_buffer_list *
+fenced_buffer_list_create(struct pipe_winsys *winsys)
+{
+ struct fenced_buffer_list *fenced_list;
+
+ fenced_list = CALLOC_STRUCT(fenced_buffer_list);
+ if (!fenced_list)
+ return NULL;
+
+ fenced_list->winsys = winsys;
+
+ LIST_INITHEAD(&fenced_list->delayed);
+
+ fenced_list->numDelayed = 0;
+
+ pipe_mutex_init(fenced_list->mutex);
+
+ return fenced_list;
+}
+
+
+void
+fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
+ int wait)
+{
+ pipe_mutex_lock(fenced_list->mutex);
+ _fenced_buffer_list_check_free(fenced_list, wait);
+ pipe_mutex_unlock(fenced_list->mutex);
+}
+
+
+void
+fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list)
+{
+ pipe_mutex_lock(fenced_list->mutex);
+
+ /* Wait on outstanding fences */
+ while (fenced_list->numDelayed) {
+ pipe_mutex_unlock(fenced_list->mutex);
+#if defined(PIPE_OS_LINUX)
+ sched_yield();
+#endif
+ _fenced_buffer_list_check_free(fenced_list, 1);
+ pipe_mutex_lock(fenced_list->mutex);
+ }
+
+ pipe_mutex_unlock(fenced_list->mutex);
+
+ FREE(fenced_list);
+}
+
+
diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.h b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.h
new file mode 100644
index 0000000000..b15c676194
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.h
@@ -0,0 +1,107 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Buffer fencing.
+ *
+ * "Fenced buffers" is actually a misnomer. They should be referred as
+ * "fenceable buffers", i.e, buffers that can be fenced, but I couldn't find
+ * the word "fenceable" in the dictionary.
+ *
+ * A "fenced buffer" is a decorator around a normal buffer, which adds two
+ * special properties:
+ * - the ability for the destruction to be delayed by a fence;
+ * - reference counting.
+ *
+ * Usually DMA buffers have a life-time that extends beyond that of their
+ * handle. The end-of-life is dictated by the fence signalling.
+ *
+ * Between the handle's destruction and the fence signalling, the buffer is
+ * stored in a fenced buffer list.
+ *
+ * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+#ifndef PB_BUFFER_FENCED_H_
+#define PB_BUFFER_FENCED_H_
+
+
+#include "pipe/p_debug.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+struct pipe_winsys;
+struct pipe_buffer;
+struct pipe_fence_handle;
+
+
+/**
+ * List of buffers which are awaiting fence signalling.
+ */
+struct fenced_buffer_list;
+
+
+/**
+ * Create a fenced buffer list.
+ *
+ * See also fenced_bufmgr_create for a more convenient way to use this.
+ */
+struct fenced_buffer_list *
+fenced_buffer_list_create(struct pipe_winsys *winsys);
+
+
+/**
+ * Walk the fenced buffer list to check and free signalled buffers.
+ */
+void
+fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
+ int wait);
+
+void
+fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list);
+
+
+/**
+ * Wrap a buffer in a fenced buffer.
+ *
+ * NOTE: this will not increase the buffer reference count.
+ */
+struct pb_buffer *
+fenced_buffer_create(struct fenced_buffer_list *fenced,
+ struct pb_buffer *buffer);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*PB_BUFFER_FENCED_H_*/
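
[Editorial note: a minimal sketch, not part of the commit, of how a winsys
might drive this interface. The winsys, provider_buf, vl and fence variables
are assumed to come from the surrounding driver code.]

    struct fenced_buffer_list *list;
    struct pb_buffer *buf;

    list = fenced_buffer_list_create(winsys);

    /* Wrap a provider buffer; this does not add a reference, the
     * wrapper adopts the caller's. */
    buf = fenced_buffer_create(list, provider_buf);

    /* Validate for GPU use, submit, then attach the submission fence. */
    if (pb_validate(buf, vl, PIPE_BUFFER_USAGE_GPU_READ) == PIPE_OK)
       pb_fence(buf, fence);

    /* Dropping the last reference only schedules destruction; the buffer
     * lingers on the fenced list until its fence signals. */
    pb_reference(&buf, NULL);
    fenced_buffer_list_check_free(list, 0);  /* reap signalled buffers */
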
diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_malloc.c b/src/gallium/auxiliary/pipebuffer/pb_buffer_malloc.c
new file mode 100644
index 0000000000..53f497cfb0
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_malloc.c
@@ -0,0 +1,186 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Implementation of malloc-based buffers to store data that can't be processed
+ * by the hardware.
+ *
+ * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+
+#include "pipe/p_debug.h"
+#include "util/u_memory.h"
+#include "pb_buffer.h"
+#include "pb_bufmgr.h"
+
+
+struct malloc_buffer
+{
+ struct pb_buffer base;
+ void *data;
+};
+
+
+extern const struct pb_vtbl malloc_buffer_vtbl;
+
+static INLINE struct malloc_buffer *
+malloc_buffer(struct pb_buffer *buf)
+{
+ assert(buf);
+ assert(buf->vtbl == &malloc_buffer_vtbl);
+ return (struct malloc_buffer *)buf;
+}
+
+
+static void
+malloc_buffer_destroy(struct pb_buffer *buf)
+{
+ align_free(malloc_buffer(buf)->data);
+ FREE(buf);
+}
+
+
+static void *
+malloc_buffer_map(struct pb_buffer *buf,
+ unsigned flags)
+{
+ return malloc_buffer(buf)->data;
+}
+
+
+static void
+malloc_buffer_unmap(struct pb_buffer *buf)
+{
+ /* No-op */
+}
+
+
+static enum pipe_error
+malloc_buffer_validate(struct pb_buffer *buf,
+ struct pb_validate *vl,
+ unsigned flags)
+{
+ assert(0);
+ return PIPE_ERROR;
+}
+
+
+static void
+malloc_buffer_fence(struct pb_buffer *buf,
+ struct pipe_fence_handle *fence)
+{
+ assert(0);
+}
+
+
+static void
+malloc_buffer_get_base_buffer(struct pb_buffer *buf,
+ struct pb_buffer **base_buf,
+ unsigned *offset)
+{
+ *base_buf = buf;
+ *offset = 0;
+}
+
+
+const struct pb_vtbl
+malloc_buffer_vtbl = {
+ malloc_buffer_destroy,
+ malloc_buffer_map,
+ malloc_buffer_unmap,
+ malloc_buffer_validate,
+ malloc_buffer_fence,
+ malloc_buffer_get_base_buffer
+};
+
+
+struct pb_buffer *
+pb_malloc_buffer_create(size_t size,
+ const struct pb_desc *desc)
+{
+ struct malloc_buffer *buf;
+
+ /* TODO: do a single allocation */
+
+ buf = CALLOC_STRUCT(malloc_buffer);
+ if(!buf)
+ return NULL;
+
+ buf->base.base.refcount = 1;
+ buf->base.base.alignment = desc->alignment;
+ buf->base.base.usage = desc->usage;
+ buf->base.base.size = size;
+ buf->base.vtbl = &malloc_buffer_vtbl;
+
+ buf->data = align_malloc(size, desc->alignment < sizeof(void*) ? sizeof(void*) : desc->alignment);
+ if(!buf->data) {
+ FREE(buf);
+ return NULL;
+ }
+
+ return &buf->base;
+}
+
+
+static struct pb_buffer *
+pb_malloc_bufmgr_create_buffer(struct pb_manager *mgr,
+ size_t size,
+ const struct pb_desc *desc)
+{
+ return pb_malloc_buffer_create(size, desc);
+}
+
+
+static void
+pb_malloc_bufmgr_flush(struct pb_manager *mgr)
+{
+ /* No-op */
+}
+
+
+static void
+pb_malloc_bufmgr_destroy(struct pb_manager *mgr)
+{
+ /* No-op */
+}
+
+
+static struct pb_manager
+pb_malloc_bufmgr = {
+ pb_malloc_bufmgr_destroy,
+ pb_malloc_bufmgr_create_buffer,
+ pb_malloc_bufmgr_flush
+};
+
+
+struct pb_manager *
+pb_malloc_bufmgr_create(void)
+{
+ return &pb_malloc_bufmgr;
+}
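
[Editorial note: pb_malloc_bufmgr_create returns a pointer to a single static
pb_manager rather than allocating one, which is why its flush and destroy
callbacks are no-ops; every caller shares the same stateless manager.]
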
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h b/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h
new file mode 100644
index 0000000000..0a8264a924
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h
@@ -0,0 +1,212 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Buffer management.
+ *
+ * A buffer manager does only one basic thing: it creates buffers. Actually,
+ * "buffer factory" would probably a more accurate description.
+ *
+ * You can chain buffer managers so that you can have a finer grained memory
+ * management and pooling.
+ *
+ * For example, for a simple batch buffer manager you would chain:
+ * - the native buffer manager, which provides DMA memory from the graphics
+ * memory space;
+ * - the pool buffer manager, which keeps around a pool of equally sized
+ * buffers to avoid the latency associated with the native buffer manager;
+ * - the fenced buffer manager, which delays buffer destruction until the
+ * moment the card finishes processing it.
+ *
+ * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+#ifndef PB_BUFMGR_H_
+#define PB_BUFMGR_H_
+
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_error.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+struct pb_desc;
+struct pipe_buffer;
+struct pipe_winsys;
+
+
+/**
+ * Abstract base class for all buffer managers.
+ */
+struct pb_manager
+{
+ void
+ (*destroy)( struct pb_manager *mgr );
+
+ struct pb_buffer *
+ (*create_buffer)( struct pb_manager *mgr,
+ size_t size,
+ const struct pb_desc *desc);
+
+ /**
+ * Flush all temporary-held buffers.
+ *
+ * Used mostly to aid debugging memory issues or to clean up resources when
+ * the drivers are long lived.
+ */
+ void
+ (*flush)( struct pb_manager *mgr );
+};
+
+
+/**
+ * Malloc buffer provider.
+ *
+ * Simple wrapper around pb_malloc_buffer_create for convenience.
+ */
+struct pb_manager *
+pb_malloc_bufmgr_create(void);
+
+
+/**
+ * Static buffer pool sub-allocator.
+ *
+ * Manages the allocation of equally sized buffers. It does so by allocating
+ * a single big buffer and dividing it into equally sized buffers.
+ *
+ * It is meant to manage the allocation of batch buffer pools.
+ */
+struct pb_manager *
+pool_bufmgr_create(struct pb_manager *provider,
+ size_t n, size_t size,
+ const struct pb_desc *desc);
+
+
+/**
+ * Static sub-allocator based on the old memory manager.
+ *
+ * It manages buffers of different sizes. It does so by allocating a buffer
+ * with the size of the heap, and then using the old mm memory manager to manage
+ * that heap.
+ */
+struct pb_manager *
+mm_bufmgr_create(struct pb_manager *provider,
+ size_t size, size_t align2);
+
+/**
+ * Same as mm_bufmgr_create.
+ *
+ * The buffer will be released when the manager is destroyed.
+ */
+struct pb_manager *
+mm_bufmgr_create_from_buffer(struct pb_buffer *buffer,
+ size_t size, size_t align2);
+
+
+/**
+ * Slab sub-allocator.
+ */
+struct pb_manager *
+pb_slab_manager_create(struct pb_manager *provider,
+ size_t bufSize,
+ size_t slabSize,
+ const struct pb_desc *desc);
+
+/**
+ * Allow a range of buffer sizes by aggregating multiple slab sub-allocators
+ * with different bucket sizes.
+ */
+struct pb_manager *
+pb_slab_range_manager_create(struct pb_manager *provider,
+ size_t minBufSize,
+ size_t maxBufSize,
+ size_t slabSize,
+ const struct pb_desc *desc);
+
+
+/**
+ * Time-based buffer cache.
+ *
+ * This manager keeps a cache of destroyed buffers during a time interval.
+ */
+struct pb_manager *
+pb_cache_manager_create(struct pb_manager *provider,
+ unsigned usecs);
+
+
+/**
+ * Fenced buffer manager.
+ *
+ * This manager is just meant for convenience. It wraps the buffers returned
+ * by another manager in fenced buffers, so that their destruction is delayed
+ * until the fences attached to them have signalled.
+ *
+ * NOTE: the buffer manager that provides the buffers will be destroyed
+ * at the same time.
+ */
+struct pb_manager *
+fenced_bufmgr_create(struct pb_manager *provider,
+ struct pipe_winsys *winsys);
+
+
+struct pb_manager *
+pb_alt_manager_create(struct pb_manager *provider1,
+ struct pb_manager *provider2);
+
+
+/**
+ * Ondemand buffer manager.
+ *
+ * Buffers are created in malloc'ed memory (fast and cached), and the contents
+ * are transferred to a buffer from the provider (typically in slow uncached
+ * memory) when there is an attempt to validate the buffer.
+ *
+ * Ideal for situations where one does not know beforehand whether a given
+ * buffer will effectively be used by the hardware or not.
+ */
+struct pb_manager *
+pb_ondemand_manager_create(struct pb_manager *provider);
+
+
+/**
+ * Debug buffer manager to detect buffer under- and overflows.
+ *
+ * Band size should be a multiple of the largest alignment.
+ */
+struct pb_manager *
+pb_debug_manager_create(struct pb_manager *provider, size_t band_size);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*PB_BUFMGR_H_*/
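
[Editorial note: a minimal sketch, not part of the commit, of the batch-buffer
chain described in the file comment above. The native and winsys variables
stand for the winsys' own pb_manager and winsys handle.]

    struct pb_desc desc;
    struct pb_manager *pool, *mgr;
    struct pb_buffer *batch;

    desc.alignment = 4096;
    desc.usage = PIPE_BUFFER_USAGE_GPU_READ_WRITE;

    /* 16 fixed-size batch buffers, sub-allocated from one native buffer. */
    pool = pool_bufmgr_create(native, 16, 64*1024, &desc);

    /* Delay destruction of in-flight buffers until their fences signal. */
    mgr = fenced_bufmgr_create(pool, winsys);

    batch = mgr->create_buffer(mgr, 64*1024, &desc);
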
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_alt.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_alt.c
new file mode 100644
index 0000000000..c956924cc7
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_alt.c
@@ -0,0 +1,120 @@
+/**************************************************************************
+ *
+ * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Allocate buffers from two alternative buffer providers.
+ *
+ * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_debug.h"
+#include "util/u_memory.h"
+
+#include "pb_buffer.h"
+#include "pb_bufmgr.h"
+
+
+struct pb_alt_manager
+{
+ struct pb_manager base;
+
+ struct pb_manager *provider1;
+ struct pb_manager *provider2;
+};
+
+
+static INLINE struct pb_alt_manager *
+pb_alt_manager(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct pb_alt_manager *)mgr;
+}
+
+
+static struct pb_buffer *
+pb_alt_manager_create_buffer(struct pb_manager *_mgr,
+ size_t size,
+ const struct pb_desc *desc)
+{
+ struct pb_alt_manager *mgr = pb_alt_manager(_mgr);
+ struct pb_buffer *buf;
+
+ buf = mgr->provider1->create_buffer(mgr->provider1, size, desc);
+ if(buf)
+ return buf;
+
+ buf = mgr->provider2->create_buffer(mgr->provider2, size, desc);
+ return buf;
+}
+
+
+static void
+pb_alt_manager_flush(struct pb_manager *_mgr)
+{
+ struct pb_alt_manager *mgr = pb_alt_manager(_mgr);
+
+ assert(mgr->provider1->flush);
+ if(mgr->provider1->flush)
+ mgr->provider1->flush(mgr->provider1);
+
+ assert(mgr->provider2->flush);
+ if(mgr->provider2->flush)
+ mgr->provider2->flush(mgr->provider2);
+}
+
+
+static void
+pb_alt_manager_destroy(struct pb_manager *mgr)
+{
+ FREE(mgr);
+}
+
+
+struct pb_manager *
+pb_alt_manager_create(struct pb_manager *provider1,
+ struct pb_manager *provider2)
+{
+ struct pb_alt_manager *mgr;
+
+ if(!provider1 || !provider2)
+ return NULL;
+
+ mgr = CALLOC_STRUCT(pb_alt_manager);
+ if (!mgr)
+ return NULL;
+
+ mgr->base.destroy = pb_alt_manager_destroy;
+ mgr->base.create_buffer = pb_alt_manager_create_buffer;
+ mgr->base.flush = pb_alt_manager_flush;
+ mgr->provider1 = provider1;
+ mgr->provider2 = provider2;
+
+ return &mgr->base;
+}
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c
new file mode 100644
index 0000000000..a168853713
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c
@@ -0,0 +1,384 @@
+/**************************************************************************
+ *
+ * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Buffer cache.
+ *
+ * \author Jose Fonseca <jrfonseca-at-tungstengraphics-dot-com>
+ * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_debug.h"
+#include "pipe/p_thread.h"
+#include "util/u_memory.h"
+#include "util/u_double_list.h"
+#include "util/u_time.h"
+
+#include "pb_buffer.h"
+#include "pb_bufmgr.h"
+
+
+/**
+ * Convenience macro (type safe).
+ */
+#define SUPER(__derived) (&(__derived)->base)
+
+
+struct pb_cache_manager;
+
+
+/**
+ * Wrapper around a pipe buffer which adds delayed destruction.
+ */
+struct pb_cache_buffer
+{
+ struct pb_buffer base;
+
+ struct pb_buffer *buffer;
+ struct pb_cache_manager *mgr;
+
+ /** Caching time interval */
+ struct util_time start, end;
+
+ struct list_head head;
+};
+
+
+struct pb_cache_manager
+{
+ struct pb_manager base;
+
+ struct pb_manager *provider;
+ unsigned usecs;
+
+ pipe_mutex mutex;
+
+ struct list_head delayed;
+ size_t numDelayed;
+};
+
+
+static INLINE struct pb_cache_buffer *
+pb_cache_buffer(struct pb_buffer *buf)
+{
+ assert(buf);
+ return (struct pb_cache_buffer *)buf;
+}
+
+
+static INLINE struct pb_cache_manager *
+pb_cache_manager(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct pb_cache_manager *)mgr;
+}
+
+
+/**
+ * Actually destroy the buffer.
+ */
+static INLINE void
+_pb_cache_buffer_destroy(struct pb_cache_buffer *buf)
+{
+ struct pb_cache_manager *mgr = buf->mgr;
+
+ LIST_DEL(&buf->head);
+ assert(mgr->numDelayed);
+ --mgr->numDelayed;
+ assert(!buf->base.base.refcount);
+ pb_reference(&buf->buffer, NULL);
+ FREE(buf);
+}
+
+
+/**
+ * Free as many cache buffers from the list head as possible.
+ */
+static void
+_pb_cache_buffer_list_check_free(struct pb_cache_manager *mgr)
+{
+ struct list_head *curr, *next;
+ struct pb_cache_buffer *buf;
+ struct util_time now;
+
+ util_time_get(&now);
+
+ curr = mgr->delayed.next;
+ next = curr->next;
+ while(curr != &mgr->delayed) {
+ buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
+
+ if(!util_time_timeout(&buf->start, &buf->end, &now))
+ break;
+
+ _pb_cache_buffer_destroy(buf);
+
+ curr = next;
+ next = curr->next;
+ }
+}
+
+
+static void
+pb_cache_buffer_destroy(struct pb_buffer *_buf)
+{
+ struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
+ struct pb_cache_manager *mgr = buf->mgr;
+
+ pipe_mutex_lock(mgr->mutex);
+ assert(buf->base.base.refcount == 0);
+
+ _pb_cache_buffer_list_check_free(mgr);
+
+ util_time_get(&buf->start);
+ util_time_add(&buf->start, mgr->usecs, &buf->end);
+ LIST_ADDTAIL(&buf->head, &mgr->delayed);
+ ++mgr->numDelayed;
+ pipe_mutex_unlock(mgr->mutex);
+}
+
+
+static void *
+pb_cache_buffer_map(struct pb_buffer *_buf,
+ unsigned flags)
+{
+ struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
+ return pb_map(buf->buffer, flags);
+}
+
+
+static void
+pb_cache_buffer_unmap(struct pb_buffer *_buf)
+{
+ struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
+ pb_unmap(buf->buffer);
+}
+
+
+static enum pipe_error
+pb_cache_buffer_validate(struct pb_buffer *_buf,
+ struct pb_validate *vl,
+ unsigned flags)
+{
+ struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
+ return pb_validate(buf->buffer, vl, flags);
+}
+
+
+static void
+pb_cache_buffer_fence(struct pb_buffer *_buf,
+ struct pipe_fence_handle *fence)
+{
+ struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
+ pb_fence(buf->buffer, fence);
+}
+
+
+static void
+pb_cache_buffer_get_base_buffer(struct pb_buffer *_buf,
+ struct pb_buffer **base_buf,
+ unsigned *offset)
+{
+ struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
+ pb_get_base_buffer(buf->buffer, base_buf, offset);
+}
+
+
+const struct pb_vtbl
+pb_cache_buffer_vtbl = {
+ pb_cache_buffer_destroy,
+ pb_cache_buffer_map,
+ pb_cache_buffer_unmap,
+ pb_cache_buffer_validate,
+ pb_cache_buffer_fence,
+ pb_cache_buffer_get_base_buffer
+};
+
+
+static INLINE boolean
+pb_cache_is_buffer_compat(struct pb_cache_buffer *buf,
+ size_t size,
+ const struct pb_desc *desc)
+{
+ if(buf->base.base.size < size)
+ return FALSE;
+
+ /* be lenient with size, but don't waste a buffer more than twice too big */
+ if(buf->base.base.size >= 2*size)
+ return FALSE;
+
+ if(!pb_check_alignment(desc->alignment, buf->base.base.alignment))
+ return FALSE;
+
+ if(!pb_check_usage(desc->usage, buf->base.base.usage))
+ return FALSE;
+
+ return TRUE;
+}
+
+
+static struct pb_buffer *
+pb_cache_manager_create_buffer(struct pb_manager *_mgr,
+ size_t size,
+ const struct pb_desc *desc)
+{
+ struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
+ struct pb_cache_buffer *buf;
+ struct pb_cache_buffer *curr_buf;
+ struct list_head *curr, *next;
+ struct util_time now;
+
+ pipe_mutex_lock(mgr->mutex);
+
+ buf = NULL;
+ curr = mgr->delayed.next;
+ next = curr->next;
+
+ /* search in the expired buffers, freeing them in the process */
+ util_time_get(&now);
+ while(curr != &mgr->delayed) {
+ curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
+ if(!buf && pb_cache_is_buffer_compat(curr_buf, size, desc))
+ buf = curr_buf;
+ else if(util_time_timeout(&curr_buf->start, &curr_buf->end, &now))
+ _pb_cache_buffer_destroy(curr_buf);
+ else
+ /* This buffer (and all hereafter) are still hot in cache */
+ break;
+ curr = next;
+ next = curr->next;
+ }
+
+ /* keep searching in the hot buffers */
+ if(!buf) {
+ while(curr != &mgr->delayed) {
+ curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
+ if(pb_cache_is_buffer_compat(curr_buf, size, desc)) {
+ buf = curr_buf;
+ break;
+ }
+ /* no need to check the timeout here */
+ curr = next;
+ next = curr->next;
+ }
+ }
+
+ if(buf) {
+ LIST_DEL(&buf->head);
+ pipe_mutex_unlock(mgr->mutex);
+ ++buf->base.base.refcount;
+ return &buf->base;
+ }
+
+ pipe_mutex_unlock(mgr->mutex);
+
+ buf = CALLOC_STRUCT(pb_cache_buffer);
+ if(!buf)
+ return NULL;
+
+ buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);
+ if(!buf->buffer) {
+ FREE(buf);
+ return NULL;
+ }
+
+ assert(buf->buffer->base.refcount >= 1);
+ assert(pb_check_alignment(desc->alignment, buf->buffer->base.alignment));
+ assert(pb_check_usage(desc->usage, buf->buffer->base.usage));
+ assert(buf->buffer->base.size >= size);
+
+ buf->base.base.refcount = 1;
+ buf->base.base.alignment = buf->buffer->base.alignment;
+ buf->base.base.usage = buf->buffer->base.usage;
+ buf->base.base.size = buf->buffer->base.size;
+
+ buf->base.vtbl = &pb_cache_buffer_vtbl;
+ buf->mgr = mgr;
+
+ return &buf->base;
+}
+
+
+static void
+pb_cache_manager_flush(struct pb_manager *_mgr)
+{
+ struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
+ struct list_head *curr, *next;
+ struct pb_cache_buffer *buf;
+
+ pipe_mutex_lock(mgr->mutex);
+ curr = mgr->delayed.next;
+ next = curr->next;
+ while(curr != &mgr->delayed) {
+ buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
+ _pb_cache_buffer_destroy(buf);
+ curr = next;
+ next = curr->next;
+ }
+ pipe_mutex_unlock(mgr->mutex);
+
+ assert(mgr->provider->flush);
+ if(mgr->provider->flush)
+ mgr->provider->flush(mgr->provider);
+}
+
+
+static void
+pb_cache_manager_destroy(struct pb_manager *mgr)
+{
+ pb_cache_manager_flush(mgr);
+ FREE(mgr);
+}
+
+
+struct pb_manager *
+pb_cache_manager_create(struct pb_manager *provider,
+ unsigned usecs)
+{
+ struct pb_cache_manager *mgr;
+
+ if(!provider)
+ return NULL;
+
+ mgr = CALLOC_STRUCT(pb_cache_manager);
+ if (!mgr)
+ return NULL;
+
+ mgr->base.destroy = pb_cache_manager_destroy;
+ mgr->base.create_buffer = pb_cache_manager_create_buffer;
+ mgr->base.flush = pb_cache_manager_flush;
+ mgr->provider = provider;
+ mgr->usecs = usecs;
+ LIST_INITHEAD(&mgr->delayed);
+ mgr->numDelayed = 0;
+ pipe_mutex_init(mgr->mutex);
+
+ return &mgr->base;
+}
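
[Editorial note: a minimal sketch, not part of the commit, of the reuse
pattern this manager enables. The provider and desc variables are assumed to
exist in the surrounding code.]

    /* Keep destroyed buffers around for up to one second. */
    struct pb_manager *cached = pb_cache_manager_create(provider, 1000000);
    struct pb_buffer *buf;

    buf = cached->create_buffer(cached, 4096, &desc);
    pb_reference(&buf, NULL);   /* parked in the cache, not destroyed */

    /* A compatible request (size in [4096, 8192), matching usage and
     * alignment per pb_cache_is_buffer_compat) resurrects the cached
     * buffer instead of hitting the provider. */
    buf = cached->create_buffer(cached, 4096, &desc);
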
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c
new file mode 100644
index 0000000000..26d9c24aec
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c
@@ -0,0 +1,390 @@
+/**************************************************************************
+ *
+ * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Debug buffer manager to detect buffer under- and overflows.
+ *
+ * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_debug.h"
+#include "pipe/p_thread.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "util/u_double_list.h"
+#include "util/u_time.h"
+
+#include "pb_buffer.h"
+#include "pb_bufmgr.h"
+
+
+#ifdef DEBUG
+
+
+/**
+ * Convenience macro (type safe).
+ */
+#define SUPER(__derived) (&(__derived)->base)
+
+
+struct pb_debug_manager;
+
+
+/**
+ * Wrapper around a pipe buffer which adds delayed destruction.
+ */
+struct pb_debug_buffer
+{
+ struct pb_buffer base;
+
+ struct pb_buffer *buffer;
+ struct pb_debug_manager *mgr;
+
+ size_t underflow_size;
+ size_t overflow_size;
+};
+
+
+struct pb_debug_manager
+{
+ struct pb_manager base;
+
+ struct pb_manager *provider;
+
+ size_t band_size;
+};
+
+
+static INLINE struct pb_debug_buffer *
+pb_debug_buffer(struct pb_buffer *buf)
+{
+ assert(buf);
+ return (struct pb_debug_buffer *)buf;
+}
+
+
+static INLINE struct pb_debug_manager *
+pb_debug_manager(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct pb_debug_manager *)mgr;
+}
+
+
+static const uint8_t random_pattern[32] = {
+ 0xaf, 0xcf, 0xa5, 0xa2, 0xc2, 0x63, 0x15, 0x1a,
+ 0x7e, 0xe2, 0x7e, 0x84, 0x15, 0x49, 0xa2, 0x1e,
+ 0x49, 0x63, 0xf5, 0x52, 0x74, 0x66, 0x9e, 0xc4,
+ 0x6d, 0xcf, 0x2c, 0x4a, 0x74, 0xe6, 0xfd, 0x94
+};
+
+
+static INLINE void
+fill_random_pattern(uint8_t *dst, size_t size)
+{
+ size_t i = 0;
+ while(size--) {
+ *dst++ = random_pattern[i++];
+ i &= sizeof(random_pattern) - 1;
+ }
+}
+
+
+static INLINE boolean
+check_random_pattern(const uint8_t *dst, size_t size,
+ size_t *min_ofs, size_t *max_ofs)
+{
+ boolean result = TRUE;
+ size_t i;
+ *min_ofs = size;
+ *max_ofs = 0;
+ for(i = 0; i < size; ++i) {
+ if(*dst++ != random_pattern[i % sizeof(random_pattern)]) {
+ *min_ofs = MIN2(*min_ofs, i);
+ *max_ofs = MAX2(*max_ofs, i);
+ result = FALSE;
+ }
+ }
+ return result;
+}
+
+
+static void
+pb_debug_buffer_fill(struct pb_debug_buffer *buf)
+{
+ uint8_t *map;
+
+ map = pb_map(buf->buffer, PIPE_BUFFER_USAGE_CPU_WRITE);
+ assert(map);
+ if(map) {
+ fill_random_pattern(map, buf->underflow_size);
+ fill_random_pattern(map + buf->underflow_size + buf->base.base.size,
+ buf->overflow_size);
+ pb_unmap(buf->buffer);
+ }
+}
+
+
+/**
+ * Check for under/over flows.
+ *
+ * Should be called with the buffer unmapped.
+ */
+static void
+pb_debug_buffer_check(struct pb_debug_buffer *buf)
+{
+ uint8_t *map;
+
+ map = pb_map(buf->buffer, PIPE_BUFFER_USAGE_CPU_READ);
+ assert(map);
+ if(map) {
+ boolean underflow, overflow;
+ size_t min_ofs, max_ofs;
+
+ underflow = !check_random_pattern(map, buf->underflow_size,
+ &min_ofs, &max_ofs);
+ if(underflow) {
+ debug_printf("buffer underflow (offset -%u%s to -%u bytes) detected\n",
+ buf->underflow_size - min_ofs,
+ min_ofs == 0 ? "+" : "",
+ buf->underflow_size - max_ofs);
+ }
+
+ overflow = !check_random_pattern(map + buf->underflow_size + buf->base.base.size,
+ buf->overflow_size,
+ &min_ofs, &max_ofs);
+ if(overflow) {
+ debug_printf("buffer overflow (size %u plus offset %u to %u%s bytes) detected\n",
+ buf->base.base.size,
+ min_ofs,
+ max_ofs,
+ max_ofs == buf->overflow_size - 1 ? "+" : "");
+ }
+
+ debug_assert(!underflow && !overflow);
+
+ /* re-fill if not aborted */
+ if(underflow)
+ fill_random_pattern(map, buf->underflow_size);
+ if(overflow)
+ fill_random_pattern(map + buf->underflow_size + buf->base.base.size,
+ buf->overflow_size);
+
+ pb_unmap(buf->buffer);
+ }
+}
+
+
+static void
+pb_debug_buffer_destroy(struct pb_buffer *_buf)
+{
+ struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
+
+ assert(!buf->base.base.refcount);
+
+ pb_debug_buffer_check(buf);
+
+ pb_reference(&buf->buffer, NULL);
+ FREE(buf);
+}
+
+
+static void *
+pb_debug_buffer_map(struct pb_buffer *_buf,
+ unsigned flags)
+{
+ struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
+ void *map;
+
+ pb_debug_buffer_check(buf);
+
+ map = pb_map(buf->buffer, flags);
+ if(!map)
+ return NULL;
+
+ return (uint8_t *)map + buf->underflow_size;
+}
+
+
+static void
+pb_debug_buffer_unmap(struct pb_buffer *_buf)
+{
+ struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
+ pb_unmap(buf->buffer);
+
+ pb_debug_buffer_check(buf);
+}
+
+
+static void
+pb_debug_buffer_get_base_buffer(struct pb_buffer *_buf,
+ struct pb_buffer **base_buf,
+ unsigned *offset)
+{
+ struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
+ pb_get_base_buffer(buf->buffer, base_buf, offset);
+ *offset += buf->underflow_size;
+}
+
+
+static enum pipe_error
+pb_debug_buffer_validate(struct pb_buffer *_buf,
+ struct pb_validate *vl,
+ unsigned flags)
+{
+ struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
+
+ pb_debug_buffer_check(buf);
+
+ return pb_validate(buf->buffer, vl, flags);
+}
+
+
+static void
+pb_debug_buffer_fence(struct pb_buffer *_buf,
+ struct pipe_fence_handle *fence)
+{
+ struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
+ pb_fence(buf->buffer, fence);
+}
+
+
+const struct pb_vtbl
+pb_debug_buffer_vtbl = {
+ pb_debug_buffer_destroy,
+ pb_debug_buffer_map,
+ pb_debug_buffer_unmap,
+ pb_debug_buffer_validate,
+ pb_debug_buffer_fence,
+ pb_debug_buffer_get_base_buffer
+};
+
+
+static struct pb_buffer *
+pb_debug_manager_create_buffer(struct pb_manager *_mgr,
+ size_t size,
+ const struct pb_desc *desc)
+{
+ struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
+ struct pb_debug_buffer *buf;
+ struct pb_desc real_desc;
+ size_t real_size;
+
+ buf = CALLOC_STRUCT(pb_debug_buffer);
+ if(!buf)
+ return NULL;
+
+ real_size = size + 2*mgr->band_size;
+ real_desc = *desc;
+ real_desc.usage |= PIPE_BUFFER_USAGE_CPU_WRITE;
+ real_desc.usage |= PIPE_BUFFER_USAGE_CPU_READ;
+
+ buf->buffer = mgr->provider->create_buffer(mgr->provider,
+ real_size,
+ &real_desc);
+ if(!buf->buffer) {
+ FREE(buf);
+ return NULL;
+ }
+
+ assert(buf->buffer->base.refcount >= 1);
+ assert(pb_check_alignment(real_desc.alignment, buf->buffer->base.alignment));
+ assert(pb_check_usage(real_desc.usage, buf->buffer->base.usage));
+ assert(buf->buffer->base.size >= real_size);
+
+ buf->base.base.refcount = 1;
+ buf->base.base.alignment = desc->alignment;
+ buf->base.base.usage = desc->usage;
+ buf->base.base.size = size;
+
+ buf->base.vtbl = &pb_debug_buffer_vtbl;
+ buf->mgr = mgr;
+
+ buf->underflow_size = mgr->band_size;
+ buf->overflow_size = buf->buffer->base.size - buf->underflow_size - size;
+
+ pb_debug_buffer_fill(buf);
+
+ return &buf->base;
+}
+
+
+static void
+pb_debug_manager_flush(struct pb_manager *_mgr)
+{
+ struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
+ assert(mgr->provider->flush);
+ if(mgr->provider->flush)
+ mgr->provider->flush(mgr->provider);
+}
+
+
+static void
+pb_debug_manager_destroy(struct pb_manager *_mgr)
+{
+ struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
+ mgr->provider->destroy(mgr->provider);
+ FREE(mgr);
+}
+
+
+struct pb_manager *
+pb_debug_manager_create(struct pb_manager *provider, size_t band_size)
+{
+ struct pb_debug_manager *mgr;
+
+ if(!provider)
+ return NULL;
+
+ mgr = CALLOC_STRUCT(pb_debug_manager);
+ if (!mgr)
+ return NULL;
+
+ mgr->base.destroy = pb_debug_manager_destroy;
+ mgr->base.create_buffer = pb_debug_manager_create_buffer;
+ mgr->base.flush = pb_debug_manager_flush;
+ mgr->provider = provider;
+ mgr->band_size = band_size;
+
+ return &mgr->base;
+}
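+
+/*
+ * Illustrative usage sketch, not part of the original code: how a driver
+ * might wrap an existing provider so that every allocation carries
+ * under/overflow guard bands. The function name and the 4096-byte band size
+ * are arbitrary choices for the example.
+ */
+#if 0
+static struct pb_manager *
+example_wrap_with_debug(struct pb_manager *provider)
+{
+   /* In non-DEBUG builds this simply returns the provider (see below). */
+   return pb_debug_manager_create(provider, 4096);
+}
+#endif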
+
+
+#else /* !DEBUG */
+
+
+struct pb_manager *
+pb_debug_manager_create(struct pb_manager *provider, size_t band_size)
+{
+ return provider;
+}
+
+
+#endif /* !DEBUG */
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_fenced.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_fenced.c
new file mode 100644
index 0000000000..513ed28ca6
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_fenced.c
@@ -0,0 +1,148 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * A buffer manager that wraps buffers in fenced buffers.
+ *
+ * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+
+#include "pipe/p_debug.h"
+#include "util/u_memory.h"
+
+#include "pb_buffer.h"
+#include "pb_buffer_fenced.h"
+#include "pb_bufmgr.h"
+
+
+struct fenced_pb_manager
+{
+ struct pb_manager base;
+
+ struct pb_manager *provider;
+
+ struct fenced_buffer_list *fenced_list;
+};
+
+
+static INLINE struct fenced_pb_manager *
+fenced_pb_manager(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct fenced_pb_manager *)mgr;
+}
+
+
+static struct pb_buffer *
+fenced_bufmgr_create_buffer(struct pb_manager *mgr,
+ size_t size,
+ const struct pb_desc *desc)
+{
+ struct fenced_pb_manager *fenced_mgr = fenced_pb_manager(mgr);
+ struct pb_buffer *buf;
+ struct pb_buffer *fenced_buf;
+
+ /* check for free buffers before allocating new ones */
+ fenced_buffer_list_check_free(fenced_mgr->fenced_list, 0);
+
+ buf = fenced_mgr->provider->create_buffer(fenced_mgr->provider, size, desc);
+ if(!buf) {
+ /* try harder to get a buffer */
+ fenced_buffer_list_check_free(fenced_mgr->fenced_list, 1);
+
+ buf = fenced_mgr->provider->create_buffer(fenced_mgr->provider, size, desc);
+ if(!buf) {
+ /* give up */
+ return NULL;
+ }
+ }
+
+ fenced_buf = fenced_buffer_create(fenced_mgr->fenced_list, buf);
+ if(!fenced_buf) {
+ pb_reference(&buf, NULL);
+ }
+
+ return fenced_buf;
+}
+
+
+static void
+fenced_bufmgr_flush(struct pb_manager *mgr)
+{
+ struct fenced_pb_manager *fenced_mgr = fenced_pb_manager(mgr);
+
+ fenced_buffer_list_check_free(fenced_mgr->fenced_list, TRUE);
+
+ assert(fenced_mgr->provider->flush);
+ if(fenced_mgr->provider->flush)
+ fenced_mgr->provider->flush(fenced_mgr->provider);
+}
+
+
+static void
+fenced_bufmgr_destroy(struct pb_manager *mgr)
+{
+ struct fenced_pb_manager *fenced_mgr = fenced_pb_manager(mgr);
+
+ fenced_buffer_list_destroy(fenced_mgr->fenced_list);
+
+ if(fenced_mgr->provider)
+ fenced_mgr->provider->destroy(fenced_mgr->provider);
+
+ FREE(fenced_mgr);
+}
+
+
+struct pb_manager *
+fenced_bufmgr_create(struct pb_manager *provider,
+ struct pipe_winsys *winsys)
+{
+ struct fenced_pb_manager *fenced_mgr;
+
+ if(!provider)
+ return NULL;
+
+ fenced_mgr = CALLOC_STRUCT(fenced_pb_manager);
+ if (!fenced_mgr)
+ return NULL;
+
+ fenced_mgr->base.destroy = fenced_bufmgr_destroy;
+ fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer;
+ fenced_mgr->base.flush = fenced_bufmgr_flush;
+
+ fenced_mgr->provider = provider;
+ fenced_mgr->fenced_list = fenced_buffer_list_create(winsys);
+ if(!fenced_mgr->fenced_list) {
+ FREE(fenced_mgr);
+ return NULL;
+ }
+
+ return &fenced_mgr->base;
+}
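+
+
+/*
+ * Illustrative usage sketch, not part of the original code: stacking the
+ * fenced manager on top of an arbitrary provider. "provider" and "winsys"
+ * stand in for objects the driver already owns; the function name is
+ * hypothetical.
+ */
+#if 0
+static struct pb_manager *
+example_create_fenced(struct pb_manager *provider,
+                      struct pipe_winsys *winsys)
+{
+   /* Buffers from the returned manager are fenced wrappers: they are only
+    * really destroyed once any fence attached to them has signalled. */
+   return fenced_bufmgr_create(provider, winsys);
+}
+#endif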
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c
new file mode 100644
index 0000000000..2f5a5d8ea0
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c
@@ -0,0 +1,322 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Buffer manager using the old texture memory manager.
+ *
+ * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+
+#include "pipe/p_defines.h"
+#include "pipe/p_debug.h"
+#include "pipe/p_thread.h"
+#include "util/u_memory.h"
+#include "util/u_double_list.h"
+#include "util/u_mm.h"
+#include "pb_buffer.h"
+#include "pb_bufmgr.h"
+
+
+/**
+ * Convenience macro (type safe).
+ */
+#define SUPER(__derived) (&(__derived)->base)
+
+
+struct mm_pb_manager
+{
+ struct pb_manager base;
+
+ pipe_mutex mutex;
+
+ size_t size;
+ struct mem_block *heap;
+
+ size_t align2;
+
+ struct pb_buffer *buffer;
+ void *map;
+};
+
+
+static INLINE struct mm_pb_manager *
+mm_pb_manager(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct mm_pb_manager *)mgr;
+}
+
+
+struct mm_buffer
+{
+ struct pb_buffer base;
+
+ struct mm_pb_manager *mgr;
+
+ struct mem_block *block;
+};
+
+
+static INLINE struct mm_buffer *
+mm_buffer(struct pb_buffer *buf)
+{
+ assert(buf);
+ return (struct mm_buffer *)buf;
+}
+
+
+static void
+mm_buffer_destroy(struct pb_buffer *buf)
+{
+ struct mm_buffer *mm_buf = mm_buffer(buf);
+ struct mm_pb_manager *mm = mm_buf->mgr;
+
+ assert(buf->base.refcount == 0);
+
+ pipe_mutex_lock(mm->mutex);
+ u_mmFreeMem(mm_buf->block);
+ FREE(buf);
+ pipe_mutex_unlock(mm->mutex);
+}
+
+
+static void *
+mm_buffer_map(struct pb_buffer *buf,
+ unsigned flags)
+{
+ struct mm_buffer *mm_buf = mm_buffer(buf);
+ struct mm_pb_manager *mm = mm_buf->mgr;
+
+ return (unsigned char *) mm->map + mm_buf->block->ofs;
+}
+
+
+static void
+mm_buffer_unmap(struct pb_buffer *buf)
+{
+ /* No-op */
+}
+
+
+static enum pipe_error
+mm_buffer_validate(struct pb_buffer *buf,
+ struct pb_validate *vl,
+ unsigned flags)
+{
+ struct mm_buffer *mm_buf = mm_buffer(buf);
+ struct mm_pb_manager *mm = mm_buf->mgr;
+ return pb_validate(mm->buffer, vl, flags);
+}
+
+
+static void
+mm_buffer_fence(struct pb_buffer *buf,
+ struct pipe_fence_handle *fence)
+{
+ struct mm_buffer *mm_buf = mm_buffer(buf);
+ struct mm_pb_manager *mm = mm_buf->mgr;
+ pb_fence(mm->buffer, fence);
+}
+
+
+static void
+mm_buffer_get_base_buffer(struct pb_buffer *buf,
+ struct pb_buffer **base_buf,
+ unsigned *offset)
+{
+ struct mm_buffer *mm_buf = mm_buffer(buf);
+ struct mm_pb_manager *mm = mm_buf->mgr;
+ pb_get_base_buffer(mm->buffer, base_buf, offset);
+ *offset += mm_buf->block->ofs;
+}
+
+
+static const struct pb_vtbl
+mm_buffer_vtbl = {
+ mm_buffer_destroy,
+ mm_buffer_map,
+ mm_buffer_unmap,
+ mm_buffer_validate,
+ mm_buffer_fence,
+ mm_buffer_get_base_buffer
+};
+
+
+static struct pb_buffer *
+mm_bufmgr_create_buffer(struct pb_manager *mgr,
+ size_t size,
+ const struct pb_desc *desc)
+{
+ struct mm_pb_manager *mm = mm_pb_manager(mgr);
+ struct mm_buffer *mm_buf;
+
+   /* We don't handle alignments larger than the one initially set up */
+ assert(pb_check_alignment(desc->alignment, 1 << mm->align2));
+ if(!pb_check_alignment(desc->alignment, 1 << mm->align2))
+ return NULL;
+
+ pipe_mutex_lock(mm->mutex);
+
+ mm_buf = CALLOC_STRUCT(mm_buffer);
+ if (!mm_buf) {
+ pipe_mutex_unlock(mm->mutex);
+ return NULL;
+ }
+
+ mm_buf->base.base.refcount = 1;
+ mm_buf->base.base.alignment = desc->alignment;
+ mm_buf->base.base.usage = desc->usage;
+ mm_buf->base.base.size = size;
+
+ mm_buf->base.vtbl = &mm_buffer_vtbl;
+
+ mm_buf->mgr = mm;
+
+ mm_buf->block = u_mmAllocMem(mm->heap, size, mm->align2, 0);
+ if(!mm_buf->block) {
+ debug_printf("warning: heap full\n");
+#if 0
+ mmDumpMemInfo(mm->heap);
+#endif
+
+ mm_buf->block = u_mmAllocMem(mm->heap, size, mm->align2, 0);
+ if(!mm_buf->block) {
+ FREE(mm_buf);
+ pipe_mutex_unlock(mm->mutex);
+ return NULL;
+ }
+ }
+
+ /* Some sanity checks */
+ assert(0 <= (unsigned)mm_buf->block->ofs && (unsigned)mm_buf->block->ofs < mm->size);
+ assert(size <= (unsigned)mm_buf->block->size && (unsigned)mm_buf->block->ofs + (unsigned)mm_buf->block->size <= mm->size);
+
+ pipe_mutex_unlock(mm->mutex);
+ return SUPER(mm_buf);
+}
+
+
+static void
+mm_bufmgr_flush(struct pb_manager *mgr)
+{
+ /* No-op */
+}
+
+
+static void
+mm_bufmgr_destroy(struct pb_manager *mgr)
+{
+ struct mm_pb_manager *mm = mm_pb_manager(mgr);
+
+ pipe_mutex_lock(mm->mutex);
+
+ u_mmDestroy(mm->heap);
+
+ pb_unmap(mm->buffer);
+ pb_reference(&mm->buffer, NULL);
+
+ pipe_mutex_unlock(mm->mutex);
+
+ FREE(mgr);
+}
+
+
+struct pb_manager *
+mm_bufmgr_create_from_buffer(struct pb_buffer *buffer,
+ size_t size, size_t align2)
+{
+ struct mm_pb_manager *mm;
+
+ if(!buffer)
+ return NULL;
+
+ mm = CALLOC_STRUCT(mm_pb_manager);
+ if (!mm)
+ return NULL;
+
+ mm->base.destroy = mm_bufmgr_destroy;
+ mm->base.create_buffer = mm_bufmgr_create_buffer;
+ mm->base.flush = mm_bufmgr_flush;
+
+ mm->size = size;
+   mm->align2 = align2;    /* log2 of the buffer alignment */
+
+ pipe_mutex_init(mm->mutex);
+
+ mm->buffer = buffer;
+
+ mm->map = pb_map(mm->buffer,
+ PIPE_BUFFER_USAGE_CPU_READ |
+ PIPE_BUFFER_USAGE_CPU_WRITE);
+ if(!mm->map)
+ goto failure;
+
+ mm->heap = u_mmInit(0, size);
+ if (!mm->heap)
+ goto failure;
+
+ return SUPER(mm);
+
+failure:
+   if(mm->heap)
+      u_mmDestroy(mm->heap);
+   if(mm->map)
+      pb_unmap(mm->buffer);
+   if(mm)
+      FREE(mm);
+   return NULL;
+}
+
+
+struct pb_manager *
+mm_bufmgr_create(struct pb_manager *provider,
+ size_t size, size_t align2)
+{
+ struct pb_buffer *buffer;
+ struct pb_manager *mgr;
+ struct pb_desc desc;
+
+ if(!provider)
+ return NULL;
+
+ memset(&desc, 0, sizeof(desc));
+ desc.alignment = 1 << align2;
+
+ buffer = provider->create_buffer(provider, size, &desc);
+ if (!buffer)
+ return NULL;
+
+ mgr = mm_bufmgr_create_from_buffer(buffer, size, align2);
+ if (!mgr) {
+ pb_reference(&buffer, NULL);
+ return NULL;
+ }
+
+ return mgr;
+}
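+
+
+/*
+ * Illustrative usage sketch, not part of the original code: sub-allocating
+ * out of a single 1 MiB buffer with 64-byte alignment. align2 is the log2 of
+ * the alignment, so 6 means 1 << 6 = 64 bytes. "provider" and the function
+ * name are assumptions of the example.
+ */
+#if 0
+static struct pb_manager *
+example_create_mm(struct pb_manager *provider)
+{
+   return mm_bufmgr_create(provider, 1024*1024, 6);
+}
+#endif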
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_ondemand.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_ondemand.c
new file mode 100644
index 0000000000..ba02a84e62
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_ondemand.c
@@ -0,0 +1,303 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * A variation of malloc buffers which get transferred to real graphics memory
+ * when there is an attempt to validate them.
+ *
+ * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+
+#include "pipe/p_debug.h"
+#include "util/u_memory.h"
+#include "pb_buffer.h"
+#include "pb_bufmgr.h"
+
+
+struct pb_ondemand_manager;
+
+
+struct pb_ondemand_buffer
+{
+ struct pb_buffer base;
+
+ struct pb_ondemand_manager *mgr;
+
+ /** Regular malloc'ed memory */
+ void *data;
+ unsigned mapcount;
+
+ /** Real buffer */
+ struct pb_buffer *buffer;
+ size_t size;
+ struct pb_desc desc;
+};
+
+
+struct pb_ondemand_manager
+{
+ struct pb_manager base;
+
+ struct pb_manager *provider;
+};
+
+
+extern const struct pb_vtbl pb_ondemand_buffer_vtbl;
+
+static INLINE struct pb_ondemand_buffer *
+pb_ondemand_buffer(struct pb_buffer *buf)
+{
+ assert(buf);
+ assert(buf->vtbl == &pb_ondemand_buffer_vtbl);
+ return (struct pb_ondemand_buffer *)buf;
+}
+
+static INLINE struct pb_ondemand_manager *
+pb_ondemand_manager(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct pb_ondemand_manager *)mgr;
+}
+
+
+static void
+pb_ondemand_buffer_destroy(struct pb_buffer *_buf)
+{
+ struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);
+
+ pb_reference(&buf->buffer, NULL);
+
+ align_free(buf->data);
+
+ FREE(buf);
+}
+
+
+static void *
+pb_ondemand_buffer_map(struct pb_buffer *_buf,
+ unsigned flags)
+{
+ struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);
+
+ if(buf->buffer) {
+ assert(!buf->data);
+ return pb_map(buf->buffer, flags);
+ }
+ else {
+ assert(buf->data);
+ ++buf->mapcount;
+ return buf->data;
+ }
+}
+
+
+static void
+pb_ondemand_buffer_unmap(struct pb_buffer *_buf)
+{
+ struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);
+
+ if(buf->buffer) {
+ assert(!buf->data);
+ pb_unmap(buf->buffer);
+ }
+ else {
+ assert(buf->data);
+ assert(buf->mapcount);
+ if(buf->mapcount)
+ --buf->mapcount;
+ }
+}
+
+
+static enum pipe_error
+pb_ondemand_buffer_instantiate(struct pb_ondemand_buffer *buf)
+{
+ if(!buf->buffer) {
+ struct pb_manager *provider = buf->mgr->provider;
+ uint8_t *map;
+
+ assert(!buf->mapcount);
+
+ buf->buffer = provider->create_buffer(provider, buf->size, &buf->desc);
+ if(!buf->buffer)
+ return PIPE_ERROR_OUT_OF_MEMORY;
+
+      map = pb_map(buf->buffer, PIPE_BUFFER_USAGE_CPU_WRITE);
+ if(!map) {
+ pb_reference(&buf->buffer, NULL);
+ return PIPE_ERROR;
+ }
+
+ memcpy(map, buf->data, buf->size);
+
+ pb_unmap(buf->buffer);
+
+ if(!buf->mapcount) {
+         align_free(buf->data);
+ buf->data = NULL;
+ }
+ }
+
+ return PIPE_OK;
+}
+
+static enum pipe_error
+pb_ondemand_buffer_validate(struct pb_buffer *_buf,
+ struct pb_validate *vl,
+ unsigned flags)
+{
+ struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);
+ enum pipe_error ret;
+
+ assert(!buf->mapcount);
+ if(buf->mapcount)
+ return PIPE_ERROR;
+
+ ret = pb_ondemand_buffer_instantiate(buf);
+ if(ret != PIPE_OK)
+ return ret;
+
+ return pb_validate(buf->buffer, vl, flags);
+}
+
+
+static void
+pb_ondemand_buffer_fence(struct pb_buffer *_buf,
+ struct pipe_fence_handle *fence)
+{
+ struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);
+
+ assert(buf->buffer);
+ if(!buf->buffer)
+ return;
+
+ pb_fence(buf->buffer, fence);
+}
+
+
+static void
+pb_ondemand_buffer_get_base_buffer(struct pb_buffer *_buf,
+ struct pb_buffer **base_buf,
+ unsigned *offset)
+{
+ struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);
+
+ if(pb_ondemand_buffer_instantiate(buf) != PIPE_OK) {
+ assert(0);
+ *base_buf = &buf->base;
+ *offset = 0;
+ return;
+ }
+
+ pb_get_base_buffer(buf->buffer, base_buf, offset);
+}
+
+
+const struct pb_vtbl
+pb_ondemand_buffer_vtbl = {
+ pb_ondemand_buffer_destroy,
+ pb_ondemand_buffer_map,
+ pb_ondemand_buffer_unmap,
+ pb_ondemand_buffer_validate,
+ pb_ondemand_buffer_fence,
+ pb_ondemand_buffer_get_base_buffer
+};
+
+
+static struct pb_buffer *
+pb_ondemand_manager_create_buffer(struct pb_manager *_mgr,
+ size_t size,
+ const struct pb_desc *desc)
+{
+ struct pb_ondemand_manager *mgr = pb_ondemand_manager(_mgr);
+ struct pb_ondemand_buffer *buf;
+
+ buf = CALLOC_STRUCT(pb_ondemand_buffer);
+ if(!buf)
+ return NULL;
+
+ buf->base.base.refcount = 1;
+ buf->base.base.alignment = desc->alignment;
+ buf->base.base.usage = desc->usage;
+ buf->base.base.size = size;
+ buf->base.vtbl = &pb_ondemand_buffer_vtbl;
+
+ buf->mgr = mgr;
+
+ buf->data = align_malloc(size, desc->alignment < sizeof(void*) ? sizeof(void*) : desc->alignment);
+ if(!buf->data) {
+ FREE(buf);
+ return NULL;
+ }
+
+ buf->size = size;
+ buf->desc = *desc;
+
+ return &buf->base;
+}
+
+
+static void
+pb_ondemand_manager_flush(struct pb_manager *_mgr)
+{
+ struct pb_ondemand_manager *mgr = pb_ondemand_manager(_mgr);
+
+ mgr->provider->flush(mgr->provider);
+}
+
+
+static void
+pb_ondemand_manager_destroy(struct pb_manager *_mgr)
+{
+ struct pb_ondemand_manager *mgr = pb_ondemand_manager(_mgr);
+
+ FREE(mgr);
+}
+
+
+struct pb_manager *
+pb_ondemand_manager_create(struct pb_manager *provider)
+{
+ struct pb_ondemand_manager *mgr;
+
+ if(!provider)
+ return NULL;
+
+ mgr = CALLOC_STRUCT(pb_ondemand_manager);
+ if(!mgr)
+ return NULL;
+
+ mgr->base.destroy = pb_ondemand_manager_destroy;
+ mgr->base.create_buffer = pb_ondemand_manager_create_buffer;
+ mgr->base.flush = pb_ondemand_manager_flush;
+
+ mgr->provider = provider;
+
+ return &mgr->base;
+}
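+
+
+/*
+ * Illustrative usage sketch, not part of the original code: a buffer from
+ * this manager lives in malloc'ed system memory until it is validated for
+ * hardware access, at which point it is copied into a real buffer from the
+ * provider. "mgr", the sizes, and the function name are assumptions of the
+ * example.
+ */
+#if 0
+static void
+example_ondemand_use(struct pb_manager *mgr)
+{
+   struct pb_desc desc;
+   struct pb_buffer *buf;
+   void *data;
+
+   memset(&desc, 0, sizeof(desc));
+   desc.alignment = 64;
+   desc.usage = PIPE_BUFFER_USAGE_GPU_READ;
+
+   /* Allocation and mapping only touch malloc'ed memory. */
+   buf = mgr->create_buffer(mgr, 4096, &desc);
+   data = pb_map(buf, PIPE_BUFFER_USAGE_CPU_WRITE);
+   /* ... fill data ... */
+   pb_unmap(buf);
+
+   /* The real buffer is created, and the data copied into it, only when the
+    * buffer is validated for hardware access. */
+   pb_reference(&buf, NULL);
+}
+#endif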
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c
new file mode 100644
index 0000000000..a6ff37653e
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c
@@ -0,0 +1,321 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Batch buffer pool management.
+ *
+ * \author Jose Fonseca <jrfonseca-at-tungstengraphics-dot-com>
+ * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_debug.h"
+#include "pipe/p_thread.h"
+#include "pipe/p_defines.h"
+#include "util/u_memory.h"
+#include "util/u_double_list.h"
+
+#include "pb_buffer.h"
+#include "pb_bufmgr.h"
+
+
+/**
+ * Convenience macro (type safe).
+ */
+#define SUPER(__derived) (&(__derived)->base)
+
+
+struct pool_pb_manager
+{
+ struct pb_manager base;
+
+ pipe_mutex mutex;
+
+ size_t bufSize;
+ size_t bufAlign;
+
+ size_t numFree;
+ size_t numTot;
+
+ struct list_head free;
+
+ struct pb_buffer *buffer;
+ void *map;
+
+ struct pool_buffer *bufs;
+};
+
+
+static INLINE struct pool_pb_manager *
+pool_pb_manager(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct pool_pb_manager *)mgr;
+}
+
+
+struct pool_buffer
+{
+ struct pb_buffer base;
+
+ struct pool_pb_manager *mgr;
+
+ struct list_head head;
+
+ size_t start;
+};
+
+
+static INLINE struct pool_buffer *
+pool_buffer(struct pb_buffer *buf)
+{
+ assert(buf);
+ return (struct pool_buffer *)buf;
+}
+
+
+
+static void
+pool_buffer_destroy(struct pb_buffer *buf)
+{
+ struct pool_buffer *pool_buf = pool_buffer(buf);
+ struct pool_pb_manager *pool = pool_buf->mgr;
+
+ assert(pool_buf->base.base.refcount == 0);
+
+ pipe_mutex_lock(pool->mutex);
+ LIST_ADD(&pool_buf->head, &pool->free);
+ pool->numFree++;
+ pipe_mutex_unlock(pool->mutex);
+}
+
+
+static void *
+pool_buffer_map(struct pb_buffer *buf, unsigned flags)
+{
+ struct pool_buffer *pool_buf = pool_buffer(buf);
+ struct pool_pb_manager *pool = pool_buf->mgr;
+ void *map;
+
+ pipe_mutex_lock(pool->mutex);
+ map = (unsigned char *) pool->map + pool_buf->start;
+ pipe_mutex_unlock(pool->mutex);
+ return map;
+}
+
+
+static void
+pool_buffer_unmap(struct pb_buffer *buf)
+{
+ /* No-op */
+}
+
+
+static enum pipe_error
+pool_buffer_validate(struct pb_buffer *buf,
+ struct pb_validate *vl,
+ unsigned flags)
+{
+ struct pool_buffer *pool_buf = pool_buffer(buf);
+ struct pool_pb_manager *pool = pool_buf->mgr;
+ return pb_validate(pool->buffer, vl, flags);
+}
+
+
+static void
+pool_buffer_fence(struct pb_buffer *buf,
+ struct pipe_fence_handle *fence)
+{
+ struct pool_buffer *pool_buf = pool_buffer(buf);
+ struct pool_pb_manager *pool = pool_buf->mgr;
+ pb_fence(pool->buffer, fence);
+}
+
+
+static void
+pool_buffer_get_base_buffer(struct pb_buffer *buf,
+ struct pb_buffer **base_buf,
+ unsigned *offset)
+{
+ struct pool_buffer *pool_buf = pool_buffer(buf);
+ struct pool_pb_manager *pool = pool_buf->mgr;
+ pb_get_base_buffer(pool->buffer, base_buf, offset);
+ *offset += pool_buf->start;
+}
+
+
+static const struct pb_vtbl
+pool_buffer_vtbl = {
+ pool_buffer_destroy,
+ pool_buffer_map,
+ pool_buffer_unmap,
+ pool_buffer_validate,
+ pool_buffer_fence,
+ pool_buffer_get_base_buffer
+};
+
+
+static struct pb_buffer *
+pool_bufmgr_create_buffer(struct pb_manager *mgr,
+ size_t size,
+ const struct pb_desc *desc)
+{
+ struct pool_pb_manager *pool = pool_pb_manager(mgr);
+ struct pool_buffer *pool_buf;
+ struct list_head *item;
+
+ assert(size == pool->bufSize);
+ assert(pool->bufAlign % desc->alignment == 0);
+
+ pipe_mutex_lock(pool->mutex);
+
+ if (pool->numFree == 0) {
+ pipe_mutex_unlock(pool->mutex);
+ debug_printf("warning: out of fixed size buffer objects\n");
+ return NULL;
+ }
+
+ item = pool->free.next;
+
+ if (item == &pool->free) {
+ pipe_mutex_unlock(pool->mutex);
+ debug_printf("error: fixed size buffer pool corruption\n");
+ return NULL;
+ }
+
+ LIST_DEL(item);
+ --pool->numFree;
+
+ pipe_mutex_unlock(pool->mutex);
+
+ pool_buf = LIST_ENTRY(struct pool_buffer, item, head);
+ assert(pool_buf->base.base.refcount == 0);
+ pool_buf->base.base.refcount = 1;
+ pool_buf->base.base.alignment = desc->alignment;
+ pool_buf->base.base.usage = desc->usage;
+
+ return SUPER(pool_buf);
+}
+
+
+static void
+pool_bufmgr_flush(struct pb_manager *mgr)
+{
+ /* No-op */
+}
+
+
+static void
+pool_bufmgr_destroy(struct pb_manager *mgr)
+{
+ struct pool_pb_manager *pool = pool_pb_manager(mgr);
+ pipe_mutex_lock(pool->mutex);
+
+ FREE(pool->bufs);
+
+ pb_unmap(pool->buffer);
+ pb_reference(&pool->buffer, NULL);
+
+ pipe_mutex_unlock(pool->mutex);
+
+ FREE(mgr);
+}
+
+
+struct pb_manager *
+pool_bufmgr_create(struct pb_manager *provider,
+ size_t numBufs,
+ size_t bufSize,
+ const struct pb_desc *desc)
+{
+ struct pool_pb_manager *pool;
+ struct pool_buffer *pool_buf;
+ size_t i;
+
+ if(!provider)
+ return NULL;
+
+ pool = CALLOC_STRUCT(pool_pb_manager);
+ if (!pool)
+ return NULL;
+
+ pool->base.destroy = pool_bufmgr_destroy;
+ pool->base.create_buffer = pool_bufmgr_create_buffer;
+ pool->base.flush = pool_bufmgr_flush;
+
+ LIST_INITHEAD(&pool->free);
+
+ pool->numTot = numBufs;
+ pool->numFree = numBufs;
+ pool->bufSize = bufSize;
+ pool->bufAlign = desc->alignment;
+
+ pipe_mutex_init(pool->mutex);
+
+ pool->buffer = provider->create_buffer(provider, numBufs*bufSize, desc);
+ if (!pool->buffer)
+ goto failure;
+
+ pool->map = pb_map(pool->buffer,
+ PIPE_BUFFER_USAGE_CPU_READ |
+ PIPE_BUFFER_USAGE_CPU_WRITE);
+ if(!pool->map)
+ goto failure;
+
+ pool->bufs = (struct pool_buffer *)CALLOC(numBufs, sizeof(*pool->bufs));
+ if (!pool->bufs)
+ goto failure;
+
+ pool_buf = pool->bufs;
+ for (i = 0; i < numBufs; ++i) {
+ pool_buf->base.base.refcount = 0;
+ pool_buf->base.base.alignment = 0;
+ pool_buf->base.base.usage = 0;
+ pool_buf->base.base.size = bufSize;
+ pool_buf->base.vtbl = &pool_buffer_vtbl;
+ pool_buf->mgr = pool;
+ pool_buf->start = i * bufSize;
+ LIST_ADDTAIL(&pool_buf->head, &pool->free);
+ pool_buf++;
+ }
+
+ return SUPER(pool);
+
+failure:
+ if(pool->bufs)
+ FREE(pool->bufs);
+ if(pool->map)
+ pb_unmap(pool->buffer);
+ if(pool->buffer)
+ pb_reference(&pool->buffer, NULL);
+ if(pool)
+ FREE(pool);
+ return NULL;
+}
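+
+
+/*
+ * Illustrative usage sketch, not part of the original code: a pool of 16
+ * fixed-size batch buffers. Every request to the pool must ask for exactly
+ * bufSize bytes. The sizes and the function name are arbitrary choices.
+ */
+#if 0
+static struct pb_manager *
+example_create_pool(struct pb_manager *provider,
+                    const struct pb_desc *desc)
+{
+   return pool_bufmgr_create(provider, 16, 256*1024, desc);
+}
+#endif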
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
new file mode 100644
index 0000000000..9b9fedccb4
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
@@ -0,0 +1,584 @@
+/**************************************************************************
+ *
+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * S-lab pool implementation.
+ *
+ * @sa http://en.wikipedia.org/wiki/Slab_allocation
+ *
+ * @author Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_error.h"
+#include "pipe/p_debug.h"
+#include "pipe/p_thread.h"
+#include "pipe/p_defines.h"
+#include "util/u_memory.h"
+#include "util/u_double_list.h"
+#include "util/u_time.h"
+
+#include "pb_buffer.h"
+#include "pb_bufmgr.h"
+
+
+struct pb_slab;
+
+
+/**
+ * Buffer in a slab.
+ *
+ * Sub-allocation of a contiguous buffer.
+ */
+struct pb_slab_buffer
+{
+ struct pb_buffer base;
+
+ struct pb_slab *slab;
+
+ struct list_head head;
+
+ unsigned mapCount;
+
+ /** Offset relative to the start of the slab buffer. */
+ size_t start;
+
+   /** Used when validating, to signal that all mappings are finished */
+ /* TODO: Actually validation does not reach this stage yet */
+ pipe_condvar event;
+};
+
+
+/**
+ * Slab -- a contiguous piece of memory.
+ */
+struct pb_slab
+{
+ struct list_head head;
+ struct list_head freeBuffers;
+ size_t numBuffers;
+ size_t numFree;
+
+ struct pb_slab_buffer *buffers;
+ struct pb_slab_manager *mgr;
+
+ /** Buffer from the provider */
+ struct pb_buffer *bo;
+
+ void *virtual;
+};
+
+
+/**
+ * The slab manager. It adds and removes slabs as needed in order to satisfy
+ * the allocation and destruction of individual buffers.
+ */
+struct pb_slab_manager
+{
+ struct pb_manager base;
+
+ /** From where we get our buffers */
+ struct pb_manager *provider;
+
+ /** Size of the buffers we hand on downstream */
+ size_t bufSize;
+
+ /** Size of the buffers we request upstream */
+ size_t slabSize;
+
+ /**
+ * Alignment, usage to be used to allocate the slab buffers.
+ *
+ * We can only provide buffers which are consistent (in alignment, usage)
+ * with this description.
+ */
+ struct pb_desc desc;
+
+ /**
+ * Partial slabs
+ *
+ * Full slabs are not stored in any list. Empty slabs are destroyed
+    * immediately.
+ */
+ struct list_head slabs;
+
+ pipe_mutex mutex;
+};
+
+
+/**
+ * Wrapper around several slabs, therefore capable of handling buffers of
+ * multiple sizes.
+ *
+ * This buffer manager simply dispatches buffer allocations to the appropriate
+ * slab manager according to the requested buffer size, or bypasses the slab
+ * managers altogether for even larger sizes.
+ *
+ * The data of this structure remains constant after
+ * initialization and thus needs no mutex protection.
+ */
+struct pb_slab_range_manager
+{
+ struct pb_manager base;
+
+ struct pb_manager *provider;
+
+ size_t minBufSize;
+ size_t maxBufSize;
+
+ /** @sa pb_slab_manager::desc */
+ struct pb_desc desc;
+
+ unsigned numBuckets;
+ size_t *bucketSizes;
+
+ /** Array of pb_slab_manager, one for each bucket size */
+ struct pb_manager **buckets;
+};
+
+
+static INLINE struct pb_slab_buffer *
+pb_slab_buffer(struct pb_buffer *buf)
+{
+ assert(buf);
+ return (struct pb_slab_buffer *)buf;
+}
+
+
+static INLINE struct pb_slab_manager *
+pb_slab_manager(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct pb_slab_manager *)mgr;
+}
+
+
+static INLINE struct pb_slab_range_manager *
+pb_slab_range_manager(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct pb_slab_range_manager *)mgr;
+}
+
+
+/**
+ * Delete a buffer from the slab delayed list and put
+ * it on the slab FREE list.
+ */
+static void
+pb_slab_buffer_destroy(struct pb_buffer *_buf)
+{
+ struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
+ struct pb_slab *slab = buf->slab;
+ struct pb_slab_manager *mgr = slab->mgr;
+ struct list_head *list = &buf->head;
+
+ pipe_mutex_lock(mgr->mutex);
+
+ assert(buf->base.base.refcount == 0);
+
+ buf->mapCount = 0;
+
+ LIST_DEL(list);
+ LIST_ADDTAIL(list, &slab->freeBuffers);
+ slab->numFree++;
+
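+   /* A full slab is kept off the partial-slabs list; now that it has a free
+    * buffer again, put it back. */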
+ if (slab->head.next == &slab->head)
+ LIST_ADDTAIL(&slab->head, &mgr->slabs);
+
+ /* If the slab becomes totally empty, free it */
+ if (slab->numFree == slab->numBuffers) {
+ list = &slab->head;
+ LIST_DELINIT(list);
+ pb_reference(&slab->bo, NULL);
+ FREE(slab->buffers);
+ FREE(slab);
+ }
+
+ pipe_mutex_unlock(mgr->mutex);
+}
+
+
+static void *
+pb_slab_buffer_map(struct pb_buffer *_buf,
+ unsigned flags)
+{
+ struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
+
+ ++buf->mapCount;
+ return (void *) ((uint8_t *) buf->slab->virtual + buf->start);
+}
+
+
+static void
+pb_slab_buffer_unmap(struct pb_buffer *_buf)
+{
+ struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
+
+ --buf->mapCount;
+ if (buf->mapCount == 0)
+ pipe_condvar_broadcast(buf->event);
+}
+
+
+static enum pipe_error
+pb_slab_buffer_validate(struct pb_buffer *_buf,
+ struct pb_validate *vl,
+ unsigned flags)
+{
+ struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
+ return pb_validate(buf->slab->bo, vl, flags);
+}
+
+
+static void
+pb_slab_buffer_fence(struct pb_buffer *_buf,
+ struct pipe_fence_handle *fence)
+{
+ struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
+ pb_fence(buf->slab->bo, fence);
+}
+
+
+static void
+pb_slab_buffer_get_base_buffer(struct pb_buffer *_buf,
+ struct pb_buffer **base_buf,
+ unsigned *offset)
+{
+ struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
+ pb_get_base_buffer(buf->slab->bo, base_buf, offset);
+ *offset += buf->start;
+}
+
+
+static const struct pb_vtbl
+pb_slab_buffer_vtbl = {
+ pb_slab_buffer_destroy,
+ pb_slab_buffer_map,
+ pb_slab_buffer_unmap,
+ pb_slab_buffer_validate,
+ pb_slab_buffer_fence,
+ pb_slab_buffer_get_base_buffer
+};
+
+
+/**
+ * Create a new slab.
+ *
+ * Called when we ran out of free slabs.
+ */
+static enum pipe_error
+pb_slab_create(struct pb_slab_manager *mgr)
+{
+ struct pb_slab *slab;
+ struct pb_slab_buffer *buf;
+ unsigned numBuffers;
+ unsigned i;
+ enum pipe_error ret;
+
+ slab = CALLOC_STRUCT(pb_slab);
+ if (!slab)
+ return PIPE_ERROR_OUT_OF_MEMORY;
+
+ slab->bo = mgr->provider->create_buffer(mgr->provider, mgr->slabSize, &mgr->desc);
+ if(!slab->bo) {
+ ret = PIPE_ERROR_OUT_OF_MEMORY;
+ goto out_err0;
+ }
+
+ /* Note down the slab virtual address. All mappings are accessed directly
+ * through this address so it is required that the buffer is pinned. */
+ slab->virtual = pb_map(slab->bo,
+ PIPE_BUFFER_USAGE_CPU_READ |
+ PIPE_BUFFER_USAGE_CPU_WRITE);
+ if(!slab->virtual) {
+ ret = PIPE_ERROR_OUT_OF_MEMORY;
+ goto out_err1;
+ }
+ pb_unmap(slab->bo);
+
+ numBuffers = slab->bo->base.size / mgr->bufSize;
+
+ slab->buffers = CALLOC(numBuffers, sizeof(*slab->buffers));
+ if (!slab->buffers) {
+ ret = PIPE_ERROR_OUT_OF_MEMORY;
+ goto out_err1;
+ }
+
+ LIST_INITHEAD(&slab->head);
+ LIST_INITHEAD(&slab->freeBuffers);
+ slab->numBuffers = numBuffers;
+ slab->numFree = 0;
+ slab->mgr = mgr;
+
+ buf = slab->buffers;
+ for (i=0; i < numBuffers; ++i) {
+ buf->base.base.refcount = 0;
+ buf->base.base.size = mgr->bufSize;
+ buf->base.base.alignment = 0;
+ buf->base.base.usage = 0;
+ buf->base.vtbl = &pb_slab_buffer_vtbl;
+ buf->slab = slab;
+      buf->start = i * mgr->bufSize;
+ buf->mapCount = 0;
+ pipe_condvar_init(buf->event);
+ LIST_ADDTAIL(&buf->head, &slab->freeBuffers);
+ slab->numFree++;
+ buf++;
+ }
+
+ /* Add this slab to the list of partial slabs */
+ LIST_ADDTAIL(&slab->head, &mgr->slabs);
+
+ return PIPE_OK;
+
+out_err1:
+ pb_reference(&slab->bo, NULL);
+out_err0:
+ FREE(slab);
+ return ret;
+}
+
+
+static struct pb_buffer *
+pb_slab_manager_create_buffer(struct pb_manager *_mgr,
+ size_t size,
+ const struct pb_desc *desc)
+{
+ struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
+   struct pb_slab_buffer *buf;
+ struct pb_slab *slab;
+ struct list_head *list;
+
+ /* check size */
+ assert(size <= mgr->bufSize);
+ if(size > mgr->bufSize)
+ return NULL;
+
+ /* check if we can provide the requested alignment */
+ assert(pb_check_alignment(desc->alignment, mgr->desc.alignment));
+ if(!pb_check_alignment(desc->alignment, mgr->desc.alignment))
+ return NULL;
+ assert(pb_check_alignment(desc->alignment, mgr->bufSize));
+ if(!pb_check_alignment(desc->alignment, mgr->bufSize))
+ return NULL;
+
+ assert(pb_check_usage(desc->usage, mgr->desc.usage));
+ if(!pb_check_usage(desc->usage, mgr->desc.usage))
+ return NULL;
+
+ pipe_mutex_lock(mgr->mutex);
+
+ /* Create a new slab, if we run out of partial slabs */
+ if (mgr->slabs.next == &mgr->slabs) {
+ (void) pb_slab_create(mgr);
+ if (mgr->slabs.next == &mgr->slabs) {
+ pipe_mutex_unlock(mgr->mutex);
+ return NULL;
+ }
+ }
+
+ /* Allocate the buffer from a partial (or just created) slab */
+ list = mgr->slabs.next;
+ slab = LIST_ENTRY(struct pb_slab, list, head);
+
+ /* If totally full remove from the partial slab list */
+ if (--slab->numFree == 0)
+ LIST_DELINIT(list);
+
+ list = slab->freeBuffers.next;
+ LIST_DELINIT(list);
+
+ pipe_mutex_unlock(mgr->mutex);
+ buf = LIST_ENTRY(struct pb_slab_buffer, list, head);
+
+ ++buf->base.base.refcount;
+ buf->base.base.alignment = desc->alignment;
+ buf->base.base.usage = desc->usage;
+
+ return &buf->base;
+}
+
+
+static void
+pb_slab_manager_flush(struct pb_manager *_mgr)
+{
+ struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
+
+ assert(mgr->provider->flush);
+ if(mgr->provider->flush)
+ mgr->provider->flush(mgr->provider);
+}
+
+
+static void
+pb_slab_manager_destroy(struct pb_manager *_mgr)
+{
+ struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
+
+ /* TODO: cleanup all allocated buffers */
+ FREE(mgr);
+}
+
+
+struct pb_manager *
+pb_slab_manager_create(struct pb_manager *provider,
+ size_t bufSize,
+ size_t slabSize,
+ const struct pb_desc *desc)
+{
+ struct pb_slab_manager *mgr;
+
+ mgr = CALLOC_STRUCT(pb_slab_manager);
+ if (!mgr)
+ return NULL;
+
+ mgr->base.destroy = pb_slab_manager_destroy;
+ mgr->base.create_buffer = pb_slab_manager_create_buffer;
+ mgr->base.flush = pb_slab_manager_flush;
+
+ mgr->provider = provider;
+ mgr->bufSize = bufSize;
+ mgr->slabSize = slabSize;
+ mgr->desc = *desc;
+
+ LIST_INITHEAD(&mgr->slabs);
+
+ pipe_mutex_init(mgr->mutex);
+
+ return &mgr->base;
+}
+
+
+static struct pb_buffer *
+pb_slab_range_manager_create_buffer(struct pb_manager *_mgr,
+ size_t size,
+ const struct pb_desc *desc)
+{
+ struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
+ size_t bufSize;
+ unsigned i;
+
+ bufSize = mgr->minBufSize;
+ for (i = 0; i < mgr->numBuckets; ++i) {
+ if(bufSize >= size)
+ return mgr->buckets[i]->create_buffer(mgr->buckets[i], size, desc);
+ bufSize *= 2;
+ }
+
+ /* Fall back to allocate a buffer object directly from the provider. */
+ return mgr->provider->create_buffer(mgr->provider, size, desc);
+}
+
+
+static void
+pb_slab_range_manager_flush(struct pb_manager *_mgr)
+{
+ struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
+
+   /* Individual slab managers don't hold any temporary buffers, so there is no need to flush them */
+
+ assert(mgr->provider->flush);
+ if(mgr->provider->flush)
+ mgr->provider->flush(mgr->provider);
+}
+
+
+static void
+pb_slab_range_manager_destroy(struct pb_manager *_mgr)
+{
+ struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
+ unsigned i;
+
+ for (i = 0; i < mgr->numBuckets; ++i)
+ mgr->buckets[i]->destroy(mgr->buckets[i]);
+ FREE(mgr->buckets);
+ FREE(mgr->bucketSizes);
+ FREE(mgr);
+}
+
+
+struct pb_manager *
+pb_slab_range_manager_create(struct pb_manager *provider,
+ size_t minBufSize,
+ size_t maxBufSize,
+ size_t slabSize,
+ const struct pb_desc *desc)
+{
+ struct pb_slab_range_manager *mgr;
+ size_t bufSize;
+ unsigned i;
+
+ if(!provider)
+ return NULL;
+
+ mgr = CALLOC_STRUCT(pb_slab_range_manager);
+ if (!mgr)
+ goto out_err0;
+
+ mgr->base.destroy = pb_slab_range_manager_destroy;
+ mgr->base.create_buffer = pb_slab_range_manager_create_buffer;
+ mgr->base.flush = pb_slab_range_manager_flush;
+
+ mgr->provider = provider;
+ mgr->minBufSize = minBufSize;
+ mgr->maxBufSize = maxBufSize;
+
+ mgr->numBuckets = 1;
+ bufSize = minBufSize;
+ while(bufSize < maxBufSize) {
+ bufSize *= 2;
+ ++mgr->numBuckets;
+ }
+
+ mgr->buckets = CALLOC(mgr->numBuckets, sizeof(*mgr->buckets));
+ if (!mgr->buckets)
+ goto out_err1;
+
+ bufSize = minBufSize;
+ for (i = 0; i < mgr->numBuckets; ++i) {
+ mgr->buckets[i] = pb_slab_manager_create(provider, bufSize, slabSize, desc);
+ if(!mgr->buckets[i])
+ goto out_err2;
+ bufSize *= 2;
+ }
+
+ return &mgr->base;
+
+out_err2:
+ for (i = 0; i < mgr->numBuckets; ++i)
+ if(mgr->buckets[i])
+ mgr->buckets[i]->destroy(mgr->buckets[i]);
+ FREE(mgr->buckets);
+out_err1:
+ FREE(mgr);
+out_err0:
+ return NULL;
+}
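+
+
+/*
+ * Illustrative usage sketch, not part of the original code: with a 4 KiB
+ * minimum and a 64 KiB maximum the manager below creates five power-of-two
+ * buckets (4, 8, 16, 32 and 64 KiB); larger requests bypass the slabs and go
+ * straight to the provider. All sizes and the function name are arbitrary.
+ */
+#if 0
+static struct pb_manager *
+example_create_slab_range(struct pb_manager *provider,
+                          const struct pb_desc *desc)
+{
+   return pb_slab_range_manager_create(provider,
+                                       4*1024,    /* minBufSize */
+                                       64*1024,   /* maxBufSize */
+                                       512*1024,  /* slabSize */
+                                       desc);
+}
+#endif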
diff --git a/src/gallium/auxiliary/pipebuffer/pb_validate.c b/src/gallium/auxiliary/pipebuffer/pb_validate.c
new file mode 100644
index 0000000000..94532bb4ce
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_validate.c
@@ -0,0 +1,193 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Buffer validation.
+ *
+ * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_error.h"
+#include "util/u_memory.h"
+#include "pipe/p_debug.h"
+
+#include "pb_buffer.h"
+#include "pb_buffer_fenced.h"
+#include "pb_validate.h"
+
+
+#define PB_VALIDATE_INITIAL_SIZE 1 /* 512 */
+
+
+struct pb_validate_entry
+{
+ struct pb_buffer *buf;
+ unsigned flags;
+};
+
+
+struct pb_validate
+{
+ struct pb_validate_entry *entries;
+ unsigned used;
+ unsigned size;
+};
+
+
+enum pipe_error
+pb_validate_add_buffer(struct pb_validate *vl,
+ struct pb_buffer *buf,
+ unsigned flags)
+{
+ assert(buf);
+ if(!buf)
+ return PIPE_ERROR;
+
+ assert(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
+ assert(!(flags & ~PIPE_BUFFER_USAGE_GPU_READ_WRITE));
+ flags &= PIPE_BUFFER_USAGE_GPU_READ_WRITE;
+
+ /* We only need to store one reference for each buffer, so avoid storing
+ * consecutive references for the same buffer. It might not be the most
+ * common pattern, but it is easy to implement.
+ */
+ if(vl->used && vl->entries[vl->used - 1].buf == buf) {
+ vl->entries[vl->used - 1].flags |= flags;
+ return PIPE_OK;
+ }
+
+ /* Grow the table */
+ if(vl->used == vl->size) {
+ unsigned new_size;
+ struct pb_validate_entry *new_entries;
+
+ new_size = vl->size * 2;
+ if(!new_size)
+ return PIPE_ERROR_OUT_OF_MEMORY;
+
+ new_entries = (struct pb_validate_entry *)REALLOC(vl->entries,
+ vl->size*sizeof(struct pb_validate_entry),
+ new_size*sizeof(struct pb_validate_entry));
+ if(!new_entries)
+ return PIPE_ERROR_OUT_OF_MEMORY;
+
+ memset(new_entries + vl->size, 0, (new_size - vl->size)*sizeof(struct pb_validate_entry));
+
+ vl->size = new_size;
+ vl->entries = new_entries;
+ }
+
+ assert(!vl->entries[vl->used].buf);
+ pb_reference(&vl->entries[vl->used].buf, buf);
+ vl->entries[vl->used].flags = flags;
+ ++vl->used;
+
+ return PIPE_OK;
+}
+
+
+enum pipe_error
+pb_validate_foreach(struct pb_validate *vl,
+ enum pipe_error (*callback)(struct pb_buffer *buf, void *data),
+ void *data)
+{
+ unsigned i;
+ for(i = 0; i < vl->used; ++i) {
+ enum pipe_error ret;
+ ret = callback(vl->entries[i].buf, data);
+ if(ret != PIPE_OK)
+ return ret;
+ }
+ return PIPE_OK;
+}
+
+
+enum pipe_error
+pb_validate_validate(struct pb_validate *vl)
+{
+ unsigned i;
+
+ for(i = 0; i < vl->used; ++i) {
+ enum pipe_error ret;
+ ret = pb_validate(vl->entries[i].buf, vl, vl->entries[i].flags);
+ if(ret != PIPE_OK) {
+ while(i--)
+ pb_validate(vl->entries[i].buf, NULL, 0);
+ return ret;
+ }
+ }
+
+ return PIPE_OK;
+}
+
+
+void
+pb_validate_fence(struct pb_validate *vl,
+ struct pipe_fence_handle *fence)
+{
+ unsigned i;
+ for(i = 0; i < vl->used; ++i) {
+ pb_fence(vl->entries[i].buf, fence);
+ pb_reference(&vl->entries[i].buf, NULL);
+ }
+ vl->used = 0;
+}
+
+
+void
+pb_validate_destroy(struct pb_validate *vl)
+{
+ unsigned i;
+ for(i = 0; i < vl->used; ++i)
+ pb_reference(&vl->entries[i].buf, NULL);
+ FREE(vl->entries);
+ FREE(vl);
+}
+
+
+struct pb_validate *
+pb_validate_create(void)
+{
+ struct pb_validate *vl;
+
+ vl = CALLOC_STRUCT(pb_validate);
+ if(!vl)
+ return NULL;
+
+ vl->size = PB_VALIDATE_INITIAL_SIZE;
+ vl->entries = (struct pb_validate_entry *)CALLOC(vl->size, sizeof(struct pb_validate_entry));
+ if(!vl->entries) {
+ FREE(vl);
+ return NULL;
+ }
+
+ return vl;
+}
+
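+
+/*
+ * Illustrative usage sketch, not part of the original code: the intended
+ * lifecycle around one command submission. The function name and the way the
+ * fence is obtained are hypothetical; in a real driver the fence comes from
+ * the winsys after the commands are issued.
+ */
+#if 0
+static enum pipe_error
+example_submit(struct pb_validate *vl,
+               struct pb_buffer *vertex_buf,
+               struct pipe_fence_handle *fence)
+{
+   enum pipe_error ret;
+
+   /* 1. Collect every buffer referenced by the command batch. */
+   ret = pb_validate_add_buffer(vl, vertex_buf, PIPE_BUFFER_USAGE_GPU_READ);
+   if(ret != PIPE_OK)
+      return ret;
+
+   /* 2. Validate right before issuing commands to the hardware. */
+   ret = pb_validate_validate(vl);
+   if(ret != PIPE_OK)
+      return ret;
+
+   /* ... issue the commands, obtaining "fence" from the winsys ... */
+
+   /* 3. Fence all buffers and clear the list right after submission. */
+   pb_validate_fence(vl, fence);
+
+   return PIPE_OK;
+}
+#endif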
diff --git a/src/gallium/auxiliary/pipebuffer/pb_validate.h b/src/gallium/auxiliary/pipebuffer/pb_validate.h
new file mode 100644
index 0000000000..dfb84df1ce
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_validate.h
@@ -0,0 +1,97 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Buffer validation.
+ *
+ * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+#ifndef PB_VALIDATE_H_
+#define PB_VALIDATE_H_
+
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_error.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+struct pb_buffer;
+struct pipe_fence_handle;
+
+
+/**
+ * Buffer validation list.
+ *
+ * It holds a list of buffers to be validated and fenced when flushing.
+ */
+struct pb_validate;
+
+
+enum pipe_error
+pb_validate_add_buffer(struct pb_validate *vl,
+ struct pb_buffer *buf,
+ unsigned flags);
+
+enum pipe_error
+pb_validate_foreach(struct pb_validate *vl,
+ enum pipe_error (*callback)(struct pb_buffer *buf, void *data),
+ void *data);
+
+/**
+ * Validate all buffers for hardware access.
+ *
+ * Should be called right before issuing commands to the hardware.
+ */
+enum pipe_error
+pb_validate_validate(struct pb_validate *vl);
+
+/**
+ * Fence all buffers and clear the list.
+ *
+ * Should be called right after issuing commands to the hardware.
+ */
+void
+pb_validate_fence(struct pb_validate *vl,
+ struct pipe_fence_handle *fence);
+
+struct pb_validate *
+pb_validate_create(void);
+
+void
+pb_validate_destroy(struct pb_validate *vl);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*PB_VALIDATE_H_*/