Diffstat (limited to 'src/gallium/auxiliary/pipebuffer')
-rw-r--r--  src/gallium/auxiliary/pipebuffer/Makefile            |  23
-rw-r--r--  src/gallium/auxiliary/pipebuffer/linked_list.h       |  91
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_buffer.h         | 202
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c  | 299
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.h  | 117
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_buffer_malloc.c  | 127
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_bufmgr.h         | 126
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_bufmgr_fenced.c  | 131
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c      | 593
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c    | 288
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_winsys.c         | 170
11 files changed, 2167 insertions(+), 0 deletions(-)
diff --git a/src/gallium/auxiliary/pipebuffer/Makefile b/src/gallium/auxiliary/pipebuffer/Makefile
new file mode 100644
index 0000000000..588629e870
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/Makefile
@@ -0,0 +1,23 @@
+
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = pipebuffer
+
+DRIVER_SOURCES = \
+ pb_buffer_fenced.c \
+ pb_buffer_malloc.c \
+ pb_bufmgr_fenced.c \
+ pb_bufmgr_mm.c \
+ pb_bufmgr_pool.c \
+ pb_winsys.c
+
+C_SOURCES = \
+ $(DRIVER_SOURCES)
+
+ASM_SOURCES =
+
+include ../../Makefile.template
+
+symlinks:
+
diff --git a/src/gallium/auxiliary/pipebuffer/linked_list.h b/src/gallium/auxiliary/pipebuffer/linked_list.h
new file mode 100644
index 0000000000..e99817fb13
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/linked_list.h
@@ -0,0 +1,91 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * List macros heavily inspired by the Linux kernel
+ * list handling. No list looping yet.
+ *
+ * These macros are not thread-safe, so concurrent operations
+ * must be protected by an external mutex.
+ */
+
+#ifndef LINKED_LIST_H_
+#define LINKED_LIST_H_
+
+
+#include <stddef.h>
+
+
+struct list_head
+{
+ struct list_head *prev;
+ struct list_head *next;
+};
+
+
+#define LIST_INITHEAD(__item) \
+ do { \
+ (__item)->prev = (__item); \
+ (__item)->next = (__item); \
+ } while (0)
+
+#define LIST_ADD(__item, __list) \
+ do { \
+ (__item)->prev = (__list); \
+ (__item)->next = (__list)->next; \
+ (__list)->next->prev = (__item); \
+ (__list)->next = (__item); \
+ } while (0)
+
+#define LIST_ADDTAIL(__item, __list) \
+ do { \
+ (__item)->next = (__list); \
+ (__item)->prev = (__list)->prev; \
+ (__list)->prev->next = (__item); \
+ (__list)->prev = (__item); \
+ } while(0)
+
+#define LIST_DEL(__item) \
+ do { \
+ (__item)->prev->next = (__item)->next; \
+ (__item)->next->prev = (__item)->prev; \
+ } while(0)
+
+#define LIST_DELINIT(__item) \
+ do { \
+ (__item)->prev->next = (__item)->next; \
+ (__item)->next->prev = (__item)->prev; \
+ (__item)->next = (__item); \
+ (__item)->prev = (__item); \
+ } while(0)
+
+#define LIST_ENTRY(__type, __item, __field) \
+ ((__type *)(((char *)(__item)) - offsetof(__type, __field)))
+
+
+#endif /*LINKED_LIST_H_*/
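For illustration, a minimal sketch of how these macros combine in practice; the node type, field names and loop below are hypothetical and not part of this change:

/* Hypothetical user of linked_list.h: embed a list_head in your own type. */
#include "linked_list.h"

struct node {
   struct list_head head;
   int payload;
};

static void
example_walk(void)
{
   struct list_head list;
   struct node a, b;
   struct list_head *curr;

   LIST_INITHEAD(&list);
   a.payload = 1;
   b.payload = 2;
   LIST_ADDTAIL(&a.head, &list);   /* append to the tail */
   LIST_ADDTAIL(&b.head, &list);

   /* There is no looping macro yet, so walk the list by hand. */
   for (curr = list.next; curr != &list; curr = curr->next) {
      struct node *n = LIST_ENTRY(struct node, curr, head);
      (void)n->payload;
   }

   LIST_DEL(&a.head);              /* unlink a single node */
}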
diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer.h b/src/gallium/auxiliary/pipebuffer/pb_buffer.h
new file mode 100644
index 0000000000..97beb5f72a
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_buffer.h
@@ -0,0 +1,202 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Generic code for buffers.
+ *
+ * Behind a pipe buffer handle there can be DMA buffers, client (or user)
+ * buffers, regular malloced buffers, etc. This file provides an abstract base
+ * buffer handle that allows the driver to cope with all those kinds of buffers
+ * in a more flexible way.
+ *
+ * A winsys driver is under no obligation to use this library, and a pipe
+ * driver should be completely agnostic about it.
+ *
+ * \author José Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+#ifndef PB_BUFFER_H_
+#define PB_BUFFER_H_
+
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_debug.h"
+#include "pipe/p_state.h"
+#include "pipe/p_inlines.h"
+
+
+struct pb_vtbl;
+
+/**
+ * Buffer description.
+ *
+ * Used when allocating the buffer.
+ */
+struct pb_desc
+{
+ unsigned alignment;
+ unsigned usage;
+};
+
+
+/**
+ * Base class for all pb_* buffers.
+ */
+struct pb_buffer
+{
+ struct pipe_buffer base;
+
+ /**
+ * Pointer to the virtual function table.
+ *
+ * Avoid accessing this table directly. Use the inline functions below
+ * instead to avoid mistakes.
+ */
+ const struct pb_vtbl *vtbl;
+};
+
+
+/**
+ * Virtual function table for the buffer storage operations.
+ *
+ * Note that creation is not done through this table.
+ */
+struct pb_vtbl
+{
+ void (*destroy)( struct pb_buffer *buf );
+
+ /**
+ * Map the entire data store of a buffer object into the client's address
+ * space. flags is a bitmask of PIPE_BUFFER_FLAG_READ/WRITE.
+ */
+ void *(*map)( struct pb_buffer *buf,
+ unsigned flags );
+
+ void (*unmap)( struct pb_buffer *buf );
+
+ /**
+ * Get the base buffer and the offset.
+ *
+ * A buffer can be subdivided into smaller buffers. This method should return
+ * the underlying buffer and the relative offset.
+ *
+ * Buffers without an underlying base buffer should return themselves, with
+ * a zero offset.
+ *
+ * Note that this will increase the reference count of the base buffer.
+ */
+ void (*get_base_buffer)( struct pb_buffer *buf,
+ struct pb_buffer **base_buf,
+ unsigned *offset );
+};
+
+
+static INLINE struct pipe_buffer *
+pb_pipe_buffer( struct pb_buffer *pbuf )
+{
+ assert(pbuf);
+ return &pbuf->base;
+}
+
+
+static INLINE struct pb_buffer *
+pb_buffer( struct pipe_buffer *buf )
+{
+ assert(buf);
+ /* Could add a magic cookie check on debug builds.
+ */
+ return (struct pb_buffer *)buf;
+}
+
+
+/* Accessor functions for pb->vtbl:
+ */
+static INLINE void *
+pb_map(struct pb_buffer *buf,
+ unsigned flags)
+{
+ assert(buf);
+ return buf->vtbl->map(buf, flags);
+}
+
+
+static INLINE void
+pb_unmap(struct pb_buffer *buf)
+{
+ assert(buf);
+ buf->vtbl->unmap(buf);
+}
+
+
+static INLINE void
+pb_get_base_buffer( struct pb_buffer *buf,
+ struct pb_buffer **base_buf,
+ unsigned *offset )
+{
+ buf->vtbl->get_base_buffer(buf, base_buf, offset);
+}
+
+
+static INLINE void
+pb_destroy(struct pb_buffer *buf)
+{
+ assert(buf);
+ buf->vtbl->destroy(buf);
+}
+
+
+/* XXX: thread safety issues!
+ */
+static INLINE void
+pb_reference(struct pb_buffer **dst,
+ struct pb_buffer *src)
+{
+ if (src)
+ src->base.refcount++;
+
+ if (*dst && --(*dst)->base.refcount == 0)
+ pb_destroy( *dst );
+
+ *dst = src;
+}
+
+
+/**
+ * Malloc-based buffer to store data that can't be used by the graphics
+ * hardware.
+ */
+struct pb_buffer *
+pb_malloc_buffer_create(size_t size,
+ const struct pb_desc *desc);
+
+
+void
+pb_init_winsys(struct pipe_winsys *winsys);
+
+
+#endif /*PB_BUFFER_H_*/
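A rough usage sketch of the inlines above, assuming <string.h> for memset and the PIPE_BUFFER_USAGE_CPU_WRITE flag from p_defines.h; only pb_malloc_buffer_create and the inline accessors come from this header:

/* Hypothetical caller: allocate, map, fill, unmap, release. */
#include <string.h>
#include "pipe/p_defines.h"
#include "pb_buffer.h"

static void
example_fill(void)
{
   struct pb_desc desc;
   struct pb_buffer *buf = NULL;
   void *map;

   memset(&desc, 0, sizeof(desc));
   desc.alignment = 64;
   desc.usage = 0;

   buf = pb_malloc_buffer_create(1024, &desc);
   if (!buf)
      return;

   map = pb_map(buf, PIPE_BUFFER_USAGE_CPU_WRITE);
   if (map) {
      memset(map, 0, 1024);
      pb_unmap(buf);
   }

   /* Dropping the last reference destroys the buffer. */
   pb_reference(&buf, NULL);
}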
diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
new file mode 100644
index 0000000000..f4fc3f6d71
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
@@ -0,0 +1,299 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Implementation of fenced buffers.
+ *
+ * \author José Fonseca <jrfonseca-at-tungstengraphics-dot-com>
+ * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+
+#include "linked_list.h"
+
+#include "p_compiler.h"
+#include "p_debug.h"
+#include "p_winsys.h"
+#include "p_thread.h"
+#include "p_util.h"
+
+#include "pb_buffer.h"
+#include "pb_buffer_fenced.h"
+
+#ifndef __MSC__
+#include <unistd.h>
+#endif
+
+
+/**
+ * Convenience macro (type safe).
+ */
+#define SUPER(__derived) (&(__derived)->base)
+
+
+struct fenced_buffer_list
+{
+ _glthread_Mutex mutex;
+
+ struct pipe_winsys *winsys;
+
+ size_t numDelayed;
+ size_t checkDelayed;
+
+ struct list_head delayed;
+};
+
+
+/**
+ * Wrapper around a pipe buffer which adds fencing and reference counting.
+ */
+struct fenced_buffer
+{
+ struct pb_buffer base;
+
+ struct pb_buffer *buffer;
+
+ struct pipe_fence_handle *fence;
+
+ struct list_head head;
+ struct fenced_buffer_list *list;
+};
+
+
+static INLINE struct fenced_buffer *
+fenced_buffer(struct pb_buffer *buf)
+{
+ assert(buf);
+ assert(buf->vtbl == &fenced_buffer_vtbl);
+ return (struct fenced_buffer *)buf;
+}
+
+
+
+
+static void
+_fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
+ int wait)
+{
+ struct pipe_winsys *winsys = fenced_list->winsys;
+ struct fenced_buffer *fenced_buf;
+ struct list_head *list, *prev;
+ int signaled = -1;
+
+ list = fenced_list->delayed.next;
+
+ if (fenced_list->numDelayed > 3) {
+ unsigned i;
+
+ for (i = 0; i < fenced_list->numDelayed; i += 3) {
+ list = list->next;
+ }
+ }
+
+ prev = list->prev;
+ for (; list != &fenced_list->delayed; list = prev, prev = list->prev) {
+
+ fenced_buf = LIST_ENTRY(struct fenced_buffer, list, head);
+
+ if (signaled != 0) {
+ if (wait) {
+ signaled = winsys->fence_finish(winsys, fenced_buf->fence, 0);
+ }
+ else {
+ signaled = winsys->fence_signalled(winsys, fenced_buf->fence, 0);
+ }
+ }
+
+ if (signaled != 0)
+ /* XXX: we are assuming that buffers are freed in the same order they
+ * are fenced which may not always be true...
+ */
+ break;
+
+ winsys->fence_reference(winsys, &fenced_buf->fence, NULL);
+
+ LIST_DEL(list);
+ fenced_list->numDelayed--;
+
+ /* Do the delayed destroy:
+ */
+ pb_reference(&fenced_buf->buffer, NULL);
+ FREE(fenced_buf);
+ }
+}
+
+
+static void
+fenced_buffer_destroy(struct pb_buffer *buf)
+{
+ struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+ struct fenced_buffer_list *fenced_list = fenced_buf->list;
+
+ if (fenced_buf->fence) {
+ LIST_ADDTAIL(&fenced_buf->head, &fenced_list->delayed);
+ fenced_list->numDelayed++;
+ }
+ else {
+ pb_reference(&fenced_buf->buffer, NULL);
+ FREE(fenced_buf);
+ }
+
+ if ((fenced_list->numDelayed % fenced_list->checkDelayed) == 0)
+ _fenced_buffer_list_check_free(fenced_list, 0);
+}
+
+
+static void *
+fenced_buffer_map(struct pb_buffer *buf,
+ unsigned flags)
+{
+ struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+ return pb_map(fenced_buf->buffer, flags);
+}
+
+
+static void
+fenced_buffer_unmap(struct pb_buffer *buf)
+{
+ struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+ pb_unmap(fenced_buf->buffer);
+}
+
+
+static void
+fenced_buffer_get_base_buffer(struct pb_buffer *buf,
+ struct pb_buffer **base_buf,
+ unsigned *offset)
+{
+ struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+ pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
+}
+
+
+const struct pb_vtbl
+fenced_buffer_vtbl = {
+ fenced_buffer_destroy,
+ fenced_buffer_map,
+ fenced_buffer_unmap,
+ fenced_buffer_get_base_buffer
+};
+
+
+struct pb_buffer *
+fenced_buffer_create(struct fenced_buffer_list *fenced_list,
+ struct pb_buffer *buffer)
+{
+ struct fenced_buffer *buf;
+
+ if(!buffer)
+ return NULL;
+
+ buf = CALLOC_STRUCT(fenced_buffer);
+ if(!buf)
+ return NULL;
+
+ buf->base.base.refcount = 1;
+ buf->base.base.alignment = buffer->base.alignment;
+ buf->base.base.usage = buffer->base.usage;
+ buf->base.base.size = buffer->base.size;
+
+ buf->base.vtbl = &fenced_buffer_vtbl;
+ buf->buffer = buffer;
+ buf->list = fenced_list;
+
+ return &buf->base;
+}
+
+
+void
+buffer_fence(struct pb_buffer *buf,
+ struct pipe_fence_handle *fence)
+{
+ struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+ struct fenced_buffer_list *fenced_list = fenced_buf->list;
+ struct pipe_winsys *winsys = fenced_list->winsys;
+
+ _glthread_LOCK_MUTEX(fenced_list->mutex);
+ winsys->fence_reference(winsys, &fenced_buf->fence, fence);
+ _glthread_UNLOCK_MUTEX(fenced_list->mutex);
+}
+
+
+struct fenced_buffer_list *
+fenced_buffer_list_create(struct pipe_winsys *winsys)
+{
+ struct fenced_buffer_list *fenced_list;
+
+ fenced_list = (struct fenced_buffer_list *)CALLOC(1, sizeof(*fenced_list));
+ if (!fenced_list)
+ return NULL;
+
+ fenced_list->winsys = winsys;
+
+ LIST_INITHEAD(&fenced_list->delayed);
+
+ fenced_list->numDelayed = 0;
+
+ /* TODO: don't hard code this */
+ fenced_list->checkDelayed = 5;
+
+ _glthread_INIT_MUTEX(fenced_list->mutex);
+
+ return fenced_list;
+}
+
+
+void
+fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
+ int wait)
+{
+ _glthread_LOCK_MUTEX(fenced_list->mutex);
+ _fenced_buffer_list_check_free(fenced_list, wait);
+ _glthread_UNLOCK_MUTEX(fenced_list->mutex);
+}
+
+
+void
+fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list)
+{
+ _glthread_LOCK_MUTEX(fenced_list->mutex);
+
+ /* Wait on outstanding fences */
+ while (fenced_list->numDelayed) {
+ _glthread_UNLOCK_MUTEX(fenced_list->mutex);
+ sched_yield();
+ _fenced_buffer_list_check_free(fenced_list, 1);
+ _glthread_LOCK_MUTEX(fenced_list->mutex);
+ }
+
+ _glthread_UNLOCK_MUTEX(fenced_list->mutex);
+
+ FREE(fenced_list);
+}
+
+
diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.h b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.h
new file mode 100644
index 0000000000..c40b9c75e1
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.h
@@ -0,0 +1,117 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Buffer fencing.
+ *
+ * "Fenced buffers" is actually a misnomer. They should be referred as
+ * "fenceable buffers", i.e, buffers that can be fenced, but I couldn't find
+ * the word "fenceable" in the dictionary.
+ *
+ * A "fenced buffer" is a decorator around a normal buffer, which adds two
+ * special properties:
+ * - the ability for the destruction to be delayed by a fence;
+ * - reference counting.
+ *
+ * Usually DMA buffers have a life-time that extends beyond that of their
+ * handle. The end of life is dictated by the fence signalling.
+ *
+ * Between the handle's destruction and the fence signalling, the buffer is
+ * stored in a fenced buffer list.
+ *
+ * \author José Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+#ifndef PB_BUFFER_FENCED_H_
+#define PB_BUFFER_FENCED_H_
+
+
+#include "pipe/p_debug.h"
+
+
+struct pipe_winsys;
+struct pipe_buffer;
+struct pipe_fence_handle;
+
+
+/**
+ * List of buffers which are awaiting fence signalling.
+ */
+struct fenced_buffer_list;
+
+
+/**
+ * The fenced buffer's virtual function table.
+ *
+ * NOTE: Made public for debugging purposes.
+ */
+extern const struct pb_vtbl fenced_buffer_vtbl;
+
+
+/**
+ * Create a fenced buffer list.
+ *
+ * See also fenced_bufmgr_create for a more convenient way to use this.
+ */
+struct fenced_buffer_list *
+fenced_buffer_list_create(struct pipe_winsys *winsys);
+
+
+/**
+ * Walk the fenced buffer list to check and free signalled buffers.
+ */
+void
+fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
+ int wait);
+
+void
+fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list);
+
+
+/**
+ * Wrap a buffer in a fenced buffer.
+ *
+ * NOTE: this will not increase the buffer reference count.
+ */
+struct pb_buffer *
+fenced_buffer_create(struct fenced_buffer_list *fenced,
+ struct pb_buffer *buffer);
+
+
+/**
+ * Set a buffer's fence.
+ *
+ * NOTE: Although it takes a generic pb_buffer argument, it will fail
+ * on everything but buffers returned by fenced_buffer_create.
+ */
+void
+buffer_fence(struct pb_buffer *buf,
+ struct pipe_fence_handle *fence);
+
+
+#endif /*PB_BUFFER_FENCED_H_*/
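A sketch of the intended lifecycle, assuming the winsys, the underlying buffer and the fence handle are supplied by the driver (all three are placeholders here):

/* Hypothetical driver code; 'winsys', 'buf' and 'fence' are assumed inputs. */
#include "pb_buffer.h"
#include "pb_buffer_fenced.h"

static void
example_fenced(struct pipe_winsys *winsys,
               struct pb_buffer *buf,
               struct pipe_fence_handle *fence)
{
   struct fenced_buffer_list *list;
   struct pb_buffer *fenced;

   list = fenced_buffer_list_create(winsys);
   if (!list)
      return;

   fenced = fenced_buffer_create(list, buf);
   if (!fenced) {
      fenced_buffer_list_destroy(list);
      return;
   }

   /* ... submit commands that reference 'fenced' ... */
   buffer_fence(fenced, fence);

   /* The wrapper keeps the underlying buffer alive until the fence signals,
    * so releasing the last reference only queues the destruction. */
   pb_reference(&fenced, NULL);

   /* Reap signalled buffers without blocking (non-zero wait would block). */
   fenced_buffer_list_check_free(list, 0);

   /* Waits for all outstanding fences before freeing the list. */
   fenced_buffer_list_destroy(list);
}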
diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_malloc.c b/src/gallium/auxiliary/pipebuffer/pb_buffer_malloc.c
new file mode 100644
index 0000000000..9e8244f909
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_malloc.c
@@ -0,0 +1,127 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Implementation of malloc-based buffers to store data that can't be processed
+ * by the hardware.
+ *
+ * \author José Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+
+#include "pipe/p_debug.h"
+#include "pipe/p_util.h"
+#include "pb_buffer.h"
+
+
+struct malloc_buffer
+{
+ struct pb_buffer base;
+ void *data;
+};
+
+
+extern const struct pb_vtbl malloc_buffer_vtbl;
+
+static INLINE struct malloc_buffer *
+malloc_buffer(struct pb_buffer *buf)
+{
+ assert(buf);
+ assert(buf->vtbl == &malloc_buffer_vtbl);
+ return (struct malloc_buffer *)buf;
+}
+
+
+static void
+malloc_buffer_destroy(struct pb_buffer *buf)
+{
+ align_free(malloc_buffer(buf)->data);
+ FREE(buf);
+}
+
+
+static void *
+malloc_buffer_map(struct pb_buffer *buf,
+ unsigned flags)
+{
+ return malloc_buffer(buf)->data;
+}
+
+
+static void
+malloc_buffer_unmap(struct pb_buffer *buf)
+{
+ /* No-op */
+}
+
+
+static void
+malloc_buffer_get_base_buffer(struct pb_buffer *buf,
+ struct pb_buffer **base_buf,
+ unsigned *offset)
+{
+ *base_buf = buf;
+ *offset = 0;
+}
+
+
+const struct pb_vtbl
+malloc_buffer_vtbl = {
+ malloc_buffer_destroy,
+ malloc_buffer_map,
+ malloc_buffer_unmap,
+ malloc_buffer_get_base_buffer
+};
+
+
+struct pb_buffer *
+pb_malloc_buffer_create(size_t size,
+ const struct pb_desc *desc)
+{
+ struct malloc_buffer *buf;
+
+ /* TODO: do a single allocation */
+
+ buf = CALLOC_STRUCT(malloc_buffer);
+ if(!buf)
+ return NULL;
+
+ buf->base.base.refcount = 1;
+ buf->base.base.alignment = desc->alignment;
+ buf->base.base.usage = desc->usage;
+ buf->base.base.size = size;
+ buf->base.vtbl = &malloc_buffer_vtbl;
+
+ buf->data = align_malloc(size, desc->alignment < sizeof(void*) ? sizeof(void*) : desc->alignment);
+ if(!buf->data) {
+ align_free(buf);
+ return NULL;
+ }
+
+ return &buf->base;
+}
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h b/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h
new file mode 100644
index 0000000000..1ddf784c97
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h
@@ -0,0 +1,126 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Buffer management.
+ *
+ * A buffer manager does only one basic thing: it creates buffers. Actually,
+ * "buffer factory" would probably a more accurate description.
+ *
+ * You can chain buffer managers so that you can have a finer grained memory
+ * management and pooling.
+ *
+ * For example, for a simple batch buffer manager you would chain:
+ * - the native buffer manager, which provides DMA memory from the graphics
+ * memory space;
+ * - the pool buffer manager, which keeps around a pool of equally sized buffers
+ * to avoid the latency associated with the native buffer manager;
+ * - the fenced buffer manager, which delays buffer destruction until the
+ * moment the card finishes processing it.
+ *
+ * \author José Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+#ifndef PB_BUFMGR_H_
+#define PB_BUFMGR_H_
+
+
+#include <stddef.h>
+
+
+struct pb_desc;
+struct pipe_buffer;
+struct pipe_winsys;
+
+
+/**
+ * Abstract base class for all buffer managers.
+ */
+struct pb_manager
+{
+ /* XXX: we will likely need more allocation flags */
+ struct pb_buffer *
+ (*create_buffer)( struct pb_manager *mgr,
+ size_t size,
+ const struct pb_desc *desc);
+
+ void
+ (*destroy)( struct pb_manager *mgr );
+};
+
+
+/**
+ * Static buffer pool manager.
+ *
+ * Manages the allocation of equally sized buffers. It does so by allocating
+ * a single big buffer and dividing it into equally sized buffers.
+ *
+ * It is meant to manage the allocation of batch buffer pools.
+ */
+struct pb_manager *
+pool_bufmgr_create(struct pb_manager *provider,
+ size_t n, size_t size,
+ const struct pb_desc *desc);
+
+
+/**
+ * Wrapper around the old memory manager.
+ *
+ * It manages buffers of different sizes. It does so by allocating a buffer
+ * with the size of the heap, and then using the old mm memory manager to manage
+ * that heap.
+ */
+struct pb_manager *
+mm_bufmgr_create(struct pb_manager *provider,
+ size_t size, size_t align2);
+
+/**
+ * Same as mm_bufmgr_create.
+ *
+ * The buffer will be released when the manager is destroyed.
+ */
+struct pb_manager *
+mm_bufmgr_create_from_buffer(struct pb_buffer *buffer,
+ size_t size, size_t align2);
+
+
+/**
+ * Fenced buffer manager.
+ *
+ * This manager is just meant for convenience. It wraps the buffers returned
+ * by another manager in fenced buffers, so that their destruction is delayed by a fence.
+ *
+ * NOTE: the buffer manager that provides the buffers will be destroyed
+ * at the same time.
+ */
+struct pb_manager *
+fenced_bufmgr_create(struct pb_manager *provider,
+ struct pipe_winsys *winsys);
+
+
+#endif /*PB_BUFMGR_H_*/
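A hedged sketch of the batch-buffer chain described above; 'winsys_bufmgr' stands in for whatever native manager the winsys provides, and the buffer count, size and alignment are arbitrary:

/* Hypothetical setup of the native -> pool -> fenced chain. */
#include <string.h>
#include "pb_buffer.h"
#include "pb_bufmgr.h"

static struct pb_manager *
example_batch_bufmgr(struct pb_manager *winsys_bufmgr,  /* native manager, assumed */
                     struct pipe_winsys *winsys)
{
   struct pb_desc desc;
   struct pb_manager *pool;
   struct pb_manager *fenced;

   memset(&desc, 0, sizeof(desc));
   desc.alignment = 4096;

   /* Keep 16 batch buffers of 64 KiB each around to hide allocation latency. */
   pool = pool_bufmgr_create(winsys_bufmgr, 16, 64 * 1024, &desc);
   if (!pool)
      return NULL;

   /* Delay destruction of each buffer until the card is done with it. */
   fenced = fenced_bufmgr_create(pool, winsys);
   if (!fenced) {
      pool->destroy(pool);
      return NULL;
   }

   return fenced;
}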
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_fenced.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_fenced.c
new file mode 100644
index 0000000000..c535d3276c
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_fenced.c
@@ -0,0 +1,131 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * A buffer manager that wraps buffers in fenced buffers.
+ *
+ * \author José Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+
+#include "p_debug.h"
+#include "p_util.h"
+
+#include "pb_buffer.h"
+#include "pb_buffer_fenced.h"
+#include "pb_bufmgr.h"
+
+
+struct fenced_pb_manager
+{
+ struct pb_manager base;
+
+ struct pb_manager *provider;
+
+ struct fenced_buffer_list *fenced_list;
+};
+
+
+static INLINE struct fenced_pb_manager *
+fenced_pb_manager(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct fenced_pb_manager *)mgr;
+}
+
+
+static struct pb_buffer *
+fenced_bufmgr_create_buffer(struct pb_manager *mgr,
+ size_t size,
+ const struct pb_desc *desc)
+{
+ struct fenced_pb_manager *fenced_mgr = fenced_pb_manager(mgr);
+ struct pb_buffer *buf;
+ struct pb_buffer *fenced_buf;
+
+ /* check for free buffers before allocating new ones */
+ fenced_buffer_list_check_free(fenced_mgr->fenced_list, 0);
+
+ buf = fenced_mgr->provider->create_buffer(fenced_mgr->provider, size, desc);
+ if(!buf) {
+ /* try harder to get a buffer */
+ fenced_buffer_list_check_free(fenced_mgr->fenced_list, 1);
+
+ buf = fenced_mgr->provider->create_buffer(fenced_mgr->provider, size, desc);
+ if(!buf) {
+ /* give up */
+ return NULL;
+ }
+ }
+
+ fenced_buf = fenced_buffer_create(fenced_mgr->fenced_list, buf);
+ if(!fenced_buf) {
+ assert(buf->base.refcount == 1);
+ pb_destroy(buf);
+ }
+
+ return fenced_buf;
+}
+
+
+static void
+fenced_bufmgr_destroy(struct pb_manager *mgr)
+{
+ struct fenced_pb_manager *fenced_mgr = fenced_pb_manager(mgr);
+
+ fenced_buffer_list_destroy(fenced_mgr->fenced_list);
+
+ fenced_mgr->provider->destroy(fenced_mgr->provider);
+
+ FREE(fenced_mgr);
+}
+
+
+struct pb_manager *
+fenced_bufmgr_create(struct pb_manager *provider,
+ struct pipe_winsys *winsys)
+{
+ struct fenced_pb_manager *fenced_mgr;
+
+ fenced_mgr = (struct fenced_pb_manager *)CALLOC(1, sizeof(*fenced_mgr));
+ if (!fenced_mgr)
+ return NULL;
+
+ fenced_mgr->base.destroy = fenced_bufmgr_destroy;
+ fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer;
+
+ fenced_mgr->provider = provider;
+ fenced_mgr->fenced_list = fenced_buffer_list_create(winsys);
+ if(!fenced_mgr->fenced_list) {
+ FREE(fenced_mgr);
+ return NULL;
+ }
+
+ return &fenced_mgr->base;
+}
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c
new file mode 100644
index 0000000000..8b1b51c0e2
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c
@@ -0,0 +1,593 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright 1999 Wittawat Yamwong
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Buffer manager using the old texture memory manager.
+ *
+ * \author José Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+
+#include "linked_list.h"
+
+#include "p_defines.h"
+#include "p_debug.h"
+#include "p_thread.h"
+#include "p_util.h"
+#include "pb_buffer.h"
+#include "pb_bufmgr.h"
+
+
+/**
+ * Convenience macro (type safe).
+ */
+#define SUPER(__derived) (&(__derived)->base)
+
+
+struct mem_block
+{
+ struct mem_block *next, *prev;
+ struct mem_block *next_free, *prev_free;
+ struct mem_block *heap;
+ int ofs, size;
+ unsigned int free:1;
+ unsigned int reserved:1;
+};
+
+
+#ifdef DEBUG
+/**
+ * For debugging purposes.
+ */
+static void
+mmDumpMemInfo(const struct mem_block *heap)
+{
+ debug_printf("Memory heap %p:\n", (void *)heap);
+ if (heap == 0) {
+ debug_printf(" heap == 0\n");
+ } else {
+ const struct mem_block *p;
+
+ for(p = heap->next; p != heap; p = p->next) {
+ debug_printf(" Offset:%08x, Size:%08x, %c%c\n",p->ofs,p->size,
+ p->free ? 'F':'.',
+ p->reserved ? 'R':'.');
+ }
+
+ debug_printf("\nFree list:\n");
+
+ for(p = heap->next_free; p != heap; p = p->next_free) {
+ debug_printf(" FREE Offset:%08x, Size:%08x, %c%c\n",p->ofs,p->size,
+ p->free ? 'F':'.',
+ p->reserved ? 'R':'.');
+ }
+
+ }
+ debug_printf("End of memory blocks\n");
+}
+#endif
+
+
+/**
+ * input: total size in bytes
+ * return: a heap pointer if OK, NULL if error
+ */
+static struct mem_block *
+mmInit(int ofs, int size)
+{
+ struct mem_block *heap, *block;
+
+ if (size <= 0)
+ return NULL;
+
+ heap = CALLOC_STRUCT(mem_block);
+ if (!heap)
+ return NULL;
+
+ block = CALLOC_STRUCT(mem_block);
+ if (!block) {
+ FREE(heap);
+ return NULL;
+ }
+
+ heap->next = block;
+ heap->prev = block;
+ heap->next_free = block;
+ heap->prev_free = block;
+
+ block->heap = heap;
+ block->next = heap;
+ block->prev = heap;
+ block->next_free = heap;
+ block->prev_free = heap;
+
+ block->ofs = ofs;
+ block->size = size;
+ block->free = 1;
+
+ return heap;
+}
+
+
+static struct mem_block *
+SliceBlock(struct mem_block *p,
+ int startofs, int size,
+ int reserved, int alignment)
+{
+ struct mem_block *newblock;
+
+ /* break left [p, newblock, p->next], then p = newblock */
+ if (startofs > p->ofs) {
+ newblock = CALLOC_STRUCT(mem_block);
+ if (!newblock)
+ return NULL;
+ newblock->ofs = startofs;
+ newblock->size = p->size - (startofs - p->ofs);
+ newblock->free = 1;
+ newblock->heap = p->heap;
+
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+
+ newblock->next_free = p->next_free;
+ newblock->prev_free = p;
+ p->next_free->prev_free = newblock;
+ p->next_free = newblock;
+
+ p->size -= newblock->size;
+ p = newblock;
+ }
+
+ /* break right, also [p, newblock, p->next] */
+ if (size < p->size) {
+ newblock = CALLOC_STRUCT(mem_block);
+ if (!newblock)
+ return NULL;
+ newblock->ofs = startofs + size;
+ newblock->size = p->size - size;
+ newblock->free = 1;
+ newblock->heap = p->heap;
+
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+
+ newblock->next_free = p->next_free;
+ newblock->prev_free = p;
+ p->next_free->prev_free = newblock;
+ p->next_free = newblock;
+
+ p->size = size;
+ }
+
+ /* p = middle block */
+ p->free = 0;
+
+ /* Remove p from the free list:
+ */
+ p->next_free->prev_free = p->prev_free;
+ p->prev_free->next_free = p->next_free;
+
+ p->next_free = 0;
+ p->prev_free = 0;
+
+ p->reserved = reserved;
+ return p;
+}
+
+
+/**
+ * Allocate 'size' bytes with 2^align2 byte alignment,
+ * restricting the search to free memory after 'startSearch'.
+ * Depth and back buffers should be in different 4 MB banks
+ * to get better page hits if possible.
+ * input: size = size of block
+ * align2 = 2^align2 bytes alignment
+ * startSearch = linear offset from start of heap to begin search
+ * return: pointer to the allocated block, 0 if error
+ */
+static struct mem_block *
+mmAllocMem(struct mem_block *heap, int size, int align2, int startSearch)
+{
+ struct mem_block *p;
+ const int mask = (1 << align2)-1;
+ int startofs = 0;
+ int endofs;
+
+ if (!heap || align2 < 0 || size <= 0)
+ return NULL;
+
+ for (p = heap->next_free; p != heap; p = p->next_free) {
+ assert(p->free);
+
+ startofs = (p->ofs + mask) & ~mask;
+ if ( startofs < startSearch ) {
+ startofs = startSearch;
+ }
+ endofs = startofs+size;
+ if (endofs <= (p->ofs+p->size))
+ break;
+ }
+
+ if (p == heap)
+ return NULL;
+
+ assert(p->free);
+ p = SliceBlock(p,startofs,size,0,mask+1);
+
+ return p;
+}
+
+
+#if 0
+/**
+ * Free block starts at offset
+ * input: pointer to a heap, start offset
+ * return: pointer to a block
+ */
+static struct mem_block *
+mmFindBlock(struct mem_block *heap, int start)
+{
+ struct mem_block *p;
+
+ for (p = heap->next; p != heap; p = p->next) {
+ if (p->ofs == start)
+ return p;
+ }
+
+ return NULL;
+}
+#endif
+
+
+static INLINE int
+Join2Blocks(struct mem_block *p)
+{
+ /* XXX there should be some assertions here */
+
+ /* NOTE: heap->free == 0 */
+
+ if (p->free && p->next->free) {
+ struct mem_block *q = p->next;
+
+ assert(p->ofs + p->size == q->ofs);
+ p->size += q->size;
+
+ p->next = q->next;
+ q->next->prev = p;
+
+ q->next_free->prev_free = q->prev_free;
+ q->prev_free->next_free = q->next_free;
+
+ FREE(q);
+ return 1;
+ }
+ return 0;
+}
+
+
+/**
+ * Free block starts at offset
+ * input: pointer to a block
+ * return: 0 if OK, -1 if error
+ */
+static int
+mmFreeMem(struct mem_block *b)
+{
+ if (!b)
+ return 0;
+
+ if (b->free) {
+ debug_printf("block already free\n");
+ return -1;
+ }
+ if (b->reserved) {
+ debug_printf("block is reserved\n");
+ return -1;
+ }
+
+ b->free = 1;
+ b->next_free = b->heap->next_free;
+ b->prev_free = b->heap;
+ b->next_free->prev_free = b;
+ b->prev_free->next_free = b;
+
+ Join2Blocks(b);
+ if (b->prev != b->heap)
+ Join2Blocks(b->prev);
+
+ return 0;
+}
+
+
+/**
+ * destroy MM
+ */
+static void
+mmDestroy(struct mem_block *heap)
+{
+ struct mem_block *p;
+
+ if (!heap)
+ return;
+
+ for (p = heap->next; p != heap; ) {
+ struct mem_block *next = p->next;
+ FREE(p);
+ p = next;
+ }
+
+ FREE(heap);
+}
+
+
+struct mm_pb_manager
+{
+ struct pb_manager base;
+
+ _glthread_Mutex mutex;
+
+ size_t size;
+ struct mem_block *heap;
+
+ size_t align2;
+
+ struct pb_buffer *buffer;
+ void *map;
+};
+
+
+static INLINE struct mm_pb_manager *
+mm_pb_manager(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct mm_pb_manager *)mgr;
+}
+
+
+struct mm_buffer
+{
+ struct pb_buffer base;
+
+ struct mm_pb_manager *mgr;
+
+ struct mem_block *block;
+};
+
+
+static INLINE struct mm_buffer *
+mm_buffer(struct pb_buffer *buf)
+{
+ assert(buf);
+ return (struct mm_buffer *)buf;
+}
+
+
+static void
+mm_buffer_destroy(struct pb_buffer *buf)
+{
+ struct mm_buffer *mm_buf = mm_buffer(buf);
+ struct mm_pb_manager *mm = mm_buf->mgr;
+
+ assert(buf->base.refcount == 0);
+
+ _glthread_LOCK_MUTEX(mm->mutex);
+ mmFreeMem(mm_buf->block);
+ FREE(buf);
+ _glthread_UNLOCK_MUTEX(mm->mutex);
+}
+
+
+static void *
+mm_buffer_map(struct pb_buffer *buf,
+ unsigned flags)
+{
+ struct mm_buffer *mm_buf = mm_buffer(buf);
+ struct mm_pb_manager *mm = mm_buf->mgr;
+
+ return (unsigned char *) mm->map + mm_buf->block->ofs;
+}
+
+
+static void
+mm_buffer_unmap(struct pb_buffer *buf)
+{
+ /* No-op */
+}
+
+
+static void
+mm_buffer_get_base_buffer(struct pb_buffer *buf,
+ struct pb_buffer **base_buf,
+ unsigned *offset)
+{
+ struct mm_buffer *mm_buf = mm_buffer(buf);
+ struct mm_pb_manager *mm = mm_buf->mgr;
+ pb_get_base_buffer(mm->buffer, base_buf, offset);
+ *offset += mm_buf->block->ofs;
+}
+
+
+static const struct pb_vtbl
+mm_buffer_vtbl = {
+ mm_buffer_destroy,
+ mm_buffer_map,
+ mm_buffer_unmap,
+ mm_buffer_get_base_buffer
+};
+
+
+static struct pb_buffer *
+mm_bufmgr_create_buffer(struct pb_manager *mgr,
+ size_t size,
+ const struct pb_desc *desc)
+{
+ struct mm_pb_manager *mm = mm_pb_manager(mgr);
+ struct mm_buffer *mm_buf;
+
+ /* We don't handle alignments larger than the one initially set up */
+ assert(desc->alignment % (1 << mm->align2) == 0);
+ if(desc->alignment % (1 << mm->align2))
+ return NULL;
+
+ _glthread_LOCK_MUTEX(mm->mutex);
+
+ mm_buf = CALLOC_STRUCT(mm_buffer);
+ if (!mm_buf) {
+ _glthread_UNLOCK_MUTEX(mm->mutex);
+ return NULL;
+ }
+
+ mm_buf->base.base.refcount = 1;
+ mm_buf->base.base.alignment = desc->alignment;
+ mm_buf->base.base.usage = desc->usage;
+ mm_buf->base.base.size = size;
+
+ mm_buf->base.vtbl = &mm_buffer_vtbl;
+
+ mm_buf->mgr = mm;
+
+ mm_buf->block = mmAllocMem(mm->heap, size, mm->align2, 0);
+ if(!mm_buf->block) {
+ debug_printf("warning: heap full\n");
+#if 0
+ mmDumpMemInfo(mm->heap);
+#endif
+
+ mm_buf->block = mmAllocMem(mm->heap, size, mm->align2, 0);
+ if(!mm_buf->block) {
+ assert(0);
+ FREE(mm_buf);
+ _glthread_UNLOCK_MUTEX(mm->mutex);
+ return NULL;
+ }
+ }
+
+ /* Some sanity checks */
+ assert(0 <= mm_buf->block->ofs && mm_buf->block->ofs < mm->size);
+ assert(size <= mm_buf->block->size && mm_buf->block->ofs + mm_buf->block->size <= mm->size);
+
+ _glthread_UNLOCK_MUTEX(mm->mutex);
+ return SUPER(mm_buf);
+}
+
+
+static void
+mm_bufmgr_destroy(struct pb_manager *mgr)
+{
+ struct mm_pb_manager *mm = mm_pb_manager(mgr);
+
+ _glthread_LOCK_MUTEX(mm->mutex);
+
+ mmDestroy(mm->heap);
+
+ pb_unmap(mm->buffer);
+ pb_reference(&mm->buffer, NULL);
+
+ _glthread_UNLOCK_MUTEX(mm->mutex);
+
+ FREE(mgr);
+}
+
+
+struct pb_manager *
+mm_bufmgr_create_from_buffer(struct pb_buffer *buffer,
+ size_t size, size_t align2)
+{
+ struct mm_pb_manager *mm;
+
+ if(!buffer)
+ return NULL;
+
+ mm = CALLOC_STRUCT(mm_pb_manager);
+ if (!mm)
+ return NULL;
+
+ mm->base.create_buffer = mm_bufmgr_create_buffer;
+ mm->base.destroy = mm_bufmgr_destroy;
+
+ mm->size = size;
+ mm->align2 = align2; /* alignment as a power of two (2^align2 bytes) */
+
+ _glthread_INIT_MUTEX(mm->mutex);
+
+ mm->buffer = buffer;
+
+ mm->map = pb_map(mm->buffer,
+ PIPE_BUFFER_USAGE_CPU_READ |
+ PIPE_BUFFER_USAGE_CPU_WRITE);
+ if(!mm->map)
+ goto failure;
+
+ mm->heap = mmInit(0, size);
+ if (!mm->heap)
+ goto failure;
+
+ return SUPER(mm);
+
+failure:
+ if(mm->heap)
+ mmDestroy(mm->heap);
+ if(mm->map)
+ pb_unmap(mm->buffer);
+ if(mm)
+ FREE(mm);
+ return NULL;
+}
+
+
+struct pb_manager *
+mm_bufmgr_create(struct pb_manager *provider,
+ size_t size, size_t align2)
+{
+ struct pb_buffer *buffer;
+ struct pb_manager *mgr;
+ struct pb_desc desc;
+
+ assert(provider);
+ assert(provider->create_buffer);
+
+ memset(&desc, 0, sizeof(desc));
+ desc.alignment = 1 << align2;
+
+ buffer = provider->create_buffer(provider, size, &desc);
+ if (!buffer)
+ return NULL;
+
+ mgr = mm_bufmgr_create_from_buffer(buffer, size, align2);
+ if (!mgr) {
+ pb_reference(&buffer, NULL);
+ return NULL;
+ }
+
+ return mgr;
+}
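For completeness, a one-line sketch of the mm wrapper with arbitrary numbers: a 2 MiB heap carved out of the provider, with 2^6 = 64-byte minimum alignment.

/* Hypothetical: sub-allocate from a 2 MiB heap, 64-byte (2^6) alignment. */
static struct pb_manager *
example_mm(struct pb_manager *provider)
{
   return mm_bufmgr_create(provider, 2 * 1024 * 1024, 6);
}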
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c
new file mode 100644
index 0000000000..04477a865a
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c
@@ -0,0 +1,288 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Batch buffer pool management.
+ *
+ * \author José Fonseca <jrfonseca-at-tungstengraphics-dot-com>
+ * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+
+#include "linked_list.h"
+
+#include "p_compiler.h"
+#include "p_debug.h"
+#include "p_thread.h"
+#include "p_defines.h"
+#include "p_util.h"
+
+#include "pb_buffer.h"
+#include "pb_bufmgr.h"
+
+
+/**
+ * Convenience macro (type safe).
+ */
+#define SUPER(__derived) (&(__derived)->base)
+
+
+struct pool_pb_manager
+{
+ struct pb_manager base;
+
+ _glthread_Mutex mutex;
+
+ size_t bufSize;
+ size_t bufAlign;
+
+ size_t numFree;
+ size_t numTot;
+
+ struct list_head free;
+
+ struct pb_buffer *buffer;
+ void *map;
+
+ struct pool_buffer *bufs;
+};
+
+
+static INLINE struct pool_pb_manager *
+pool_pb_manager(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct pool_pb_manager *)mgr;
+}
+
+
+struct pool_buffer
+{
+ struct pb_buffer base;
+
+ struct pool_pb_manager *mgr;
+
+ struct list_head head;
+
+ size_t start;
+};
+
+
+static INLINE struct pool_buffer *
+pool_buffer(struct pb_buffer *buf)
+{
+ assert(buf);
+ return (struct pool_buffer *)buf;
+}
+
+
+
+static void
+pool_buffer_destroy(struct pb_buffer *buf)
+{
+ struct pool_buffer *pool_buf = pool_buffer(buf);
+ struct pool_pb_manager *pool = pool_buf->mgr;
+
+ assert(pool_buf->base.base.refcount == 0);
+
+ _glthread_LOCK_MUTEX(pool->mutex);
+ LIST_ADD(&pool_buf->head, &pool->free);
+ pool->numFree++;
+ _glthread_UNLOCK_MUTEX(pool->mutex);
+}
+
+
+static void *
+pool_buffer_map(struct pb_buffer *buf, unsigned flags)
+{
+ struct pool_buffer *pool_buf = pool_buffer(buf);
+ struct pool_pb_manager *pool = pool_buf->mgr;
+ void *map;
+
+ _glthread_LOCK_MUTEX(pool->mutex);
+ map = (unsigned char *) pool->map + pool_buf->start;
+ _glthread_UNLOCK_MUTEX(pool->mutex);
+ return map;
+}
+
+
+static void
+pool_buffer_unmap(struct pb_buffer *buf)
+{
+ /* No-op */
+}
+
+
+static void
+pool_buffer_get_base_buffer(struct pb_buffer *buf,
+ struct pb_buffer **base_buf,
+ unsigned *offset)
+{
+ struct pool_buffer *pool_buf = pool_buffer(buf);
+ struct pool_pb_manager *pool = pool_buf->mgr;
+ pb_get_base_buffer(pool->buffer, base_buf, offset);
+ *offset += pool_buf->start;
+}
+
+
+static const struct pb_vtbl
+pool_buffer_vtbl = {
+ pool_buffer_destroy,
+ pool_buffer_map,
+ pool_buffer_unmap,
+ pool_buffer_get_base_buffer
+};
+
+
+static struct pb_buffer *
+pool_bufmgr_create_buffer(struct pb_manager *mgr,
+ size_t size,
+ const struct pb_desc *desc)
+{
+ struct pool_pb_manager *pool = pool_pb_manager(mgr);
+ struct pool_buffer *pool_buf;
+ struct list_head *item;
+
+ assert(size == pool->bufSize);
+ assert(pool->bufAlign % desc->alignment == 0);
+
+ _glthread_LOCK_MUTEX(pool->mutex);
+
+ if (pool->numFree == 0) {
+ _glthread_UNLOCK_MUTEX(pool->mutex);
+ debug_printf("warning: out of fixed size buffer objects\n");
+ return NULL;
+ }
+
+ item = pool->free.next;
+
+ if (item == &pool->free) {
+ _glthread_UNLOCK_MUTEX(pool->mutex);
+ debug_printf("error: fixed size buffer pool corruption\n");
+ return NULL;
+ }
+
+ LIST_DEL(item);
+ --pool->numFree;
+
+ _glthread_UNLOCK_MUTEX(pool->mutex);
+
+ pool_buf = LIST_ENTRY(struct pool_buffer, item, head);
+ assert(pool_buf->base.base.refcount == 0);
+ pool_buf->base.base.refcount = 1;
+ pool_buf->base.base.alignment = desc->alignment;
+ pool_buf->base.base.usage = desc->usage;
+
+ return SUPER(pool_buf);
+}
+
+
+static void
+pool_bufmgr_destroy(struct pb_manager *mgr)
+{
+ struct pool_pb_manager *pool = pool_pb_manager(mgr);
+ _glthread_LOCK_MUTEX(pool->mutex);
+
+ FREE(pool->bufs);
+
+ pb_unmap(pool->buffer);
+ pb_reference(&pool->buffer, NULL);
+
+ _glthread_UNLOCK_MUTEX(pool->mutex);
+
+ FREE(mgr);
+}
+
+
+struct pb_manager *
+pool_bufmgr_create(struct pb_manager *provider,
+ size_t numBufs,
+ size_t bufSize,
+ const struct pb_desc *desc)
+{
+ struct pool_pb_manager *pool;
+ struct pool_buffer *pool_buf;
+ size_t i;
+
+ pool = (struct pool_pb_manager *)CALLOC(1, sizeof(*pool));
+ if (!pool)
+ return NULL;
+
+ pool->base.destroy = pool_bufmgr_destroy;
+ pool->base.create_buffer = pool_bufmgr_create_buffer;
+
+ LIST_INITHEAD(&pool->free);
+
+ pool->numTot = numBufs;
+ pool->numFree = numBufs;
+ pool->bufSize = bufSize;
+ pool->bufAlign = desc->alignment;
+
+ _glthread_INIT_MUTEX(pool->mutex);
+
+ pool->buffer = provider->create_buffer(provider, numBufs*bufSize, desc);
+ if (!pool->buffer)
+ goto failure;
+
+ pool->map = pb_map(pool->buffer,
+ PIPE_BUFFER_USAGE_CPU_READ |
+ PIPE_BUFFER_USAGE_CPU_WRITE);
+ if(!pool->map)
+ goto failure;
+
+ pool->bufs = (struct pool_buffer *)CALLOC(numBufs, sizeof(*pool->bufs));
+ if (!pool->bufs)
+ goto failure;
+
+ pool_buf = pool->bufs;
+ for (i = 0; i < numBufs; ++i) {
+ pool_buf->base.base.refcount = 0;
+ pool_buf->base.base.alignment = 0;
+ pool_buf->base.base.usage = 0;
+ pool_buf->base.base.size = bufSize;
+ pool_buf->base.vtbl = &pool_buffer_vtbl;
+ pool_buf->mgr = pool;
+ pool_buf->start = i * bufSize;
+ LIST_ADDTAIL(&pool_buf->head, &pool->free);
+ pool_buf++;
+ }
+
+ return SUPER(pool);
+
+failure:
+ if(pool->bufs)
+ FREE(pool->bufs);
+ if(pool->map)
+ pb_unmap(pool->buffer);
+ if(pool->buffer)
+ pb_reference(&pool->buffer, NULL);
+ if(pool)
+ FREE(pool);
+ return NULL;
+}
diff --git a/src/gallium/auxiliary/pipebuffer/pb_winsys.c b/src/gallium/auxiliary/pipebuffer/pb_winsys.c
new file mode 100644
index 0000000000..978944091f
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_winsys.c
@@ -0,0 +1,170 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Implementation of client buffers (also designated as "user buffers"), which
+ * are just state-tracker-owned data masquerading as buffers.
+ *
+ * \author José Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+
+#include "pipe/p_winsys.h"
+#include "pipe/p_util.h"
+
+#include "pb_buffer.h"
+
+
+/**
+ * User buffers are special buffers that initially reference memory
+ * held by the user but which may if necessary copy that memory into
+ * device memory behind the scenes, for submission to hardware.
+ *
+ * These are particularly useful when the referenced data is never
+ * submitted to hardware at all, as is the case with software
+ * vertex processing.
+ */
+struct pb_user_buffer
+{
+ struct pb_buffer base;
+ void *data;
+};
+
+
+extern const struct pb_vtbl pb_user_buffer_vtbl;
+
+
+static INLINE struct pb_user_buffer *
+pb_user_buffer(struct pb_buffer *buf)
+{
+ assert(buf);
+ assert(buf->vtbl == &pb_user_buffer_vtbl);
+ return (struct pb_user_buffer *)buf;
+}
+
+
+static void
+pb_user_buffer_destroy(struct pb_buffer *buf)
+{
+ assert(buf);
+ FREE(buf);
+}
+
+
+static void *
+pb_user_buffer_map(struct pb_buffer *buf,
+ unsigned flags)
+{
+ return pb_user_buffer(buf)->data;
+}
+
+
+static void
+pb_user_buffer_unmap(struct pb_buffer *buf)
+{
+ /* No-op */
+}
+
+
+static void
+pb_user_buffer_get_base_buffer(struct pb_buffer *buf,
+ struct pb_buffer **base_buf,
+ unsigned *offset)
+{
+ *base_buf = buf;
+ *offset = 0;
+}
+
+
+const struct pb_vtbl
+pb_user_buffer_vtbl = {
+ pb_user_buffer_destroy,
+ pb_user_buffer_map,
+ pb_user_buffer_unmap,
+ pb_user_buffer_get_base_buffer
+};
+
+
+static struct pipe_buffer *
+pb_winsys_user_buffer_create(struct pipe_winsys *winsys,
+ void *data,
+ unsigned bytes)
+{
+ struct pb_user_buffer *buf = CALLOC_STRUCT(pb_user_buffer);
+
+ if(!buf)
+ return NULL;
+
+ buf->base.base.refcount = 1;
+ buf->base.base.size = bytes;
+ buf->base.base.alignment = 0;
+ buf->base.base.usage = 0;
+
+ buf->base.vtbl = &pb_user_buffer_vtbl;
+ buf->data = data;
+
+ return &buf->base.base;
+}
+
+
+static void *
+pb_winsys_buffer_map(struct pipe_winsys *winsys,
+ struct pipe_buffer *buf,
+ unsigned flags)
+{
+ (void)winsys;
+ return pb_map(pb_buffer(buf), flags);
+}
+
+
+static void
+pb_winsys_buffer_unmap(struct pipe_winsys *winsys,
+ struct pipe_buffer *buf)
+{
+ (void)winsys;
+ pb_unmap(pb_buffer(buf));
+}
+
+
+static void
+pb_winsys_buffer_destroy(struct pipe_winsys *winsys,
+ struct pipe_buffer *buf)
+{
+ (void)winsys;
+ pb_destroy(pb_buffer(buf));
+}
+
+
+void
+pb_init_winsys(struct pipe_winsys *winsys)
+{
+ winsys->user_buffer_create = pb_winsys_user_buffer_create;
+ winsys->buffer_map = pb_winsys_buffer_map;
+ winsys->buffer_unmap = pb_winsys_buffer_unmap;
+ winsys->buffer_destroy = pb_winsys_buffer_destroy;
+}
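Finally, a sketch of how a winsys might pull these callbacks in during initialization; my_winsys_create and the remaining setup are hypothetical placeholders:

/* Hypothetical winsys bootstrap. */
#include "pipe/p_winsys.h"
#include "pipe/p_util.h"
#include "pb_buffer.h"

struct pipe_winsys *
my_winsys_create(void)
{
   struct pipe_winsys *winsys = CALLOC_STRUCT(pipe_winsys);
   if (!winsys)
      return NULL;

   /* Let pipebuffer provide user_buffer_create, buffer_map,
    * buffer_unmap and buffer_destroy. */
   pb_init_winsys(winsys);

   /* The winsys itself still has to supply buffer_create, the fence
    * callbacks, flush_frontbuffer, etc. */
   return winsys;
}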