Diffstat (limited to 'src/gallium/auxiliary/pipebuffer')
-rw-r--r-- | src/gallium/auxiliary/pipebuffer/Makefile | 18
-rw-r--r-- | src/gallium/auxiliary/pipebuffer/SConscript | 18
-rw-r--r-- | src/gallium/auxiliary/pipebuffer/pb_buffer.h | 292
-rw-r--r-- | src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c | 1069
-rw-r--r-- | src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.h | 104
-rw-r--r-- | src/gallium/auxiliary/pipebuffer/pb_buffer_malloc.c | 188
-rw-r--r-- | src/gallium/auxiliary/pipebuffer/pb_bufmgr.h | 215
-rw-r--r-- | src/gallium/auxiliary/pipebuffer/pb_bufmgr_alt.c | 120
-rw-r--r-- | src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c | 394
-rw-r--r-- | src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c | 499
-rw-r--r-- | src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c | 318
-rw-r--r-- | src/gallium/auxiliary/pipebuffer/pb_bufmgr_ondemand.c | 305
-rw-r--r-- | src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c | 321
-rw-r--r-- | src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c | 587
-rw-r--r-- | src/gallium/auxiliary/pipebuffer/pb_validate.c | 192
-rw-r--r-- | src/gallium/auxiliary/pipebuffer/pb_validate.h | 97
16 files changed, 4737 insertions, 0 deletions
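The managers introduced by this change are meant to be stacked on top of a winsys-provided pb_manager. As a rough sketch of how the pieces below fit together (illustrative only: my_winsys_create_manager(), my_winsys_create_fence_ops() and the sizes are made-up placeholders, not part of this commit), a driver could chain a cache manager and a fenced manager over the raw provider:

    #include "pb_buffer.h"
    #include "pb_bufmgr.h"

    /* Hypothetical winsys hooks supplying raw GPU memory and fence operations. */
    extern struct pb_manager *my_winsys_create_manager(void);
    extern struct pb_fence_ops *my_winsys_create_fence_ops(void);

    static struct pb_manager *
    setup_buffer_managers(void)
    {
       struct pb_manager *provider = my_winsys_create_manager();
       struct pb_fence_ops *ops = my_winsys_create_fence_ops();
       struct pb_manager *cached, *fenced;

       /* Keep freed buffers cached for up to one second for quick reuse. */
       cached = pb_cache_manager_create(provider, 1000000);

       /* Delay destruction until the GPU is done with each buffer, and allow
        * up to 16 MiB of malloc'ed backing store before waiting on fences. */
       fenced = fenced_bufmgr_create(cached, ops,
                                     16 * 1024 * 1024,   /* max_buffer_size */
                                     16 * 1024 * 1024);  /* max_cpu_total_size */
       return fenced;
    }

    /* Buffers are then allocated from the top of the chain: */
    static void
    example_allocation(struct pb_manager *mgr)
    {
       struct pb_desc desc = { 64 /* alignment */, 0 /* usage */ };
       struct pb_buffer *buf = mgr->create_buffer(mgr, 4096, &desc);
       void *map;

       if (!buf)
          return;

       map = pb_map(buf, PB_USAGE_CPU_WRITE);
       if (map) {
          /* ... fill the buffer ... */
          pb_unmap(buf);
       }
       pb_reference(&buf, NULL);   /* drop the reference when done */
    }

Each manager simply forwards to its provider, so the other managers added here (pool, slab, debug, on-demand, alt) compose the same way.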
diff --git a/src/gallium/auxiliary/pipebuffer/Makefile b/src/gallium/auxiliary/pipebuffer/Makefile new file mode 100644 index 0000000000..21d25d2474 --- /dev/null +++ b/src/gallium/auxiliary/pipebuffer/Makefile @@ -0,0 +1,18 @@ +TOP = ../../../.. +include $(TOP)/configs/current + +LIBNAME = pipebuffer + +C_SOURCES = \ + pb_buffer_fenced.c \ + pb_buffer_malloc.c \ + pb_bufmgr_alt.c \ + pb_bufmgr_cache.c \ + pb_bufmgr_debug.c \ + pb_bufmgr_mm.c \ + pb_bufmgr_ondemand.c \ + pb_bufmgr_pool.c \ + pb_bufmgr_slab.c \ + pb_validate.c + +include ../../Makefile.template diff --git a/src/gallium/auxiliary/pipebuffer/SConscript b/src/gallium/auxiliary/pipebuffer/SConscript new file mode 100644 index 0000000000..a074a55471 --- /dev/null +++ b/src/gallium/auxiliary/pipebuffer/SConscript @@ -0,0 +1,18 @@ +Import('*') + +pipebuffer = env.ConvenienceLibrary( + target = 'pipebuffer', + source = [ + 'pb_buffer_fenced.c', + 'pb_buffer_malloc.c', + 'pb_bufmgr_alt.c', + 'pb_bufmgr_cache.c', + 'pb_bufmgr_debug.c', + 'pb_bufmgr_mm.c', + 'pb_bufmgr_ondemand.c', + 'pb_bufmgr_pool.c', + 'pb_bufmgr_slab.c', + 'pb_validate.c', + ]) + +auxiliaries.insert(0, pipebuffer) diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer.h b/src/gallium/auxiliary/pipebuffer/pb_buffer.h new file mode 100644 index 0000000000..a6c50dcf0c --- /dev/null +++ b/src/gallium/auxiliary/pipebuffer/pb_buffer.h @@ -0,0 +1,292 @@ +/************************************************************************** + * + * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +/** + * \file + * Generic code for buffers. + * + * Behind a pipe buffle handle there can be DMA buffers, client (or user) + * buffers, regular malloced buffers, etc. This file provides an abstract base + * buffer handle that allows the driver to cope with all those kinds of buffers + * in a more flexible way. + * + * There is no obligation of a winsys driver to use this library. And a pipe + * driver should be completly agnostic about it. 
+ * + * \author Jose Fonseca <jrfonseca@tungstengraphics.com> + */ + +#ifndef PB_BUFFER_H_ +#define PB_BUFFER_H_ + + +#include "pipe/p_compiler.h" +#include "util/u_debug.h" +#include "util/u_inlines.h" +#include "pipe/p_defines.h" + + +#ifdef __cplusplus +extern "C" { +#endif + + +struct pb_vtbl; +struct pb_validate; +struct pipe_fence_handle; + + +#define PB_USAGE_CPU_READ (1 << 0) +#define PB_USAGE_CPU_WRITE (1 << 1) +#define PB_USAGE_GPU_READ (1 << 2) +#define PB_USAGE_GPU_WRITE (1 << 3) +#define PB_USAGE_UNSYNCHRONIZED (1 << 10) +#define PB_USAGE_DONTBLOCK (1 << 9) + +#define PB_USAGE_CPU_READ_WRITE \ + ( PB_USAGE_CPU_READ | PB_USAGE_CPU_WRITE ) +#define PB_USAGE_GPU_READ_WRITE \ + ( PB_USAGE_GPU_READ | PB_USAGE_GPU_WRITE ) +#define PB_USAGE_WRITE \ + ( PB_USAGE_CPU_WRITE | PB_USAGE_GPU_WRITE ) + +/** + * Buffer description. + * + * Used when allocating the buffer. + */ +struct pb_desc +{ + unsigned alignment; + unsigned usage; +}; + + +/** + * Size. Regular (32bit) unsigned for now. + */ +typedef unsigned pb_size; + + +/** + * Base class for all pb_* buffers. + */ +struct pb_buffer +{ + /* This used to be a pipe_buffer struct: + */ + struct { + struct pipe_reference reference; + unsigned size; + unsigned alignment; + unsigned usage; + } base; + + /** + * Pointer to the virtual function table. + * + * Avoid accessing this table directly. Use the inline functions below + * instead to avoid mistakes. + */ + const struct pb_vtbl *vtbl; +}; + + +/** + * Virtual function table for the buffer storage operations. + * + * Note that creation is not done through this table. + */ +struct pb_vtbl +{ + void (*destroy)( struct pb_buffer *buf ); + + /** + * Map the entire data store of a buffer object into the client's address. + * flags is bitmask of PB_USAGE_CPU_READ/WRITE. + */ + void *(*map)( struct pb_buffer *buf, + unsigned flags ); + + void (*unmap)( struct pb_buffer *buf ); + + enum pipe_error (*validate)( struct pb_buffer *buf, + struct pb_validate *vl, + unsigned flags ); + + void (*fence)( struct pb_buffer *buf, + struct pipe_fence_handle *fence ); + + /** + * Get the base buffer and the offset. + * + * A buffer can be subdivided in smaller buffers. This method should return + * the underlaying buffer, and the relative offset. + * + * Buffers without an underlaying base buffer should return themselves, with + * a zero offset. + * + * Note that this will increase the reference count of the base buffer. 
+ */ + void (*get_base_buffer)( struct pb_buffer *buf, + struct pb_buffer **base_buf, + pb_size *offset ); + +}; + + + +/* Accessor functions for pb->vtbl: + */ +static INLINE void * +pb_map(struct pb_buffer *buf, + unsigned flags) +{ + assert(buf); + if(!buf) + return NULL; + assert(pipe_is_referenced(&buf->base.reference)); + return buf->vtbl->map(buf, flags); +} + + +static INLINE void +pb_unmap(struct pb_buffer *buf) +{ + assert(buf); + if(!buf) + return; + assert(pipe_is_referenced(&buf->base.reference)); + buf->vtbl->unmap(buf); +} + + +static INLINE void +pb_get_base_buffer( struct pb_buffer *buf, + struct pb_buffer **base_buf, + pb_size *offset ) +{ + assert(buf); + if(!buf) { + base_buf = NULL; + offset = 0; + return; + } + assert(pipe_is_referenced(&buf->base.reference)); + assert(buf->vtbl->get_base_buffer); + buf->vtbl->get_base_buffer(buf, base_buf, offset); + assert(*base_buf); + assert(*offset < (*base_buf)->base.size); +} + + +static INLINE enum pipe_error +pb_validate(struct pb_buffer *buf, struct pb_validate *vl, unsigned flags) +{ + assert(buf); + if(!buf) + return PIPE_ERROR; + assert(buf->vtbl->validate); + return buf->vtbl->validate(buf, vl, flags); +} + + +static INLINE void +pb_fence(struct pb_buffer *buf, struct pipe_fence_handle *fence) +{ + assert(buf); + if(!buf) + return; + assert(buf->vtbl->fence); + buf->vtbl->fence(buf, fence); +} + + +static INLINE void +pb_destroy(struct pb_buffer *buf) +{ + assert(buf); + if(!buf) + return; + assert(!pipe_is_referenced(&buf->base.reference)); + buf->vtbl->destroy(buf); +} + +static INLINE void +pb_reference(struct pb_buffer **dst, + struct pb_buffer *src) +{ + struct pb_buffer *old = *dst; + + if (pipe_reference(&(*dst)->base.reference, &src->base.reference)) + pb_destroy( old ); + *dst = src; +} + + +/** + * Utility function to check whether the provided alignment is consistent with + * the requested or not. + */ +static INLINE boolean +pb_check_alignment(pb_size requested, pb_size provided) +{ + if(!requested) + return TRUE; + if(requested > provided) + return FALSE; + if(provided % requested != 0) + return FALSE; + return TRUE; +} + + +/** + * Utility function to check whether the provided alignment is consistent with + * the requested or not. + */ +static INLINE boolean +pb_check_usage(unsigned requested, unsigned provided) +{ + return (requested & provided) == requested ? TRUE : FALSE; +} + + +/** + * Malloc-based buffer to store data that can't be used by the graphics + * hardware. + */ +struct pb_buffer * +pb_malloc_buffer_create(pb_size size, + const struct pb_desc *desc); + + +#ifdef __cplusplus +} +#endif + +#endif /*PB_BUFFER_H_*/ diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c new file mode 100644 index 0000000000..d6cf640582 --- /dev/null +++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c @@ -0,0 +1,1069 @@ +/************************************************************************** + * + * Copyright 2007-2010 VMware, Inc. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +/** + * \file + * Implementation of fenced buffers. + * + * \author Jose Fonseca <jfonseca-at-vmware-dot-com> + * \author Thomas Hellström <thellstrom-at-vmware-dot-com> + */ + + +#include "pipe/p_config.h" + +#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS) +#include <unistd.h> +#include <sched.h> +#endif + +#include "pipe/p_compiler.h" +#include "pipe/p_defines.h" +#include "util/u_debug.h" +#include "os/os_thread.h" +#include "util/u_memory.h" +#include "util/u_double_list.h" + +#include "pb_buffer.h" +#include "pb_buffer_fenced.h" +#include "pb_bufmgr.h" + + + +/** + * Convenience macro (type safe). + */ +#define SUPER(__derived) (&(__derived)->base) + + +struct fenced_manager +{ + struct pb_manager base; + struct pb_manager *provider; + struct pb_fence_ops *ops; + + /** + * Maximum buffer size that can be safely allocated. + */ + pb_size max_buffer_size; + + /** + * Maximum cpu memory we can allocate before we start waiting for the + * GPU to idle. + */ + pb_size max_cpu_total_size; + + /** + * Following members are mutable and protected by this mutex. + */ + pipe_mutex mutex; + + /** + * Fenced buffer list. + * + * All fenced buffers are placed in this listed, ordered from the oldest + * fence to the newest fence. + */ + struct list_head fenced; + pb_size num_fenced; + + struct list_head unfenced; + pb_size num_unfenced; + + /** + * How much temporary CPU memory is being used to hold unvalidated buffers. + */ + pb_size cpu_total_size; +}; + + +/** + * Fenced buffer. + * + * Wrapper around a pipe buffer which adds fencing and reference counting. + */ +struct fenced_buffer +{ + /* + * Immutable members. + */ + + struct pb_buffer base; + struct fenced_manager *mgr; + + /* + * Following members are mutable and protected by fenced_manager::mutex. + */ + + struct list_head head; + + /** + * Buffer with storage. + */ + struct pb_buffer *buffer; + pb_size size; + struct pb_desc desc; + + /** + * Temporary CPU storage data. Used when there isn't enough GPU memory to + * store the buffer. + */ + void *data; + + /** + * A bitmask of PB_USAGE_CPU/GPU_READ/WRITE describing the current + * buffer usage. 
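+ *
+ * The CPU bits are set while the buffer is mapped and cleared when the
+ * last mapping goes away; the GPU bits are set when the buffer is fenced
+ * after validation and cleared once its fence has been signalled.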
+ */ + unsigned flags; + + unsigned mapcount; + + struct pb_validate *vl; + unsigned validation_flags; + + struct pipe_fence_handle *fence; +}; + + +static INLINE struct fenced_manager * +fenced_manager(struct pb_manager *mgr) +{ + assert(mgr); + return (struct fenced_manager *)mgr; +} + + +static INLINE struct fenced_buffer * +fenced_buffer(struct pb_buffer *buf) +{ + assert(buf); + return (struct fenced_buffer *)buf; +} + + +static void +fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf); + +static enum pipe_error +fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr, + struct fenced_buffer *fenced_buf); + +static void +fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf); + +static enum pipe_error +fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr, + struct fenced_buffer *fenced_buf, + boolean wait); + +static enum pipe_error +fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf); + +static enum pipe_error +fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf); + + +/** + * Dump the fenced buffer list. + * + * Useful to understand failures to allocate buffers. + */ +static void +fenced_manager_dump_locked(struct fenced_manager *fenced_mgr) +{ +#ifdef DEBUG + struct pb_fence_ops *ops = fenced_mgr->ops; + struct list_head *curr, *next; + struct fenced_buffer *fenced_buf; + + debug_printf("%10s %7s %8s %7s %10s %s\n", + "buffer", "size", "refcount", "storage", "fence", "signalled"); + + curr = fenced_mgr->unfenced.next; + next = curr->next; + while(curr != &fenced_mgr->unfenced) { + fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head); + assert(!fenced_buf->fence); + debug_printf("%10p %7u %8u %7s\n", + (void *) fenced_buf, + fenced_buf->base.base.size, + p_atomic_read(&fenced_buf->base.base.reference.count), + fenced_buf->buffer ? "gpu" : (fenced_buf->data ? "cpu" : "none")); + curr = next; + next = curr->next; + } + + curr = fenced_mgr->fenced.next; + next = curr->next; + while(curr != &fenced_mgr->fenced) { + int signaled; + fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head); + assert(fenced_buf->buffer); + signaled = ops->fence_signalled(ops, fenced_buf->fence, 0); + debug_printf("%10p %7u %8u %7s %10p %s\n", + (void *) fenced_buf, + fenced_buf->base.base.size, + p_atomic_read(&fenced_buf->base.base.reference.count), + "gpu", + (void *) fenced_buf->fence, + signaled == 0 ? "y" : "n"); + curr = next; + next = curr->next; + } +#else + (void)fenced_mgr; +#endif +} + + +static INLINE void +fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr, + struct fenced_buffer *fenced_buf) +{ + assert(!pipe_is_referenced(&fenced_buf->base.base.reference)); + + assert(!fenced_buf->fence); + assert(fenced_buf->head.prev); + assert(fenced_buf->head.next); + LIST_DEL(&fenced_buf->head); + assert(fenced_mgr->num_unfenced); + --fenced_mgr->num_unfenced; + + fenced_buffer_destroy_gpu_storage_locked(fenced_buf); + fenced_buffer_destroy_cpu_storage_locked(fenced_buf); + + FREE(fenced_buf); +} + + +/** + * Add the buffer to the fenced list. + * + * Reference count should be incremented before calling this function. 
+ */ +static INLINE void +fenced_buffer_add_locked(struct fenced_manager *fenced_mgr, + struct fenced_buffer *fenced_buf) +{ + assert(pipe_is_referenced(&fenced_buf->base.base.reference)); + assert(fenced_buf->flags & PB_USAGE_GPU_READ_WRITE); + assert(fenced_buf->fence); + + p_atomic_inc(&fenced_buf->base.base.reference.count); + + LIST_DEL(&fenced_buf->head); + assert(fenced_mgr->num_unfenced); + --fenced_mgr->num_unfenced; + LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced); + ++fenced_mgr->num_fenced; +} + + +/** + * Remove the buffer from the fenced list, and potentially destroy the buffer + * if the reference count reaches zero. + * + * Returns TRUE if the buffer was detroyed. + */ +static INLINE boolean +fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr, + struct fenced_buffer *fenced_buf) +{ + struct pb_fence_ops *ops = fenced_mgr->ops; + + assert(fenced_buf->fence); + assert(fenced_buf->mgr == fenced_mgr); + + ops->fence_reference(ops, &fenced_buf->fence, NULL); + fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE; + + assert(fenced_buf->head.prev); + assert(fenced_buf->head.next); + + LIST_DEL(&fenced_buf->head); + assert(fenced_mgr->num_fenced); + --fenced_mgr->num_fenced; + + LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced); + ++fenced_mgr->num_unfenced; + + if (p_atomic_dec_zero(&fenced_buf->base.base.reference.count)) { + fenced_buffer_destroy_locked(fenced_mgr, fenced_buf); + return TRUE; + } + + return FALSE; +} + + +/** + * Wait for the fence to expire, and remove it from the fenced list. + * + * This function will release and re-aquire the mutex, so any copy of mutable + * state must be discarded after calling it. + */ +static INLINE enum pipe_error +fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr, + struct fenced_buffer *fenced_buf) +{ + struct pb_fence_ops *ops = fenced_mgr->ops; + enum pipe_error ret = PIPE_ERROR; + +#if 0 + debug_warning("waiting for GPU"); +#endif + + assert(pipe_is_referenced(&fenced_buf->base.base.reference)); + assert(fenced_buf->fence); + + if(fenced_buf->fence) { + struct pipe_fence_handle *fence = NULL; + int finished; + boolean proceed; + + ops->fence_reference(ops, &fence, fenced_buf->fence); + + pipe_mutex_unlock(fenced_mgr->mutex); + + finished = ops->fence_finish(ops, fenced_buf->fence, 0); + + pipe_mutex_lock(fenced_mgr->mutex); + + assert(pipe_is_referenced(&fenced_buf->base.base.reference)); + + /* + * Only proceed if the fence object didn't change in the meanwhile. + * Otherwise assume the work has been already carried out by another + * thread that re-aquired the lock before us. + */ + proceed = fence == fenced_buf->fence ? TRUE : FALSE; + + ops->fence_reference(ops, &fence, NULL); + + if(proceed && finished == 0) { + /* + * Remove from the fenced list + */ + + boolean destroyed; + + destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf); + + /* TODO: remove consequents buffers with the same fence? */ + + assert(!destroyed); + + fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE; + + ret = PIPE_OK; + } + } + + return ret; +} + + +/** + * Remove as many fenced buffers from the fenced list as possible. + * + * Returns TRUE if at least one buffer was removed. 
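+ *
+ * Buffers are examined in fence order (oldest first), so the walk can stop
+ * at the first fence that has not signalled yet; when wait is TRUE, the
+ * first fence found is waited upon before the remaining ones are checked.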
+ */ +static boolean +fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr, + boolean wait) +{ + struct pb_fence_ops *ops = fenced_mgr->ops; + struct list_head *curr, *next; + struct fenced_buffer *fenced_buf; + struct pipe_fence_handle *prev_fence = NULL; + boolean ret = FALSE; + + curr = fenced_mgr->fenced.next; + next = curr->next; + while(curr != &fenced_mgr->fenced) { + fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head); + + if(fenced_buf->fence != prev_fence) { + int signaled; + + if (wait) { + signaled = ops->fence_finish(ops, fenced_buf->fence, 0); + + /* + * Don't return just now. Instead preemptively check if the + * following buffers' fences already expired, without further waits. + */ + wait = FALSE; + } + else { + signaled = ops->fence_signalled(ops, fenced_buf->fence, 0); + } + + if (signaled != 0) { + return ret; + } + + prev_fence = fenced_buf->fence; + } + else { + /* This buffer's fence object is identical to the previous buffer's + * fence object, so no need to check the fence again. + */ + assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0); + } + + fenced_buffer_remove_locked(fenced_mgr, fenced_buf); + + ret = TRUE; + + curr = next; + next = curr->next; + } + + return ret; +} + + +/** + * Try to free some GPU memory by backing it up into CPU memory. + * + * Returns TRUE if at least one buffer was freed. + */ +static boolean +fenced_manager_free_gpu_storage_locked(struct fenced_manager *fenced_mgr) +{ + struct list_head *curr, *next; + struct fenced_buffer *fenced_buf; + + curr = fenced_mgr->unfenced.next; + next = curr->next; + while(curr != &fenced_mgr->unfenced) { + fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head); + + /* + * We can only move storage if the buffer is not mapped and not + * validated. + */ + if(fenced_buf->buffer && + !fenced_buf->mapcount && + !fenced_buf->vl) { + enum pipe_error ret; + + ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf); + if(ret == PIPE_OK) { + ret = fenced_buffer_copy_storage_to_cpu_locked(fenced_buf); + if(ret == PIPE_OK) { + fenced_buffer_destroy_gpu_storage_locked(fenced_buf); + return TRUE; + } + fenced_buffer_destroy_cpu_storage_locked(fenced_buf); + } + } + + curr = next; + next = curr->next; + } + + return FALSE; +} + + +/** + * Destroy CPU storage for this buffer. + */ +static void +fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf) +{ + if(fenced_buf->data) { + align_free(fenced_buf->data); + fenced_buf->data = NULL; + assert(fenced_buf->mgr->cpu_total_size >= fenced_buf->size); + fenced_buf->mgr->cpu_total_size -= fenced_buf->size; + } +} + + +/** + * Create CPU storage for this buffer. + */ +static enum pipe_error +fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr, + struct fenced_buffer *fenced_buf) +{ + assert(!fenced_buf->data); + if(fenced_buf->data) + return PIPE_OK; + + if (fenced_mgr->cpu_total_size + fenced_buf->size > fenced_mgr->max_cpu_total_size) + return PIPE_ERROR_OUT_OF_MEMORY; + + fenced_buf->data = align_malloc(fenced_buf->size, fenced_buf->desc.alignment); + if(!fenced_buf->data) + return PIPE_ERROR_OUT_OF_MEMORY; + + fenced_mgr->cpu_total_size += fenced_buf->size; + + return PIPE_OK; +} + + +/** + * Destroy the GPU storage. + */ +static void +fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf) +{ + if(fenced_buf->buffer) { + pb_reference(&fenced_buf->buffer, NULL); + } +} + + +/** + * Try to create GPU storage for this buffer. 
+ * + * This function is a shorthand around pb_manager::create_buffer for + * fenced_buffer_create_gpu_storage_locked()'s benefit. + */ +static INLINE boolean +fenced_buffer_try_create_gpu_storage_locked(struct fenced_manager *fenced_mgr, + struct fenced_buffer *fenced_buf) +{ + struct pb_manager *provider = fenced_mgr->provider; + + assert(!fenced_buf->buffer); + + fenced_buf->buffer = provider->create_buffer(fenced_mgr->provider, + fenced_buf->size, + &fenced_buf->desc); + return fenced_buf->buffer ? TRUE : FALSE; +} + + +/** + * Create GPU storage for this buffer. + */ +static enum pipe_error +fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr, + struct fenced_buffer *fenced_buf, + boolean wait) +{ + assert(!fenced_buf->buffer); + + /* + * Check for signaled buffers before trying to allocate. + */ + fenced_manager_check_signalled_locked(fenced_mgr, FALSE); + + fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf); + + /* + * Keep trying while there is some sort of progress: + * - fences are expiring, + * - or buffers are being being swapped out from GPU memory into CPU memory. + */ + while(!fenced_buf->buffer && + (fenced_manager_check_signalled_locked(fenced_mgr, FALSE) || + fenced_manager_free_gpu_storage_locked(fenced_mgr))) { + fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf); + } + + if(!fenced_buf->buffer && wait) { + /* + * Same as before, but this time around, wait to free buffers if + * necessary. + */ + while(!fenced_buf->buffer && + (fenced_manager_check_signalled_locked(fenced_mgr, TRUE) || + fenced_manager_free_gpu_storage_locked(fenced_mgr))) { + fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf); + } + } + + if(!fenced_buf->buffer) { + if(0) + fenced_manager_dump_locked(fenced_mgr); + + /* give up */ + return PIPE_ERROR_OUT_OF_MEMORY; + } + + return PIPE_OK; +} + + +static enum pipe_error +fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf) +{ + uint8_t *map; + + assert(fenced_buf->data); + assert(fenced_buf->buffer); + + map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_WRITE); + if(!map) + return PIPE_ERROR; + + memcpy(map, fenced_buf->data, fenced_buf->size); + + pb_unmap(fenced_buf->buffer); + + return PIPE_OK; +} + + +static enum pipe_error +fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf) +{ + const uint8_t *map; + + assert(fenced_buf->data); + assert(fenced_buf->buffer); + + map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_READ); + if(!map) + return PIPE_ERROR; + + memcpy(fenced_buf->data, map, fenced_buf->size); + + pb_unmap(fenced_buf->buffer); + + return PIPE_OK; +} + + +static void +fenced_buffer_destroy(struct pb_buffer *buf) +{ + struct fenced_buffer *fenced_buf = fenced_buffer(buf); + struct fenced_manager *fenced_mgr = fenced_buf->mgr; + + assert(!pipe_is_referenced(&fenced_buf->base.base.reference)); + + pipe_mutex_lock(fenced_mgr->mutex); + + fenced_buffer_destroy_locked(fenced_mgr, fenced_buf); + + pipe_mutex_unlock(fenced_mgr->mutex); +} + + +static void * +fenced_buffer_map(struct pb_buffer *buf, + unsigned flags) +{ + struct fenced_buffer *fenced_buf = fenced_buffer(buf); + struct fenced_manager *fenced_mgr = fenced_buf->mgr; + struct pb_fence_ops *ops = fenced_mgr->ops; + void *map = NULL; + + pipe_mutex_lock(fenced_mgr->mutex); + + assert(!(flags & PB_USAGE_GPU_READ_WRITE)); + + /* + * Serialize writes. 
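+ *
+ * That is, block while the GPU may still be writing to the buffer, or is
+ * reading it while the caller wants to write from the CPU (unless the
+ * caller asked for PB_USAGE_UNSYNCHRONIZED or PB_USAGE_DONTBLOCK).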
+ */ + while((fenced_buf->flags & PB_USAGE_GPU_WRITE) || + ((fenced_buf->flags & PB_USAGE_GPU_READ) && + (flags & PB_USAGE_CPU_WRITE))) { + + /* + * Don't wait for the GPU to finish accessing it, if blocking is forbidden. + */ + if((flags & PB_USAGE_DONTBLOCK) && + ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) { + goto done; + } + + if (flags & PB_USAGE_UNSYNCHRONIZED) { + break; + } + + /* + * Wait for the GPU to finish accessing. This will release and re-acquire + * the mutex, so all copies of mutable state must be discarded. + */ + fenced_buffer_finish_locked(fenced_mgr, fenced_buf); + } + + if(fenced_buf->buffer) { + map = pb_map(fenced_buf->buffer, flags); + } + else { + assert(fenced_buf->data); + map = fenced_buf->data; + } + + if(map) { + ++fenced_buf->mapcount; + fenced_buf->flags |= flags & PB_USAGE_CPU_READ_WRITE; + } + +done: + pipe_mutex_unlock(fenced_mgr->mutex); + + return map; +} + + +static void +fenced_buffer_unmap(struct pb_buffer *buf) +{ + struct fenced_buffer *fenced_buf = fenced_buffer(buf); + struct fenced_manager *fenced_mgr = fenced_buf->mgr; + + pipe_mutex_lock(fenced_mgr->mutex); + + assert(fenced_buf->mapcount); + if(fenced_buf->mapcount) { + if (fenced_buf->buffer) + pb_unmap(fenced_buf->buffer); + --fenced_buf->mapcount; + if(!fenced_buf->mapcount) + fenced_buf->flags &= ~PB_USAGE_CPU_READ_WRITE; + } + + pipe_mutex_unlock(fenced_mgr->mutex); +} + + +static enum pipe_error +fenced_buffer_validate(struct pb_buffer *buf, + struct pb_validate *vl, + unsigned flags) +{ + struct fenced_buffer *fenced_buf = fenced_buffer(buf); + struct fenced_manager *fenced_mgr = fenced_buf->mgr; + enum pipe_error ret; + + pipe_mutex_lock(fenced_mgr->mutex); + + if(!vl) { + /* invalidate */ + fenced_buf->vl = NULL; + fenced_buf->validation_flags = 0; + ret = PIPE_OK; + goto done; + } + + assert(flags & PB_USAGE_GPU_READ_WRITE); + assert(!(flags & ~PB_USAGE_GPU_READ_WRITE)); + flags &= PB_USAGE_GPU_READ_WRITE; + + /* Buffer cannot be validated in two different lists */ + if(fenced_buf->vl && fenced_buf->vl != vl) { + ret = PIPE_ERROR_RETRY; + goto done; + } + + if(fenced_buf->vl == vl && + (fenced_buf->validation_flags & flags) == flags) { + /* Nothing to do -- buffer already validated */ + ret = PIPE_OK; + goto done; + } + + /* + * Create and update GPU storage. 
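+ *
+ * If the buffer currently only has a malloc'ed CPU backing, GPU storage is
+ * created here, the CPU copy is uploaded to it, and the CPU copy is then
+ * released (unless the buffer is still mapped).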
+ */ + if(!fenced_buf->buffer) { + assert(!fenced_buf->mapcount); + + ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE); + if(ret != PIPE_OK) { + goto done; + } + + ret = fenced_buffer_copy_storage_to_gpu_locked(fenced_buf); + if(ret != PIPE_OK) { + fenced_buffer_destroy_gpu_storage_locked(fenced_buf); + goto done; + } + + if(fenced_buf->mapcount) { + debug_printf("warning: validating a buffer while it is still mapped\n"); + } + else { + fenced_buffer_destroy_cpu_storage_locked(fenced_buf); + } + } + + ret = pb_validate(fenced_buf->buffer, vl, flags); + if (ret != PIPE_OK) + goto done; + + fenced_buf->vl = vl; + fenced_buf->validation_flags |= flags; + +done: + pipe_mutex_unlock(fenced_mgr->mutex); + + return ret; +} + + +static void +fenced_buffer_fence(struct pb_buffer *buf, + struct pipe_fence_handle *fence) +{ + struct fenced_buffer *fenced_buf = fenced_buffer(buf); + struct fenced_manager *fenced_mgr = fenced_buf->mgr; + struct pb_fence_ops *ops = fenced_mgr->ops; + + pipe_mutex_lock(fenced_mgr->mutex); + + assert(pipe_is_referenced(&fenced_buf->base.base.reference)); + assert(fenced_buf->buffer); + + if(fence != fenced_buf->fence) { + assert(fenced_buf->vl); + assert(fenced_buf->validation_flags); + + if (fenced_buf->fence) { + boolean destroyed; + destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf); + assert(!destroyed); + } + if (fence) { + ops->fence_reference(ops, &fenced_buf->fence, fence); + fenced_buf->flags |= fenced_buf->validation_flags; + fenced_buffer_add_locked(fenced_mgr, fenced_buf); + } + + pb_fence(fenced_buf->buffer, fence); + + fenced_buf->vl = NULL; + fenced_buf->validation_flags = 0; + } + + pipe_mutex_unlock(fenced_mgr->mutex); +} + + +static void +fenced_buffer_get_base_buffer(struct pb_buffer *buf, + struct pb_buffer **base_buf, + pb_size *offset) +{ + struct fenced_buffer *fenced_buf = fenced_buffer(buf); + struct fenced_manager *fenced_mgr = fenced_buf->mgr; + + pipe_mutex_lock(fenced_mgr->mutex); + + /* + * This should only be called when the buffer is validated. Typically + * when processing relocations. + */ + assert(fenced_buf->vl); + assert(fenced_buf->buffer); + + if(fenced_buf->buffer) + pb_get_base_buffer(fenced_buf->buffer, base_buf, offset); + else { + *base_buf = buf; + *offset = 0; + } + + pipe_mutex_unlock(fenced_mgr->mutex); +} + + +static const struct pb_vtbl +fenced_buffer_vtbl = { + fenced_buffer_destroy, + fenced_buffer_map, + fenced_buffer_unmap, + fenced_buffer_validate, + fenced_buffer_fence, + fenced_buffer_get_base_buffer +}; + + +/** + * Wrap a buffer in a fenced buffer. + */ +static struct pb_buffer * +fenced_bufmgr_create_buffer(struct pb_manager *mgr, + pb_size size, + const struct pb_desc *desc) +{ + struct fenced_manager *fenced_mgr = fenced_manager(mgr); + struct fenced_buffer *fenced_buf; + enum pipe_error ret; + + /* + * Don't stall the GPU, waste time evicting buffers, or waste memory + * trying to create a buffer that will most likely never fit into the + * graphics aperture. 
+ */ + if(size > fenced_mgr->max_buffer_size) { + goto no_buffer; + } + + fenced_buf = CALLOC_STRUCT(fenced_buffer); + if(!fenced_buf) + goto no_buffer; + + pipe_reference_init(&fenced_buf->base.base.reference, 1); + fenced_buf->base.base.alignment = desc->alignment; + fenced_buf->base.base.usage = desc->usage; + fenced_buf->base.base.size = size; + fenced_buf->size = size; + fenced_buf->desc = *desc; + + fenced_buf->base.vtbl = &fenced_buffer_vtbl; + fenced_buf->mgr = fenced_mgr; + + pipe_mutex_lock(fenced_mgr->mutex); + + /* + * Try to create GPU storage without stalling, + */ + ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, FALSE); + + /* + * Attempt to use CPU memory to avoid stalling the GPU. + */ + if(ret != PIPE_OK) { + ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf); + } + + /* + * Create GPU storage, waiting for some to be available. + */ + if(ret != PIPE_OK) { + ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE); + } + + /* + * Give up. + */ + if(ret != PIPE_OK) { + goto no_storage; + } + + assert(fenced_buf->buffer || fenced_buf->data); + + LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced); + ++fenced_mgr->num_unfenced; + pipe_mutex_unlock(fenced_mgr->mutex); + + return &fenced_buf->base; + +no_storage: + pipe_mutex_unlock(fenced_mgr->mutex); + FREE(fenced_buf); +no_buffer: + return NULL; +} + + +static void +fenced_bufmgr_flush(struct pb_manager *mgr) +{ + struct fenced_manager *fenced_mgr = fenced_manager(mgr); + + pipe_mutex_lock(fenced_mgr->mutex); + while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE)) + ; + pipe_mutex_unlock(fenced_mgr->mutex); + + assert(fenced_mgr->provider->flush); + if(fenced_mgr->provider->flush) + fenced_mgr->provider->flush(fenced_mgr->provider); +} + + +static void +fenced_bufmgr_destroy(struct pb_manager *mgr) +{ + struct fenced_manager *fenced_mgr = fenced_manager(mgr); + + pipe_mutex_lock(fenced_mgr->mutex); + + /* Wait on outstanding fences */ + while (fenced_mgr->num_fenced) { + pipe_mutex_unlock(fenced_mgr->mutex); +#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS) + sched_yield(); +#endif + pipe_mutex_lock(fenced_mgr->mutex); + while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE)) + ; + } + +#ifdef DEBUG + /*assert(!fenced_mgr->num_unfenced);*/ +#endif + + pipe_mutex_unlock(fenced_mgr->mutex); + pipe_mutex_destroy(fenced_mgr->mutex); + + if(fenced_mgr->provider) + fenced_mgr->provider->destroy(fenced_mgr->provider); + + fenced_mgr->ops->destroy(fenced_mgr->ops); + + FREE(fenced_mgr); +} + + +struct pb_manager * +fenced_bufmgr_create(struct pb_manager *provider, + struct pb_fence_ops *ops, + pb_size max_buffer_size, + pb_size max_cpu_total_size) +{ + struct fenced_manager *fenced_mgr; + + if(!provider) + return NULL; + + fenced_mgr = CALLOC_STRUCT(fenced_manager); + if (!fenced_mgr) + return NULL; + + fenced_mgr->base.destroy = fenced_bufmgr_destroy; + fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer; + fenced_mgr->base.flush = fenced_bufmgr_flush; + + fenced_mgr->provider = provider; + fenced_mgr->ops = ops; + fenced_mgr->max_buffer_size = max_buffer_size; + fenced_mgr->max_cpu_total_size = max_cpu_total_size; + + LIST_INITHEAD(&fenced_mgr->fenced); + fenced_mgr->num_fenced = 0; + + LIST_INITHEAD(&fenced_mgr->unfenced); + fenced_mgr->num_unfenced = 0; + + pipe_mutex_init(fenced_mgr->mutex); + + return &fenced_mgr->base; +} diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.h 
b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.h new file mode 100644 index 0000000000..004c2b939a --- /dev/null +++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.h @@ -0,0 +1,104 @@ +/************************************************************************** + * + * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +/** + * \file + * Buffer fencing. + * + * "Fenced buffers" is actually a misnomer. They should be referred as + * "fenceable buffers", i.e, buffers that can be fenced, but I couldn't find + * the word "fenceable" in the dictionary. + * + * A "fenced buffer" is a decorator around a normal buffer, which adds two + * special properties: + * - the ability for the destruction to be delayed by a fence; + * - reference counting. + * + * Usually DMA buffers have a life-time that will extend the life-time of its + * handle. The end-of-life is dictated by the fence signalling. + * + * Between the handle's destruction, and the fence signalling, the buffer is + * stored in a fenced buffer list. + * + * \author Jose Fonseca <jrfonseca@tungstengraphics.com> + */ + +#ifndef PB_BUFFER_FENCED_H_ +#define PB_BUFFER_FENCED_H_ + + +#include "util/u_debug.h" + + +#ifdef __cplusplus +extern "C" { +#endif + + +struct pipe_fence_handle; + + +/** + * List of buffers which are awaiting fence signalling. + */ +struct fenced_buffer_list; + + +struct pb_fence_ops +{ + void (*destroy)( struct pb_fence_ops *ops ); + + /** Set ptr = fence, with reference counting */ + void (*fence_reference)( struct pb_fence_ops *ops, + struct pipe_fence_handle **ptr, + struct pipe_fence_handle *fence ); + + /** + * Checks whether the fence has been signalled. + * \param flags driver-specific meaning + * \return zero on success. + */ + int (*fence_signalled)( struct pb_fence_ops *ops, + struct pipe_fence_handle *fence, + unsigned flag ); + + /** + * Wait for the fence to finish. + * \param flags driver-specific meaning + * \return zero on success. 
+ */ + int (*fence_finish)( struct pb_fence_ops *ops, + struct pipe_fence_handle *fence, + unsigned flag ); +}; + + +#ifdef __cplusplus +} +#endif + +#endif /*PB_BUFFER_FENCED_H_*/ diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_malloc.c b/src/gallium/auxiliary/pipebuffer/pb_buffer_malloc.c new file mode 100644 index 0000000000..b706f429be --- /dev/null +++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_malloc.c @@ -0,0 +1,188 @@ +/************************************************************************** + * + * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +/** + * \file + * Implementation of malloc-based buffers to store data that can't be processed + * by the hardware. 
+ * + * \author Jose Fonseca <jrfonseca@tungstengraphics.com> + */ + + +#include "util/u_debug.h" +#include "util/u_memory.h" +#include "pb_buffer.h" +#include "pb_bufmgr.h" + + +struct malloc_buffer +{ + struct pb_buffer base; + void *data; +}; + + +extern const struct pb_vtbl malloc_buffer_vtbl; + +static INLINE struct malloc_buffer * +malloc_buffer(struct pb_buffer *buf) +{ + assert(buf); + if (!buf) + return NULL; + assert(buf->vtbl == &malloc_buffer_vtbl); + return (struct malloc_buffer *)buf; +} + + +static void +malloc_buffer_destroy(struct pb_buffer *buf) +{ + align_free(malloc_buffer(buf)->data); + FREE(buf); +} + + +static void * +malloc_buffer_map(struct pb_buffer *buf, + unsigned flags) +{ + return malloc_buffer(buf)->data; +} + + +static void +malloc_buffer_unmap(struct pb_buffer *buf) +{ + /* No-op */ +} + + +static enum pipe_error +malloc_buffer_validate(struct pb_buffer *buf, + struct pb_validate *vl, + unsigned flags) +{ + assert(0); + return PIPE_ERROR; +} + + +static void +malloc_buffer_fence(struct pb_buffer *buf, + struct pipe_fence_handle *fence) +{ + assert(0); +} + + +static void +malloc_buffer_get_base_buffer(struct pb_buffer *buf, + struct pb_buffer **base_buf, + pb_size *offset) +{ + *base_buf = buf; + *offset = 0; +} + + +const struct pb_vtbl +malloc_buffer_vtbl = { + malloc_buffer_destroy, + malloc_buffer_map, + malloc_buffer_unmap, + malloc_buffer_validate, + malloc_buffer_fence, + malloc_buffer_get_base_buffer +}; + + +struct pb_buffer * +pb_malloc_buffer_create(pb_size size, + const struct pb_desc *desc) +{ + struct malloc_buffer *buf; + + /* TODO: do a single allocation */ + + buf = CALLOC_STRUCT(malloc_buffer); + if(!buf) + return NULL; + + pipe_reference_init(&buf->base.base.reference, 1); + buf->base.base.usage = desc->usage; + buf->base.base.size = size; + buf->base.base.alignment = desc->alignment; + buf->base.vtbl = &malloc_buffer_vtbl; + + buf->data = align_malloc(size, desc->alignment < sizeof(void*) ? sizeof(void*) : desc->alignment); + if(!buf->data) { + FREE(buf); + return NULL; + } + + return &buf->base; +} + + +static struct pb_buffer * +pb_malloc_bufmgr_create_buffer(struct pb_manager *mgr, + pb_size size, + const struct pb_desc *desc) +{ + return pb_malloc_buffer_create(size, desc); +} + + +static void +pb_malloc_bufmgr_flush(struct pb_manager *mgr) +{ + /* No-op */ +} + + +static void +pb_malloc_bufmgr_destroy(struct pb_manager *mgr) +{ + /* No-op */ +} + + +static struct pb_manager +pb_malloc_bufmgr = { + pb_malloc_bufmgr_destroy, + pb_malloc_bufmgr_create_buffer, + pb_malloc_bufmgr_flush +}; + + +struct pb_manager * +pb_malloc_bufmgr_create(void) +{ + return &pb_malloc_bufmgr; +} diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h b/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h new file mode 100644 index 0000000000..cec2524da2 --- /dev/null +++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h @@ -0,0 +1,215 @@ +/************************************************************************** + * + * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +/** + * \file + * Buffer management. + * + * A buffer manager does only one basic thing: it creates buffers. Actually, + * "buffer factory" would probably a more accurate description. + * + * You can chain buffer managers so that you can have a finer grained memory + * management and pooling. + * + * For example, for a simple batch buffer manager you would chain: + * - the native buffer manager, which provides DMA memory from the graphics + * memory space; + * - the pool buffer manager, which keep around a pool of equally sized buffers + * to avoid latency associated with the native buffer manager; + * - the fenced buffer manager, which will delay buffer destruction until the + * the moment the card finishing processing it. + * + * \author Jose Fonseca <jrfonseca@tungstengraphics.com> + */ + +#ifndef PB_BUFMGR_H_ +#define PB_BUFMGR_H_ + + +#include "pipe/p_compiler.h" +#include "pipe/p_defines.h" + + +#ifdef __cplusplus +extern "C" { +#endif + + +struct pb_desc; + + +/** + * Abstract base class for all buffer managers. + */ +struct pb_manager +{ + void + (*destroy)( struct pb_manager *mgr ); + + struct pb_buffer * + (*create_buffer)( struct pb_manager *mgr, + pb_size size, + const struct pb_desc *desc); + + /** + * Flush all temporary-held buffers. + * + * Used mostly to aid debugging memory issues or to clean up resources when + * the drivers are long lived. + */ + void + (*flush)( struct pb_manager *mgr ); +}; + + +/** + * Malloc buffer provider. + * + * Simple wrapper around pb_malloc_buffer_create for convenience. + */ +struct pb_manager * +pb_malloc_bufmgr_create(void); + + +/** + * Static buffer pool sub-allocator. + * + * Manages the allocation of equally sized buffers. It does so by allocating + * a single big buffer and divide it equally sized buffers. + * + * It is meant to manage the allocation of batch buffer pools. + */ +struct pb_manager * +pool_bufmgr_create(struct pb_manager *provider, + pb_size n, pb_size size, + const struct pb_desc *desc); + + +/** + * Static sub-allocator based the old memory manager. + * + * It managers buffers of different sizes. It does so by allocating a buffer + * with the size of the heap, and then using the old mm memory manager to manage + * that heap. 
+ */ +struct pb_manager * +mm_bufmgr_create(struct pb_manager *provider, + pb_size size, pb_size align2); + +/** + * Same as mm_bufmgr_create. + * + * Buffer will be release when the manager is destroyed. + */ +struct pb_manager * +mm_bufmgr_create_from_buffer(struct pb_buffer *buffer, + pb_size size, pb_size align2); + + +/** + * Slab sub-allocator. + */ +struct pb_manager * +pb_slab_manager_create(struct pb_manager *provider, + pb_size bufSize, + pb_size slabSize, + const struct pb_desc *desc); + +/** + * Allow a range of buffer size, by aggregating multiple slabs sub-allocators + * with different bucket sizes. + */ +struct pb_manager * +pb_slab_range_manager_create(struct pb_manager *provider, + pb_size minBufSize, + pb_size maxBufSize, + pb_size slabSize, + const struct pb_desc *desc); + + +/** + * Time-based buffer cache. + * + * This manager keeps a cache of destroyed buffers during a time interval. + */ +struct pb_manager * +pb_cache_manager_create(struct pb_manager *provider, + unsigned usecs); + + +struct pb_fence_ops; + +/** + * Fenced buffer manager. + * + * This manager is just meant for convenience. It wraps the buffers returned + * by another manager in fenced buffers, so that + * + * NOTE: the buffer manager that provides the buffers will be destroyed + * at the same time. + */ +struct pb_manager * +fenced_bufmgr_create(struct pb_manager *provider, + struct pb_fence_ops *ops, + pb_size max_buffer_size, + pb_size max_cpu_total_size); + + +struct pb_manager * +pb_alt_manager_create(struct pb_manager *provider1, + struct pb_manager *provider2); + + +/** + * Ondemand buffer manager. + * + * Buffers are created in malloc'ed memory (fast and cached), and the constents + * is transfered to a buffer from the provider (typically in slow uncached + * memory) when there is an attempt to validate the buffer. + * + * Ideal for situations where one does not know before hand whether a given + * buffer will effectively be used by the hardware or not. + */ +struct pb_manager * +pb_ondemand_manager_create(struct pb_manager *provider); + + +/** + * Debug buffer manager to detect buffer under- and overflows. + * + * Under/overflow sizes should be a multiple of the largest alignment + */ +struct pb_manager * +pb_debug_manager_create(struct pb_manager *provider, + pb_size underflow_size, pb_size overflow_size); + + +#ifdef __cplusplus +} +#endif + +#endif /*PB_BUFMGR_H_*/ diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_alt.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_alt.c new file mode 100644 index 0000000000..f60c836f18 --- /dev/null +++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_alt.c @@ -0,0 +1,120 @@ +/************************************************************************** + * + * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +/** + * \file + * Allocate buffers from two alternative buffer providers. + * + * \author Jose Fonseca <jrfonseca@tungstengraphics.com> + */ + + +#include "pipe/p_compiler.h" +#include "util/u_debug.h" +#include "util/u_memory.h" + +#include "pb_buffer.h" +#include "pb_bufmgr.h" + + +struct pb_alt_manager +{ + struct pb_manager base; + + struct pb_manager *provider1; + struct pb_manager *provider2; +}; + + +static INLINE struct pb_alt_manager * +pb_alt_manager(struct pb_manager *mgr) +{ + assert(mgr); + return (struct pb_alt_manager *)mgr; +} + + +static struct pb_buffer * +pb_alt_manager_create_buffer(struct pb_manager *_mgr, + pb_size size, + const struct pb_desc *desc) +{ + struct pb_alt_manager *mgr = pb_alt_manager(_mgr); + struct pb_buffer *buf; + + buf = mgr->provider1->create_buffer(mgr->provider1, size, desc); + if(buf) + return buf; + + buf = mgr->provider2->create_buffer(mgr->provider2, size, desc); + return buf; +} + + +static void +pb_alt_manager_flush(struct pb_manager *_mgr) +{ + struct pb_alt_manager *mgr = pb_alt_manager(_mgr); + + assert(mgr->provider1->flush); + if(mgr->provider1->flush) + mgr->provider1->flush(mgr->provider1); + + assert(mgr->provider2->flush); + if(mgr->provider2->flush) + mgr->provider2->flush(mgr->provider2); +} + + +static void +pb_alt_manager_destroy(struct pb_manager *mgr) +{ + FREE(mgr); +} + + +struct pb_manager * +pb_alt_manager_create(struct pb_manager *provider1, + struct pb_manager *provider2) +{ + struct pb_alt_manager *mgr; + + if(!provider1 || !provider2) + return NULL; + + mgr = CALLOC_STRUCT(pb_alt_manager); + if (!mgr) + return NULL; + + mgr->base.destroy = pb_alt_manager_destroy; + mgr->base.create_buffer = pb_alt_manager_create_buffer; + mgr->base.flush = pb_alt_manager_flush; + mgr->provider1 = provider1; + mgr->provider2 = provider2; + + return &mgr->base; +} diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c new file mode 100644 index 0000000000..88501e8d72 --- /dev/null +++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c @@ -0,0 +1,394 @@ +/************************************************************************** + * + * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +/** + * \file + * Buffer cache. + * + * \author Jose Fonseca <jrfonseca-at-tungstengraphics-dot-com> + * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com> + */ + + +#include "pipe/p_compiler.h" +#include "util/u_debug.h" +#include "os/os_thread.h" +#include "util/u_memory.h" +#include "util/u_double_list.h" +#include "util/u_time.h" + +#include "pb_buffer.h" +#include "pb_bufmgr.h" + + +/** + * Convenience macro (type safe). + */ +#define SUPER(__derived) (&(__derived)->base) + + +struct pb_cache_manager; + + +/** + * Wrapper around a pipe buffer which adds delayed destruction. + */ +struct pb_cache_buffer +{ + struct pb_buffer base; + + struct pb_buffer *buffer; + struct pb_cache_manager *mgr; + + /** Caching time interval */ + int64_t start, end; + + struct list_head head; +}; + + +struct pb_cache_manager +{ + struct pb_manager base; + + struct pb_manager *provider; + unsigned usecs; + + pipe_mutex mutex; + + struct list_head delayed; + pb_size numDelayed; +}; + + +static INLINE struct pb_cache_buffer * +pb_cache_buffer(struct pb_buffer *buf) +{ + assert(buf); + return (struct pb_cache_buffer *)buf; +} + + +static INLINE struct pb_cache_manager * +pb_cache_manager(struct pb_manager *mgr) +{ + assert(mgr); + return (struct pb_cache_manager *)mgr; +} + + +/** + * Actually destroy the buffer. + */ +static INLINE void +_pb_cache_buffer_destroy(struct pb_cache_buffer *buf) +{ + struct pb_cache_manager *mgr = buf->mgr; + + LIST_DEL(&buf->head); + assert(mgr->numDelayed); + --mgr->numDelayed; + assert(!pipe_is_referenced(&buf->base.base.reference)); + pb_reference(&buf->buffer, NULL); + FREE(buf); +} + + +/** + * Free as many cache buffers from the list head as possible. 
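+ *
+ * The delayed list is ordered by expiration time, so the scan can stop at
+ * the first buffer whose caching interval has not elapsed yet.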
+ */ +static void +_pb_cache_buffer_list_check_free(struct pb_cache_manager *mgr) +{ + struct list_head *curr, *next; + struct pb_cache_buffer *buf; + int64_t now; + + now = os_time_get(); + + curr = mgr->delayed.next; + next = curr->next; + while(curr != &mgr->delayed) { + buf = LIST_ENTRY(struct pb_cache_buffer, curr, head); + + if(!os_time_timeout(buf->start, buf->end, now)) + break; + + _pb_cache_buffer_destroy(buf); + + curr = next; + next = curr->next; + } +} + + +static void +pb_cache_buffer_destroy(struct pb_buffer *_buf) +{ + struct pb_cache_buffer *buf = pb_cache_buffer(_buf); + struct pb_cache_manager *mgr = buf->mgr; + + pipe_mutex_lock(mgr->mutex); + assert(!pipe_is_referenced(&buf->base.base.reference)); + + _pb_cache_buffer_list_check_free(mgr); + + buf->start = os_time_get(); + buf->end = buf->start + mgr->usecs; + LIST_ADDTAIL(&buf->head, &mgr->delayed); + ++mgr->numDelayed; + pipe_mutex_unlock(mgr->mutex); +} + + +static void * +pb_cache_buffer_map(struct pb_buffer *_buf, + unsigned flags) +{ + struct pb_cache_buffer *buf = pb_cache_buffer(_buf); + return pb_map(buf->buffer, flags); +} + + +static void +pb_cache_buffer_unmap(struct pb_buffer *_buf) +{ + struct pb_cache_buffer *buf = pb_cache_buffer(_buf); + pb_unmap(buf->buffer); +} + + +static enum pipe_error +pb_cache_buffer_validate(struct pb_buffer *_buf, + struct pb_validate *vl, + unsigned flags) +{ + struct pb_cache_buffer *buf = pb_cache_buffer(_buf); + return pb_validate(buf->buffer, vl, flags); +} + + +static void +pb_cache_buffer_fence(struct pb_buffer *_buf, + struct pipe_fence_handle *fence) +{ + struct pb_cache_buffer *buf = pb_cache_buffer(_buf); + pb_fence(buf->buffer, fence); +} + + +static void +pb_cache_buffer_get_base_buffer(struct pb_buffer *_buf, + struct pb_buffer **base_buf, + pb_size *offset) +{ + struct pb_cache_buffer *buf = pb_cache_buffer(_buf); + pb_get_base_buffer(buf->buffer, base_buf, offset); +} + + +const struct pb_vtbl +pb_cache_buffer_vtbl = { + pb_cache_buffer_destroy, + pb_cache_buffer_map, + pb_cache_buffer_unmap, + pb_cache_buffer_validate, + pb_cache_buffer_fence, + pb_cache_buffer_get_base_buffer +}; + + +static INLINE boolean +pb_cache_is_buffer_compat(struct pb_cache_buffer *buf, + pb_size size, + const struct pb_desc *desc) +{ + void *map; + + if(buf->base.base.size < size) + return FALSE; + + /* be lenient with size */ + if(buf->base.base.size >= 2*size) + return FALSE; + + if(!pb_check_alignment(desc->alignment, buf->base.base.alignment)) + return FALSE; + + if(!pb_check_usage(desc->usage, buf->base.base.usage)) + return FALSE; + + map = pb_map(buf->buffer, PB_USAGE_DONTBLOCK); + if (!map) { + return FALSE; + } + + pb_unmap(buf->buffer); + + return TRUE; +} + + +static struct pb_buffer * +pb_cache_manager_create_buffer(struct pb_manager *_mgr, + pb_size size, + const struct pb_desc *desc) +{ + struct pb_cache_manager *mgr = pb_cache_manager(_mgr); + struct pb_cache_buffer *buf; + struct pb_cache_buffer *curr_buf; + struct list_head *curr, *next; + int64_t now; + + pipe_mutex_lock(mgr->mutex); + + buf = NULL; + curr = mgr->delayed.next; + next = curr->next; + + /* search in the expired buffers, freeing them in the process */ + now = os_time_get(); + while(curr != &mgr->delayed) { + curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head); + if(!buf && pb_cache_is_buffer_compat(curr_buf, size, desc)) + buf = curr_buf; + else if(os_time_timeout(curr_buf->start, curr_buf->end, now)) + _pb_cache_buffer_destroy(curr_buf); + else + /* This buffer (and all hereafter) are still hot 
in cache */ + break; + curr = next; + next = curr->next; + } + + /* keep searching in the hot buffers */ + if(!buf) { + while(curr != &mgr->delayed) { + curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head); + if(pb_cache_is_buffer_compat(curr_buf, size, desc)) { + buf = curr_buf; + break; + } + /* no need to check the timeout here */ + curr = next; + next = curr->next; + } + } + + if(buf) { + LIST_DEL(&buf->head); + pipe_mutex_unlock(mgr->mutex); + /* Increase refcount */ + pipe_reference_init(&buf->base.base.reference, 1); + return &buf->base; + } + + pipe_mutex_unlock(mgr->mutex); + + buf = CALLOC_STRUCT(pb_cache_buffer); + if(!buf) + return NULL; + + buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc); + if(!buf->buffer) { + FREE(buf); + return NULL; + } + + assert(pipe_is_referenced(&buf->buffer->base.reference)); + assert(pb_check_alignment(desc->alignment, buf->buffer->base.alignment)); + assert(pb_check_usage(desc->usage, buf->buffer->base.usage)); + assert(buf->buffer->base.size >= size); + + pipe_reference_init(&buf->base.base.reference, 1); + buf->base.base.alignment = buf->buffer->base.alignment; + buf->base.base.usage = buf->buffer->base.usage; + buf->base.base.size = buf->buffer->base.size; + + buf->base.vtbl = &pb_cache_buffer_vtbl; + buf->mgr = mgr; + + return &buf->base; +} + + +static void +pb_cache_manager_flush(struct pb_manager *_mgr) +{ + struct pb_cache_manager *mgr = pb_cache_manager(_mgr); + struct list_head *curr, *next; + struct pb_cache_buffer *buf; + + pipe_mutex_lock(mgr->mutex); + curr = mgr->delayed.next; + next = curr->next; + while(curr != &mgr->delayed) { + buf = LIST_ENTRY(struct pb_cache_buffer, curr, head); + _pb_cache_buffer_destroy(buf); + curr = next; + next = curr->next; + } + pipe_mutex_unlock(mgr->mutex); + + assert(mgr->provider->flush); + if(mgr->provider->flush) + mgr->provider->flush(mgr->provider); +} + + +static void +pb_cache_manager_destroy(struct pb_manager *mgr) +{ + pb_cache_manager_flush(mgr); + FREE(mgr); +} + + +struct pb_manager * +pb_cache_manager_create(struct pb_manager *provider, + unsigned usecs) +{ + struct pb_cache_manager *mgr; + + if(!provider) + return NULL; + + mgr = CALLOC_STRUCT(pb_cache_manager); + if (!mgr) + return NULL; + + mgr->base.destroy = pb_cache_manager_destroy; + mgr->base.create_buffer = pb_cache_manager_create_buffer; + mgr->base.flush = pb_cache_manager_flush; + mgr->provider = provider; + mgr->usecs = usecs; + LIST_INITHEAD(&mgr->delayed); + mgr->numDelayed = 0; + pipe_mutex_init(mgr->mutex); + + return &mgr->base; +} diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c new file mode 100644 index 0000000000..0dc5b31a75 --- /dev/null +++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c @@ -0,0 +1,499 @@ +/************************************************************************** + * + * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. 
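
The cache manager implemented above (pb_bufmgr_cache.c) parks destroyed buffers
on a delayed list for "usecs" microseconds and hands them back to later
compatible requests before falling through to its provider. A minimal usage
sketch follows; "winsys_mgr" stands for a hypothetical provider created
elsewhere by the winsys, and the timeout, size and alignment values are
illustrative only, not something this file prescribes.

    #include <string.h>

    #include "pb_buffer.h"
    #include "pb_bufmgr.h"

    /* Sketch: stack the caching manager on top of an existing provider. */
    static struct pb_buffer *
    cached_alloc_example(struct pb_manager *winsys_mgr)
    {
       struct pb_manager *cache_mgr;
       struct pb_desc desc;
       struct pb_buffer *buf;

       /* Buffers destroyed less than 0.25 s ago may be recycled. */
       cache_mgr = pb_cache_manager_create(winsys_mgr, 250 * 1000);
       if (!cache_mgr)
          return NULL;

       memset(&desc, 0, sizeof(desc));
       desc.alignment = 64;
       desc.usage = PB_USAGE_GPU_READ_WRITE;   /* illustrative usage mask */

       /* The first allocation comes from the provider; an identical request
        * made shortly after this buffer is destroyed would be served from
        * the delayed list instead. */
       buf = cache_mgr->create_buffer(cache_mgr, 64 * 1024, &desc);

       /* In real code cache_mgr would live as long as the context; it is
        * created locally here only to keep the sketch short. */
       return buf;
    }

Note that pb_cache_is_buffer_compat() is deliberately lenient: a recycled
buffer may be anywhere between the requested size and twice that size.
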
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Debug buffer manager to detect buffer under- and overflows.
+ *
+ * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+
+#include "pipe/p_compiler.h"
+#include "util/u_debug.h"
+#include "os/os_thread.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "util/u_double_list.h"
+#include "util/u_time.h"
+#include "util/u_debug_stack.h"
+
+#include "pb_buffer.h"
+#include "pb_bufmgr.h"
+
+
+#ifdef DEBUG
+
+
+#define PB_DEBUG_CREATE_BACKTRACE 8
+#define PB_DEBUG_MAP_BACKTRACE 8
+
+
+/**
+ * Convenience macro (type safe).
+ */
+#define SUPER(__derived) (&(__derived)->base)
+
+
+struct pb_debug_manager;
+
+
+/**
+ * Wrapper around a pipe buffer which adds under- and overflow detection.
+ */ +struct pb_debug_buffer +{ + struct pb_buffer base; + + struct pb_buffer *buffer; + struct pb_debug_manager *mgr; + + pb_size underflow_size; + pb_size overflow_size; + + struct debug_stack_frame create_backtrace[PB_DEBUG_CREATE_BACKTRACE]; + + pipe_mutex mutex; + unsigned map_count; + struct debug_stack_frame map_backtrace[PB_DEBUG_MAP_BACKTRACE]; + + struct list_head head; +}; + + +struct pb_debug_manager +{ + struct pb_manager base; + + struct pb_manager *provider; + + pb_size underflow_size; + pb_size overflow_size; + + pipe_mutex mutex; + struct list_head list; +}; + + +static INLINE struct pb_debug_buffer * +pb_debug_buffer(struct pb_buffer *buf) +{ + assert(buf); + return (struct pb_debug_buffer *)buf; +} + + +static INLINE struct pb_debug_manager * +pb_debug_manager(struct pb_manager *mgr) +{ + assert(mgr); + return (struct pb_debug_manager *)mgr; +} + + +static const uint8_t random_pattern[32] = { + 0xaf, 0xcf, 0xa5, 0xa2, 0xc2, 0x63, 0x15, 0x1a, + 0x7e, 0xe2, 0x7e, 0x84, 0x15, 0x49, 0xa2, 0x1e, + 0x49, 0x63, 0xf5, 0x52, 0x74, 0x66, 0x9e, 0xc4, + 0x6d, 0xcf, 0x2c, 0x4a, 0x74, 0xe6, 0xfd, 0x94 +}; + + +static INLINE void +fill_random_pattern(uint8_t *dst, pb_size size) +{ + pb_size i = 0; + while(size--) { + *dst++ = random_pattern[i++]; + i &= sizeof(random_pattern) - 1; + } +} + + +static INLINE boolean +check_random_pattern(const uint8_t *dst, pb_size size, + pb_size *min_ofs, pb_size *max_ofs) +{ + boolean result = TRUE; + pb_size i; + *min_ofs = size; + *max_ofs = 0; + for(i = 0; i < size; ++i) { + if(*dst++ != random_pattern[i % sizeof(random_pattern)]) { + *min_ofs = MIN2(*min_ofs, i); + *max_ofs = MAX2(*max_ofs, i); + result = FALSE; + } + } + return result; +} + + +static void +pb_debug_buffer_fill(struct pb_debug_buffer *buf) +{ + uint8_t *map; + + map = pb_map(buf->buffer, PB_USAGE_CPU_WRITE); + assert(map); + if(map) { + fill_random_pattern(map, buf->underflow_size); + fill_random_pattern(map + buf->underflow_size + buf->base.base.size, + buf->overflow_size); + pb_unmap(buf->buffer); + } +} + + +/** + * Check for under/over flows. + * + * Should be called with the buffer unmaped. + */ +static void +pb_debug_buffer_check(struct pb_debug_buffer *buf) +{ + uint8_t *map; + + map = pb_map(buf->buffer, + PB_USAGE_CPU_READ | + PB_USAGE_UNSYNCHRONIZED); + assert(map); + if(map) { + boolean underflow, overflow; + pb_size min_ofs, max_ofs; + + underflow = !check_random_pattern(map, buf->underflow_size, + &min_ofs, &max_ofs); + if(underflow) { + debug_printf("buffer underflow (offset -%u%s to -%u bytes) detected\n", + buf->underflow_size - min_ofs, + min_ofs == 0 ? "+" : "", + buf->underflow_size - max_ofs); + } + + overflow = !check_random_pattern(map + buf->underflow_size + buf->base.base.size, + buf->overflow_size, + &min_ofs, &max_ofs); + if(overflow) { + debug_printf("buffer overflow (size %u plus offset %u to %u%s bytes) detected\n", + buf->base.base.size, + min_ofs, + max_ofs, + max_ofs == buf->overflow_size - 1 ? 
"+" : ""); + } + + if(underflow || overflow) + debug_backtrace_dump(buf->create_backtrace, PB_DEBUG_CREATE_BACKTRACE); + + debug_assert(!underflow && !overflow); + + /* re-fill if not aborted */ + if(underflow) + fill_random_pattern(map, buf->underflow_size); + if(overflow) + fill_random_pattern(map + buf->underflow_size + buf->base.base.size, + buf->overflow_size); + + pb_unmap(buf->buffer); + } +} + + +static void +pb_debug_buffer_destroy(struct pb_buffer *_buf) +{ + struct pb_debug_buffer *buf = pb_debug_buffer(_buf); + struct pb_debug_manager *mgr = buf->mgr; + + assert(!pipe_is_referenced(&buf->base.base.reference)); + + pb_debug_buffer_check(buf); + + pipe_mutex_lock(mgr->mutex); + LIST_DEL(&buf->head); + pipe_mutex_unlock(mgr->mutex); + + pipe_mutex_destroy(buf->mutex); + + pb_reference(&buf->buffer, NULL); + FREE(buf); +} + + +static void * +pb_debug_buffer_map(struct pb_buffer *_buf, + unsigned flags) +{ + struct pb_debug_buffer *buf = pb_debug_buffer(_buf); + void *map; + + pb_debug_buffer_check(buf); + + map = pb_map(buf->buffer, flags); + if(!map) + return NULL; + + if(map) { + pipe_mutex_lock(buf->mutex); + ++buf->map_count; + debug_backtrace_capture(buf->map_backtrace, 1, PB_DEBUG_MAP_BACKTRACE); + pipe_mutex_unlock(buf->mutex); + } + + return (uint8_t *)map + buf->underflow_size; +} + + +static void +pb_debug_buffer_unmap(struct pb_buffer *_buf) +{ + struct pb_debug_buffer *buf = pb_debug_buffer(_buf); + + pipe_mutex_lock(buf->mutex); + assert(buf->map_count); + if(buf->map_count) + --buf->map_count; + pipe_mutex_unlock(buf->mutex); + + pb_unmap(buf->buffer); + + pb_debug_buffer_check(buf); +} + + +static void +pb_debug_buffer_get_base_buffer(struct pb_buffer *_buf, + struct pb_buffer **base_buf, + pb_size *offset) +{ + struct pb_debug_buffer *buf = pb_debug_buffer(_buf); + pb_get_base_buffer(buf->buffer, base_buf, offset); + *offset += buf->underflow_size; +} + + +static enum pipe_error +pb_debug_buffer_validate(struct pb_buffer *_buf, + struct pb_validate *vl, + unsigned flags) +{ + struct pb_debug_buffer *buf = pb_debug_buffer(_buf); + + pipe_mutex_lock(buf->mutex); + if(buf->map_count) { + debug_printf("%s: attempting to validate a mapped buffer\n", __FUNCTION__); + debug_printf("last map backtrace is\n"); + debug_backtrace_dump(buf->map_backtrace, PB_DEBUG_MAP_BACKTRACE); + } + pipe_mutex_unlock(buf->mutex); + + pb_debug_buffer_check(buf); + + return pb_validate(buf->buffer, vl, flags); +} + + +static void +pb_debug_buffer_fence(struct pb_buffer *_buf, + struct pipe_fence_handle *fence) +{ + struct pb_debug_buffer *buf = pb_debug_buffer(_buf); + pb_fence(buf->buffer, fence); +} + + +const struct pb_vtbl +pb_debug_buffer_vtbl = { + pb_debug_buffer_destroy, + pb_debug_buffer_map, + pb_debug_buffer_unmap, + pb_debug_buffer_validate, + pb_debug_buffer_fence, + pb_debug_buffer_get_base_buffer +}; + + +static void +pb_debug_manager_dump(struct pb_debug_manager *mgr) +{ + struct list_head *curr, *next; + struct pb_debug_buffer *buf; + + pipe_mutex_lock(mgr->mutex); + + curr = mgr->list.next; + next = curr->next; + while(curr != &mgr->list) { + buf = LIST_ENTRY(struct pb_debug_buffer, curr, head); + + debug_printf("buffer = %p\n", (void *) buf); + debug_printf(" .size = 0x%x\n", buf->base.base.size); + debug_backtrace_dump(buf->create_backtrace, PB_DEBUG_CREATE_BACKTRACE); + + curr = next; + next = curr->next; + } + + pipe_mutex_unlock(mgr->mutex); +} + + +static struct pb_buffer * +pb_debug_manager_create_buffer(struct pb_manager *_mgr, + pb_size size, + const struct pb_desc 
*desc) +{ + struct pb_debug_manager *mgr = pb_debug_manager(_mgr); + struct pb_debug_buffer *buf; + struct pb_desc real_desc; + pb_size real_size; + + assert(size); + assert(desc->alignment); + + buf = CALLOC_STRUCT(pb_debug_buffer); + if(!buf) + return NULL; + + real_size = mgr->underflow_size + size + mgr->overflow_size; + real_desc = *desc; + real_desc.usage |= PB_USAGE_CPU_WRITE; + real_desc.usage |= PB_USAGE_CPU_READ; + + buf->buffer = mgr->provider->create_buffer(mgr->provider, + real_size, + &real_desc); + if(!buf->buffer) { + FREE(buf); +#if 0 + pipe_mutex_lock(mgr->mutex); + debug_printf("%s: failed to create buffer\n", __FUNCTION__); + if(!LIST_IS_EMPTY(&mgr->list)) + pb_debug_manager_dump(mgr); + pipe_mutex_unlock(mgr->mutex); +#endif + return NULL; + } + + assert(pipe_is_referenced(&buf->buffer->base.reference)); + assert(pb_check_alignment(real_desc.alignment, buf->buffer->base.alignment)); + assert(pb_check_usage(real_desc.usage, buf->buffer->base.usage)); + assert(buf->buffer->base.size >= real_size); + + pipe_reference_init(&buf->base.base.reference, 1); + buf->base.base.alignment = desc->alignment; + buf->base.base.usage = desc->usage; + buf->base.base.size = size; + + buf->base.vtbl = &pb_debug_buffer_vtbl; + buf->mgr = mgr; + + buf->underflow_size = mgr->underflow_size; + buf->overflow_size = buf->buffer->base.size - buf->underflow_size - size; + + debug_backtrace_capture(buf->create_backtrace, 1, PB_DEBUG_CREATE_BACKTRACE); + + pb_debug_buffer_fill(buf); + + pipe_mutex_init(buf->mutex); + + pipe_mutex_lock(mgr->mutex); + LIST_ADDTAIL(&buf->head, &mgr->list); + pipe_mutex_unlock(mgr->mutex); + + return &buf->base; +} + + +static void +pb_debug_manager_flush(struct pb_manager *_mgr) +{ + struct pb_debug_manager *mgr = pb_debug_manager(_mgr); + assert(mgr->provider->flush); + if(mgr->provider->flush) + mgr->provider->flush(mgr->provider); +} + + +static void +pb_debug_manager_destroy(struct pb_manager *_mgr) +{ + struct pb_debug_manager *mgr = pb_debug_manager(_mgr); + + pipe_mutex_lock(mgr->mutex); + if(!LIST_IS_EMPTY(&mgr->list)) { + debug_printf("%s: unfreed buffers\n", __FUNCTION__); + pb_debug_manager_dump(mgr); + } + pipe_mutex_unlock(mgr->mutex); + + pipe_mutex_destroy(mgr->mutex); + mgr->provider->destroy(mgr->provider); + FREE(mgr); +} + + +struct pb_manager * +pb_debug_manager_create(struct pb_manager *provider, + pb_size underflow_size, pb_size overflow_size) +{ + struct pb_debug_manager *mgr; + + if(!provider) + return NULL; + + mgr = CALLOC_STRUCT(pb_debug_manager); + if (!mgr) + return NULL; + + mgr->base.destroy = pb_debug_manager_destroy; + mgr->base.create_buffer = pb_debug_manager_create_buffer; + mgr->base.flush = pb_debug_manager_flush; + mgr->provider = provider; + mgr->underflow_size = underflow_size; + mgr->overflow_size = overflow_size; + + pipe_mutex_init(mgr->mutex); + LIST_INITHEAD(&mgr->list); + + return &mgr->base; +} + + +#else /* !DEBUG */ + + +struct pb_manager * +pb_debug_manager_create(struct pb_manager *provider, + pb_size underflow_size, pb_size overflow_size) +{ + return provider; +} + + +#endif /* !DEBUG */ diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c new file mode 100644 index 0000000000..faf7c35267 --- /dev/null +++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c @@ -0,0 +1,318 @@ +/************************************************************************** + * + * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. 
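
pb_bufmgr_debug.c above pads each allocation with a byte pattern on both sides
and re-checks that pattern on map, unmap, validate and destroy, dumping the
backtrace captured at creation time when it finds corruption. Wrapping an
existing provider is a one-liner; in this sketch "winsys_mgr" is again a
hypothetical provider and the 4096-byte margins are an arbitrary choice.

    #include "pb_buffer.h"
    #include "pb_bufmgr.h"

    /* Sketch: guard every buffer with 4 KiB under/overflow margins.  In
     * non-DEBUG builds pb_debug_manager_create() simply returns the provider,
     * so the wrapper costs nothing in release builds. */
    static struct pb_manager *
    wrap_with_guard_margins(struct pb_manager *winsys_mgr)
    {
       return pb_debug_manager_create(winsys_mgr, 4096, 4096);
    }
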
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +/** + * \file + * Buffer manager using the old texture memory manager. + * + * \author Jose Fonseca <jrfonseca@tungstengraphics.com> + */ + + +#include "pipe/p_defines.h" +#include "util/u_debug.h" +#include "os/os_thread.h" +#include "util/u_memory.h" +#include "util/u_double_list.h" +#include "util/u_mm.h" +#include "pb_buffer.h" +#include "pb_bufmgr.h" + + +/** + * Convenience macro (type safe). + */ +#define SUPER(__derived) (&(__derived)->base) + + +struct mm_pb_manager +{ + struct pb_manager base; + + pipe_mutex mutex; + + pb_size size; + struct mem_block *heap; + + pb_size align2; + + struct pb_buffer *buffer; + void *map; +}; + + +static INLINE struct mm_pb_manager * +mm_pb_manager(struct pb_manager *mgr) +{ + assert(mgr); + return (struct mm_pb_manager *)mgr; +} + + +struct mm_buffer +{ + struct pb_buffer base; + + struct mm_pb_manager *mgr; + + struct mem_block *block; +}; + + +static INLINE struct mm_buffer * +mm_buffer(struct pb_buffer *buf) +{ + assert(buf); + return (struct mm_buffer *)buf; +} + + +static void +mm_buffer_destroy(struct pb_buffer *buf) +{ + struct mm_buffer *mm_buf = mm_buffer(buf); + struct mm_pb_manager *mm = mm_buf->mgr; + + assert(!pipe_is_referenced(&mm_buf->base.base.reference)); + + pipe_mutex_lock(mm->mutex); + u_mmFreeMem(mm_buf->block); + FREE(mm_buf); + pipe_mutex_unlock(mm->mutex); +} + + +static void * +mm_buffer_map(struct pb_buffer *buf, + unsigned flags) +{ + struct mm_buffer *mm_buf = mm_buffer(buf); + struct mm_pb_manager *mm = mm_buf->mgr; + + return (unsigned char *) mm->map + mm_buf->block->ofs; +} + + +static void +mm_buffer_unmap(struct pb_buffer *buf) +{ + /* No-op */ +} + + +static enum pipe_error +mm_buffer_validate(struct pb_buffer *buf, + struct pb_validate *vl, + unsigned flags) +{ + struct mm_buffer *mm_buf = mm_buffer(buf); + struct mm_pb_manager *mm = mm_buf->mgr; + return pb_validate(mm->buffer, vl, flags); +} + + +static void +mm_buffer_fence(struct pb_buffer *buf, + struct pipe_fence_handle *fence) +{ + struct mm_buffer *mm_buf = mm_buffer(buf); + struct mm_pb_manager *mm = mm_buf->mgr; + pb_fence(mm->buffer, fence); +} + + +static void +mm_buffer_get_base_buffer(struct pb_buffer *buf, + struct pb_buffer **base_buf, + pb_size *offset) +{ + struct mm_buffer *mm_buf = mm_buffer(buf); + struct mm_pb_manager *mm = mm_buf->mgr; + 
pb_get_base_buffer(mm->buffer, base_buf, offset); + *offset += mm_buf->block->ofs; +} + + +static const struct pb_vtbl +mm_buffer_vtbl = { + mm_buffer_destroy, + mm_buffer_map, + mm_buffer_unmap, + mm_buffer_validate, + mm_buffer_fence, + mm_buffer_get_base_buffer +}; + + +static struct pb_buffer * +mm_bufmgr_create_buffer(struct pb_manager *mgr, + pb_size size, + const struct pb_desc *desc) +{ + struct mm_pb_manager *mm = mm_pb_manager(mgr); + struct mm_buffer *mm_buf; + + /* We don't handle alignments larger then the one initially setup */ + assert(pb_check_alignment(desc->alignment, (pb_size)1 << mm->align2)); + if(!pb_check_alignment(desc->alignment, (pb_size)1 << mm->align2)) + return NULL; + + pipe_mutex_lock(mm->mutex); + + mm_buf = CALLOC_STRUCT(mm_buffer); + if (!mm_buf) { + pipe_mutex_unlock(mm->mutex); + return NULL; + } + + pipe_reference_init(&mm_buf->base.base.reference, 1); + mm_buf->base.base.alignment = desc->alignment; + mm_buf->base.base.usage = desc->usage; + mm_buf->base.base.size = size; + + mm_buf->base.vtbl = &mm_buffer_vtbl; + + mm_buf->mgr = mm; + + mm_buf->block = u_mmAllocMem(mm->heap, (int)size, (int)mm->align2, 0); + if(!mm_buf->block) { +#if 0 + debug_printf("warning: heap full\n"); + mmDumpMemInfo(mm->heap); +#endif + FREE(mm_buf); + pipe_mutex_unlock(mm->mutex); + return NULL; + } + + /* Some sanity checks */ + assert(0 <= (pb_size)mm_buf->block->ofs && (pb_size)mm_buf->block->ofs < mm->size); + assert(size <= (pb_size)mm_buf->block->size && (pb_size)mm_buf->block->ofs + (pb_size)mm_buf->block->size <= mm->size); + + pipe_mutex_unlock(mm->mutex); + return SUPER(mm_buf); +} + + +static void +mm_bufmgr_flush(struct pb_manager *mgr) +{ + /* No-op */ +} + + +static void +mm_bufmgr_destroy(struct pb_manager *mgr) +{ + struct mm_pb_manager *mm = mm_pb_manager(mgr); + + pipe_mutex_lock(mm->mutex); + + u_mmDestroy(mm->heap); + + pb_unmap(mm->buffer); + pb_reference(&mm->buffer, NULL); + + pipe_mutex_unlock(mm->mutex); + + FREE(mgr); +} + + +struct pb_manager * +mm_bufmgr_create_from_buffer(struct pb_buffer *buffer, + pb_size size, pb_size align2) +{ + struct mm_pb_manager *mm; + + if(!buffer) + return NULL; + + mm = CALLOC_STRUCT(mm_pb_manager); + if (!mm) + return NULL; + + mm->base.destroy = mm_bufmgr_destroy; + mm->base.create_buffer = mm_bufmgr_create_buffer; + mm->base.flush = mm_bufmgr_flush; + + mm->size = size; + mm->align2 = align2; /* 64-byte alignment */ + + pipe_mutex_init(mm->mutex); + + mm->buffer = buffer; + + mm->map = pb_map(mm->buffer, + PB_USAGE_CPU_READ | + PB_USAGE_CPU_WRITE); + if(!mm->map) + goto failure; + + mm->heap = u_mmInit(0, (int)size); + if (!mm->heap) + goto failure; + + return SUPER(mm); + +failure: +if(mm->heap) + u_mmDestroy(mm->heap); + if(mm->map) + pb_unmap(mm->buffer); + if(mm) + FREE(mm); + return NULL; +} + + +struct pb_manager * +mm_bufmgr_create(struct pb_manager *provider, + pb_size size, pb_size align2) +{ + struct pb_buffer *buffer; + struct pb_manager *mgr; + struct pb_desc desc; + + if(!provider) + return NULL; + + memset(&desc, 0, sizeof(desc)); + desc.alignment = 1 << align2; + + buffer = provider->create_buffer(provider, size, &desc); + if (!buffer) + return NULL; + + mgr = mm_bufmgr_create_from_buffer(buffer, size, align2); + if (!mgr) { + pb_reference(&buffer, NULL); + return NULL; + } + + return mgr; +} diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_ondemand.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_ondemand.c new file mode 100644 index 0000000000..31f1ebbeb7 --- /dev/null +++ 
b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_ondemand.c @@ -0,0 +1,305 @@ +/************************************************************************** + * + * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +/** + * @file + * A variation of malloc buffers which get transferred to real graphics memory + * when there is an attempt to validate them. + * + * @author Jose Fonseca <jrfonseca@tungstengraphics.com> + */ + + +#include "util/u_debug.h" +#include "util/u_memory.h" +#include "pb_buffer.h" +#include "pb_bufmgr.h" + + +struct pb_ondemand_manager; + + +struct pb_ondemand_buffer +{ + struct pb_buffer base; + + struct pb_ondemand_manager *mgr; + + /** Regular malloc'ed memory */ + void *data; + unsigned mapcount; + + /** Real buffer */ + struct pb_buffer *buffer; + pb_size size; + struct pb_desc desc; +}; + + +struct pb_ondemand_manager +{ + struct pb_manager base; + + struct pb_manager *provider; +}; + + +extern const struct pb_vtbl pb_ondemand_buffer_vtbl; + +static INLINE struct pb_ondemand_buffer * +pb_ondemand_buffer(struct pb_buffer *buf) +{ + assert(buf); + if (!buf) + return NULL; + assert(buf->vtbl == &pb_ondemand_buffer_vtbl); + return (struct pb_ondemand_buffer *)buf; +} + +static INLINE struct pb_ondemand_manager * +pb_ondemand_manager(struct pb_manager *mgr) +{ + assert(mgr); + return (struct pb_ondemand_manager *)mgr; +} + + +static void +pb_ondemand_buffer_destroy(struct pb_buffer *_buf) +{ + struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf); + + pb_reference(&buf->buffer, NULL); + + align_free(buf->data); + + FREE(buf); +} + + +static void * +pb_ondemand_buffer_map(struct pb_buffer *_buf, + unsigned flags) +{ + struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf); + + if(buf->buffer) { + assert(!buf->data); + return pb_map(buf->buffer, flags); + } + else { + assert(buf->data); + ++buf->mapcount; + return buf->data; + } +} + + +static void +pb_ondemand_buffer_unmap(struct pb_buffer *_buf) +{ + struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf); + + if(buf->buffer) { + assert(!buf->data); + pb_unmap(buf->buffer); + } + else { + assert(buf->data); + assert(buf->mapcount); + if(buf->mapcount) + --buf->mapcount; + } +} + + +static enum pipe_error +pb_ondemand_buffer_instantiate(struct pb_ondemand_buffer *buf) +{ + 
if(!buf->buffer) {
+      struct pb_manager *provider = buf->mgr->provider;
+      uint8_t *map;
+
+      assert(!buf->mapcount);
+
+      buf->buffer = provider->create_buffer(provider, buf->size, &buf->desc);
+      if(!buf->buffer)
+         return PIPE_ERROR_OUT_OF_MEMORY;
+
+      map = pb_map(buf->buffer, PB_USAGE_CPU_WRITE);
+      if(!map) {
+         pb_reference(&buf->buffer, NULL);
+         return PIPE_ERROR;
+      }
+
+      memcpy(map, buf->data, buf->size);
+
+      pb_unmap(buf->buffer);
+
+      if(!buf->mapcount) {
+         align_free(buf->data);
+         buf->data = NULL;
+      }
+   }
+
+   return PIPE_OK;
+}
+
+static enum pipe_error
+pb_ondemand_buffer_validate(struct pb_buffer *_buf,
+                            struct pb_validate *vl,
+                            unsigned flags)
+{
+   struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);
+   enum pipe_error ret;
+
+   assert(!buf->mapcount);
+   if(buf->mapcount)
+      return PIPE_ERROR;
+
+   ret = pb_ondemand_buffer_instantiate(buf);
+   if(ret != PIPE_OK)
+      return ret;
+
+   return pb_validate(buf->buffer, vl, flags);
+}
+
+
+static void
+pb_ondemand_buffer_fence(struct pb_buffer *_buf,
+                         struct pipe_fence_handle *fence)
+{
+   struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);
+
+   assert(buf->buffer);
+   if(!buf->buffer)
+      return;
+
+   pb_fence(buf->buffer, fence);
+}
+
+
+static void
+pb_ondemand_buffer_get_base_buffer(struct pb_buffer *_buf,
+                                   struct pb_buffer **base_buf,
+                                   pb_size *offset)
+{
+   struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);
+
+   if(pb_ondemand_buffer_instantiate(buf) != PIPE_OK) {
+      assert(0);
+      *base_buf = &buf->base;
+      *offset = 0;
+      return;
+   }
+
+   pb_get_base_buffer(buf->buffer, base_buf, offset);
+}
+
+
+const struct pb_vtbl
+pb_ondemand_buffer_vtbl = {
+      pb_ondemand_buffer_destroy,
+      pb_ondemand_buffer_map,
+      pb_ondemand_buffer_unmap,
+      pb_ondemand_buffer_validate,
+      pb_ondemand_buffer_fence,
+      pb_ondemand_buffer_get_base_buffer
+};
+
+
+static struct pb_buffer *
+pb_ondemand_manager_create_buffer(struct pb_manager *_mgr,
+                                  pb_size size,
+                                  const struct pb_desc *desc)
+{
+   struct pb_ondemand_manager *mgr = pb_ondemand_manager(_mgr);
+   struct pb_ondemand_buffer *buf;
+
+   buf = CALLOC_STRUCT(pb_ondemand_buffer);
+   if(!buf)
+      return NULL;
+
+   pipe_reference_init(&buf->base.base.reference, 1);
+   buf->base.base.alignment = desc->alignment;
+   buf->base.base.usage = desc->usage;
+   buf->base.base.size = size;
+   buf->base.vtbl = &pb_ondemand_buffer_vtbl;
+
+   buf->mgr = mgr;
+
+   buf->data = align_malloc(size, desc->alignment < sizeof(void*) ?
sizeof(void*) : desc->alignment); + if(!buf->data) { + FREE(buf); + return NULL; + } + + buf->size = size; + buf->desc = *desc; + + return &buf->base; +} + + +static void +pb_ondemand_manager_flush(struct pb_manager *_mgr) +{ + struct pb_ondemand_manager *mgr = pb_ondemand_manager(_mgr); + + mgr->provider->flush(mgr->provider); +} + + +static void +pb_ondemand_manager_destroy(struct pb_manager *_mgr) +{ + struct pb_ondemand_manager *mgr = pb_ondemand_manager(_mgr); + + FREE(mgr); +} + + +struct pb_manager * +pb_ondemand_manager_create(struct pb_manager *provider) +{ + struct pb_ondemand_manager *mgr; + + if(!provider) + return NULL; + + mgr = CALLOC_STRUCT(pb_ondemand_manager); + if(!mgr) + return NULL; + + mgr->base.destroy = pb_ondemand_manager_destroy; + mgr->base.create_buffer = pb_ondemand_manager_create_buffer; + mgr->base.flush = pb_ondemand_manager_flush; + + mgr->provider = provider; + + return &mgr->base; +} diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c new file mode 100644 index 0000000000..fdcce42878 --- /dev/null +++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c @@ -0,0 +1,321 @@ +/************************************************************************** + * + * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * + **************************************************************************/ + +/** + * \file + * Batch buffer pool management. + * + * \author Jose Fonseca <jrfonseca-at-tungstengraphics-dot-com> + * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com> + */ + + +#include "pipe/p_compiler.h" +#include "util/u_debug.h" +#include "os/os_thread.h" +#include "pipe/p_defines.h" +#include "util/u_memory.h" +#include "util/u_double_list.h" + +#include "pb_buffer.h" +#include "pb_bufmgr.h" + + +/** + * Convenience macro (type safe). 
+ */ +#define SUPER(__derived) (&(__derived)->base) + + +struct pool_pb_manager +{ + struct pb_manager base; + + pipe_mutex mutex; + + pb_size bufSize; + pb_size bufAlign; + + pb_size numFree; + pb_size numTot; + + struct list_head free; + + struct pb_buffer *buffer; + void *map; + + struct pool_buffer *bufs; +}; + + +static INLINE struct pool_pb_manager * +pool_pb_manager(struct pb_manager *mgr) +{ + assert(mgr); + return (struct pool_pb_manager *)mgr; +} + + +struct pool_buffer +{ + struct pb_buffer base; + + struct pool_pb_manager *mgr; + + struct list_head head; + + pb_size start; +}; + + +static INLINE struct pool_buffer * +pool_buffer(struct pb_buffer *buf) +{ + assert(buf); + return (struct pool_buffer *)buf; +} + + + +static void +pool_buffer_destroy(struct pb_buffer *buf) +{ + struct pool_buffer *pool_buf = pool_buffer(buf); + struct pool_pb_manager *pool = pool_buf->mgr; + + assert(!pipe_is_referenced(&pool_buf->base.base.reference)); + + pipe_mutex_lock(pool->mutex); + LIST_ADD(&pool_buf->head, &pool->free); + pool->numFree++; + pipe_mutex_unlock(pool->mutex); +} + + +static void * +pool_buffer_map(struct pb_buffer *buf, unsigned flags) +{ + struct pool_buffer *pool_buf = pool_buffer(buf); + struct pool_pb_manager *pool = pool_buf->mgr; + void *map; + + pipe_mutex_lock(pool->mutex); + map = (unsigned char *) pool->map + pool_buf->start; + pipe_mutex_unlock(pool->mutex); + return map; +} + + +static void +pool_buffer_unmap(struct pb_buffer *buf) +{ + /* No-op */ +} + + +static enum pipe_error +pool_buffer_validate(struct pb_buffer *buf, + struct pb_validate *vl, + unsigned flags) +{ + struct pool_buffer *pool_buf = pool_buffer(buf); + struct pool_pb_manager *pool = pool_buf->mgr; + return pb_validate(pool->buffer, vl, flags); +} + + +static void +pool_buffer_fence(struct pb_buffer *buf, + struct pipe_fence_handle *fence) +{ + struct pool_buffer *pool_buf = pool_buffer(buf); + struct pool_pb_manager *pool = pool_buf->mgr; + pb_fence(pool->buffer, fence); +} + + +static void +pool_buffer_get_base_buffer(struct pb_buffer *buf, + struct pb_buffer **base_buf, + pb_size *offset) +{ + struct pool_buffer *pool_buf = pool_buffer(buf); + struct pool_pb_manager *pool = pool_buf->mgr; + pb_get_base_buffer(pool->buffer, base_buf, offset); + *offset += pool_buf->start; +} + + +static const struct pb_vtbl +pool_buffer_vtbl = { + pool_buffer_destroy, + pool_buffer_map, + pool_buffer_unmap, + pool_buffer_validate, + pool_buffer_fence, + pool_buffer_get_base_buffer +}; + + +static struct pb_buffer * +pool_bufmgr_create_buffer(struct pb_manager *mgr, + pb_size size, + const struct pb_desc *desc) +{ + struct pool_pb_manager *pool = pool_pb_manager(mgr); + struct pool_buffer *pool_buf; + struct list_head *item; + + assert(size == pool->bufSize); + assert(pool->bufAlign % desc->alignment == 0); + + pipe_mutex_lock(pool->mutex); + + if (pool->numFree == 0) { + pipe_mutex_unlock(pool->mutex); + debug_printf("warning: out of fixed size buffer objects\n"); + return NULL; + } + + item = pool->free.next; + + if (item == &pool->free) { + pipe_mutex_unlock(pool->mutex); + debug_printf("error: fixed size buffer pool corruption\n"); + return NULL; + } + + LIST_DEL(item); + --pool->numFree; + + pipe_mutex_unlock(pool->mutex); + + pool_buf = LIST_ENTRY(struct pool_buffer, item, head); + assert(!pipe_is_referenced(&pool_buf->base.base.reference)); + pipe_reference_init(&pool_buf->base.base.reference, 1); + pool_buf->base.base.alignment = desc->alignment; + pool_buf->base.base.usage = desc->usage; + + return 
SUPER(pool_buf); +} + + +static void +pool_bufmgr_flush(struct pb_manager *mgr) +{ + /* No-op */ +} + + +static void +pool_bufmgr_destroy(struct pb_manager *mgr) +{ + struct pool_pb_manager *pool = pool_pb_manager(mgr); + pipe_mutex_lock(pool->mutex); + + FREE(pool->bufs); + + pb_unmap(pool->buffer); + pb_reference(&pool->buffer, NULL); + + pipe_mutex_unlock(pool->mutex); + + FREE(mgr); +} + + +struct pb_manager * +pool_bufmgr_create(struct pb_manager *provider, + pb_size numBufs, + pb_size bufSize, + const struct pb_desc *desc) +{ + struct pool_pb_manager *pool; + struct pool_buffer *pool_buf; + pb_size i; + + if(!provider) + return NULL; + + pool = CALLOC_STRUCT(pool_pb_manager); + if (!pool) + return NULL; + + pool->base.destroy = pool_bufmgr_destroy; + pool->base.create_buffer = pool_bufmgr_create_buffer; + pool->base.flush = pool_bufmgr_flush; + + LIST_INITHEAD(&pool->free); + + pool->numTot = numBufs; + pool->numFree = numBufs; + pool->bufSize = bufSize; + pool->bufAlign = desc->alignment; + + pipe_mutex_init(pool->mutex); + + pool->buffer = provider->create_buffer(provider, numBufs*bufSize, desc); + if (!pool->buffer) + goto failure; + + pool->map = pb_map(pool->buffer, + PB_USAGE_CPU_READ | + PB_USAGE_CPU_WRITE); + if(!pool->map) + goto failure; + + pool->bufs = (struct pool_buffer *)CALLOC(numBufs, sizeof(*pool->bufs)); + if (!pool->bufs) + goto failure; + + pool_buf = pool->bufs; + for (i = 0; i < numBufs; ++i) { + pipe_reference_init(&pool_buf->base.base.reference, 0); + pool_buf->base.base.alignment = 0; + pool_buf->base.base.usage = 0; + pool_buf->base.base.size = bufSize; + pool_buf->base.vtbl = &pool_buffer_vtbl; + pool_buf->mgr = pool; + pool_buf->start = i * bufSize; + LIST_ADDTAIL(&pool_buf->head, &pool->free); + pool_buf++; + } + + return SUPER(pool); + +failure: + if(pool->bufs) + FREE(pool->bufs); + if(pool->map) + pb_unmap(pool->buffer); + if(pool->buffer) + pb_reference(&pool->buffer, NULL); + if(pool) + FREE(pool); + return NULL; +} diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c new file mode 100644 index 0000000000..7a3305aaf3 --- /dev/null +++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c @@ -0,0 +1,587 @@ +/************************************************************************** + * + * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * + * Permission is hereby granted, FREE of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
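
The memory-manager and pool managers above (pb_bufmgr_mm.c, pb_bufmgr_pool.c)
both sub-allocate out of a single provider buffer that stays mapped for the
manager's whole lifetime: the former hands out variable-size blocks from a
u_mm heap, the latter fixed-size slots. A rough sketch of creating both;
"winsys_mgr" and all sizes are assumptions of the example, and error handling
is elided.

    #include <string.h>

    #include "pb_buffer.h"
    #include "pb_bufmgr.h"

    /* Sketch: two sub-allocating managers over one provider.  Both keep the
     * underlying buffer mapped, so the provider's memory must be mappable
     * and stay pinned. */
    static void
    create_suballocators(struct pb_manager *winsys_mgr,
                         struct pb_manager **mm_mgr,
                         struct pb_manager **pool_mgr)
    {
       struct pb_desc desc;

       /* 1 MiB heap; every block aligned to 1 << 6 = 64 bytes. */
       *mm_mgr = mm_bufmgr_create(winsys_mgr, 1024 * 1024, 6);

       /* 32 fixed 16 KiB slots, e.g. for batch buffers. */
       memset(&desc, 0, sizeof(desc));
       desc.alignment = 4096;
       *pool_mgr = pool_bufmgr_create(winsys_mgr, 32, 16 * 1024, &desc);
    }
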
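
pb_bufmgr_ondemand.c above takes the opposite approach: buffers start out as
plain align_malloc()'ed system memory and are copied into a real provider
buffer only the first time they are validated or fenced. A sketch of the
intended flow, again with "winsys_mgr" as a placeholder provider; the data
pointer and sizes are likewise made up for the example.

    #include <string.h>

    #include "pb_buffer.h"
    #include "pb_bufmgr.h"

    /* Sketch: data that may never reach the hardware stays in system memory. */
    static void
    ondemand_example(struct pb_manager *winsys_mgr,
                     const void *data, pb_size size)
    {
       struct pb_manager *lazy = pb_ondemand_manager_create(winsys_mgr);
       struct pb_desc desc;
       struct pb_buffer *buf;
       void *map;

       if (!lazy)
          return;

       memset(&desc, 0, sizeof(desc));
       desc.alignment = 16;

       /* Backed purely by malloc'ed memory at this point. */
       buf = lazy->create_buffer(lazy, size, &desc);
       if (!buf)
          return;

       /* CPU access touches only the system-memory copy. */
       map = pb_map(buf, PB_USAGE_CPU_WRITE);
       if (map) {
          memcpy(map, data, size);
          pb_unmap(buf);
       }

       /* Only a later pb_validate()/pb_fence() on this buffer would force a
        * copy into a buffer allocated from winsys_mgr.  The manager itself
        * would normally outlive its buffers; cleanup is omitted here. */
       pb_reference(&buf, NULL);
    }
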
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * S-lab pool implementation.
+ *
+ * @sa http://en.wikipedia.org/wiki/Slab_allocation
+ *
+ * @author Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+#include "pipe/p_compiler.h"
+#include "util/u_debug.h"
+#include "os/os_thread.h"
+#include "pipe/p_defines.h"
+#include "util/u_memory.h"
+#include "util/u_double_list.h"
+#include "util/u_time.h"
+
+#include "pb_buffer.h"
+#include "pb_bufmgr.h"
+
+
+struct pb_slab;
+
+
+/**
+ * Buffer in a slab.
+ *
+ * Sub-allocation of a contiguous buffer.
+ */
+struct pb_slab_buffer
+{
+   struct pb_buffer base;
+
+   struct pb_slab *slab;
+
+   struct list_head head;
+
+   unsigned mapCount;
+
+   /** Offset relative to the start of the slab buffer. */
+   pb_size start;
+
+   /** Used when validating, to signal that all mappings have finished. */
+   /* TODO: Actually validation does not reach this stage yet */
+   pipe_condvar event;
+};
+
+
+/**
+ * Slab -- a contiguous piece of memory.
+ */
+struct pb_slab
+{
+   struct list_head head;
+   struct list_head freeBuffers;
+   pb_size numBuffers;
+   pb_size numFree;
+
+   struct pb_slab_buffer *buffers;
+   struct pb_slab_manager *mgr;
+
+   /** Buffer from the provider */
+   struct pb_buffer *bo;
+
+   void *virtual;
+};
+
+
+/**
+ * Slab manager. Adds and removes slabs as needed to satisfy the allocation
+ * and destruction of individual buffers.
+ */
+struct pb_slab_manager
+{
+   struct pb_manager base;
+
+   /** From where we get our buffers */
+   struct pb_manager *provider;
+
+   /** Size of the buffers we hand on downstream */
+   pb_size bufSize;
+
+   /** Size of the buffers we request upstream */
+   pb_size slabSize;
+
+   /**
+    * Alignment, usage to be used to allocate the slab buffers.
+    *
+    * We can only provide buffers which are consistent (in alignment, usage)
+    * with this description.
+    */
+   struct pb_desc desc;
+
+   /**
+    * Partial slabs
+    *
+    * Full slabs are not stored in any list. Empty slabs are destroyed
+    * immediately.
+    */
+   struct list_head slabs;
+
+   pipe_mutex mutex;
+};
+
+
+/**
+ * Wrapper around several slabs, therefore capable of handling buffers of
+ * multiple sizes.
+ *
+ * This buffer manager just dispatches buffer allocations to the appropriate
+ * slab manager, according to the requested buffer size, or bypasses the slab
+ * managers altogether for even larger sizes.
+ *
+ * The data of this structure remains constant after
+ * initialization and thus needs no mutex protection.
+ */ +struct pb_slab_range_manager +{ + struct pb_manager base; + + struct pb_manager *provider; + + pb_size minBufSize; + pb_size maxBufSize; + + /** @sa pb_slab_manager::desc */ + struct pb_desc desc; + + unsigned numBuckets; + pb_size *bucketSizes; + + /** Array of pb_slab_manager, one for each bucket size */ + struct pb_manager **buckets; +}; + + +static INLINE struct pb_slab_buffer * +pb_slab_buffer(struct pb_buffer *buf) +{ + assert(buf); + return (struct pb_slab_buffer *)buf; +} + + +static INLINE struct pb_slab_manager * +pb_slab_manager(struct pb_manager *mgr) +{ + assert(mgr); + return (struct pb_slab_manager *)mgr; +} + + +static INLINE struct pb_slab_range_manager * +pb_slab_range_manager(struct pb_manager *mgr) +{ + assert(mgr); + return (struct pb_slab_range_manager *)mgr; +} + + +/** + * Delete a buffer from the slab delayed list and put + * it on the slab FREE list. + */ +static void +pb_slab_buffer_destroy(struct pb_buffer *_buf) +{ + struct pb_slab_buffer *buf = pb_slab_buffer(_buf); + struct pb_slab *slab = buf->slab; + struct pb_slab_manager *mgr = slab->mgr; + struct list_head *list = &buf->head; + + pipe_mutex_lock(mgr->mutex); + + assert(!pipe_is_referenced(&buf->base.base.reference)); + + buf->mapCount = 0; + + LIST_DEL(list); + LIST_ADDTAIL(list, &slab->freeBuffers); + slab->numFree++; + + if (slab->head.next == &slab->head) + LIST_ADDTAIL(&slab->head, &mgr->slabs); + + /* If the slab becomes totally empty, free it */ + if (slab->numFree == slab->numBuffers) { + list = &slab->head; + LIST_DELINIT(list); + pb_reference(&slab->bo, NULL); + FREE(slab->buffers); + FREE(slab); + } + + pipe_mutex_unlock(mgr->mutex); +} + + +static void * +pb_slab_buffer_map(struct pb_buffer *_buf, + unsigned flags) +{ + struct pb_slab_buffer *buf = pb_slab_buffer(_buf); + + ++buf->mapCount; + return (void *) ((uint8_t *) buf->slab->virtual + buf->start); +} + + +static void +pb_slab_buffer_unmap(struct pb_buffer *_buf) +{ + struct pb_slab_buffer *buf = pb_slab_buffer(_buf); + + --buf->mapCount; + if (buf->mapCount == 0) + pipe_condvar_broadcast(buf->event); +} + + +static enum pipe_error +pb_slab_buffer_validate(struct pb_buffer *_buf, + struct pb_validate *vl, + unsigned flags) +{ + struct pb_slab_buffer *buf = pb_slab_buffer(_buf); + return pb_validate(buf->slab->bo, vl, flags); +} + + +static void +pb_slab_buffer_fence(struct pb_buffer *_buf, + struct pipe_fence_handle *fence) +{ + struct pb_slab_buffer *buf = pb_slab_buffer(_buf); + pb_fence(buf->slab->bo, fence); +} + + +static void +pb_slab_buffer_get_base_buffer(struct pb_buffer *_buf, + struct pb_buffer **base_buf, + pb_size *offset) +{ + struct pb_slab_buffer *buf = pb_slab_buffer(_buf); + pb_get_base_buffer(buf->slab->bo, base_buf, offset); + *offset += buf->start; +} + + +static const struct pb_vtbl +pb_slab_buffer_vtbl = { + pb_slab_buffer_destroy, + pb_slab_buffer_map, + pb_slab_buffer_unmap, + pb_slab_buffer_validate, + pb_slab_buffer_fence, + pb_slab_buffer_get_base_buffer +}; + + +/** + * Create a new slab. + * + * Called when we ran out of free slabs. + */ +static enum pipe_error +pb_slab_create(struct pb_slab_manager *mgr) +{ + struct pb_slab *slab; + struct pb_slab_buffer *buf; + unsigned numBuffers; + unsigned i; + enum pipe_error ret; + + slab = CALLOC_STRUCT(pb_slab); + if (!slab) + return PIPE_ERROR_OUT_OF_MEMORY; + + slab->bo = mgr->provider->create_buffer(mgr->provider, mgr->slabSize, &mgr->desc); + if(!slab->bo) { + ret = PIPE_ERROR_OUT_OF_MEMORY; + goto out_err0; + } + + /* Note down the slab virtual address. 
All mappings are accessed directly
+    * through this address so it is required that the buffer is pinned. */
+   slab->virtual = pb_map(slab->bo,
+                          PB_USAGE_CPU_READ |
+                          PB_USAGE_CPU_WRITE);
+   if(!slab->virtual) {
+      ret = PIPE_ERROR_OUT_OF_MEMORY;
+      goto out_err1;
+   }
+   pb_unmap(slab->bo);
+
+   numBuffers = slab->bo->base.size / mgr->bufSize;
+
+   slab->buffers = CALLOC(numBuffers, sizeof(*slab->buffers));
+   if (!slab->buffers) {
+      ret = PIPE_ERROR_OUT_OF_MEMORY;
+      goto out_err1;
+   }
+
+   LIST_INITHEAD(&slab->head);
+   LIST_INITHEAD(&slab->freeBuffers);
+   slab->numBuffers = numBuffers;
+   slab->numFree = 0;
+   slab->mgr = mgr;
+
+   buf = slab->buffers;
+   for (i=0; i < numBuffers; ++i) {
+      pipe_reference_init(&buf->base.base.reference, 0);
+      buf->base.base.size = mgr->bufSize;
+      buf->base.base.alignment = 0;
+      buf->base.base.usage = 0;
+      buf->base.vtbl = &pb_slab_buffer_vtbl;
+      buf->slab = slab;
+      buf->start = i * mgr->bufSize;
+      buf->mapCount = 0;
+      pipe_condvar_init(buf->event);
+      LIST_ADDTAIL(&buf->head, &slab->freeBuffers);
+      slab->numFree++;
+      buf++;
+   }
+
+   /* Add this slab to the list of partial slabs */
+   LIST_ADDTAIL(&slab->head, &mgr->slabs);
+
+   return PIPE_OK;
+
+out_err1:
+   pb_reference(&slab->bo, NULL);
+out_err0:
+   FREE(slab);
+   return ret;
+}
+
+
+static struct pb_buffer *
+pb_slab_manager_create_buffer(struct pb_manager *_mgr,
+                              pb_size size,
+                              const struct pb_desc *desc)
+{
+   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
+   struct pb_slab_buffer *buf;
+   struct pb_slab *slab;
+   struct list_head *list;
+
+   /* check size */
+   assert(size <= mgr->bufSize);
+   if(size > mgr->bufSize)
+      return NULL;
+
+   /* check if we can provide the requested alignment */
+   assert(pb_check_alignment(desc->alignment, mgr->desc.alignment));
+   if(!pb_check_alignment(desc->alignment, mgr->desc.alignment))
+      return NULL;
+   assert(pb_check_alignment(desc->alignment, mgr->bufSize));
+   if(!pb_check_alignment(desc->alignment, mgr->bufSize))
+      return NULL;
+
+   assert(pb_check_usage(desc->usage, mgr->desc.usage));
+   if(!pb_check_usage(desc->usage, mgr->desc.usage))
+      return NULL;
+
+   pipe_mutex_lock(mgr->mutex);
+
+   /* Create a new slab, if we run out of partial slabs */
+   if (mgr->slabs.next == &mgr->slabs) {
+      (void) pb_slab_create(mgr);
+      if (mgr->slabs.next == &mgr->slabs) {
+         pipe_mutex_unlock(mgr->mutex);
+         return NULL;
+      }
+   }
+
+   /* Allocate the buffer from a partial (or just created) slab */
+   list = mgr->slabs.next;
+   slab = LIST_ENTRY(struct pb_slab, list, head);
+
+   /* If totally full remove from the partial slab list */
+   if (--slab->numFree == 0)
+      LIST_DELINIT(list);
+
+   list = slab->freeBuffers.next;
+   LIST_DELINIT(list);
+
+   pipe_mutex_unlock(mgr->mutex);
+   buf = LIST_ENTRY(struct pb_slab_buffer, list, head);
+
+   pipe_reference_init(&buf->base.base.reference, 1);
+   buf->base.base.alignment = desc->alignment;
+   buf->base.base.usage = desc->usage;
+
+   return &buf->base;
+}
+
+
+static void
+pb_slab_manager_flush(struct pb_manager *_mgr)
+{
+   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
+
+   assert(mgr->provider->flush);
+   if(mgr->provider->flush)
+      mgr->provider->flush(mgr->provider);
+}
+
+
+static void
+pb_slab_manager_destroy(struct pb_manager *_mgr)
+{
+   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
+
+   /* TODO: cleanup all allocated buffers */
+   FREE(mgr);
+}
+
+
+struct pb_manager *
+pb_slab_manager_create(struct pb_manager *provider,
+                       pb_size bufSize,
+                       pb_size slabSize,
+                       const struct pb_desc *desc)
+{
+   struct pb_slab_manager *mgr;
+
+   mgr =
CALLOC_STRUCT(pb_slab_manager); + if (!mgr) + return NULL; + + mgr->base.destroy = pb_slab_manager_destroy; + mgr->base.create_buffer = pb_slab_manager_create_buffer; + mgr->base.flush = pb_slab_manager_flush; + + mgr->provider = provider; + mgr->bufSize = bufSize; + mgr->slabSize = slabSize; + mgr->desc = *desc; + + LIST_INITHEAD(&mgr->slabs); + + pipe_mutex_init(mgr->mutex); + + return &mgr->base; +} + + +static struct pb_buffer * +pb_slab_range_manager_create_buffer(struct pb_manager *_mgr, + pb_size size, + const struct pb_desc *desc) +{ + struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr); + pb_size bufSize; + pb_size reqSize = size; + unsigned i; + + if(desc->alignment > reqSize) + reqSize = desc->alignment; + + bufSize = mgr->minBufSize; + for (i = 0; i < mgr->numBuckets; ++i) { + if(bufSize >= reqSize) + return mgr->buckets[i]->create_buffer(mgr->buckets[i], size, desc); + bufSize *= 2; + } + + /* Fall back to allocate a buffer object directly from the provider. */ + return mgr->provider->create_buffer(mgr->provider, size, desc); +} + + +static void +pb_slab_range_manager_flush(struct pb_manager *_mgr) +{ + struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr); + + /* Individual slabs don't hold any temporary buffers so no need to call them */ + + assert(mgr->provider->flush); + if(mgr->provider->flush) + mgr->provider->flush(mgr->provider); +} + + +static void +pb_slab_range_manager_destroy(struct pb_manager *_mgr) +{ + struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr); + unsigned i; + + for (i = 0; i < mgr->numBuckets; ++i) + mgr->buckets[i]->destroy(mgr->buckets[i]); + FREE(mgr->buckets); + FREE(mgr->bucketSizes); + FREE(mgr); +} + + +struct pb_manager * +pb_slab_range_manager_create(struct pb_manager *provider, + pb_size minBufSize, + pb_size maxBufSize, + pb_size slabSize, + const struct pb_desc *desc) +{ + struct pb_slab_range_manager *mgr; + pb_size bufSize; + unsigned i; + + if(!provider) + return NULL; + + mgr = CALLOC_STRUCT(pb_slab_range_manager); + if (!mgr) + goto out_err0; + + mgr->base.destroy = pb_slab_range_manager_destroy; + mgr->base.create_buffer = pb_slab_range_manager_create_buffer; + mgr->base.flush = pb_slab_range_manager_flush; + + mgr->provider = provider; + mgr->minBufSize = minBufSize; + mgr->maxBufSize = maxBufSize; + + mgr->numBuckets = 1; + bufSize = minBufSize; + while(bufSize < maxBufSize) { + bufSize *= 2; + ++mgr->numBuckets; + } + + mgr->buckets = CALLOC(mgr->numBuckets, sizeof(*mgr->buckets)); + if (!mgr->buckets) + goto out_err1; + + bufSize = minBufSize; + for (i = 0; i < mgr->numBuckets; ++i) { + mgr->buckets[i] = pb_slab_manager_create(provider, bufSize, slabSize, desc); + if(!mgr->buckets[i]) + goto out_err2; + bufSize *= 2; + } + + return &mgr->base; + +out_err2: + for (i = 0; i < mgr->numBuckets; ++i) + if(mgr->buckets[i]) + mgr->buckets[i]->destroy(mgr->buckets[i]); + FREE(mgr->buckets); +out_err1: + FREE(mgr); +out_err0: + return NULL; +} diff --git a/src/gallium/auxiliary/pipebuffer/pb_validate.c b/src/gallium/auxiliary/pipebuffer/pb_validate.c new file mode 100644 index 0000000000..b585422460 --- /dev/null +++ b/src/gallium/auxiliary/pipebuffer/pb_validate.c @@ -0,0 +1,192 @@ +/************************************************************************** + * + * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. 
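
For reference, the bucket layout built by pb_slab_range_manager_create() above:
starting at minBufSize the bucket sizes double until they reach maxBufSize, and
a request is served by the first bucket at least as large as the larger of its
size and alignment; anything bigger bypasses the slabs and goes straight to the
provider. With a hypothetical 4 KiB to 64 KiB range that gives five buckets
(4, 8, 16, 32 and 64 KiB). The small helper below just reproduces that
arithmetic; it is not part of the library.

    /* Sketch: which bucket size would serve a request, using the same
     * doubling rule as pb_slab_range_manager_create_buffer().  A return
     * value of 0 means the request would be passed to the provider. */
    static unsigned
    slab_bucket_size_for(unsigned min_size, unsigned max_size, unsigned req_size)
    {
       unsigned buf_size = min_size;

       while (buf_size < req_size && buf_size < max_size)
          buf_size *= 2;

       /* e.g. min 4096, max 65536: a 10000-byte request lands in the
        * 16384-byte bucket; a 100000-byte request returns 0. */
       return buf_size >= req_size ? buf_size : 0;
    }
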
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +/** + * @file + * Buffer validation. + * + * @author Jose Fonseca <jrfonseca@tungstengraphics.com> + */ + + +#include "pipe/p_compiler.h" +#include "pipe/p_defines.h" +#include "util/u_memory.h" +#include "util/u_debug.h" + +#include "pb_buffer.h" +#include "pb_validate.h" + + +#define PB_VALIDATE_INITIAL_SIZE 1 /* 512 */ + + +struct pb_validate_entry +{ + struct pb_buffer *buf; + unsigned flags; +}; + + +struct pb_validate +{ + struct pb_validate_entry *entries; + unsigned used; + unsigned size; +}; + + +enum pipe_error +pb_validate_add_buffer(struct pb_validate *vl, + struct pb_buffer *buf, + unsigned flags) +{ + assert(buf); + if(!buf) + return PIPE_ERROR; + + assert(flags & PB_USAGE_GPU_READ_WRITE); + assert(!(flags & ~PB_USAGE_GPU_READ_WRITE)); + flags &= PB_USAGE_GPU_READ_WRITE; + + /* We only need to store one reference for each buffer, so avoid storing + * consecutive references for the same buffer. It might not be the most + * common pattern, but it is easy to implement. 
+ */ + if(vl->used && vl->entries[vl->used - 1].buf == buf) { + vl->entries[vl->used - 1].flags |= flags; + return PIPE_OK; + } + + /* Grow the table */ + if(vl->used == vl->size) { + unsigned new_size; + struct pb_validate_entry *new_entries; + + new_size = vl->size * 2; + if(!new_size) + return PIPE_ERROR_OUT_OF_MEMORY; + + new_entries = (struct pb_validate_entry *)REALLOC(vl->entries, + vl->size*sizeof(struct pb_validate_entry), + new_size*sizeof(struct pb_validate_entry)); + if(!new_entries) + return PIPE_ERROR_OUT_OF_MEMORY; + + memset(new_entries + vl->size, 0, (new_size - vl->size)*sizeof(struct pb_validate_entry)); + + vl->size = new_size; + vl->entries = new_entries; + } + + assert(!vl->entries[vl->used].buf); + pb_reference(&vl->entries[vl->used].buf, buf); + vl->entries[vl->used].flags = flags; + ++vl->used; + + return PIPE_OK; +} + + +enum pipe_error +pb_validate_foreach(struct pb_validate *vl, + enum pipe_error (*callback)(struct pb_buffer *buf, void *data), + void *data) +{ + unsigned i; + for(i = 0; i < vl->used; ++i) { + enum pipe_error ret; + ret = callback(vl->entries[i].buf, data); + if(ret != PIPE_OK) + return ret; + } + return PIPE_OK; +} + + +enum pipe_error +pb_validate_validate(struct pb_validate *vl) +{ + unsigned i; + + for(i = 0; i < vl->used; ++i) { + enum pipe_error ret; + ret = pb_validate(vl->entries[i].buf, vl, vl->entries[i].flags); + if(ret != PIPE_OK) { + while(i--) + pb_validate(vl->entries[i].buf, NULL, 0); + return ret; + } + } + + return PIPE_OK; +} + + +void +pb_validate_fence(struct pb_validate *vl, + struct pipe_fence_handle *fence) +{ + unsigned i; + for(i = 0; i < vl->used; ++i) { + pb_fence(vl->entries[i].buf, fence); + pb_reference(&vl->entries[i].buf, NULL); + } + vl->used = 0; +} + + +void +pb_validate_destroy(struct pb_validate *vl) +{ + unsigned i; + for(i = 0; i < vl->used; ++i) + pb_reference(&vl->entries[i].buf, NULL); + FREE(vl->entries); + FREE(vl); +} + + +struct pb_validate * +pb_validate_create() +{ + struct pb_validate *vl; + + vl = CALLOC_STRUCT(pb_validate); + if(!vl) + return NULL; + + vl->size = PB_VALIDATE_INITIAL_SIZE; + vl->entries = (struct pb_validate_entry *)CALLOC(vl->size, sizeof(struct pb_validate_entry)); + if(!vl->entries) { + FREE(vl); + return NULL; + } + + return vl; +} + diff --git a/src/gallium/auxiliary/pipebuffer/pb_validate.h b/src/gallium/auxiliary/pipebuffer/pb_validate.h new file mode 100644 index 0000000000..3c93f30f20 --- /dev/null +++ b/src/gallium/auxiliary/pipebuffer/pb_validate.h @@ -0,0 +1,97 @@ +/************************************************************************** + * + * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
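
pb_validate.c above gives the winsys a simple protocol around command
submission: collect every referenced buffer with pb_validate_add_buffer(),
call pb_validate_validate() just before emitting commands, and
pb_validate_fence() just after, which also drops the list's references.
A sketch of that sequence; the two buffers, the fence and the omitted
command-emission step are assumptions of the example.

    #include "pipe/p_defines.h"

    #include "pb_buffer.h"
    #include "pb_validate.h"

    /* Sketch: validate/fence bracket around one hypothetical submission. */
    static enum pipe_error
    submit_example(struct pb_buffer *vertex_buf,
                   struct pb_buffer *index_buf,
                   struct pipe_fence_handle *fence)
    {
       struct pb_validate *vl;
       enum pipe_error ret;

       vl = pb_validate_create();
       if (!vl)
          return PIPE_ERROR_OUT_OF_MEMORY;

       /* Collect every buffer the command stream references. */
       ret = pb_validate_add_buffer(vl, vertex_buf, PB_USAGE_GPU_READ_WRITE);
       if (ret == PIPE_OK)
          ret = pb_validate_add_buffer(vl, index_buf, PB_USAGE_GPU_READ_WRITE);

       /* Pin/relocate everything right before issuing commands... */
       if (ret == PIPE_OK)
          ret = pb_validate_validate(vl);

       /* ...the actual command emission would go here... */

       /* ...then fence the buffers and release the list's references. */
       if (ret == PIPE_OK)
          pb_validate_fence(vl, fence);

       pb_validate_destroy(vl);
       return ret;
    }
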
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +/** + * @file + * Buffer validation. + * + * @author Jose Fonseca <jrfonseca@tungstengraphics.com> + */ + +#ifndef PB_VALIDATE_H_ +#define PB_VALIDATE_H_ + + +#include "pipe/p_compiler.h" +#include "pipe/p_defines.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +struct pb_buffer; +struct pipe_fence_handle; + + +/** + * Buffer validation list. + * + * It holds a list of buffers to be validated and fenced when flushing. + */ +struct pb_validate; + + +enum pipe_error +pb_validate_add_buffer(struct pb_validate *vl, + struct pb_buffer *buf, + unsigned flags); + +enum pipe_error +pb_validate_foreach(struct pb_validate *vl, + enum pipe_error (*callback)(struct pb_buffer *buf, void *data), + void *data); + +/** + * Validate all buffers for hardware access. + * + * Should be called right before issuing commands to the hardware. + */ +enum pipe_error +pb_validate_validate(struct pb_validate *vl); + +/** + * Fence all buffers and clear the list. + * + * Should be called right after issuing commands to the hardware. + */ +void +pb_validate_fence(struct pb_validate *vl, + struct pipe_fence_handle *fence); + +struct pb_validate * +pb_validate_create(void); + +void +pb_validate_destroy(struct pb_validate *vl); + + +#ifdef __cplusplus +} +#endif + +#endif /*PB_VALIDATE_H_*/ |
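
Taken together, these managers are meant to be composed by the winsys rather
than used in isolation. How deep a stack is worthwhile depends entirely on the
driver, so the following is only one plausible arrangement; "winsys_mgr" is the
hypothetical device-memory manager and every size in it is made up for the
example.

    #include <string.h>

    #include "pb_buffer.h"
    #include "pb_bufmgr.h"

    /* Sketch: one possible manager stack.  Error handling is elided; each
     * constructor returns NULL when handed a NULL provider, so a failure
     * simply propagates to the final result. */
    static struct pb_manager *
    build_manager_stack(struct pb_manager *winsys_mgr)
    {
       struct pb_manager *mgr = winsys_mgr;
       struct pb_desc desc;

       memset(&desc, 0, sizeof(desc));
       desc.alignment = 64;
       desc.usage = PB_USAGE_GPU_READ_WRITE |
                    PB_USAGE_CPU_READ | PB_USAGE_CPU_WRITE;

       /* Sub-allocate small requests (4 KiB..64 KiB) out of 1 MiB slabs;
        * assumes the provider's buffers can stay mapped. */
       mgr = pb_slab_range_manager_create(mgr, 4 * 1024, 64 * 1024,
                                          1024 * 1024, &desc);

       /* Recycle buffers destroyed within the last 0.25 s. */
       mgr = pb_cache_manager_create(mgr, 250 * 1000);

       /* Under/overflow margins around every allocation; disappears in
        * non-DEBUG builds. */
       mgr = pb_debug_manager_create(mgr, 4096, 4096);

       return mgr;
    }
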