From 7b971a50088caeeb2d66d99dbf6bef580a01c5d9 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 16 May 2007 15:14:20 -0700 Subject: WIP: Replace TTM buffer pool manager with a simplified interface. The interface is not solid yet (some simplification to do still, and adjustment for 0-copy), and the drivers are not converted. However, the new interface allows using the same calls to support either a TTM or a classic static allocation backend, with the static backend allowing a more limited feature set. --- src/mesa/drivers/dri/Makefile.template | 3 +- src/mesa/drivers/dri/common/dri_bufmgr.c | 506 ++------------- src/mesa/drivers/dri/common/dri_bufmgr.h | 203 ++++-- src/mesa/drivers/dri/common/dri_bufmgr_fake.c | 871 ++++++++++++++++++++++++++ src/mesa/drivers/dri/common/dri_bufmgr_ttm.c | 356 +++++++++++ src/mesa/drivers/dri/common/dri_bufpool.h | 86 --- src/mesa/drivers/dri/common/dri_drmpool.c | 227 ------- 7 files changed, 1455 insertions(+), 797 deletions(-) create mode 100644 src/mesa/drivers/dri/common/dri_bufmgr_fake.c create mode 100644 src/mesa/drivers/dri/common/dri_bufmgr_ttm.c delete mode 100644 src/mesa/drivers/dri/common/dri_bufpool.h delete mode 100644 src/mesa/drivers/dri/common/dri_drmpool.c (limited to 'src/mesa') diff --git a/src/mesa/drivers/dri/Makefile.template b/src/mesa/drivers/dri/Makefile.template index 5261a4b55d..3d0389c60c 100644 --- a/src/mesa/drivers/dri/Makefile.template +++ b/src/mesa/drivers/dri/Makefile.template @@ -13,7 +13,8 @@ COMMON_SOURCES = \ COMMON_BM_SOURCES = \ ../common/dri_bufmgr.c \ - ../common/dri_drmpool.c + ../common/dri_bufmgr_ttm.c \ + ../common/dri_bufmgr_fake.c ifeq ($(WINDOW_SYSTEM),dri) diff --git a/src/mesa/drivers/dri/common/dri_bufmgr.c b/src/mesa/drivers/dri/common/dri_bufmgr.c index 65d6545965..fc2d4a0665 100644 --- a/src/mesa/drivers/dri/common/dri_bufmgr.c +++ b/src/mesa/drivers/dri/common/dri_bufmgr.c @@ -1,493 +1,123 @@ -/************************************************************************** - * - * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA - * All Rights Reserved. - * +/* + * Copyright © 2007 Intel Corporation + * * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * - **************************************************************************/ -/* - * Authors: Thomas Hellström - * Keith Whitwell */ -#include -#include -#include "glthread.h" -#include "errno.h" +#include "mtypes.h" #include "dri_bufmgr.h" -#include "string.h" -#include "imports.h" -#include "dri_bufpool.h" -_glthread_DECLARE_STATIC_MUTEX(bmMutex); - -/* - * TODO: Introduce fence pools in the same way as - * buffer object pools. +/** @file dri_bufmgr.c + * + * Convenience functions for buffer management methods. */ - - -typedef struct _DriFenceObject -{ - int fd; - _glthread_Mutex mutex; - int refCount; - const char *name; - drmFence fence; -} DriFenceObject; - -typedef struct _DriBufferObject -{ - DriBufferPool *pool; - _glthread_Mutex mutex; - int refCount; - const char *name; - unsigned flags; - unsigned hint; - unsigned alignment; - void *private; -} DriBufferObject; - - -void -bmError(int val, const char *file, const char *function, int line) -{ - _mesa_printf("Fatal video memory manager error \"%s\".\n" - "Check kernel logs or set the LIBGL_DEBUG\n" - "environment variable to \"verbose\" for more info.\n" - "Detected in file %s, line %d, function %s.\n", - strerror(-val), file, line, function); -#ifndef NDEBUG - abort(); -#else - abort(); -#endif -} - -DriFenceObject * -driFenceBuffers(int fd, char *name, unsigned flags) -{ - DriFenceObject *fence = (DriFenceObject *) malloc(sizeof(*fence)); - int ret; - - if (!fence) - BM_CKFATAL(-EINVAL); - - _glthread_LOCK_MUTEX(bmMutex); - fence->refCount = 1; - fence->name = name; - fence->fd = fd; - _glthread_INIT_MUTEX(fence->mutex); - ret = drmFenceBuffers(fd, flags, &fence->fence); - _glthread_UNLOCK_MUTEX(bmMutex); - if (ret) { - free(fence); - BM_CKFATAL(ret); - } - return fence; -} - - -unsigned -driFenceType(DriFenceObject * fence) +dri_bo * +dri_bo_alloc(dri_bufmgr *bufmgr, const char *name, unsigned long size, + unsigned int alignment, unsigned int flags, unsigned int hint) { - unsigned ret; - - _glthread_LOCK_MUTEX(bmMutex); - ret = fence->fence.flags; - _glthread_UNLOCK_MUTEX(bmMutex); - - return ret; + return bufmgr->bo_alloc(bufmgr, name, size, alignment, flags, hint); } - -DriFenceObject * -driFenceReference(DriFenceObject * fence) +dri_bo * +dri_bo_alloc_static(dri_bufmgr *bufmgr, const char *name, unsigned long offset, + unsigned long size, void *virtual, unsigned int flags, + unsigned int hint) { - _glthread_LOCK_MUTEX(bmMutex); - ++fence->refCount; - _glthread_UNLOCK_MUTEX(bmMutex); - return fence; + return bufmgr->bo_alloc_static(bufmgr, name, offset, size, virtual, + flags, hint); } void -driFenceUnReference(DriFenceObject * fence) +dri_bo_reference(dri_bo *bo) { - if (!fence) - return; - - 
_glthread_LOCK_MUTEX(bmMutex); - if (--fence->refCount == 0) { - drmFenceDestroy(fence->fd, &fence->fence); - free(fence); - } - _glthread_UNLOCK_MUTEX(bmMutex); + bo->bufmgr->bo_reference(bo); } void -driFenceFinish(DriFenceObject * fence, unsigned type, int lazy) +dri_bo_unreference(dri_bo *bo) { - int ret; - unsigned flags = (lazy) ? DRM_FENCE_FLAG_WAIT_LAZY : 0; - - _glthread_LOCK_MUTEX(fence->mutex); - ret = drmFenceWait(fence->fd, flags, &fence->fence, type); - _glthread_UNLOCK_MUTEX(fence->mutex); - BM_CKFATAL(ret); + bo->bufmgr->bo_unreference(bo); } int -driFenceSignaled(DriFenceObject * fence, unsigned type) +dri_bo_map(dri_bo *buf, GLboolean write_enable) { - int signaled; - int ret; - - if (fence == NULL) - return GL_TRUE; - - _glthread_LOCK_MUTEX(fence->mutex); - ret = drmFenceSignaled(fence->fd, &fence->fence, type, &signaled); - _glthread_UNLOCK_MUTEX(fence->mutex); - BM_CKFATAL(ret); - return signaled; -} - - -extern drmBO * -driBOKernel(struct _DriBufferObject *buf) -{ - drmBO *ret; - - assert(buf->private != NULL); - ret = buf->pool->kernel(buf->pool, buf->private); - if (!ret) - BM_CKFATAL(-EINVAL); - - return ret; -} - -void -driBOWaitIdle(struct _DriBufferObject *buf, int lazy) -{ - assert(buf->private != NULL); - - _glthread_LOCK_MUTEX(buf->mutex); - BM_CKFATAL(buf->pool->waitIdle(buf->pool, buf->private, lazy)); - _glthread_UNLOCK_MUTEX(buf->mutex); + return buf->bufmgr->bo_map(buf, write_enable); } -void * -driBOMap(struct _DriBufferObject *buf, unsigned flags, unsigned hint) -{ - void *virtual; - - assert(buf->private != NULL); - - _glthread_LOCK_MUTEX(buf->mutex); - BM_CKFATAL(buf->pool->map(buf->pool, buf->private, flags, hint, &virtual)); - _glthread_UNLOCK_MUTEX(buf->mutex); - return virtual; -} - -void -driBOUnmap(struct _DriBufferObject *buf) -{ - assert(buf->private != NULL); - - buf->pool->unmap(buf->pool, buf->private); -} - -unsigned long -driBOOffset(struct _DriBufferObject *buf) -{ - unsigned long ret; - - assert(buf->private != NULL); - - _glthread_LOCK_MUTEX(buf->mutex); - ret = buf->pool->offset(buf->pool, buf->private); - _glthread_UNLOCK_MUTEX(buf->mutex); - return ret; -} - -unsigned -driBOFlags(struct _DriBufferObject *buf) -{ - unsigned ret; - - assert(buf->private != NULL); - - _glthread_LOCK_MUTEX(buf->mutex); - ret = buf->pool->flags(buf->pool, buf->private); - _glthread_UNLOCK_MUTEX(buf->mutex); - return ret; -} - -struct _DriBufferObject * -driBOReference(struct _DriBufferObject *buf) -{ - _glthread_LOCK_MUTEX(bmMutex); - if (++buf->refCount == 1) { - BM_CKFATAL(-EINVAL); - } - _glthread_UNLOCK_MUTEX(bmMutex); - return buf; -} - -void -driBOUnReference(struct _DriBufferObject *buf) -{ - int tmp; - - if (!buf) - return; - - _glthread_LOCK_MUTEX(bmMutex); - tmp = --buf->refCount; - _glthread_UNLOCK_MUTEX(bmMutex); - if (!tmp) { - buf->pool->destroy(buf->pool, buf->private); - free(buf); - } -} - -void -driBOData(struct _DriBufferObject *buf, - unsigned size, const void *data, unsigned flags) -{ - void *virtual; - int newBuffer; - struct _DriBufferPool *pool; - - _glthread_LOCK_MUTEX(buf->mutex); - pool = buf->pool; - if (!pool->create) { - _mesa_error(NULL, GL_INVALID_OPERATION, - "driBOData called on invalid buffer\n"); - BM_CKFATAL(-EINVAL); - } - newBuffer = !buf->private || (pool->size(pool, buf->private) < size) || - pool->map(pool, buf->private, DRM_BO_FLAG_WRITE, - DRM_BO_HINT_DONT_BLOCK, &virtual); - - if (newBuffer) { - if (buf->private) - pool->destroy(pool, buf->private); - if (!flags) - flags = buf->flags; - buf->private = 
pool->create(pool, size, flags, 0, buf->alignment); - if (!buf->private) - BM_CKFATAL(-ENOMEM); - BM_CKFATAL(pool->map(pool, buf->private, - DRM_BO_FLAG_WRITE, - DRM_BO_HINT_DONT_BLOCK, &virtual)); - } - - if (data != NULL) - memcpy(virtual, data, size); - - BM_CKFATAL(pool->unmap(pool, buf->private)); - _glthread_UNLOCK_MUTEX(buf->mutex); -} - -void -driBOSubData(struct _DriBufferObject *buf, - unsigned long offset, unsigned long size, const void *data) -{ - void *virtual; - - _glthread_LOCK_MUTEX(buf->mutex); - if (size && data) { - BM_CKFATAL(buf->pool->map(buf->pool, buf->private, - DRM_BO_FLAG_WRITE, 0, &virtual)); - memcpy((unsigned char *) virtual + offset, data, size); - BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private)); - } - _glthread_UNLOCK_MUTEX(buf->mutex); -} - -void -driBOGetSubData(struct _DriBufferObject *buf, - unsigned long offset, unsigned long size, void *data) -{ - void *virtual; - - _glthread_LOCK_MUTEX(buf->mutex); - if (size && data) { - BM_CKFATAL(buf->pool->map(buf->pool, buf->private, - DRM_BO_FLAG_READ, 0, &virtual)); - memcpy(data, (unsigned char *) virtual + offset, size); - BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private)); - } - _glthread_UNLOCK_MUTEX(buf->mutex); -} - -void -driBOSetStatic(struct _DriBufferObject *buf, - unsigned long offset, - unsigned long size, void *virtual, unsigned flags) -{ - _glthread_LOCK_MUTEX(buf->mutex); - if (buf->private != NULL) { - _mesa_error(NULL, GL_INVALID_OPERATION, - "Invalid buffer for setStatic\n"); - BM_CKFATAL(-EINVAL); - } - if (buf->pool->setstatic == NULL) { - _mesa_error(NULL, GL_INVALID_OPERATION, - "Invalid buffer pool for setStatic\n"); - BM_CKFATAL(-EINVAL); - } - - if (!flags) - flags = buf->flags; - - buf->private = buf->pool->setstatic(buf->pool, offset, size, - virtual, flags); - if (!buf->private) { - _mesa_error(NULL, GL_OUT_OF_MEMORY, - "Invalid buffer pool for setStatic\n"); - BM_CKFATAL(-ENOMEM); - } - _glthread_UNLOCK_MUTEX(buf->mutex); -} - - - -void -driGenBuffers(struct _DriBufferPool *pool, - const char *name, - unsigned n, - struct _DriBufferObject *buffers[], - unsigned alignment, unsigned flags, unsigned hint) +int +dri_bo_unmap(dri_bo *buf) { - struct _DriBufferObject *buf; - int i; - - flags = (flags) ? 
flags : DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM | - DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE; - - - for (i = 0; i < n; ++i) { - buf = (struct _DriBufferObject *) calloc(1, sizeof(*buf)); - if (!buf) - BM_CKFATAL(-ENOMEM); - - _glthread_INIT_MUTEX(buf->mutex); - _glthread_LOCK_MUTEX(buf->mutex); - _glthread_LOCK_MUTEX(bmMutex); - buf->refCount = 1; - _glthread_UNLOCK_MUTEX(bmMutex); - buf->flags = flags; - buf->hint = hint; - buf->name = name; - buf->alignment = alignment; - buf->pool = pool; - _glthread_UNLOCK_MUTEX(buf->mutex); - buffers[i] = buf; - } + return buf->bufmgr->bo_unmap(buf); } -void -driDeleteBuffers(unsigned n, struct _DriBufferObject *buffers[]) -{ - int i; - - for (i = 0; i < n; ++i) { - driBOUnReference(buffers[i]); - } -} - - -void -driInitBufMgr(int fd) +int +dri_bo_validate(dri_bo *buf, unsigned int flags) { - ; + return buf->bufmgr->bo_validate(buf, flags); } - -void -driBOCreateList(int target, drmBOList * list) +dri_fence * +dri_fence_validated(dri_bufmgr *bufmgr, const char *name, GLboolean flushed) { - _glthread_LOCK_MUTEX(bmMutex); - BM_CKFATAL(drmBOCreateList(target, list)); - _glthread_UNLOCK_MUTEX(bmMutex); + return bufmgr->fence_validated(bufmgr, name, flushed); } void -driBOResetList(drmBOList * list) +dri_fence_reference(dri_fence *fence) { - _glthread_LOCK_MUTEX(bmMutex); - BM_CKFATAL(drmBOResetList(list)); - _glthread_UNLOCK_MUTEX(bmMutex); + fence->bufmgr->fence_reference(fence); } void -driBOAddListItem(drmBOList * list, struct _DriBufferObject *buf, - unsigned flags, unsigned mask) +dri_fence_unreference(dri_fence *fence) { - int newItem; - - _glthread_LOCK_MUTEX(buf->mutex); - _glthread_LOCK_MUTEX(bmMutex); - BM_CKFATAL(drmAddValidateItem(list, driBOKernel(buf), - flags, mask, &newItem)); - _glthread_UNLOCK_MUTEX(bmMutex); - - /* - * Tell userspace pools to validate the buffer. This should be a - * noop if the pool is already validated. - * FIXME: We should have a list for this as well. 
- */ - - if (buf->pool->validate) { - BM_CKFATAL(buf->pool->validate(buf->pool, buf->private)); - } - - _glthread_UNLOCK_MUTEX(buf->mutex); + fence->bufmgr->fence_unreference(fence); } void -driBOFence(struct _DriBufferObject *buf, struct _DriFenceObject *fence) +dri_bo_subdata(dri_bo *bo, unsigned long offset, + unsigned long size, const void *data) { - _glthread_LOCK_MUTEX(buf->mutex); - BM_CKFATAL(buf->pool->fence(buf->pool, buf->private, fence)); - _glthread_UNLOCK_MUTEX(buf->mutex); + if (size == 0 || data == NULL) + return; + dri_bo_map(bo, GL_TRUE); + memcpy((unsigned char *)bo->virtual + offset, data, size); + dri_bo_unmap(bo); } -void -driBOValidateList(int fd, drmBOList * list) -{ - _glthread_LOCK_MUTEX(bmMutex); - BM_CKFATAL(drmBOValidateList(fd, list)); - _glthread_UNLOCK_MUTEX(bmMutex); -} void -driPoolTakeDown(struct _DriBufferPool *pool) +dri_bo_get_subdata(dri_bo *bo, unsigned long offset, + unsigned long size, void *data) { - pool->takeDown(pool); + if (size == 0 || data == NULL) + return; + dri_bo_map(bo, GL_FALSE); + memcpy(data, (unsigned char *)bo->virtual + offset, size); + dri_bo_unmap(bo); } diff --git a/src/mesa/drivers/dri/common/dri_bufmgr.h b/src/mesa/drivers/dri/common/dri_bufmgr.h index 01f149ae4e..eafee88b88 100644 --- a/src/mesa/drivers/dri/common/dri_bufmgr.h +++ b/src/mesa/drivers/dri/common/dri_bufmgr.h @@ -1,5 +1,6 @@ /************************************************************************** * + * Copyright © 2007 Intel Corporation * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA * All Rights Reserved. * @@ -28,43 +29,142 @@ /* * Authors: Thomas Hellström * Keith Whitwell + * Eric Anholt */ #ifndef _DRI_BUFMGR_H_ #define _DRI_BUFMGR_H_ #include +typedef struct _dri_bufmgr dri_bufmgr; +typedef struct _dri_bo dri_bo; +typedef struct _dri_fence dri_fence; -struct _DriFenceObject; -struct _DriBufferObject; -struct _DriBufferPool; +struct _dri_bo { + /** Size in bytes of the buffer object. */ + unsigned long size; + /** + * Card virtual address (offset from the beginning of the aperture) for the + * object. Only valid while validated. + */ + unsigned long offset; + /** + * Virtual address for accessing the buffer data. Only valid while mapped. + */ + void *virtual; + /** Buffer manager context associated with this buffer object */ + dri_bufmgr *bufmgr; +}; -extern struct _DriFenceObject *driFenceBuffers(int fd, char *name, - unsigned flags); +struct _dri_fence { + /** + * This is an ORed mask of DRM_BO_FLAG_READ, DRM_BO_FLAG_WRITE, and + * DRM_FLAG_EXE indicating the operations associated with this fence. + * + * It is constant for the life of the fence object. + */ + unsigned int type; + /** Buffer manager context associated with this fence */ + dri_bufmgr *bufmgr; +}; -extern struct _DriFenceObject *driFenceReference(struct _DriFenceObject *fence); +/** + * Context for a buffer manager instance. + * + * Contains public methods followed by private storage for the buffer manager. + */ +struct _dri_bufmgr { + /** + * Allocate a buffer object. + * + * Buffer objects are not necessarily initially mapped into CPU virtual + * address space or graphics device aperture. They must be mapped using + * bo_map() to be used by the CPU, and validated for use using bo_validate() + * to be used from the graphics device. + * + * XXX: flags/hint reason to live? 
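+ *
+ * For example (illustrative only; the buffer name, size, and alignment are
+ * hypothetical), a driver might allocate a page-aligned scratch buffer in
+ * TT memory with:
+ *   bo = dri_bo_alloc(bufmgr, "batch", 4096, 4096,
+ *                     DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ |
+ *                     DRM_BO_FLAG_WRITE, 0);
+ * and then fill it through dri_bo_map()/dri_bo_unmap() before validating it.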
+ */ + dri_bo *(*bo_alloc)(dri_bufmgr *bufmgr_ctx, const char *name, + unsigned long size, unsigned int alignment, + unsigned int flags, unsigned int hint); -extern void driFenceUnReference(struct _DriFenceObject *fence); + /** + * Allocates a buffer object for a static allocation. + * + * Static allocations are ones such as the front buffer that are offered by + * the X Server, which are never evicted and never moved. + * + * XXX: flags/hint reason to live? + */ + dri_bo *(*bo_alloc_static)(dri_bufmgr *bufmgr_ctx, const char *name, + unsigned long offset, unsigned long size, + void *virtual, unsigned int flags, + unsigned int hint); -extern void -driFenceFinish(struct _DriFenceObject *fence, unsigned type, int lazy); + /** Takes a reference on a buffer object */ + void (*bo_reference)(dri_bo *bo); -extern int driFenceSignaled(struct _DriFenceObject *fence, unsigned type); -extern unsigned driFenceType(struct _DriFenceObject *fence); + /** + * Releases a reference on a buffer object, freeing the data if + * no references remain. + */ + void (*bo_unreference)(dri_bo *bo); -/* - * Return a pointer to the libdrm buffer object this DriBufferObject - * uses. - */ + /** + * Maps the buffer into userspace. + * + * This function will block waiting for any existing fence on the buffer to + * clear, first. The resulting mapping is available at buf->virtual. + */ + int (*bo_map)(dri_bo *buf, GLboolean write_enable); + + /** Reduces the refcount on the userspace mapping of the buffer object. */ + int (*bo_unmap)(dri_bo *buf); + + /** + * Makes the buffer accessible to the graphics chip. + * + * The resulting offset of the buffer within the graphics aperture is then + * available at buf->offset until the buffer is fenced. + * + * Flags should consist of the memory types that the buffer may be validated + * into and the read/write/exe flags appropriate to the use of the buffer. + */ + int (*bo_validate)(dri_bo *buf, unsigned int flags); + + /** + * Associates the current set of validated buffers with a fence. + * + * Once fenced, the buffer manager will allow the validated buffers to be + * evicted when the graphics device's execution has passed the fence + * command. + * + * The fence object will have flags for the sum of the read/write/exe flags + * of the validated buffers associated with it. + */ + dri_fence * (*fence_validated)(dri_bufmgr *bufmgr, const char *name, + GLboolean flushed); + + /** Takes a reference on a fence object */ + void (*fence_reference)(dri_fence *fence); -extern drmBO *driBOKernel(struct _DriBufferObject *buf); -extern void *driBOMap(struct _DriBufferObject *buf, unsigned flags, - unsigned hint); -extern void driBOUnmap(struct _DriBufferObject *buf); -extern unsigned long driBOOffset(struct _DriBufferObject *buf); -extern unsigned driBOFlags(struct _DriBufferObject *buf); -extern struct _DriBufferObject *driBOReference(struct _DriBufferObject *buf); -extern void driBOUnReference(struct _DriBufferObject *buf); + /** + * Releases a reference on a fence object, freeing the data if + * no references remain. + */ + void (*fence_unreference)(dri_fence *fence); + + /** + * Blocks until the given fence is signaled. + */ + void (*fence_wait)(dri_fence *fence); + + /** + * Checks and returns whether the given fence is signaled. 
+ */ +}; + +/* extern void driBOData(struct _DriBufferObject *r_buf, unsigned size, const void *data, unsigned flags); extern void driBOSubData(struct _DriBufferObject *buf, @@ -73,27 +173,40 @@ extern void driBOSubData(struct _DriBufferObject *buf, extern void driBOGetSubData(struct _DriBufferObject *buf, unsigned long offset, unsigned long size, void *data); -extern void driGenBuffers(struct _DriBufferPool *pool, - const char *name, - unsigned n, - struct _DriBufferObject *buffers[], - unsigned alignment, unsigned flags, unsigned hint); -extern void driDeleteBuffers(unsigned n, struct _DriBufferObject *buffers[]); -extern void driInitBufMgr(int fd); -extern void driBOCreateList(int target, drmBOList * list); -extern void driBOResetList(drmBOList * list); -extern void driBOAddListItem(drmBOList * list, struct _DriBufferObject *buf, - unsigned flags, unsigned mask); -extern void driBOValidateList(int fd, drmBOList * list); - -extern void driBOFence(struct _DriBufferObject *buf, - struct _DriFenceObject *fence); - -extern void driPoolTakeDown(struct _DriBufferPool *pool); -extern void driBOSetStatic(struct _DriBufferObject *buf, - unsigned long offset, - unsigned long size, void *virtual, unsigned flags); -extern void driBOWaitIdle(struct _DriBufferObject *buf, int lazy); -extern void driPoolTakeDown(struct _DriBufferPool *pool); +*/ + +dri_bo *dri_bo_alloc(dri_bufmgr *bufmgr, const char *name, unsigned long size, + unsigned int alignment, unsigned int flags, + unsigned int hint); +dri_bo *dri_bo_alloc_static(dri_bufmgr *bufmgr, const char *name, + unsigned long offset, unsigned long size, + void *virtual, unsigned int flags, + unsigned int hint); +void dri_bo_reference(dri_bo *bo); +void dri_bo_unreference(dri_bo *bo); +int dri_bo_map(dri_bo *buf, GLboolean write_enable); +int dri_bo_unmap(dri_bo *buf); +int dri_bo_validate(dri_bo *buf, unsigned int flags); +dri_fence *dri_fence_validated(dri_bufmgr *bufmgr, const char *name, + GLboolean flushed); +void dri_fence_reference(dri_fence *fence); +void dri_fence_unreference(dri_fence *fence); + +void dri_bo_subdata(dri_bo *bo, unsigned long offset, + unsigned long size, const void *data); +void dri_bo_get_subdata(dri_bo *bo, unsigned long offset, + unsigned long size, void *data); + +dri_bufmgr *dri_bufmgr_ttm_init(int fd, unsigned int fence_type, + unsigned int fence_type_flush); + +void dri_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr); +dri_bufmgr *dri_bufmgr_fake_init(unsigned long low_offset, void *low_virtual, + unsigned long size, + unsigned int (*fence_emit)(void *private), + int (*fence_wait)(void *private, + unsigned int cookie), + void *driver_priv); +void dri_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr); #endif diff --git a/src/mesa/drivers/dri/common/dri_bufmgr_fake.c b/src/mesa/drivers/dri/common/dri_bufmgr_fake.c new file mode 100644 index 0000000000..2519808851 --- /dev/null +++ b/src/mesa/drivers/dri/common/dri_bufmgr_fake.c @@ -0,0 +1,871 @@ +/************************************************************************** + * + * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +/* Originally a fake version of the buffer manager so that we can + * prototype the changes in a driver fairly quickly, has been fleshed + * out to a fully functional interim solution. + * + * Basically wraps the old style memory management in the new + * programming interface, but is more expressive and avoids many of + * the bugs in the old texture manager. + */ +#include "mtypes.h" +#include "dri_bufmgr.h" +#include "drm.h" + +#include "simple_list.h" +#include "mm.h" +#include "imports.h" + +#define DBG(...) + +/* Internal flags: + */ +#define BM_NO_BACKING_STORE DRM_BO_FLAG_MEM_PRIV0 +#define BM_NO_FENCE_SUBDATA DRM_BO_FLAG_MEM_PRIV1 + +/* Wrapper around mm.c's mem_block, which understands that you must + * wait for fences to expire before memory can be freed. This is + * specific to our use of memcpy for uploads - an upload that was + * processed through the command queue wouldn't need to care about + * fences. + */ +struct block { + struct block *next, *prev; + struct mem_block *mem; /* BM_MEM_AGP */ + + unsigned referenced:1; + unsigned on_hardware:1; + unsigned fenced:1; + + unsigned fence; /* BM_MEM_AGP, Split to read_fence, write_fence */ + + dri_bo *bo; + void *virtual; +}; + +typedef struct _bufmgr_fake { + dri_bufmgr bufmgr; + + _glthread_Mutex mutex; /**< for thread safety */ + + unsigned long low_offset; + unsigned long size; + void *virtual; + + struct mem_block *heap; + struct block lru; /* only allocated, non-fence-pending blocks here */ + + unsigned buf_nr; /* for generating ids */ + + struct block referenced; /* after bmBufferOffset */ + struct block on_hardware; /* after bmValidateBuffers */ + struct block fenced; /* after bmFenceBuffers (mi_flush, emit irq, write dword) */ + /* then to bufmgr->lru or free() */ + + unsigned int last_fence; + + unsigned fail:1; + unsigned need_fence:1; + GLboolean thrashing; + + /** + * Driver callback to emit a fence, returning the cookie. + * + * Currently, this also requires that a write flush be emitted before + * emitting the fence, but this should change. + */ + unsigned int (*fence_emit)(void *private); + /** Driver callback to wait for a fence cookie to have passed. 
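+ *
+ * The wait mechanism is driver-specific (typically polling or sleeping
+ * until the hardware's fence counter reaches fence_cookie).  Should
+ * return 0 on success; a non-zero return is treated as a fatal error by
+ * the buffer manager.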
*/ + int (*fence_wait)(void *private, unsigned int fence_cookie); + /** Driver-supplied argument to driver callbacks */ + void *driver_priv; +} dri_bufmgr_fake; + +typedef struct _dri_bo_fake { + dri_bo bo; + + unsigned id; /* debug only */ + const char *name; + + unsigned dirty:1; + unsigned int refcount; + /* Flags may consist of any of the DRM_BO flags, plus + * DRM_BO_NO_BACKING_STORE and BM_NO_FENCE_SUBDATA, which are the first two + * driver private flags. + */ + unsigned int flags; + unsigned int alignment; + GLboolean is_static; + + struct block *block; + void *backing_store; + void (*invalidate_cb)(dri_bufmgr *bufmgr, void * ); + void *invalidate_ptr; +} dri_bo_fake; + +typedef struct _dri_fence_fake { + dri_fence fence; + + const char *name; + unsigned int refcount; + unsigned int fence_cookie; + GLboolean flushed; +} dri_fence_fake; + +static int clear_fenced(dri_bufmgr_fake *bufmgr_fake, + unsigned int fence_cookie); + +#define MAXFENCE 0x7fffffff + +static GLboolean FENCE_LTE( unsigned a, unsigned b ) +{ + if (a == b) + return GL_TRUE; + + if (a < b && b - a < (1<<24)) + return GL_TRUE; + + if (a > b && MAXFENCE - a + b < (1<<24)) + return GL_TRUE; + + return GL_FALSE; +} + +static unsigned int +_fence_emit_internal(dri_bufmgr_fake *bufmgr_fake) +{ + bufmgr_fake->last_fence = bufmgr_fake->fence_emit(bufmgr_fake->driver_priv); + return bufmgr_fake->last_fence; +} + +static void +_fence_wait_internal(dri_bufmgr_fake *bufmgr_fake, unsigned int cookie) +{ + int ret; + + ret = bufmgr_fake->fence_wait(bufmgr_fake->driver_priv, cookie); + if (ret != 0) { + _mesa_printf("%s:%d: Error %d waiting for fence.\n", + __FILE__, __LINE__); + abort(); + } + clear_fenced(bufmgr_fake, cookie); +} + +static GLboolean +_fence_test(dri_bufmgr_fake *bufmgr_fake, unsigned fence) +{ + /* Slight problem with wrap-around: + */ + return fence == 0 || FENCE_LTE(fence, bufmgr_fake->last_fence); +} + +/** + * Allocate a memory manager block for the buffer. + */ +static GLboolean +alloc_block(dri_bo *bo) +{ + dri_bo_fake *bo_fake = (dri_bo_fake *)bo; + dri_bufmgr_fake *bufmgr_fake= (dri_bufmgr_fake *)bo->bufmgr; + struct block *block = (struct block *)calloc(sizeof *block, 1); + unsigned int align_log2 = ffs(bo_fake->alignment); + GLuint sz; + + if (!block) + return GL_FALSE; + + sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1); + + block->mem = mmAllocMem(bufmgr_fake->heap, sz, align_log2, 0); + if (!block->mem) { + free(block); + return GL_FALSE; + } + + make_empty_list(block); + + /* Insert at head or at tail??? 
+ */ + insert_at_tail(&bufmgr_fake->lru, block); + + block->virtual = bufmgr_fake->virtual + block->mem->ofs; + block->bo = bo; + + bo_fake->block = block; + + return GL_TRUE; +} + +/* Release the card storage associated with buf: + */ +static void free_block(dri_bufmgr_fake *bufmgr_fake, struct block *block) +{ + DBG("free block %p\n", block); + + if (!block) + return; + + if (block->referenced) { + _mesa_printf("tried to free block on referenced list\n"); + assert(0); + } + else if (block->on_hardware) { + block->bo = NULL; + } + else if (block->fenced) { + block->bo = NULL; + } + else { + DBG(" - free immediately\n"); + remove_from_list(block); + + mmFreeMem(block->mem); + free(block); + } +} + +static void +alloc_backing_store(dri_bo *bo) +{ + dri_bo_fake *bo_fake = (dri_bo_fake *)bo; + assert(!bo_fake->backing_store); + assert(!(bo_fake->flags & (DRM_BO_FLAG_NO_EVICT|BM_NO_BACKING_STORE))); + + bo_fake->backing_store = ALIGN_MALLOC(bo->size, 64); +} + +static void +free_backing_store(dri_bo *bo) +{ + dri_bo_fake *bo_fake = (dri_bo_fake *)bo; + assert(!(bo_fake->flags & (DRM_BO_FLAG_NO_EVICT|BM_NO_BACKING_STORE))); + + if (bo_fake->backing_store) { + ALIGN_FREE(bo_fake->backing_store); + bo_fake->backing_store = NULL; + } +} + +static void +set_dirty(dri_bo *bo) +{ + dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; + dri_bo_fake *bo_fake = (dri_bo_fake *)bo; + + if (bo_fake->flags & BM_NO_BACKING_STORE) + bo_fake->invalidate_cb(&bufmgr_fake->bufmgr, bo_fake->invalidate_ptr); + + assert(!(bo_fake->flags & DRM_BO_FLAG_NO_EVICT)); + + DBG("set_dirty - buf %d\n", bo_fake->id); + bo_fake->dirty = 1; +} + +static GLboolean +evict_lru(dri_bufmgr_fake *bufmgr_fake, GLuint max_fence) +{ + struct block *block, *tmp; + + DBG("%s\n", __FUNCTION__); + + foreach_s(block, tmp, &bufmgr_fake->lru) { + dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo; + + if (bo_fake != NULL && (bo_fake->flags & BM_NO_FENCE_SUBDATA)) + continue; + + if (block->fence && max_fence && !FENCE_LTE(block->fence, max_fence)) + return 0; + + set_dirty(&bo_fake->bo); + bo_fake->block = NULL; + + free_block(bufmgr_fake, block); + return GL_TRUE; + } + + return GL_FALSE; +} + +#define foreach_s_rev(ptr, t, list) \ + for(ptr=(list)->prev,t=(ptr)->prev; list != ptr; ptr=t, t=(t)->prev) + +static GLboolean +evict_mru(dri_bufmgr_fake *bufmgr_fake) +{ + struct block *block, *tmp; + + DBG("%s\n", __FUNCTION__); + + foreach_s_rev(block, tmp, &bufmgr_fake->lru) { + dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo; + + if (bo_fake && (bo_fake->flags & BM_NO_FENCE_SUBDATA)) + continue; + + set_dirty(&bo_fake->bo); + bo_fake->block = NULL; + + free_block(bufmgr_fake, block); + return GL_TRUE; + } + + return GL_FALSE; +} + +/** + * Removes all objects from the fenced list older than the given fence. 
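+ *
+ * Returns non-zero if any fenced blocks were released, either freed
+ * outright (no buffer object attached) or returned to the LRU list.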
+ */ +static int clear_fenced(dri_bufmgr_fake *bufmgr_fake, + unsigned int fence_cookie) +{ + struct block *block, *tmp; + int ret = 0; + + foreach_s(block, tmp, &bufmgr_fake->fenced) { + assert(block->fenced); + + if (_fence_test(bufmgr_fake, block->fence)) { + + block->fenced = 0; + + if (!block->bo) { + DBG("delayed free: offset %x sz %x\n", + block->mem->ofs, block->mem->size); + remove_from_list(block); + mmFreeMem(block->mem); + free(block); + } + else { + DBG("return to lru: offset %x sz %x\n", + block->mem->ofs, block->mem->size); + move_to_tail(&bufmgr_fake->lru, block); + } + + ret = 1; + } + else { + /* Blocks are ordered by fence, so if one fails, all from + * here will fail also: + */ + break; + } + } + + /* Also check the referenced list: + */ + foreach_s(block, tmp, &bufmgr_fake->referenced ) { + if (block->fenced && _fence_test(bufmgr_fake, block->fence)) { + block->fenced = 0; + } + } + + DBG("%s: %d\n", __FUNCTION__, ret); + return ret; +} + +static void fence_blocks(dri_bufmgr_fake *bufmgr_fake, unsigned fence) +{ + struct block *block, *tmp; + + foreach_s (block, tmp, &bufmgr_fake->on_hardware) { + DBG("Fence block %p (sz 0x%x buf %p) with fence %d\n", block, + block->mem->size, block->bo, fence); + block->fence = fence; + + block->on_hardware = 0; + block->fenced = 1; + + /* Move to tail of pending list here + */ + move_to_tail(&bufmgr_fake->fenced, block); + } + + /* Also check the referenced list: + */ + foreach_s (block, tmp, &bufmgr_fake->referenced) { + if (block->on_hardware) { + DBG("Fence block %p (sz 0x%x buf %p) with fence %d\n", block, + block->mem->size, block->bo, fence); + + block->fence = fence; + block->on_hardware = 0; + block->fenced = 1; + } + } + + assert(is_empty_list(&bufmgr_fake->on_hardware)); +} + +static GLboolean evict_and_alloc_block(dri_bo *bo) +{ + dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; + dri_bo_fake *bo_fake = (dri_bo_fake *)bo; + + assert(bo_fake->block == NULL); + + /* Search for already free memory: + */ + if (alloc_block(bo)) + return GL_TRUE; + + /* If we're not thrashing, allow lru eviction to dig deeper into + * recently used textures. We'll probably be thrashing soon: + */ + if (!bufmgr_fake->thrashing) { + while (evict_lru(bufmgr_fake, 0)) + if (alloc_block(bo)) + return GL_TRUE; + } + + /* Keep thrashing counter alive? + */ + if (bufmgr_fake->thrashing) + bufmgr_fake->thrashing = 20; + + /* Wait on any already pending fences - here we are waiting for any + * freed memory that has been submitted to hardware and fenced to + * become available: + */ + while (!is_empty_list(&bufmgr_fake->fenced)) { + GLuint fence = bufmgr_fake->fenced.next->fence; + _fence_wait_internal(bufmgr_fake, fence); + + if (alloc_block(bo)) + return GL_TRUE; + } + + if (!is_empty_list(&bufmgr_fake->on_hardware)) { + while (!is_empty_list(&bufmgr_fake->fenced)) { + GLuint fence = bufmgr_fake->fenced.next->fence; + _fence_wait_internal(bufmgr_fake, fence); + } + + if (!bufmgr_fake->thrashing) { + DBG("thrashing\n"); + } + bufmgr_fake->thrashing = 20; + + if (alloc_block(bo)) + return GL_TRUE; + } + + while (evict_mru(bufmgr_fake)) + if (alloc_block(bo)) + return GL_TRUE; + + DBG("%s 0x%x bytes failed\n", __FUNCTION__, bo->size); + + assert(is_empty_list(&bufmgr_fake->on_hardware)); + assert(is_empty_list(&bufmgr_fake->fenced)); + + return GL_FALSE; +} + +/*********************************************************************** + * Public functions + */ + +/** + * Wait for hardware idle by emitting a fence and waiting for it. 
+ */ +static void +dri_bufmgr_fake_wait_idle(dri_bufmgr_fake *bufmgr_fake) +{ + unsigned int cookie; + + cookie = bufmgr_fake->fence_emit(bufmgr_fake->driver_priv); + _fence_wait_internal(bufmgr_fake->driver_priv, cookie); +} + +/* Specifically ignore texture memory sharing. + * -- just evict everything + * -- and wait for idle + */ +void +dri_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr) +{ + dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr; + + _glthread_LOCK_MUTEX(bufmgr_fake->mutex); + { + struct block *block, *tmp; + + assert(is_empty_list(&bufmgr_fake->referenced)); + + bufmgr_fake->need_fence = 1; + bufmgr_fake->fail = 0; + + /* Wait for hardware idle. We don't know where acceleration has been + * happening, so we'll need to wait anyway before letting anything get + * put on the card again. + */ + dri_bufmgr_fake_wait_idle(bufmgr_fake); + + assert(is_empty_list(&bufmgr_fake->fenced)); + assert(is_empty_list(&bufmgr_fake->on_hardware)); + + foreach_s(block, tmp, &bufmgr_fake->lru) { + assert(_fence_test(bufmgr_fake, block->fence)); + set_dirty(block->bo); + } + } + _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex); +} + +static dri_bo * +dri_fake_alloc(dri_bufmgr *bufmgr, const char *name, + unsigned long size, unsigned int alignment, unsigned int flags, + unsigned int hint) +{ + dri_bufmgr_fake *bufmgr_fake; + dri_bo_fake *bo_fake; + + bufmgr_fake = (dri_bufmgr_fake *)bufmgr; + + bo_fake = calloc(1, sizeof(*bo_fake)); + if (!bo_fake) + return NULL; + + bo_fake->bo.size = size; + bo_fake->bo.offset = -1; + bo_fake->bo.virtual = NULL; + bo_fake->bo.bufmgr = bufmgr; + bo_fake->refcount = 1; + + /* Alignment must be a power of two */ + assert((alignment & (alignment - 1)) == 0); + if (alignment == 0) + alignment = 1; + bo_fake->alignment = alignment; + bo_fake->id = ++bufmgr_fake->buf_nr; + bo_fake->name = name; + bo_fake->flags = flags; + bo_fake->is_static = GL_FALSE; + + return &bo_fake->bo; +} + +static dri_bo * +dri_fake_alloc_static(dri_bufmgr *bufmgr, const char *name, + unsigned long offset, unsigned long size, void *virtual, + unsigned int flags, unsigned int hint) +{ + dri_bufmgr_fake *bufmgr_fake; + dri_bo_fake *bo_fake; + + bufmgr_fake = (dri_bufmgr_fake *)bufmgr; + + bo_fake = calloc(1, sizeof(*bo_fake)); + if (!bo_fake) + return NULL; + + bo_fake->bo.size = size; + bo_fake->bo.offset = offset; + bo_fake->bo.virtual = virtual; + bo_fake->bo.bufmgr = bufmgr; + bo_fake->refcount = 1; + bo_fake->name = name; + bo_fake->flags = flags; + bo_fake->is_static = GL_TRUE; + + return &bo_fake->bo; +} + +static void +dri_fake_bo_reference(dri_bo *bo) +{ + dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; + dri_bo_fake *bo_fake = (dri_bo_fake *)bo; + + _glthread_LOCK_MUTEX(bufmgr_fake->mutex); + bo_fake->refcount++; + _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex); +} + +static void +dri_fake_bo_unreference(dri_bo *bo) +{ + dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; + dri_bo_fake *bo_fake = (dri_bo_fake *)bo; + + if (!bo) + return; + + _glthread_LOCK_MUTEX(bufmgr_fake->mutex); + if (--bo_fake->refcount == 0) { + _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex); + /* No remaining references, so free it */ + if (bo_fake->block) + free_block(bufmgr_fake, bo_fake->block); + free_backing_store(bo); + free(bo); + return; + } + _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex); +} + +/** + * Map a buffer into bo->virtual, allocating either card memory space (If + * BM_NO_BACKING_STORE or DRM_BO_FLAG_NO_EVICT) or backing store, as necessary. 
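+ *
+ * Returns 0 on success.  If card memory is needed but cannot be allocated,
+ * the map fails, bufmgr_fake->fail is set, and a non-zero value is returned.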
+ */ +static int +dri_fake_bo_map(dri_bo *bo, GLboolean write_enable) +{ + dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; + dri_bo_fake *bo_fake = (dri_bo_fake *)bo; + + /* Static buffers are always mapped. */ + if (bo_fake->is_static) + return 0; + + _glthread_LOCK_MUTEX(bufmgr_fake->mutex); + { + DBG("bmMapBuffer %d\n", bo_fake->id); + + if (bo->virtual != NULL) { + _mesa_printf("%s: already mapped\n", __FUNCTION__); + abort(); + } + else if (bo_fake->flags & (BM_NO_BACKING_STORE|DRM_BO_FLAG_NO_EVICT)) { + + if (!bo_fake->block && !evict_and_alloc_block(bo)) { + DBG("%s: alloc failed\n", __FUNCTION__); + bufmgr_fake->fail = 1; + _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex); + return 1; + } + else { + assert(bo_fake->block); + bo_fake->dirty = 0; + + if (!(bo_fake->flags & BM_NO_FENCE_SUBDATA)) + dri_bufmgr_fake_wait_idle(bufmgr_fake); + + bo->virtual = bo_fake->block->virtual; + } + } + else { + if (write_enable) + set_dirty(bo); + + if (bo_fake->backing_store == 0) + alloc_backing_store(bo); + + bo->virtual = bo_fake->backing_store; + } + } + _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex); + return 0; +} + +static int +dri_fake_bo_unmap(dri_bo *bo) +{ + dri_bufmgr_fake *bufmgr_fake; + dri_bo_fake *bo_fake = (dri_bo_fake *)bo; + + /* Static buffers are always mapped. */ + if (bo_fake->is_static) + return 0; + + if (bo == NULL) + return 0; + + bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; + + bo->virtual = NULL; + + return 0; +} + +static int +dri_fake_bo_validate(dri_bo *bo, unsigned int flags) +{ + dri_bufmgr_fake *bufmgr_fake; + dri_bo_fake *bo_fake = (dri_bo_fake *)bo; + + /* XXX: Sanity-check whether we've already validated this one under + * different flags. See drmAddValidateItem(). + */ + + bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; + + _glthread_LOCK_MUTEX(bufmgr_fake->mutex); + { + /* Allocate the card memory */ + if (!bo_fake->block && !evict_and_alloc_block(bo)) { + bufmgr_fake->fail = 1; + _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex); + return -1; + } + + assert(bo_fake->block); + assert(bo_fake->block->bo == &bo_fake->bo); + + DBG("Add buf %d (block %p, dirty %d) to referenced list\n", + bo_fake->id, bo_fake->block, bo_fake->dirty); + + move_to_tail(&bufmgr_fake->referenced, bo_fake->block); + bo_fake->block->referenced = 1; + + bo->offset = bo_fake->block->mem->ofs; + + /* Upload the buffer contents if necessary */ + if (bo_fake->dirty) { + DBG("Upload dirty buf %d (%s) sz %d offset 0x%x\n", bo_fake->id, + bo_fake->name, bo->size, block->mem->ofs); + + assert(!(bo_fake->flags & + (BM_NO_BACKING_STORE|DRM_BO_FLAG_NO_EVICT))); + + /* Actually, should be able to just wait for a fence on the memory, + * which we would be tracking when we free it. Waiting for idle is + * a sufficiently large hammer for now. 
+ */ + dri_bufmgr_fake_wait_idle(bufmgr_fake); + + memcpy(bo_fake->block->virtual, bo_fake->backing_store, bo->size); + + bo_fake->block->referenced = 0; + bo_fake->block->on_hardware = 1; + move_to_tail(&bufmgr_fake->on_hardware, bo_fake->block); + } + + bufmgr_fake->need_fence = 1; + } + _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex); + + return 0; +} + +static dri_fence * +dri_fake_fence_validated(dri_bufmgr *bufmgr, const char *name, + GLboolean flushed) +{ + dri_fence_fake *fence_fake; + dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr; + unsigned int cookie; + + fence_fake = malloc(sizeof(*fence_fake)); + if (!fence_fake) + return NULL; + + fence_fake->refcount = 1; + fence_fake->name = name; + fence_fake->flushed = flushed; + fence_fake->fence.bufmgr = bufmgr; + + _glthread_LOCK_MUTEX(bufmgr_fake->mutex); + cookie = _fence_emit_internal(bufmgr_fake); + fence_fake->fence_cookie = cookie; + fence_blocks(bufmgr_fake, cookie); + _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex); + + return &fence_fake->fence; +} + +static void +dri_fake_fence_reference(dri_fence *fence) +{ + dri_fence_fake *fence_fake = (dri_fence_fake *)fence; + dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)fence->bufmgr; + + _glthread_LOCK_MUTEX(bufmgr_fake->mutex); + ++fence_fake->refcount; + _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex); +} + +static void +dri_fake_fence_unreference(dri_fence *fence) +{ + dri_fence_fake *fence_fake = (dri_fence_fake *)fence; + dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)fence->bufmgr; + + if (!fence) + return; + + _glthread_LOCK_MUTEX(bufmgr_fake->mutex); + if (--fence_fake->refcount == 0) { + _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex); + free(fence); + return; + } + _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex); +} + +static void +dri_fake_fence_wait(dri_fence *fence) +{ + dri_fence_fake *fence_fake = (dri_fence_fake *)fence; + dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)fence->bufmgr; + + _glthread_LOCK_MUTEX(bufmgr_fake->mutex); + _fence_wait_internal(bufmgr_fake->driver_priv, fence_fake->fence_cookie); + _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex); +} + +dri_bufmgr * +dri_bufmgr_fake_init(unsigned long low_offset, void *low_virtual, + unsigned long size, + unsigned int (*fence_emit)(void *private), + int (*fence_wait)(void *private, unsigned int cookie), + void *driver_priv) +{ + dri_bufmgr_fake *bufmgr_fake; + + bufmgr_fake = malloc(sizeof(*bufmgr_fake)); + + /* Initialize allocator */ + make_empty_list(&bufmgr_fake->referenced); + make_empty_list(&bufmgr_fake->fenced); + make_empty_list(&bufmgr_fake->on_hardware); + + bufmgr_fake->low_offset = low_offset; + bufmgr_fake->virtual = low_virtual; + bufmgr_fake->size = size; + bufmgr_fake->heap = mmInit(low_offset, size); + + _glthread_INIT_MUTEX(bufmgr_fake->mutex); + + /* Hook in methods */ + bufmgr_fake->bufmgr.bo_alloc = dri_fake_alloc; + bufmgr_fake->bufmgr.bo_alloc_static = dri_fake_alloc_static; + bufmgr_fake->bufmgr.bo_reference = dri_fake_bo_reference; + bufmgr_fake->bufmgr.bo_unreference = dri_fake_bo_unreference; + bufmgr_fake->bufmgr.bo_map = dri_fake_bo_map; + bufmgr_fake->bufmgr.bo_unmap = dri_fake_bo_unmap; + bufmgr_fake->bufmgr.bo_validate = dri_fake_bo_validate; + bufmgr_fake->bufmgr.fence_validated = dri_fake_fence_validated; + bufmgr_fake->bufmgr.fence_wait = dri_fake_fence_wait; + bufmgr_fake->bufmgr.fence_reference = dri_fake_fence_reference; + bufmgr_fake->bufmgr.fence_unreference = dri_fake_fence_unreference; + + return &bufmgr_fake->bufmgr; +} diff --git a/src/mesa/drivers/dri/common/dri_bufmgr_ttm.c 
b/src/mesa/drivers/dri/common/dri_bufmgr_ttm.c new file mode 100644 index 0000000000..edf2a923ce --- /dev/null +++ b/src/mesa/drivers/dri/common/dri_bufmgr_ttm.c @@ -0,0 +1,356 @@ +/************************************************************************** + * + * Copyright © 2007 Intel Corporation + * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * + **************************************************************************/ +/* + * Authors: Thomas Hellström + * Keith Whitwell + * Eric Anholt + */ + +#include +#include +#include +#include "glthread.h" +#include "errno.h" +#include "mtypes.h" +#include "dri_bufmgr.h" +#include "string.h" +#include "imports.h" + +typedef struct _dri_bufmgr_ttm { + dri_bufmgr bufmgr; + + int fd; + _glthread_Mutex mutex; + unsigned int fence_type; + unsigned int fence_type_flush; +} dri_bufmgr_ttm; + +typedef struct _dri_bo_ttm { + dri_bo bo; + + int refcount; /* Protected by bufmgr->mutex */ + drmBO drm_bo; +} dri_bo_ttm; + +typedef struct _dri_fence_ttm +{ + dri_fence fence; + + int refcount; /* Protected by bufmgr->mutex */ + /** Fence type from when the fence was created, used for later waits */ + unsigned int type; + const char *name; + drmFence drm_fence; +} dri_fence_ttm; + +#if 0 +int +driFenceSignaled(DriFenceObject * fence, unsigned type) +{ + int signaled; + int ret; + + if (fence == NULL) + return GL_TRUE; + + _glthread_LOCK_MUTEX(fence->mutex); + ret = drmFenceSignaled(bufmgr_ttm->fd, &fence->fence, type, &signaled); + _glthread_UNLOCK_MUTEX(fence->mutex); + BM_CKFATAL(ret); + return signaled; +} +#endif + +static dri_bo * +dri_ttm_alloc(dri_bufmgr *bufmgr, const char *name, + unsigned long size, unsigned int alignment, unsigned int flags, + unsigned int hint) +{ + dri_bufmgr_ttm *ttm_bufmgr; + dri_bo_ttm *ttm_buf; + unsigned int pageSize = getpagesize(); + int ret; + + ttm_bufmgr = (dri_bufmgr_ttm *)bufmgr; + + ttm_buf = malloc(sizeof(*ttm_buf)); + if (!ttm_buf) + return NULL; + + ret = drmBOCreate(ttm_bufmgr->fd, 0, size, alignment / pageSize, + NULL, drm_bo_type_dc, + flags, hint, &ttm_buf->drm_bo); + if (ret != 0) { + free(ttm_buf); + return NULL; + } + ttm_buf->bo.size = ttm_buf->drm_bo.size; + ttm_buf->bo.offset = ttm_buf->drm_bo.offset; + ttm_buf->bo.virtual = NULL; + ttm_buf->bo.bufmgr = bufmgr; + ttm_buf->refcount = 1; + + return &ttm_buf->bo; +} + +static dri_bo 
* +dri_ttm_alloc_static(dri_bufmgr *bufmgr, const char *name, + unsigned long offset, unsigned long size, void *virtual, + unsigned int flags, unsigned int hint) +{ + dri_bufmgr_ttm *ttm_bufmgr; + dri_bo_ttm *ttm_buf; + int ret; + + ttm_bufmgr = (dri_bufmgr_ttm *)bufmgr; + + ttm_buf = malloc(sizeof(*ttm_buf)); + if (!ttm_buf) + return NULL; + + ret = drmBOCreate(ttm_bufmgr->fd, offset, size, 0, + NULL, drm_bo_type_fake, + flags, 0, &ttm_buf->drm_bo); + if (ret != 0) { + free(ttm_buf); + return NULL; + } + ttm_buf->bo.size = ttm_buf->drm_bo.size; + ttm_buf->bo.offset = ttm_buf->drm_bo.offset; + ttm_buf->bo.virtual = virtual; + ttm_buf->bo.bufmgr = bufmgr; + ttm_buf->refcount = 1; + + return &ttm_buf->bo; +} + +static void +dri_ttm_bo_reference(dri_bo *buf) +{ + dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr; + dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf; + + _glthread_LOCK_MUTEX(bufmgr_ttm->mutex); + ttm_buf->refcount++; + _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex); +} + +static void +dri_ttm_bo_unreference(dri_bo *buf) +{ + dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr; + dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf; + + if (!buf) + return; + + _glthread_LOCK_MUTEX(bufmgr_ttm->mutex); + if (--ttm_buf->refcount == 0) { + drmBOUnReference(bufmgr_ttm->fd, &ttm_buf->drm_bo); + _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex); + free(buf); + return; + } + _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex); +} + +static int +dri_ttm_bo_map(dri_bo *buf, GLboolean write_enable) +{ + dri_bufmgr_ttm *bufmgr_ttm; + dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf; + unsigned int flags; + + bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr; + + flags = DRM_BO_FLAG_READ; + if (write_enable) + flags |= DRM_BO_FLAG_WRITE; + + return drmBOMap(bufmgr_ttm->fd, &ttm_buf->drm_bo, flags, 0, &buf->virtual); +} + +static int +dri_ttm_bo_unmap(dri_bo *buf) +{ + dri_bufmgr_ttm *bufmgr_ttm; + dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf; + + if (buf == NULL) + return 0; + + bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr; + + buf->virtual = NULL; + + return drmBOUnmap(bufmgr_ttm->fd, &ttm_buf->drm_bo); +} + +static int +dri_ttm_validate(dri_bo *buf, unsigned int flags) +{ + dri_bufmgr_ttm *bufmgr_ttm; + dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf; + unsigned int mask; + int err; + + /* XXX: Sanity-check whether we've already validated this one under + * different flags. See drmAddValidateItem(). + */ + + bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr; + + /* Calculate the appropriate mask to pass to the DRM. There appears to be + * be a direct relationship to flags, so it's unnecessary to have it passed + * in as an argument. 
+ */ + mask = DRM_BO_MASK_MEM; + mask |= flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE); + + err = drmBOValidate(bufmgr_ttm->fd, &ttm_buf->drm_bo, flags, mask, 0); + + if (err == 0) { + /* XXX: add to fence list for sanity checking */ + } + + return err; +} + +static dri_fence * +dri_ttm_fence_validated(dri_bufmgr *bufmgr, const char *name, + GLboolean flushed) +{ + dri_fence_ttm *fence_ttm = malloc(sizeof(*fence_ttm)); + dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)bufmgr; + int ret; + unsigned int type; + + if (!fence_ttm) + return NULL; + + if (flushed) + type = bufmgr_ttm->fence_type_flush; + else + type = bufmgr_ttm->fence_type; + + fence_ttm->refcount = 1; + fence_ttm->name = name; + fence_ttm->type = type; + fence_ttm->fence.bufmgr = bufmgr; + ret = drmFenceBuffers(bufmgr_ttm->fd, type, &fence_ttm->drm_fence); + if (ret) { + free(fence_ttm); + return NULL; + } + return &fence_ttm->fence; +} + +static void +dri_ttm_fence_reference(dri_fence *fence) +{ + dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence; + dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr; + + _glthread_LOCK_MUTEX(bufmgr_ttm->mutex); + ++fence_ttm->refcount; + _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex); +} + +static void +dri_ttm_fence_unreference(dri_fence *fence) +{ + dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence; + dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr; + + if (!fence) + return; + + _glthread_LOCK_MUTEX(bufmgr_ttm->mutex); + if (--fence_ttm->refcount == 0) { + drmFenceDestroy(bufmgr_ttm->fd, &fence_ttm->drm_fence); + _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex); + free(fence); + return; + } + _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex); +} + +static void +dri_ttm_fence_wait(dri_fence *fence) +{ + dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence; + dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr; + int ret; + + _glthread_LOCK_MUTEX(bufmgr_ttm->mutex); + ret = drmFenceWait(bufmgr_ttm->fd, 0, &fence_ttm->drm_fence, + fence_ttm->type); + _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex); + if (ret != 0) { + _mesa_printf("%s:%d: Error %d waiting for fence %s.\n", + __FILE__, __LINE__, ret, fence_ttm->name); + abort(); + } +} + +/** + * Initializes the TTM buffer manager, which uses the kernel to allocate, map, + * and manage buffer objects. + * + * \param fd File descriptor of the opened DRM device. + * \param fence_type Driver-specific fence type used for fences with no flush. + * \param fence_type_flush Driver-specific fence type used for fences with a + * flush. 
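+ *
+ * For example (hypothetical, driver-specific values), an i915 driver might
+ * call this during screen initialization as:
+ *   bufmgr = dri_bufmgr_ttm_init(sPriv->fd, DRM_FENCE_TYPE_EXE,
+ *                                DRM_I915_FENCE_TYPE_RW);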
+ */ +dri_bufmgr * +dri_bufmgr_ttm_init(int fd, unsigned int fence_type, + unsigned int fence_type_flush) +{ + dri_bufmgr_ttm *bufmgr_ttm; + + bufmgr_ttm = malloc(sizeof(*bufmgr_ttm)); + bufmgr_ttm->fd = fd; + bufmgr_ttm->fence_type = fence_type; + bufmgr_ttm->fence_type_flush = fence_type_flush; + _glthread_INIT_MUTEX(bufmgr_ttm->mutex); + + bufmgr_ttm->bufmgr.bo_alloc = dri_ttm_alloc; + bufmgr_ttm->bufmgr.bo_alloc_static = dri_ttm_alloc_static; + bufmgr_ttm->bufmgr.bo_reference = dri_ttm_bo_reference; + bufmgr_ttm->bufmgr.bo_unreference = dri_ttm_bo_unreference; + bufmgr_ttm->bufmgr.bo_map = dri_ttm_bo_map; + bufmgr_ttm->bufmgr.bo_unmap = dri_ttm_bo_unmap; + bufmgr_ttm->bufmgr.bo_validate = dri_ttm_validate; + bufmgr_ttm->bufmgr.fence_validated = dri_ttm_fence_validated; + bufmgr_ttm->bufmgr.fence_reference = dri_ttm_fence_reference; + bufmgr_ttm->bufmgr.fence_unreference = dri_ttm_fence_unreference; + bufmgr_ttm->bufmgr.fence_wait = dri_ttm_fence_wait; + + return &bufmgr_ttm->bufmgr; +} diff --git a/src/mesa/drivers/dri/common/dri_bufpool.h b/src/mesa/drivers/dri/common/dri_bufpool.h deleted file mode 100644 index c6fb2c3ce0..0000000000 --- a/src/mesa/drivers/dri/common/dri_bufpool.h +++ /dev/null @@ -1,86 +0,0 @@ -/************************************************************************** - * - * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. 
- * - * - **************************************************************************/ -/* - * Authors: Thomas Hellström - */ - -#ifndef _DRI_BUFPOOL_H_ -#define _DRI_BUFPOOL_H_ - -#include -struct _DriFenceObject; - -typedef struct _DriBufferPool -{ - int fd; - int (*map) (struct _DriBufferPool * pool, void *private, - unsigned flags, int hint, void **virtual); - int (*unmap) (struct _DriBufferPool * pool, void *private); - int (*destroy) (struct _DriBufferPool * pool, void *private); - unsigned long (*offset) (struct _DriBufferPool * pool, void *private); - unsigned (*flags) (struct _DriBufferPool * pool, void *private); - unsigned long (*size) (struct _DriBufferPool * pool, void *private); - void *(*create) (struct _DriBufferPool * pool, unsigned long size, - unsigned flags, unsigned hint, unsigned alignment); - int (*fence) (struct _DriBufferPool * pool, void *private, - struct _DriFenceObject * fence); - drmBO *(*kernel) (struct _DriBufferPool * pool, void *private); - int (*validate) (struct _DriBufferPool * pool, void *private); - void *(*setstatic) (struct _DriBufferPool * pool, unsigned long offset, - unsigned long size, void *virtual, unsigned flags); - int (*waitIdle) (struct _DriBufferPool *pool, void *private, - int lazy); - void (*takeDown) (struct _DriBufferPool * pool); - void *data; -} DriBufferPool; - -extern void bmError(int val, const char *file, const char *function, - int line); -#define BM_CKFATAL(val) \ - do{ \ - int tstVal = (val); \ - if (tstVal) \ - bmError(tstVal, __FILE__, __FUNCTION__, __LINE__); \ - } while(0); - - - - - -/* - * Builtin pools. - */ - -/* - * Kernel buffer objects. Size in multiples of page size. Page size aligned. - */ - -extern struct _DriBufferPool *driDRMPoolInit(int fd); -extern struct _DriBufferPool *driDRMStaticPoolInit(int fd); - -#endif diff --git a/src/mesa/drivers/dri/common/dri_drmpool.c b/src/mesa/drivers/dri/common/dri_drmpool.c deleted file mode 100644 index b5b324be50..0000000000 --- a/src/mesa/drivers/dri/common/dri_drmpool.c +++ /dev/null @@ -1,227 +0,0 @@ -/************************************************************************** - * - * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. 
- * - * - **************************************************************************/ -/* - * Authors: Thomas Hellström - */ - -#include -#include -#include -#include "dri_bufpool.h" - -/* - * Buffer pool implementation using DRM buffer objects as DRI buffer objects. - */ - -static void * -pool_create(struct _DriBufferPool *pool, - unsigned long size, unsigned flags, unsigned hint, - unsigned alignment) -{ - drmBO *buf = (drmBO *) malloc(sizeof(*buf)); - int ret; - unsigned pageSize = getpagesize(); - - if (!buf) - return NULL; - - if ((alignment > pageSize) && (alignment % pageSize)) { - return NULL; - } - - ret = drmBOCreate(pool->fd, 0, size, alignment / pageSize, - NULL, drm_bo_type_dc, - flags, hint, buf); - if (ret) { - free(buf); - return NULL; - } - - return (void *) buf; -} - -static int -pool_destroy(struct _DriBufferPool *pool, void *private) -{ - int ret; - drmBO *buf = (drmBO *) private; - ret = drmBODestroy(pool->fd, buf); - free(buf); - return ret; -} - -static int -pool_map(struct _DriBufferPool *pool, void *private, unsigned flags, - int hint, void **virtual) -{ - drmBO *buf = (drmBO *) private; - - return drmBOMap(pool->fd, buf, flags, hint, virtual); -} - -static int -pool_unmap(struct _DriBufferPool *pool, void *private) -{ - drmBO *buf = (drmBO *) private; - return drmBOUnmap(pool->fd, buf); -} - -static unsigned long -pool_offset(struct _DriBufferPool *pool, void *private) -{ - drmBO *buf = (drmBO *) private; - return buf->offset; -} - -static unsigned -pool_flags(struct _DriBufferPool *pool, void *private) -{ - drmBO *buf = (drmBO *) private; - return buf->flags; -} - - -static unsigned long -pool_size(struct _DriBufferPool *pool, void *private) -{ - drmBO *buf = (drmBO *) private; - return buf->size; -} - -static int -pool_fence(struct _DriBufferPool *pool, void *private, - struct _DriFenceObject *fence) -{ - /* - * Noop. The kernel handles all fencing. - */ - - return 0; -} - -static drmBO * -pool_kernel(struct _DriBufferPool *pool, void *private) -{ - return (drmBO *) private; -} - -static int -pool_waitIdle(struct _DriBufferPool *pool, void *private, int lazy) -{ - drmBO *buf = (drmBO *) private; - return drmBOWaitIdle(pool->fd, buf, (lazy) ? 
DRM_BO_HINT_WAIT_LAZY:0); -} - - -static void -pool_takedown(struct _DriBufferPool *pool) -{ - free(pool); -} - - -struct _DriBufferPool * -driDRMPoolInit(int fd) -{ - struct _DriBufferPool *pool; - - pool = (struct _DriBufferPool *) malloc(sizeof(*pool)); - - if (!pool) - return NULL; - - pool->fd = fd; - pool->map = &pool_map; - pool->unmap = &pool_unmap; - pool->destroy = &pool_destroy; - pool->offset = &pool_offset; - pool->flags = &pool_flags; - pool->size = &pool_size; - pool->create = &pool_create; - pool->fence = &pool_fence; - pool->kernel = &pool_kernel; - pool->validate = NULL; - pool->setstatic = NULL; - pool->waitIdle = &pool_waitIdle; - pool->takeDown = &pool_takedown; - pool->data = NULL; - return pool; -} - - -static void * -pool_setstatic(struct _DriBufferPool *pool, unsigned long offset, - unsigned long size, void *virtual, unsigned flags) -{ - drmBO *buf = (drmBO *) malloc(sizeof(*buf)); - int ret; - - if (!buf) - return NULL; - - ret = drmBOCreate(pool->fd, offset, size, 0, NULL, drm_bo_type_fake, - flags, 0, buf); - - if (ret) { - free(buf); - return NULL; - } - - buf->virtual = virtual; - - return (void *) buf; -} - - -struct _DriBufferPool * -driDRMStaticPoolInit(int fd) -{ - struct _DriBufferPool *pool; - - pool = (struct _DriBufferPool *) malloc(sizeof(*pool)); - - if (!pool) - return NULL; - - pool->fd = fd; - pool->map = &pool_map; - pool->unmap = &pool_unmap; - pool->destroy = &pool_destroy; - pool->offset = &pool_offset; - pool->flags = &pool_flags; - pool->size = &pool_size; - pool->create = NULL; - pool->fence = &pool_fence; - pool->kernel = &pool_kernel; - pool->validate = NULL; - pool->setstatic = &pool_setstatic; - pool->waitIdle = &pool_waitIdle; - pool->takeDown = &pool_takedown; - pool->data = NULL; - return pool; -} -- cgit v1.2.3
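
Usage sketch (for review only, not part of the patch):

The code below shows roughly how a converted driver might drive the TTM
backend through the dri_bufmgr vtable set up in dri_bufmgr_ttm_init().  It is
a minimal sketch, not a definitive implementation: the DRM_BO_FLAG_* memory
flags, the hint value of 0, and the buffer size/alignment are illustrative;
the map/fill/validate step is elided because those signatures are still in
flux in this WIP series; and bo_unreference() is assumed to take the dri_bo
pointer, mirroring dri_bo_reference().

/* Illustrative usage sketch only -- not part of this patch. */
#include "mtypes.h"
#include "dri_bufmgr.h"

static void
example_frame(int fd, unsigned int fence_type, unsigned int fence_type_flush)
{
   dri_bufmgr *bufmgr;
   dri_bo *bo;
   dri_fence *fence;

   /* One buffer manager per screen; the fence type values are
    * driver-specific and are passed through to drmFenceBuffers(). */
   bufmgr = dri_bufmgr_ttm_init(fd, fence_type, fence_type_flush);

   /* Allocate a page-aligned 4096-byte buffer; flags and hint are
    * illustrative values only. */
   bo = dri_bo_alloc(bufmgr, "example vertex buffer", 4096, 4096,
                     DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0);

   /* ... map the buffer, write vertex data, unmap, emit commands that
    * reference it, and validate the buffer list before submission ... */

   /* Fence the buffers validated so far and stall until the GPU is done. */
   fence = bufmgr->fence_validated(bufmgr, "example fence", GL_TRUE);
   if (fence != NULL) {
      bufmgr->fence_wait(fence);
      bufmgr->fence_unreference(fence);
   }

   bufmgr->bo_unreference(bo);
}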