From ccf1910dd4b2a8ccd04ddbdf725b6dd3f8026eee Mon Sep 17 00:00:00 2001 From: Alan Hourihane Date: Sun, 17 Aug 2008 20:17:18 +0100 Subject: consolidate intel directories. we now have src/gallium/winsys/drm/intel/{common,dri,egl} --- .../winsys/drm/intel/common/ws_dri_slabpool.c | 968 +++++++++++++++++++++ 1 file changed, 968 insertions(+) create mode 100644 src/gallium/winsys/drm/intel/common/ws_dri_slabpool.c (limited to 'src/gallium/winsys/drm/intel/common/ws_dri_slabpool.c') diff --git a/src/gallium/winsys/drm/intel/common/ws_dri_slabpool.c b/src/gallium/winsys/drm/intel/common/ws_dri_slabpool.c new file mode 100644 index 0000000000..dfcf6d6b19 --- /dev/null +++ b/src/gallium/winsys/drm/intel/common/ws_dri_slabpool.c @@ -0,0 +1,968 @@ +/************************************************************************** + * + * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +#include <stdint.h> +#include <sys/time.h> +#include <errno.h> +#include <unistd.h> +#include <assert.h> +#include "ws_dri_bufpool.h" +#include "ws_dri_fencemgr.h" +#include "ws_dri_bufmgr.h" +#include "glthread.h" + +#define DRI_SLABPOOL_ALLOC_RETRIES 100 + +struct _DriSlab; + +struct _DriSlabBuffer { + int isSlabBuffer; + drmBO *bo; + struct _DriFenceObject *fence; + struct _DriSlab *parent; + drmMMListHead head; + uint32_t mapCount; + uint32_t start; + uint32_t fenceType; + int unFenced; + _glthread_Cond event; +}; + +struct _DriKernelBO { + int fd; + drmBO bo; + drmMMListHead timeoutHead; + drmMMListHead head; + struct timeval timeFreed; + uint32_t pageAlignment; + void *virtual; +}; + +struct _DriSlab{ + drmMMListHead head; + drmMMListHead freeBuffers; + uint32_t numBuffers; + uint32_t numFree; + struct _DriSlabBuffer *buffers; + struct _DriSlabSizeHeader *header; + struct _DriKernelBO *kbo; +}; + + +struct _DriSlabSizeHeader { + drmMMListHead slabs; + drmMMListHead freeSlabs; + drmMMListHead delayedBuffers; + uint32_t numDelayed; + struct _DriSlabPool *slabPool; + uint32_t bufSize; + _glthread_Mutex mutex; +}; + +struct _DriFreeSlabManager { + struct timeval slabTimeout; + struct timeval checkInterval; + struct timeval nextCheck; + drmMMListHead timeoutList; + drmMMListHead unCached; + drmMMListHead cached; + _glthread_Mutex mutex; +}; + + +struct _DriSlabPool { + + /* + * The data of this structure remains constant after + * initialization and thus needs no mutex protection. + */ + + struct _DriFreeSlabManager *fMan; + uint64_t proposedFlags; + uint64_t validMask; + uint32_t *bucketSizes; + uint32_t numBuckets; + uint32_t pageSize; + int fd; + int pageAlignment; + int maxSlabSize; + int desiredNumBuffers; + struct _DriSlabSizeHeader *headers; +}; + +/* + * FIXME: Perhaps arrange timeout slabs in size buckets for fast + * retrieval?? + */ + + +static inline int +driTimeAfterEq(struct timeval *arg1, struct timeval *arg2) +{ + return ((arg1->tv_sec > arg2->tv_sec) || + ((arg1->tv_sec == arg2->tv_sec) && + (arg1->tv_usec > arg2->tv_usec))); +} + +static inline void +driTimeAdd(struct timeval *arg, struct timeval *add) +{ + unsigned int sec; + + arg->tv_sec += add->tv_sec; + arg->tv_usec += add->tv_usec; + sec = arg->tv_usec / 1000000; + arg->tv_sec += sec; + arg->tv_usec -= sec*1000000; +} + +static void +driFreeKernelBO(struct _DriKernelBO *kbo) +{ + if (!kbo) + return; + + (void) drmBOUnreference(kbo->fd, &kbo->bo); + free(kbo); +} + + +static void +driFreeTimeoutKBOsLocked(struct _DriFreeSlabManager *fMan, + struct timeval *time) +{ + drmMMListHead *list, *next; + struct _DriKernelBO *kbo; + + if (!driTimeAfterEq(time, &fMan->nextCheck)) + return; + + for (list = fMan->timeoutList.next, next = list->next; + list != &fMan->timeoutList; + list = next, next = list->next) { + + kbo = DRMLISTENTRY(struct _DriKernelBO, list, timeoutHead); + + if (!driTimeAfterEq(time, &kbo->timeFreed)) + break; + + DRMLISTDELINIT(&kbo->timeoutHead); + DRMLISTDELINIT(&kbo->head); + driFreeKernelBO(kbo); + } + + fMan->nextCheck = *time; + driTimeAdd(&fMan->nextCheck, &fMan->checkInterval); +} + + +/* + * Add a _DriKernelBO to the free slab manager. + * This means that it is available for reuse, but if it's not + * reused in a while, it will be freed.
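+ * + * A minimal usage sketch (all names are defined in this file; error + * handling omitted): + * + * kbo = driAllocKernelBO(header); - reuse a cached kbo, or create one + * ... use kbo->bo as backing storage for a slab ... + * driSetKernelBOFree(fMan, kbo); - cached for reuse, actually freed + * once slabTimeout has elapsed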
+ */ + +static void +driSetKernelBOFree(struct _DriFreeSlabManager *fMan, + struct _DriKernelBO *kbo) +{ + struct timeval time; + + _glthread_LOCK_MUTEX(fMan->mutex); + gettimeofday(&time, NULL); + driTimeAdd(&time, &fMan->slabTimeout); + + kbo->timeFreed = time; + + if (kbo->bo.flags & DRM_BO_FLAG_CACHED) + DRMLISTADD(&kbo->head, &fMan->cached); + else + DRMLISTADD(&kbo->head, &fMan->unCached); + + DRMLISTADDTAIL(&kbo->timeoutHead, &fMan->timeoutList); + driFreeTimeoutKBOsLocked(fMan, &time); + + _glthread_UNLOCK_MUTEX(fMan->mutex); +} + +/* + * Get a _DriKernelBO for us to use as storage for a slab. + * + */ + +static struct _DriKernelBO * +driAllocKernelBO(struct _DriSlabSizeHeader *header) + +{ + struct _DriSlabPool *slabPool = header->slabPool; + struct _DriFreeSlabManager *fMan = slabPool->fMan; + drmMMListHead *list, *next, *head; + uint32_t size = header->bufSize * slabPool->desiredNumBuffers; + struct _DriKernelBO *kbo; + struct _DriKernelBO *kboTmp; + int ret; + + /* + * FIXME: We should perhaps allow some variation in slabsize in order + * to efficiently reuse slabs. + */ + + size = (size <= slabPool->maxSlabSize) ? size : slabPool->maxSlabSize; + size = (size + slabPool->pageSize - 1) & ~(slabPool->pageSize - 1); + _glthread_LOCK_MUTEX(fMan->mutex); + + kbo = NULL; + + retry: + head = (slabPool->proposedFlags & DRM_BO_FLAG_CACHED) ? + &fMan->cached : &fMan->unCached; + + for (list = head->next, next = list->next; + list != head; + list = next, next = list->next) { + + kboTmp = DRMLISTENTRY(struct _DriKernelBO, list, head); + + if ((kboTmp->bo.size == size) && + (slabPool->pageAlignment == 0 || + (kboTmp->pageAlignment % slabPool->pageAlignment) == 0)) { + + if (!kbo) + kbo = kboTmp; + + if ((kbo->bo.proposedFlags ^ slabPool->proposedFlags) == 0) + break; + + } + } + + if (kbo) { + DRMLISTDELINIT(&kbo->head); + DRMLISTDELINIT(&kbo->timeoutHead); + } + + _glthread_UNLOCK_MUTEX(fMan->mutex); + + if (kbo) { + uint64_t new_mask = kbo->bo.proposedFlags ^ slabPool->proposedFlags; + + ret = 0; + if (new_mask) { + ret = drmBOSetStatus(kbo->fd, &kbo->bo, slabPool->proposedFlags, + new_mask, DRM_BO_HINT_DONT_FENCE, 0, 0); + } + if (ret == 0) + return kbo; + + driFreeKernelBO(kbo); + kbo = NULL; + goto retry; + } + + kbo = calloc(1, sizeof(struct _DriKernelBO)); + if (!kbo) + return NULL; + + kbo->fd = slabPool->fd; + DRMINITLISTHEAD(&kbo->head); + DRMINITLISTHEAD(&kbo->timeoutHead); + ret = drmBOCreate(kbo->fd, size, slabPool->pageAlignment, NULL, + slabPool->proposedFlags, + DRM_BO_HINT_DONT_FENCE, &kbo->bo); + if (ret) + goto out_err0; + + ret = drmBOMap(kbo->fd, &kbo->bo, + DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, + 0, &kbo->virtual); + + if (ret) + goto out_err1; + + ret = drmBOUnmap(kbo->fd, &kbo->bo); + if (ret) + goto out_err1; + + return kbo; + + out_err1: + drmBOUnreference(kbo->fd, &kbo->bo); + out_err0: + free(kbo); + return NULL; +} + + +static int +driAllocSlab(struct _DriSlabSizeHeader *header) +{ + struct _DriSlab *slab; + struct _DriSlabBuffer *buf; + uint32_t numBuffers; + int ret; + int i; + + slab = calloc(1, sizeof(*slab)); + if (!slab) + return -ENOMEM; + + slab->kbo = driAllocKernelBO(header); + if (!slab->kbo) { + ret = -ENOMEM; + goto out_err0; + } + + numBuffers = slab->kbo->bo.size / header->bufSize; + + slab->buffers = calloc(numBuffers, sizeof(*slab->buffers)); + if (!slab->buffers) { + ret = -ENOMEM; + goto out_err1; + } + + DRMINITLISTHEAD(&slab->head); + DRMINITLISTHEAD(&slab->freeBuffers); + slab->numBuffers = numBuffers; + slab->numFree = 0; + slab->header = 
header; + + buf = slab->buffers; + for (i=0; i < numBuffers; ++i) { + buf->parent = slab; + buf->start = i* header->bufSize; + buf->mapCount = 0; + buf->isSlabBuffer = 1; + _glthread_INIT_COND(buf->event); + DRMLISTADDTAIL(&buf->head, &slab->freeBuffers); + slab->numFree++; + buf++; + } + + DRMLISTADDTAIL(&slab->head, &header->slabs); + + return 0; + + out_err1: + driSetKernelBOFree(header->slabPool->fMan, slab->kbo); + free(slab->buffers); + out_err0: + free(slab); + return ret; +} + +/* + * Delete a buffer from the slab header delayed list and put + * it on the slab free list. + */ + +static void +driSlabFreeBufferLocked(struct _DriSlabBuffer *buf) +{ + struct _DriSlab *slab = buf->parent; + struct _DriSlabSizeHeader *header = slab->header; + drmMMListHead *list = &buf->head; + + DRMLISTDEL(list); + DRMLISTADDTAIL(list, &slab->freeBuffers); + slab->numFree++; + + if (slab->head.next == &slab->head) + DRMLISTADDTAIL(&slab->head, &header->slabs); + + if (slab->numFree == slab->numBuffers) { + list = &slab->head; + DRMLISTDEL(list); + DRMLISTADDTAIL(list, &header->freeSlabs); + } + + if (header->slabs.next == &header->slabs || + slab->numFree != slab->numBuffers) { + + drmMMListHead *next; + struct _DriFreeSlabManager *fMan = header->slabPool->fMan; + + for (list = header->freeSlabs.next, next = list->next; + list != &header->freeSlabs; + list = next, next = list->next) { + + slab = DRMLISTENTRY(struct _DriSlab, list, head); + + DRMLISTDELINIT(list); + driSetKernelBOFree(fMan, slab->kbo); + free(slab->buffers); + free(slab); + } + } +} + +static void +driSlabCheckFreeLocked(struct _DriSlabSizeHeader *header, int wait) +{ + drmMMListHead *list, *prev, *first; + struct _DriSlabBuffer *buf; + struct _DriSlab *slab; + int firstWasSignaled = 1; + int signaled; + int i; + int ret; + + /* + * Rerun the freeing test if the youngest tested buffer + * was signaled, since there might be more idle buffers + * in the delay list. 
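+ * + * List layout reminder (a sketch of the code below): buffers are + * appended to header->delayedBuffers at the tail, so head.next is the + * oldest entry. The scan advances 'first' numDelayed/3 links in from + * the oldest entry and then walks backwards (via prev) over that + * oldest third, so only roughly one third of the list is fence-tested + * per pass.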
+ */ + + while (firstWasSignaled) { + firstWasSignaled = 0; + signaled = 0; + first = header->delayedBuffers.next; + + /* Only examine the oldest 1/3 of delayed buffers: + */ + if (header->numDelayed > 3) { + for (i = 0; i < header->numDelayed; i += 3) { + first = first->next; + } + } + + for (list = first, prev = list->prev; + list != &header->delayedBuffers; + list = prev, prev = list->prev) { + buf = DRMLISTENTRY(struct _DriSlabBuffer, list, head); + slab = buf->parent; + + if (!signaled) { + if (wait) { + ret = driFenceFinish(buf->fence, buf->fenceType, 0); + if (ret) + break; + signaled = 1; + wait = 0; + } else { + signaled = driFenceSignaled(buf->fence, buf->fenceType); + } + if (signaled) { + if (list == first) + firstWasSignaled = 1; + driFenceUnReference(&buf->fence); + header->numDelayed--; + driSlabFreeBufferLocked(buf); + } + } else if (driFenceSignaledCached(buf->fence, buf->fenceType)) { + driFenceUnReference(&buf->fence); + header->numDelayed--; + driSlabFreeBufferLocked(buf); + } + } + } +} + + +static struct _DriSlabBuffer * +driSlabAllocBuffer(struct _DriSlabSizeHeader *header) +{ + static struct _DriSlabBuffer *buf; + struct _DriSlab *slab; + drmMMListHead *list; + int count = DRI_SLABPOOL_ALLOC_RETRIES; + + _glthread_LOCK_MUTEX(header->mutex); + while(header->slabs.next == &header->slabs && count > 0) { + driSlabCheckFreeLocked(header, 0); + if (header->slabs.next != &header->slabs) + break; + + _glthread_UNLOCK_MUTEX(header->mutex); + if (count != DRI_SLABPOOL_ALLOC_RETRIES) + usleep(1); + _glthread_LOCK_MUTEX(header->mutex); + (void) driAllocSlab(header); + count--; + } + + list = header->slabs.next; + if (list == &header->slabs) { + _glthread_UNLOCK_MUTEX(header->mutex); + return NULL; + } + slab = DRMLISTENTRY(struct _DriSlab, list, head); + if (--slab->numFree == 0) + DRMLISTDELINIT(list); + + list = slab->freeBuffers.next; + DRMLISTDELINIT(list); + + _glthread_UNLOCK_MUTEX(header->mutex); + buf = DRMLISTENTRY(struct _DriSlabBuffer, list, head); + return buf; +} + +static void * +pool_create(struct _DriBufferPool *driPool, unsigned long size, + uint64_t flags, unsigned hint, unsigned alignment) +{ + struct _DriSlabPool *pool = (struct _DriSlabPool *) driPool->data; + struct _DriSlabSizeHeader *header; + struct _DriSlabBuffer *buf; + void *dummy; + int i; + int ret; + + /* + * FIXME: Check for compatibility. + */ + + header = pool->headers; + for (i=0; inumBuckets; ++i) { + if (header->bufSize >= size) + break; + header++; + } + + if (i < pool->numBuckets) + return driSlabAllocBuffer(header); + + + /* + * Fall back to allocate a buffer object directly from DRM. + * and wrap it in a driBO structure. 
+ */ + + + buf = calloc(1, sizeof(*buf)); + + if (!buf) + return NULL; + + buf->bo = calloc(1, sizeof(*buf->bo)); + if (!buf->bo) + goto out_err0; + + if (alignment) { + if ((alignment < pool->pageSize) && (pool->pageSize % alignment)) + goto out_err1; + if ((alignment > pool->pageSize) && (alignment % pool->pageSize)) + goto out_err1; + } + + ret = drmBOCreate(pool->fd, size, alignment / pool->pageSize, NULL, + flags, hint, buf->bo); + if (ret) + goto out_err1; + + ret = drmBOMap(pool->fd, buf->bo, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, + 0, &dummy); + if (ret) + goto out_err2; + + ret = drmBOUnmap(pool->fd, buf->bo); + if (ret) + goto out_err2; + + return buf; + out_err2: + drmBOUnreference(pool->fd, buf->bo); + out_err1: + free(buf->bo); + out_err0: + free(buf); + return NULL; +} + +static int +pool_destroy(struct _DriBufferPool *driPool, void *private) +{ + struct _DriSlabBuffer *buf = + (struct _DriSlabBuffer *) private; + struct _DriSlab *slab; + struct _DriSlabSizeHeader *header; + + if (!buf->isSlabBuffer) { + struct _DriSlabPool *pool = (struct _DriSlabPool *) driPool->data; + int ret; + + ret = drmBOUnreference(pool->fd, buf->bo); + free(buf->bo); + free(buf); + return ret; + } + + slab = buf->parent; + header = slab->header; + + _glthread_LOCK_MUTEX(header->mutex); + buf->unFenced = 0; + buf->mapCount = 0; + + if (buf->fence && !driFenceSignaledCached(buf->fence, buf->fenceType)) { + DRMLISTADDTAIL(&buf->head, &header->delayedBuffers); + header->numDelayed++; + } else { + if (buf->fence) + driFenceUnReference(&buf->fence); + driSlabFreeBufferLocked(buf); + } + + _glthread_UNLOCK_MUTEX(header->mutex); + return 0; +} + +static int +pool_waitIdle(struct _DriBufferPool *driPool, void *private, + _glthread_Mutex *mutex, int lazy) +{ + struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private; + + while(buf->unFenced) + _glthread_COND_WAIT(buf->event, *mutex); + + if (!buf->fence) + return 0; + + driFenceFinish(buf->fence, buf->fenceType, lazy); + driFenceUnReference(&buf->fence); + + return 0; +} + +static int +pool_map(struct _DriBufferPool *pool, void *private, unsigned flags, + int hint, _glthread_Mutex *mutex, void **virtual) +{ + struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private; + int busy; + + if (buf->isSlabBuffer) + busy = buf->unFenced || (buf->fence && !driFenceSignaledCached(buf->fence, buf->fenceType)); + else + busy = buf->fence && !driFenceSignaled(buf->fence, buf->fenceType); + + + if (busy) { + if (hint & DRM_BO_HINT_DONT_BLOCK) + return -EBUSY; + else { + (void) pool_waitIdle(pool, private, mutex, 0); + } + } + + ++buf->mapCount; + *virtual = (buf->isSlabBuffer) ? 
+ (void *) ((uint8_t *) buf->parent->kbo->virtual + buf->start) : + (void *) buf->bo->virtual; + + return 0; +} + +static int +pool_unmap(struct _DriBufferPool *pool, void *private) +{ + struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private; + + --buf->mapCount; + if (buf->mapCount == 0 && buf->isSlabBuffer) + _glthread_COND_BROADCAST(buf->event); + + return 0; +} + +static unsigned long +pool_offset(struct _DriBufferPool *pool, void *private) +{ + struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private; + struct _DriSlab *slab; + struct _DriSlabSizeHeader *header; + + if (!buf->isSlabBuffer) { + assert(buf->bo->proposedFlags & DRM_BO_FLAG_NO_MOVE); + return buf->bo->offset; + } + + slab = buf->parent; + header = slab->header; + + (void) header; + assert(header->slabPool->proposedFlags & DRM_BO_FLAG_NO_MOVE); + return slab->kbo->bo.offset + buf->start; +} + +static unsigned long +pool_poolOffset(struct _DriBufferPool *pool, void *private) +{ + struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private; + + return buf->start; +} + +static uint64_t +pool_flags(struct _DriBufferPool *pool, void *private) +{ + struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private; + + if (!buf->isSlabBuffer) + return buf->bo->flags; + + return buf->parent->kbo->bo.flags; +} + +static unsigned long +pool_size(struct _DriBufferPool *pool, void *private) +{ + struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private; + if (!buf->isSlabBuffer) + return buf->bo->size; + + return buf->parent->header->bufSize; +} + +static int +pool_fence(struct _DriBufferPool *pool, void *private, + struct _DriFenceObject *fence) +{ + struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private; + drmBO *bo; + + if (buf->fence) + driFenceUnReference(&buf->fence); + + buf->fence = driFenceReference(fence); + bo = (buf->isSlabBuffer) ? + &buf->parent->kbo->bo: + buf->bo; + buf->fenceType = bo->fenceFlags; + + buf->unFenced = 0; + _glthread_COND_BROADCAST(buf->event); + + return 0; +} + +static drmBO * +pool_kernel(struct _DriBufferPool *pool, void *private) +{ + struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private; + + return (buf->isSlabBuffer) ? 
&buf->parent->kbo->bo : buf->bo; +} + +static int +pool_validate(struct _DriBufferPool *pool, void *private, + _glthread_Mutex *mutex) +{ + struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private; + + if (!buf->isSlabBuffer) + return 0; + + while(buf->mapCount != 0) + _glthread_COND_WAIT(buf->event, *mutex); + + buf->unFenced = 1; + return 0; +} + + +struct _DriFreeSlabManager * +driInitFreeSlabManager(uint32_t checkIntervalMsec, uint32_t slabTimeoutMsec) +{ + struct _DriFreeSlabManager *tmp; + + tmp = calloc(1, sizeof(*tmp)); + if (!tmp) + return NULL; + + _glthread_INIT_MUTEX(tmp->mutex); + _glthread_LOCK_MUTEX(tmp->mutex); + tmp->slabTimeout.tv_usec = slabTimeoutMsec*1000; + tmp->slabTimeout.tv_sec = tmp->slabTimeout.tv_usec / 1000000; + tmp->slabTimeout.tv_usec -= tmp->slabTimeout.tv_sec*1000000; + + tmp->checkInterval.tv_usec = checkIntervalMsec*1000; + tmp->checkInterval.tv_sec = tmp->checkInterval.tv_usec / 1000000; + tmp->checkInterval.tv_usec -= tmp->checkInterval.tv_sec*1000000; + + gettimeofday(&tmp->nextCheck, NULL); + driTimeAdd(&tmp->nextCheck, &tmp->checkInterval); + DRMINITLISTHEAD(&tmp->timeoutList); + DRMINITLISTHEAD(&tmp->unCached); + DRMINITLISTHEAD(&tmp->cached); + _glthread_UNLOCK_MUTEX(tmp->mutex); + + return tmp; +} + +void +driFinishFreeSlabManager(struct _DriFreeSlabManager *fMan) +{ + struct timeval time; + + time = fMan->nextCheck; + driTimeAdd(&time, &fMan->checkInterval); + + _glthread_LOCK_MUTEX(fMan->mutex); + driFreeTimeoutKBOsLocked(fMan, &time); + _glthread_UNLOCK_MUTEX(fMan->mutex); + + assert(fMan->timeoutList.next == &fMan->timeoutList); + assert(fMan->unCached.next == &fMan->unCached); + assert(fMan->cached.next == &fMan->cached); + + free(fMan); +} + +static void +driInitSizeHeader(struct _DriSlabPool *pool, uint32_t size, + struct _DriSlabSizeHeader *header) +{ + _glthread_INIT_MUTEX(header->mutex); + _glthread_LOCK_MUTEX(header->mutex); + + DRMINITLISTHEAD(&header->slabs); + DRMINITLISTHEAD(&header->freeSlabs); + DRMINITLISTHEAD(&header->delayedBuffers); + + header->numDelayed = 0; + header->slabPool = pool; + header->bufSize = size; + + _glthread_UNLOCK_MUTEX(header->mutex); +} + +static void +driFinishSizeHeader(struct _DriSlabSizeHeader *header) +{ + drmMMListHead *list, *next; + struct _DriSlabBuffer *buf; + + _glthread_LOCK_MUTEX(header->mutex); + for (list = header->delayedBuffers.next, next = list->next; + list != &header->delayedBuffers; + list = next, next = list->next) { + + buf = DRMLISTENTRY(struct _DriSlabBuffer, list, head); + if (buf->fence) { + (void) driFenceFinish(buf->fence, buf->fenceType, 0); + driFenceUnReference(&buf->fence); + } + header->numDelayed--; + driSlabFreeBufferLocked(buf); + } + _glthread_UNLOCK_MUTEX(header->mutex); +} + +static void +pool_takedown(struct _DriBufferPool *driPool) +{ + struct _DriSlabPool *pool = driPool->data; + int i; + + for (i=0; i<pool->numBuckets; ++i) { + driFinishSizeHeader(&pool->headers[i]); + } + + free(pool->headers); + free(pool->bucketSizes); + free(pool); + free(driPool); +} + +struct _DriBufferPool * +driSlabPoolInit(int fd, uint64_t flags, + uint64_t validMask, + uint32_t smallestSize, + uint32_t numSizes, + uint32_t desiredNumBuffers, + uint32_t maxSlabSize, + uint32_t pageAlignment, + struct _DriFreeSlabManager *fMan) +{ + struct _DriBufferPool *driPool; + struct _DriSlabPool *pool; + uint32_t i; + + driPool = calloc(1, sizeof(*driPool)); + if (!driPool) + return NULL; + + pool = calloc(1, sizeof(*pool)); + if (!pool) + goto out_err0; + + pool->bucketSizes = calloc(numSizes,
sizeof(*pool->bucketSizes)); + if (!pool->bucketSizes) + goto out_err1; + + pool->headers = calloc(numSizes, sizeof(*pool->headers)); + if (!pool->headers) + goto out_err2; + + pool->fMan = fMan; + pool->proposedFlags = flags; + pool->validMask = validMask; + pool->numBuckets = numSizes; + pool->pageSize = getpagesize(); + pool->fd = fd; + pool->pageAlignment = pageAlignment; + pool->maxSlabSize = maxSlabSize; + pool->desiredNumBuffers = desiredNumBuffers; + + for (i=0; i<pool->numBuckets; ++i) { + pool->bucketSizes[i] = (smallestSize << i); + driInitSizeHeader(pool, pool->bucketSizes[i], + &pool->headers[i]); + } + + driPool->data = (void *) pool; + driPool->map = &pool_map; + driPool->unmap = &pool_unmap; + driPool->destroy = &pool_destroy; + driPool->offset = &pool_offset; + driPool->poolOffset = &pool_poolOffset; + driPool->flags = &pool_flags; + driPool->size = &pool_size; + driPool->create = &pool_create; + driPool->fence = &pool_fence; + driPool->kernel = &pool_kernel; + driPool->validate = &pool_validate; + driPool->waitIdle = &pool_waitIdle; + driPool->takeDown = &pool_takedown; + + return driPool; + + out_err2: + free(pool->bucketSizes); + out_err1: + free(pool); + out_err0: + free(driPool); + + return NULL; +} -- cgit v1.2.3 From 0bb852fa49e7f9a31036089ea4f5dfbd312a4a3a Mon Sep 17 00:00:00 2001 From: Brian Paul Date: Tue, 26 Aug 2008 16:35:12 -0600 Subject: gallium: thread wrapper clean-up In p_thread.h replace _glthread_* functions with new pipe_* functions. Remove other old cruft. --- .../auxiliary/pipebuffer/pb_buffer_fenced.c | 24 +- src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c | 18 +- src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c | 20 +- src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c | 24 +- src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c | 20 +- src/gallium/auxiliary/rtasm/rtasm_execmem.c | 11 +- src/gallium/include/pipe/p_thread.h | 366 ++++++++------------- .../winsys/drm/intel/common/ws_dri_bufmgr.c | 102 +++--- .../winsys/drm/intel/common/ws_dri_bufpool.h | 8 +- .../winsys/drm/intel/common/ws_dri_drmpool.c | 4 +- .../winsys/drm/intel/common/ws_dri_fencemgr.c | 66 ++-- .../winsys/drm/intel/common/ws_dri_mallocpool.c | 6 +- .../winsys/drm/intel/common/ws_dri_slabpool.c | 66 ++-- src/gallium/winsys/drm/intel/dri/intel_lock.c | 8 +- src/gallium/winsys/xlib/glxapi.c | 26 +- src/gallium/winsys/xlib/xm_api.c | 9 +- src/gallium/winsys/xlib/xmesaP.h | 3 +- 17 files changed, 328 insertions(+), 453 deletions(-) (limited to 'src/gallium/winsys/drm/intel/common/ws_dri_slabpool.c') diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c index 410d336fef..17b2781052 100644 --- a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c +++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c @@ -69,7 +69,7 @@ struct fenced_buffer_list { - _glthread_Mutex mutex; + pipe_mutex mutex; struct pipe_winsys *winsys; @@ -240,7 +240,7 @@ fenced_buffer_destroy(struct pb_buffer *buf) struct fenced_buffer *fenced_buf = fenced_buffer(buf); struct fenced_buffer_list *fenced_list = fenced_buf->list; - _glthread_LOCK_MUTEX(fenced_list->mutex); + pipe_mutex_lock(fenced_list->mutex); assert(fenced_buf->base.base.refcount == 0); if (fenced_buf->fence) { struct pipe_winsys *winsys = fenced_list->winsys; @@ -263,7 +263,7 @@ fenced_buffer_destroy(struct pb_buffer *buf) else { _fenced_buffer_destroy(fenced_buf); } - _glthread_UNLOCK_MUTEX(fenced_list->mutex); + pipe_mutex_unlock(fenced_list->mutex); } @@ -396,7 +396,7 @@ buffer_fence(struct
pb_buffer *buf, return; } - _glthread_LOCK_MUTEX(fenced_list->mutex); + pipe_mutex_lock(fenced_list->mutex); if (fenced_buf->fence) _fenced_buffer_remove(fenced_list, fenced_buf); if (fence) { @@ -404,7 +404,7 @@ buffer_fence(struct pb_buffer *buf, fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE; _fenced_buffer_add(fenced_buf); } - _glthread_UNLOCK_MUTEX(fenced_list->mutex); + pipe_mutex_unlock(fenced_list->mutex); } @@ -423,7 +423,7 @@ fenced_buffer_list_create(struct pipe_winsys *winsys) fenced_list->numDelayed = 0; - _glthread_INIT_MUTEX(fenced_list->mutex); + pipe_mutex_init(fenced_list->mutex); return fenced_list; } @@ -433,28 +433,28 @@ void fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list, int wait) { - _glthread_LOCK_MUTEX(fenced_list->mutex); + pipe_mutex_lock(fenced_list->mutex); _fenced_buffer_list_check_free(fenced_list, wait); - _glthread_UNLOCK_MUTEX(fenced_list->mutex); + pipe_mutex_unlock(fenced_list->mutex); } void fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list) { - _glthread_LOCK_MUTEX(fenced_list->mutex); + pipe_mutex_lock(fenced_list->mutex); /* Wait on outstanding fences */ while (fenced_list->numDelayed) { - _glthread_UNLOCK_MUTEX(fenced_list->mutex); + pipe_mutex_unlock(fenced_list->mutex); #if defined(PIPE_OS_LINUX) sched_yield(); #endif _fenced_buffer_list_check_free(fenced_list, 1); - _glthread_LOCK_MUTEX(fenced_list->mutex); + pipe_mutex_lock(fenced_list->mutex); } - _glthread_UNLOCK_MUTEX(fenced_list->mutex); + pipe_mutex_unlock(fenced_list->mutex); FREE(fenced_list); } diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c index b914c2d0fe..e2b8fe0f98 100644 --- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c +++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c @@ -79,7 +79,7 @@ struct pb_cache_manager struct pb_manager *provider; unsigned usecs; - _glthread_Mutex mutex; + pipe_mutex mutex; struct list_head delayed; size_t numDelayed; @@ -153,7 +153,7 @@ pb_cache_buffer_destroy(struct pb_buffer *_buf) struct pb_cache_buffer *buf = pb_cache_buffer(_buf); struct pb_cache_manager *mgr = buf->mgr; - _glthread_LOCK_MUTEX(mgr->mutex); + pipe_mutex_lock(mgr->mutex); assert(buf->base.base.refcount == 0); _pb_cache_buffer_list_check_free(mgr); @@ -162,7 +162,7 @@ pb_cache_buffer_destroy(struct pb_buffer *_buf) util_time_add(&buf->start, mgr->usecs, &buf->end); LIST_ADDTAIL(&buf->head, &mgr->delayed); ++mgr->numDelayed; - _glthread_UNLOCK_MUTEX(mgr->mutex); + pipe_mutex_unlock(mgr->mutex); } @@ -235,7 +235,7 @@ pb_cache_manager_create_buffer(struct pb_manager *_mgr, struct list_head *curr, *next; struct util_time now; - _glthread_LOCK_MUTEX(mgr->mutex); + pipe_mutex_lock(mgr->mutex); buf = NULL; curr = mgr->delayed.next; @@ -264,12 +264,12 @@ pb_cache_manager_create_buffer(struct pb_manager *_mgr, if(buf) { LIST_DEL(&buf->head); - _glthread_UNLOCK_MUTEX(mgr->mutex); + pipe_mutex_unlock(mgr->mutex); ++buf->base.base.refcount; return &buf->base; } - _glthread_UNLOCK_MUTEX(mgr->mutex); + pipe_mutex_unlock(mgr->mutex); buf = CALLOC_STRUCT(pb_cache_buffer); if(!buf) @@ -305,7 +305,7 @@ pb_cache_flush(struct pb_manager *_mgr) struct list_head *curr, *next; struct pb_cache_buffer *buf; - _glthread_LOCK_MUTEX(mgr->mutex); + pipe_mutex_lock(mgr->mutex); curr = mgr->delayed.next; next = curr->next; while(curr != &mgr->delayed) { @@ -314,7 +314,7 @@ pb_cache_flush(struct pb_manager *_mgr) curr = next; next = curr->next; } - _glthread_UNLOCK_MUTEX(mgr->mutex); 
+ pipe_mutex_unlock(mgr->mutex); } @@ -345,7 +345,7 @@ pb_cache_manager_create(struct pb_manager *provider, mgr->usecs = usecs; LIST_INITHEAD(&mgr->delayed); mgr->numDelayed = 0; - _glthread_INIT_MUTEX(mgr->mutex); + pipe_mutex_init(mgr->mutex); return &mgr->base; } diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c index b40eb6cc90..e8c7f8e1f8 100644 --- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c +++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c @@ -53,7 +53,7 @@ struct mm_pb_manager { struct pb_manager base; - _glthread_Mutex mutex; + pipe_mutex mutex; size_t size; struct mem_block *heap; @@ -99,10 +99,10 @@ mm_buffer_destroy(struct pb_buffer *buf) assert(buf->base.refcount == 0); - _glthread_LOCK_MUTEX(mm->mutex); + pipe_mutex_lock(mm->mutex); mmFreeMem(mm_buf->block); FREE(buf); - _glthread_UNLOCK_MUTEX(mm->mutex); + pipe_mutex_unlock(mm->mutex); } @@ -158,11 +158,11 @@ mm_bufmgr_create_buffer(struct pb_manager *mgr, if(desc->alignment % (1 << mm->align2)) return NULL; - _glthread_LOCK_MUTEX(mm->mutex); + pipe_mutex_lock(mm->mutex); mm_buf = CALLOC_STRUCT(mm_buffer); if (!mm_buf) { - _glthread_UNLOCK_MUTEX(mm->mutex); + pipe_mutex_unlock(mm->mutex); return NULL; } @@ -185,7 +185,7 @@ mm_bufmgr_create_buffer(struct pb_manager *mgr, mm_buf->block = mmAllocMem(mm->heap, size, mm->align2, 0); if(!mm_buf->block) { FREE(mm_buf); - _glthread_UNLOCK_MUTEX(mm->mutex); + pipe_mutex_unlock(mm->mutex); return NULL; } } @@ -194,7 +194,7 @@ mm_bufmgr_create_buffer(struct pb_manager *mgr, assert(0 <= (unsigned)mm_buf->block->ofs && (unsigned)mm_buf->block->ofs < mm->size); assert(size <= (unsigned)mm_buf->block->size && (unsigned)mm_buf->block->ofs + (unsigned)mm_buf->block->size <= mm->size); - _glthread_UNLOCK_MUTEX(mm->mutex); + pipe_mutex_unlock(mm->mutex); return SUPER(mm_buf); } @@ -204,14 +204,14 @@ mm_bufmgr_destroy(struct pb_manager *mgr) { struct mm_pb_manager *mm = mm_pb_manager(mgr); - _glthread_LOCK_MUTEX(mm->mutex); + pipe_mutex_lock(mm->mutex); mmDestroy(mm->heap); pb_unmap(mm->buffer); pb_reference(&mm->buffer, NULL); - _glthread_UNLOCK_MUTEX(mm->mutex); + pipe_mutex_unlock(mm->mutex); FREE(mgr); } @@ -236,7 +236,7 @@ mm_bufmgr_create_from_buffer(struct pb_buffer *buffer, mm->size = size; mm->align2 = align2; /* 64-byte alignment */ - _glthread_INIT_MUTEX(mm->mutex); + pipe_mutex_init(mm->mutex); mm->buffer = buffer; diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c index 93d2cc9635..3ef72c5bbb 100644 --- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c +++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c @@ -56,7 +56,7 @@ struct pool_pb_manager { struct pb_manager base; - _glthread_Mutex mutex; + pipe_mutex mutex; size_t bufSize; size_t bufAlign; @@ -110,10 +110,10 @@ pool_buffer_destroy(struct pb_buffer *buf) assert(pool_buf->base.base.refcount == 0); - _glthread_LOCK_MUTEX(pool->mutex); + pipe_mutex_lock(pool->mutex); LIST_ADD(&pool_buf->head, &pool->free); pool->numFree++; - _glthread_UNLOCK_MUTEX(pool->mutex); + pipe_mutex_unlock(pool->mutex); } @@ -124,9 +124,9 @@ pool_buffer_map(struct pb_buffer *buf, unsigned flags) struct pool_pb_manager *pool = pool_buf->mgr; void *map; - _glthread_LOCK_MUTEX(pool->mutex); + pipe_mutex_lock(pool->mutex); map = (unsigned char *) pool->map + pool_buf->start; - _glthread_UNLOCK_MUTEX(pool->mutex); + pipe_mutex_unlock(pool->mutex); return map; } @@ -171,10 +171,10 @@ pool_bufmgr_create_buffer(struct 
pb_manager *mgr, assert(size == pool->bufSize); assert(pool->bufAlign % desc->alignment == 0); - _glthread_LOCK_MUTEX(pool->mutex); + pipe_mutex_lock(pool->mutex); if (pool->numFree == 0) { - _glthread_UNLOCK_MUTEX(pool->mutex); + pipe_mutex_unlock(pool->mutex); debug_printf("warning: out of fixed size buffer objects\n"); return NULL; } @@ -182,7 +182,7 @@ pool_bufmgr_create_buffer(struct pb_manager *mgr, item = pool->free.next; if (item == &pool->free) { - _glthread_UNLOCK_MUTEX(pool->mutex); + pipe_mutex_unlock(pool->mutex); debug_printf("error: fixed size buffer pool corruption\n"); return NULL; } @@ -190,7 +190,7 @@ pool_bufmgr_create_buffer(struct pb_manager *mgr, LIST_DEL(item); --pool->numFree; - _glthread_UNLOCK_MUTEX(pool->mutex); + pipe_mutex_unlock(pool->mutex); pool_buf = LIST_ENTRY(struct pool_buffer, item, head); assert(pool_buf->base.base.refcount == 0); @@ -206,14 +206,14 @@ static void pool_bufmgr_destroy(struct pb_manager *mgr) { struct pool_pb_manager *pool = pool_pb_manager(mgr); - _glthread_LOCK_MUTEX(pool->mutex); + pipe_mutex_lock(pool->mutex); FREE(pool->bufs); pb_unmap(pool->buffer); pb_reference(&pool->buffer, NULL); - _glthread_UNLOCK_MUTEX(pool->mutex); + pipe_mutex_unlock(pool->mutex); FREE(mgr); } @@ -246,7 +246,7 @@ pool_bufmgr_create(struct pb_manager *provider, pool->bufSize = bufSize; pool->bufAlign = desc->alignment; - _glthread_INIT_MUTEX(pool->mutex); + pipe_mutex_init(pool->mutex); pool->buffer = provider->create_buffer(provider, numBufs*bufSize, desc); if (!pool->buffer) diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c index af307e265a..ac0296b26a 100644 --- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c +++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c @@ -57,7 +57,7 @@ struct pb_slab_buffer struct list_head head; unsigned mapCount; size_t start; - _glthread_Cond event; + pipe_condvar event; }; struct pb_slab @@ -85,7 +85,7 @@ struct pb_slab_manager struct list_head slabs; struct list_head freeSlabs; - _glthread_Mutex mutex; + pipe_mutex mutex; }; /** @@ -143,7 +143,7 @@ pb_slab_buffer_destroy(struct pb_buffer *_buf) struct pb_slab_manager *mgr = slab->mgr; struct list_head *list = &buf->head; - _glthread_LOCK_MUTEX(mgr->mutex); + pipe_mutex_lock(mgr->mutex); assert(buf->base.base.refcount == 0); @@ -179,7 +179,7 @@ pb_slab_buffer_destroy(struct pb_buffer *_buf) } } - _glthread_UNLOCK_MUTEX(mgr->mutex); + pipe_mutex_unlock(mgr->mutex); } @@ -201,7 +201,7 @@ pb_slab_buffer_unmap(struct pb_buffer *_buf) --buf->mapCount; if (buf->mapCount == 0) - _glthread_COND_BROADCAST(buf->event); + pipe_condvar_broadcast(buf->event); } @@ -283,7 +283,7 @@ pb_slab_create(struct pb_slab_manager *mgr) buf->slab = slab; buf->start = i* mgr->bufSize; buf->mapCount = 0; - _glthread_INIT_COND(buf->event); + pipe_condvar_init(buf->event); LIST_ADDTAIL(&buf->head, &slab->freeBuffers); slab->numFree++; buf++; @@ -328,11 +328,11 @@ pb_slab_manager_create_buffer(struct pb_manager *_mgr, if(!pb_check_usage(desc->usage, mgr->desc.usage)) return NULL; - _glthread_LOCK_MUTEX(mgr->mutex); + pipe_mutex_lock(mgr->mutex); if (mgr->slabs.next == &mgr->slabs) { (void) pb_slab_create(mgr); if (mgr->slabs.next == &mgr->slabs) { - _glthread_UNLOCK_MUTEX(mgr->mutex); + pipe_mutex_unlock(mgr->mutex); return NULL; } } @@ -344,7 +344,7 @@ pb_slab_manager_create_buffer(struct pb_manager *_mgr, list = slab->freeBuffers.next; LIST_DELINIT(list); - _glthread_UNLOCK_MUTEX(mgr->mutex); + pipe_mutex_unlock(mgr->mutex); buf = 
LIST_ENTRY(struct pb_slab_buffer, list, head); ++buf->base.base.refcount; @@ -388,7 +388,7 @@ pb_slab_manager_create(struct pb_manager *provider, LIST_INITHEAD(&mgr->slabs); LIST_INITHEAD(&mgr->freeSlabs); - _glthread_INIT_MUTEX(mgr->mutex); + pipe_mutex_init(mgr->mutex); return &mgr->base; } diff --git a/src/gallium/auxiliary/rtasm/rtasm_execmem.c b/src/gallium/auxiliary/rtasm/rtasm_execmem.c index dfa5c35ab6..19087589a8 100644 --- a/src/gallium/auxiliary/rtasm/rtasm_execmem.c +++ b/src/gallium/auxiliary/rtasm/rtasm_execmem.c @@ -47,11 +47,12 @@ #include #include +#include "pipe/p_thread.h" #include "util/u_mm.h" #define EXEC_HEAP_SIZE (10*1024*1024) -_glthread_DECLARE_STATIC_MUTEX(exec_mutex); +pipe_static_mutex(exec_mutex); static struct mem_block *exec_heap = NULL; static unsigned char *exec_mem = NULL; @@ -76,7 +77,7 @@ rtasm_exec_malloc(size_t size) struct mem_block *block = NULL; void *addr = NULL; - _glthread_LOCK_MUTEX(exec_mutex); + pipe_mutex_lock(exec_mutex); init_heap(); @@ -90,7 +91,7 @@ rtasm_exec_malloc(size_t size) else debug_printf("rtasm_exec_malloc failed\n"); - _glthread_UNLOCK_MUTEX(exec_mutex); + pipe_mutex_unlock(exec_mutex); return addr; } @@ -99,7 +100,7 @@ rtasm_exec_malloc(size_t size) void rtasm_exec_free(void *addr) { - _glthread_LOCK_MUTEX(exec_mutex); + pipe_mutex_lock(exec_mutex); if (exec_heap) { struct mem_block *block = mmFindBlock(exec_heap, (unsigned char *)addr - exec_mem); @@ -108,7 +109,7 @@ rtasm_exec_free(void *addr) mmFreeMem(block); } - _glthread_UNLOCK_MUTEX(exec_mutex); + pipe_mutex_unlock(exec_mutex); } diff --git a/src/gallium/include/pipe/p_thread.h b/src/gallium/include/pipe/p_thread.h index 6e526b7aa8..4e6f7cbb44 100644 --- a/src/gallium/include/pipe/p_thread.h +++ b/src/gallium/include/pipe/p_thread.h @@ -23,307 +23,199 @@ * **************************************************************************/ + /** - * @file - * Thread - * - * Initial version by John Stone (j.stone@acm.org) (johns@cs.umr.edu) - * and Christoph Poliwoda (poliwoda@volumegraphics.com) - * Revised by Keith Whitwell - * Adapted for new gl dispatcher by Brian Paul - * - * - * - * DOCUMENTATION - * - * This thread module exports the following types: - * _glthread_TSD Thread-specific data area - * _glthread_Thread Thread datatype - * _glthread_Mutex Mutual exclusion lock - * - * Macros: - * _glthread_DECLARE_STATIC_MUTEX(name) Declare a non-local mutex - * _glthread_INIT_MUTEX(name) Initialize a mutex - * _glthread_LOCK_MUTEX(name) Lock a mutex - * _glthread_UNLOCK_MUTEX(name) Unlock a mutex - * - * Functions: - * _glthread_GetID(v) Get integer thread ID - * _glthread_InitTSD() Initialize thread-specific data - * _glthread_GetTSD() Get thread-specific data - * _glthread_SetTSD() Set thread-specific data - * - * If this file is accidentally included by a non-threaded build, - * it should not cause the build to fail, or otherwise cause problems. - * In general, it should only be included when needed however. + * Thread, mutex, condition var and thread-specific data functions. */ -#ifndef _P_THREAD_H_ -#define _P_THREAD_H_ +#ifndef _P_THREAD2_H_ +#define _P_THREAD2_H_ -#if (defined(PTHREADS) || defined(SOLARIS_THREADS) ||\ - defined(WIN32_THREADS) || defined(USE_XTHREADS) || defined(BEOS_THREADS)) \ - && !defined(THREADS) -# define THREADS -#endif -#ifdef VMS -#include -#endif +#include "pipe/p_compiler.h" -/* - * POSIX threads. This should be your choice in the Unix world - * whenever possible. 
When building with POSIX threads, be sure - * to enable any compiler flags which will cause the MT-safe - * libc (if one exists) to be used when linking, as well as any - * header macros for MT-safe errno, etc. For Solaris, this is the -mt - * compiler flag. On Solaris with gcc, use -D_REENTRANT to enable - * proper compiling for MT-safe libc etc. - */ -#if defined(PTHREADS) -#include /* POSIX threads headers */ -typedef struct { - pthread_key_t key; - int initMagic; -} _glthread_TSD; +#if defined(PIPE_OS_LINUX) -typedef pthread_t _glthread_Thread; +#include /* POSIX threads headers */ +#include /* for perror() */ -typedef pthread_mutex_t _glthread_Mutex; -#define _glthread_DECLARE_STATIC_MUTEX(name) \ - static _glthread_Mutex name = PTHREAD_MUTEX_INITIALIZER +typedef pthread_t pipe_thread; +typedef pthread_mutex_t pipe_mutex; +typedef pthread_cond_t pipe_condvar; -#define _glthread_INIT_MUTEX(name) \ - pthread_mutex_init(&(name), NULL) +#define pipe_static_mutex(mutex) \ + static pipe_mutex mutex = PTHREAD_MUTEX_INITIALIZER -#define _glthread_DESTROY_MUTEX(name) \ - pthread_mutex_destroy(&(name)) +#define pipe_mutex_init(mutex) \ + pthread_mutex_init(&(mutex), NULL) -#define _glthread_LOCK_MUTEX(name) \ - (void) pthread_mutex_lock(&(name)) +#define pipe_mutex_destroy(mutex) \ + pthread_mutex_destroy(&(mutex)) -#define _glthread_UNLOCK_MUTEX(name) \ - (void) pthread_mutex_unlock(&(name)) +#define pipe_mutex_lock(mutex) \ + (void) pthread_mutex_lock(&(mutex)) -typedef pthread_cond_t _glthread_Cond; +#define pipe_mutex_unlock(mutex) \ + (void) pthread_mutex_unlock(&(mutex)) -#define _glthread_DECLARE_STATIC_COND(name) \ - static _glthread_Cond name = PTHREAD_COND_INITIALIZER +#define pipe_static_condvar(mutex) \ + static pipe_condvar mutex = PTHREAD_COND_INITIALIZER -#define _glthread_INIT_COND(cond) \ +#define pipe_condvar_init(cond) \ pthread_cond_init(&(cond), NULL) -#define _glthread_DESTROY_COND(name) \ - pthread_cond_destroy(&(name)) +#define pipe_condvar_destroy(cond) \ + pthread_cond_destroy(&(cond)) -#define _glthread_COND_WAIT(cond, mutex) \ +#define pipe_condvar_wait(cond, mutex) \ pthread_cond_wait(&(cond), &(mutex)) -#define _glthread_COND_SIGNAL(cond) \ +#define pipe_condvar_signal(cond) \ pthread_cond_signal(&(cond)) -#define _glthread_COND_BROADCAST(cond) \ +#define pipe_condvar_broadcast(cond) \ pthread_cond_broadcast(&(cond)) -#endif /* PTHREADS */ - - +#elif defined(PIPE_OS_WINDOWS) -/* - * Solaris threads. Use only up to Solaris 2.4. - * Solaris 2.5 and higher provide POSIX threads. - * Be sure to compile with -mt on the Solaris compilers, or - * use -D_REENTRANT if using gcc. - */ -#ifdef SOLARIS_THREADS -#include - -typedef struct { - thread_key_t key; - mutex_t keylock; - int initMagic; -} _glthread_TSD; - -typedef thread_t _glthread_Thread; - -typedef mutex_t _glthread_Mutex; - -/* XXX need to really implement mutex-related macros */ -#define _glthread_DECLARE_STATIC_MUTEX(name) static _glthread_Mutex name = 0 -#define _glthread_INIT_MUTEX(name) (void) name -#define _glthread_DESTROY_MUTEX(name) (void) name -#define _glthread_LOCK_MUTEX(name) (void) name -#define _glthread_UNLOCK_MUTEX(name) (void) name - -#endif /* SOLARIS_THREADS */ - - - - -/* - * Windows threads. Should work with Windows NT and 95. - * IMPORTANT: Link with multithreaded runtime library when THREADS are - * used! 
- */ -#ifdef WIN32_THREADS #include -typedef struct { - DWORD key; - int initMagic; -} _glthread_TSD; - -typedef HANDLE _glthread_Thread; +typedef HANDLE pipe_thread; +typedef CRITICAL_SECTION pipe_mutex; -typedef CRITICAL_SECTION _glthread_Mutex; +#define pipe_static_mutex(name) \ + /*static*/ pipe_mutex name = {0,0,0,0,0,0} -#define _glthread_DECLARE_STATIC_MUTEX(name) /*static*/ _glthread_Mutex name = {0,0,0,0,0,0} -#define _glthread_INIT_MUTEX(name) InitializeCriticalSection(&name) -#define _glthread_DESTROY_MUTEX(name) DeleteCriticalSection(&name) -#define _glthread_LOCK_MUTEX(name) EnterCriticalSection(&name) -#define _glthread_UNLOCK_MUTEX(name) LeaveCriticalSection(&name) +#define pipe_mutex_init(name) \ + InitializeCriticalSection(&name) -#endif /* WIN32_THREADS */ +#define pipe_mutex_destroy(name) \ + DeleteCriticalSection(&name) +#define pipe_mutex_lock(name) \ + EnterCriticalSection(&name) +#define pipe_mutex_unlock(name) \ + LeaveCriticalSection(&name) -/* - * XFree86 has its own thread wrapper, Xthreads.h - * We wrap it again for GL. - */ -#ifdef USE_XTHREADS -#include - -typedef struct { - xthread_key_t key; - int initMagic; -} _glthread_TSD; - -typedef xthread_t _glthread_Thread; - -typedef xmutex_rec _glthread_Mutex; - -#ifdef XMUTEX_INITIALIZER -#define _glthread_DECLARE_STATIC_MUTEX(name) \ - static _glthread_Mutex name = XMUTEX_INITIALIZER #else -#define _glthread_DECLARE_STATIC_MUTEX(name) \ - static _glthread_Mutex name -#endif - -#define _glthread_INIT_MUTEX(name) \ - xmutex_init(&(name)) - -#define _glthread_DESTROY_MUTEX(name) \ - xmutex_clear(&(name)) - -#define _glthread_LOCK_MUTEX(name) \ - (void) xmutex_lock(&(name)) - -#define _glthread_UNLOCK_MUTEX(name) \ - (void) xmutex_unlock(&(name)) - -#endif /* USE_XTHREADS */ - - - -/* - * BeOS threads. R5.x required. 
- */ -#ifdef BEOS_THREADS - -#include -#include - -typedef struct { - int32 key; - int initMagic; -} _glthread_TSD; - -typedef thread_id _glthread_Thread; - -/* Use Benaphore, aka speeder semaphore */ -typedef struct { - int32 lock; - sem_id sem; -} benaphore; -typedef benaphore _glthread_Mutex; -#define _glthread_DECLARE_STATIC_MUTEX(name) static _glthread_Mutex name = { 0, 0 } -#define _glthread_INIT_MUTEX(name) name.sem = create_sem(0, #name"_benaphore"), name.lock = 0 -#define _glthread_DESTROY_MUTEX(name) delete_sem(name.sem), name.lock = 0 -#define _glthread_LOCK_MUTEX(name) if (name.sem == 0) _glthread_INIT_MUTEX(name); \ - if (atomic_add(&(name.lock), 1) >= 1) acquire_sem(name.sem) -#define _glthread_UNLOCK_MUTEX(name) if (atomic_add(&(name.lock), -1) > 1) release_sem(name.sem) +/** Dummy definitions */ -#endif /* BEOS_THREADS */ +typedef unsigned pipe_thread; +typedef unsigned pipe_mutex; +typedef unsigned pipe_condvar; +typedef unsigned pipe_tsd; +#define pipe_static_mutex(mutex) \ + static pipe_mutex mutex = 0 +#define pipe_mutex_init(mutex) \ + (void) mutex -#ifndef THREADS +#define pipe_mutex_destroy(mutex) \ + (void) mutex -/* - * THREADS not defined - */ - -typedef unsigned _glthread_TSD; - -typedef unsigned _glthread_Thread; - -typedef unsigned _glthread_Mutex; +#define pipe_mutex_lock(mutex) \ + (void) mutex -#define _glthread_DECLARE_STATIC_MUTEX(name) static _glthread_Mutex name = 0 +#define pipe_mutex_unlock(mutex) \ + (void) mutex -#define _glthread_INIT_MUTEX(name) (void) name +#define pipe_static_condvar(condvar) \ + static _glthread_Cond condvar = 0 -#define _glthread_DESTROY_MUTEX(name) (void) name +#define pipe_condvar_init(condvar) \ + (void) condvar -#define _glthread_LOCK_MUTEX(name) (void) name +#define pipe_condvar_destroy(condvar) \ + (void) condvar -#define _glthread_UNLOCK_MUTEX(name) (void) name +#define pipe_condvar_wait(condvar, mutex) \ + (void) condvar -typedef unsigned _glthread_Cond; +#define pipe_condvar_signal(condvar) \ + (void) condvar -#define _glthread_DECLARE_STATIC_COND(name) static _glthread_Cond name = 0 +#define pipe_condvar_broadcast(condvar) \ + (void) condvar -#define _glthread_INIT_COND(name) (void) name -#define _glthread_DESTROY_COND(name) (void) name - -#define _glthread_COND_WAIT(name, mutex) (void) name - -#define _glthread_COND_SIGNAL(name) (void) name - -#define _glthread_COND_BROADCAST(name) (void) name - -#endif /* THREADS */ +#endif /* PIPE_OS_? */ /* - * Platform independent thread specific data API. + * Thread-specific data. 
*/ -extern unsigned long -_glthread_GetID(void); - - -extern void -_glthread_InitTSD(_glthread_TSD *); +typedef struct { +#if defined(PIPE_OS_LINUX) + pthread_key_t key; +#elif defined(PIPE_OS_WINDOWS) + DWORD key; +#endif + int initMagic; +} pipe_tsd; -extern void * -_glthread_GetTSD(_glthread_TSD *); +#define PIPE_TSD_INIT_MAGIC 0xff8adc98 -extern void -_glthread_SetTSD(_glthread_TSD *, void *); +static INLINE void +pipe_tsd_init(pipe_tsd *tsd) +{ +#if defined(PIPE_OS_LINUX) + if (pthread_key_create(&tsd->key, NULL/*free*/) != 0) { + perror("pthread_key_create(): failed to allocate key for thread specific data"); + exit(-1); + } +#elif defined(PIPE_OS_WINDOWS) + assert(0); +#endif + tsd->initMagic = PIPE_TSD_INIT_MAGIC; +} + +static INLINE void * +pipe_tsd_get(pipe_tsd *tsd) +{ + if (tsd->initMagic != (int) PIPE_TSD_INIT_MAGIC) { + pipe_tsd_init(tsd); + } +#if defined(PIPE_OS_LINUX) + return pthread_getspecific(tsd->key); +#elif defined(PIPE_OS_WINDOWS) + assert(0); + return NULL; +#else + assert(0); + return NULL; +#endif +} + +static INLINE void +pipe_tsd_set(pipe_tsd *tsd, void *value) +{ + if (tsd->initMagic != (int) PIPE_TSD_INIT_MAGIC) { + pipe_tsd_init(tsd); + } +#if defined(PIPE_OS_LINUX) + if (pthread_setspecific(tsd->key, value) != 0) { + perror("pthread_set_specific() failed"); + exit(-1); + } +#elif defined(PIPE_OS_WINDOWS) + assert(0); +#else + assert(0); +#endif +} -#endif /* _P_THREAD_H_ */ +#endif /* _P_THREAD2_H_ */ diff --git a/src/gallium/winsys/drm/intel/common/ws_dri_bufmgr.c b/src/gallium/winsys/drm/intel/common/ws_dri_bufmgr.c index b6d901f85e..499f7bef8f 100644 --- a/src/gallium/winsys/drm/intel/common/ws_dri_bufmgr.c +++ b/src/gallium/winsys/drm/intel/common/ws_dri_bufmgr.c @@ -33,7 +33,7 @@ #include #include #include -#include "glthread.h" +#include "pipe/p_thread.h" #include "errno.h" #include "ws_dri_bufmgr.h" #include "string.h" @@ -51,8 +51,8 @@ * driBufferObject mutex - > this rw lock. 
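 * + * (The rw lock itself is built from the bmMutex/bmCond pair declared + * below: readers bump kernelReaders under bmMutex, while a writer + * simply holds bmMutex, waiting on bmCond until kernelReaders drops + * to zero -- see driReadLockKernelBO()/driWriteLockKernelBO().)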
 */ -_glthread_DECLARE_STATIC_MUTEX(bmMutex); -_glthread_DECLARE_STATIC_COND(bmCond); +pipe_static_mutex(bmMutex); +pipe_static_condvar(bmCond); static int kernelReaders = 0; static int num_buffers = 0; @@ -241,29 +241,29 @@ static int drmBOResetList(drmBOList *list) void driWriteLockKernelBO(void) { - _glthread_LOCK_MUTEX(bmMutex); + pipe_mutex_lock(bmMutex); while(kernelReaders != 0) - _glthread_COND_WAIT(bmCond, bmMutex); + pipe_condvar_wait(bmCond, bmMutex); } void driWriteUnlockKernelBO(void) { - _glthread_UNLOCK_MUTEX(bmMutex); + pipe_mutex_unlock(bmMutex); } void driReadLockKernelBO(void) { - _glthread_LOCK_MUTEX(bmMutex); + pipe_mutex_lock(bmMutex); kernelReaders++; - _glthread_UNLOCK_MUTEX(bmMutex); + pipe_mutex_unlock(bmMutex); } void driReadUnlockKernelBO(void) { - _glthread_LOCK_MUTEX(bmMutex); + pipe_mutex_lock(bmMutex); if (--kernelReaders == 0) - _glthread_COND_BROADCAST(bmCond); - _glthread_UNLOCK_MUTEX(bmMutex); + pipe_condvar_broadcast(bmCond); + pipe_mutex_unlock(bmMutex); } @@ -277,7 +277,7 @@ void driReadUnlockKernelBO(void) typedef struct _DriBufferObject { DriBufferPool *pool; - _glthread_Mutex mutex; + pipe_mutex mutex; int refCount; const char *name; uint64_t flags; @@ -318,12 +318,12 @@ driBOKernel(struct _DriBufferObject *buf) drmBO *ret; driReadLockKernelBO(); - _glthread_LOCK_MUTEX(buf->mutex); + pipe_mutex_lock(buf->mutex); assert(buf->private != NULL); ret = buf->pool->kernel(buf->pool, buf->private); if (!ret) BM_CKFATAL(-EINVAL); - _glthread_UNLOCK_MUTEX(buf->mutex); + pipe_mutex_unlock(buf->mutex); driReadUnlockKernelBO(); return ret; @@ -338,9 +338,9 @@ driBOWaitIdle(struct _DriBufferObject *buf, int lazy) * that time?? */ - _glthread_LOCK_MUTEX(buf->mutex); + pipe_mutex_lock(buf->mutex); BM_CKFATAL(buf->pool->waitIdle(buf->pool, buf->private, &buf->mutex, lazy)); - _glthread_UNLOCK_MUTEX(buf->mutex); + pipe_mutex_unlock(buf->mutex); } void * @@ -353,11 +353,11 @@ driBOMap(struct _DriBufferObject *buf, unsigned flags, unsigned hint) return buf->userData; } - _glthread_LOCK_MUTEX(buf->mutex); + pipe_mutex_lock(buf->mutex); assert(buf->private != NULL); retval = buf->pool->map(buf->pool, buf->private, flags, hint, &buf->mutex, &virtual); - _glthread_UNLOCK_MUTEX(buf->mutex); + pipe_mutex_unlock(buf->mutex); return retval == 0 ?
virtual : NULL; } @@ -369,9 +369,9 @@ driBOUnmap(struct _DriBufferObject *buf) return; assert(buf->private != NULL); - _glthread_LOCK_MUTEX(buf->mutex); + pipe_mutex_lock(buf->mutex); BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private)); - _glthread_UNLOCK_MUTEX(buf->mutex); + pipe_mutex_unlock(buf->mutex); } unsigned long @@ -381,9 +381,9 @@ driBOOffset(struct _DriBufferObject *buf) assert(buf->private != NULL); - _glthread_LOCK_MUTEX(buf->mutex); + pipe_mutex_lock(buf->mutex); ret = buf->pool->offset(buf->pool, buf->private); - _glthread_UNLOCK_MUTEX(buf->mutex); + pipe_mutex_unlock(buf->mutex); return ret; } @@ -394,9 +394,9 @@ driBOPoolOffset(struct _DriBufferObject *buf) assert(buf->private != NULL); - _glthread_LOCK_MUTEX(buf->mutex); + pipe_mutex_lock(buf->mutex); ret = buf->pool->poolOffset(buf->pool, buf->private); - _glthread_UNLOCK_MUTEX(buf->mutex); + pipe_mutex_unlock(buf->mutex); return ret; } @@ -408,9 +408,9 @@ driBOFlags(struct _DriBufferObject *buf) assert(buf->private != NULL); driReadLockKernelBO(); - _glthread_LOCK_MUTEX(buf->mutex); + pipe_mutex_lock(buf->mutex); ret = buf->pool->flags(buf->pool, buf->private); - _glthread_UNLOCK_MUTEX(buf->mutex); + pipe_mutex_unlock(buf->mutex); driReadUnlockKernelBO(); return ret; } @@ -418,12 +418,12 @@ driBOFlags(struct _DriBufferObject *buf) struct _DriBufferObject * driBOReference(struct _DriBufferObject *buf) { - _glthread_LOCK_MUTEX(buf->mutex); + pipe_mutex_lock(buf->mutex); if (++buf->refCount == 1) { - _glthread_UNLOCK_MUTEX(buf->mutex); + pipe_mutex_unlock(buf->mutex); BM_CKFATAL(-EINVAL); } - _glthread_UNLOCK_MUTEX(buf->mutex); + pipe_mutex_unlock(buf->mutex); return buf; } @@ -435,10 +435,10 @@ driBOUnReference(struct _DriBufferObject *buf) if (!buf) return; - _glthread_LOCK_MUTEX(buf->mutex); + pipe_mutex_lock(buf->mutex); tmp = --buf->refCount; if (!tmp) { - _glthread_UNLOCK_MUTEX(buf->mutex); + pipe_mutex_unlock(buf->mutex); if (buf->private) { if (buf->createdByReference) buf->pool->unreference(buf->pool, buf->private); @@ -451,7 +451,7 @@ driBOUnReference(struct _DriBufferObject *buf) num_buffers--; free(buf); } else - _glthread_UNLOCK_MUTEX(buf->mutex); + pipe_mutex_unlock(buf->mutex); } @@ -469,7 +469,7 @@ driBOData(struct _DriBufferObject *buf, assert(!buf->userBuffer); /* XXX just do a memcpy? */ - _glthread_LOCK_MUTEX(buf->mutex); + pipe_mutex_lock(buf->mutex); pool = buf->pool; if (pool == NULL && newPool != NULL) { @@ -556,7 +556,7 @@ driBOData(struct _DriBufferObject *buf, } out: - _glthread_UNLOCK_MUTEX(buf->mutex); + pipe_mutex_unlock(buf->mutex); return retval; } @@ -569,7 +569,7 @@ driBOSubData(struct _DriBufferObject *buf, assert(!buf->userBuffer); /* XXX just do a memcpy? */ - _glthread_LOCK_MUTEX(buf->mutex); + pipe_mutex_lock(buf->mutex); if (size && data) { BM_CKFATAL(buf->pool->map(buf->pool, buf->private, DRM_BO_FLAG_WRITE, 0, &buf->mutex, @@ -577,7 +577,7 @@ driBOSubData(struct _DriBufferObject *buf, memcpy((unsigned char *) virtual + offset, data, size); BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private)); } - _glthread_UNLOCK_MUTEX(buf->mutex); + pipe_mutex_unlock(buf->mutex); } void @@ -588,21 +588,21 @@ driBOGetSubData(struct _DriBufferObject *buf, assert(!buf->userBuffer); /* XXX just do a memcpy? 
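 * (presumably: keep the buffer mapped and memcpy into it directly, + instead of the map/copy/unmap round trip performed below)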
*/ - _glthread_LOCK_MUTEX(buf->mutex); + pipe_mutex_lock(buf->mutex); if (size && data) { BM_CKFATAL(buf->pool->map(buf->pool, buf->private, DRM_BO_FLAG_READ, 0, &buf->mutex, &virtual)); memcpy(data, (unsigned char *) virtual + offset, size); BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private)); } - _glthread_UNLOCK_MUTEX(buf->mutex); + pipe_mutex_unlock(buf->mutex); } void driBOSetReferenced(struct _DriBufferObject *buf, unsigned long handle) { - _glthread_LOCK_MUTEX(buf->mutex); + pipe_mutex_lock(buf->mutex); if (buf->private != NULL) { assert((size_t)"Invalid buffer for setReferenced\n" & 0); BM_CKFATAL(-EINVAL); @@ -619,7 +619,7 @@ driBOSetReferenced(struct _DriBufferObject *buf, } buf->createdByReference = TRUE; buf->flags = buf->pool->kernel(buf->pool, buf->private)->flags; - _glthread_UNLOCK_MUTEX(buf->mutex); + pipe_mutex_unlock(buf->mutex); } int @@ -644,8 +644,8 @@ driGenBuffers(struct _DriBufferPool *pool, if (!buf) return -ENOMEM; - _glthread_INIT_MUTEX(buf->mutex); - _glthread_LOCK_MUTEX(buf->mutex); + pipe_mutex_init(buf->mutex); + pipe_mutex_lock(buf->mutex); buf->refCount = 1; buf->flags = flags; buf->hint = hint; @@ -653,7 +653,7 @@ driGenBuffers(struct _DriBufferPool *pool, buf->alignment = alignment; buf->pool = pool; buf->createdByReference = 0; - _glthread_UNLOCK_MUTEX(buf->mutex); + pipe_mutex_unlock(buf->mutex); buffers[i] = buf; } return 0; @@ -818,7 +818,7 @@ driBOAddListItem(struct _DriBufferList * list, struct _DriBufferObject *buf, { int newItem; - _glthread_LOCK_MUTEX(buf->mutex); + pipe_mutex_lock(buf->mutex); BM_CKFATAL(driAddValidateItem(&list->drmBuffers, buf->pool->kernel(buf->pool, buf->private), flags, mask, itemLoc, node)); @@ -827,7 +827,7 @@ driBOAddListItem(struct _DriBufferList * list, struct _DriBufferObject *buf, if (newItem) buf->refCount++; - _glthread_UNLOCK_MUTEX(buf->mutex); + pipe_mutex_unlock(buf->mutex); } drmBOList *driGetdrmBOList(struct _DriBufferList *list) @@ -845,10 +845,10 @@ void driPutdrmBOList(struct _DriBufferList *list) void driBOFence(struct _DriBufferObject *buf, struct _DriFenceObject *fence) { - _glthread_LOCK_MUTEX(buf->mutex); + pipe_mutex_lock(buf->mutex); if (buf->pool->fence) BM_CKFATAL(buf->pool->fence(buf->pool, buf->private, fence)); - _glthread_UNLOCK_MUTEX(buf->mutex); + pipe_mutex_unlock(buf->mutex); } @@ -908,10 +908,10 @@ driBOValidateUserList(struct _DriBufferList * list) while (curBuf) { buf = (struct _DriBufferObject *) drmBOListBuf(curBuf); - _glthread_LOCK_MUTEX(buf->mutex); + pipe_mutex_lock(buf->mutex); if (buf->pool->validate) BM_CKFATAL(buf->pool->validate(buf->pool, buf->private, &buf->mutex)); - _glthread_UNLOCK_MUTEX(buf->mutex); + pipe_mutex_unlock(buf->mutex); curBuf = drmBOListNext(&list->driBuffers, curBuf); } } @@ -929,9 +929,9 @@ driBOSize(struct _DriBufferObject *buf) { unsigned long size; - _glthread_LOCK_MUTEX(buf->mutex); + pipe_mutex_lock(buf->mutex); size = buf->pool->size(buf->pool, buf->private); - _glthread_UNLOCK_MUTEX(buf->mutex); + pipe_mutex_unlock(buf->mutex); return size; diff --git a/src/gallium/winsys/drm/intel/common/ws_dri_bufpool.h b/src/gallium/winsys/drm/intel/common/ws_dri_bufpool.h index bf60798924..ad3b6f3931 100644 --- a/src/gallium/winsys/drm/intel/common/ws_dri_bufpool.h +++ b/src/gallium/winsys/drm/intel/common/ws_dri_bufpool.h @@ -33,14 +33,14 @@ #define _PSB_BUFPOOL_H_ #include -#include +#include "pipe/p_thread.h" struct _DriFenceObject; typedef struct _DriBufferPool { int fd; int (*map) (struct _DriBufferPool * pool, void *private, - unsigned flags, int hint, 
diff --git a/src/gallium/winsys/drm/intel/common/ws_dri_bufpool.h b/src/gallium/winsys/drm/intel/common/ws_dri_bufpool.h
index bf60798924..ad3b6f3931 100644
--- a/src/gallium/winsys/drm/intel/common/ws_dri_bufpool.h
+++ b/src/gallium/winsys/drm/intel/common/ws_dri_bufpool.h
@@ -33,14 +33,14 @@
 #define _PSB_BUFPOOL_H_
 
 #include
-#include
+#include "pipe/p_thread.h"
 struct _DriFenceObject;
 
 typedef struct _DriBufferPool
 {
    int fd;
    int (*map) (struct _DriBufferPool * pool, void *private,
-               unsigned flags, int hint, _glthread_Mutex *mutex,
+               unsigned flags, int hint, pipe_mutex *mutex,
                void **virtual);
    int (*unmap) (struct _DriBufferPool * pool, void *private);
    int (*destroy) (struct _DriBufferPool * pool, void *private);
@@ -55,8 +55,8 @@ typedef struct _DriBufferPool
    int (*fence) (struct _DriBufferPool * pool, void *private,
                  struct _DriFenceObject * fence);
    drmBO *(*kernel) (struct _DriBufferPool * pool, void *private);
-   int (*validate) (struct _DriBufferPool * pool, void *private, _glthread_Mutex *mutex);
-   int (*waitIdle) (struct _DriBufferPool *pool, void *private, _glthread_Mutex *mutex,
+   int (*validate) (struct _DriBufferPool * pool, void *private, pipe_mutex *mutex);
+   int (*waitIdle) (struct _DriBufferPool *pool, void *private, pipe_mutex *mutex,
                     int lazy);
    int (*setStatus) (struct _DriBufferPool *pool, void *private,
                      uint64_t flag_diff, uint64_t old_flags);
diff --git a/src/gallium/winsys/drm/intel/common/ws_dri_drmpool.c b/src/gallium/winsys/drm/intel/common/ws_dri_drmpool.c
index 40929efa2f..54618b1c82 100644
--- a/src/gallium/winsys/drm/intel/common/ws_dri_drmpool.c
+++ b/src/gallium/winsys/drm/intel/common/ws_dri_drmpool.c
@@ -113,7 +113,7 @@ pool_unreference(struct _DriBufferPool *pool, void *private)
 
 static int
 pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
-         int hint, _glthread_Mutex *mutex, void **virtual)
+         int hint, pipe_mutex *mutex, void **virtual)
 {
    drmBO *buf = (drmBO *) private;
    int ret;
@@ -202,7 +202,7 @@ pool_kernel(struct _DriBufferPool *pool, void *private)
 }
 
 static int
-pool_waitIdle(struct _DriBufferPool *pool, void *private, _glthread_Mutex *mutex,
+pool_waitIdle(struct _DriBufferPool *pool, void *private, pipe_mutex *mutex,
               int lazy)
 {
    drmBO *buf = (drmBO *) private;
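Throughout the pool vtable, map, validate and waitIdle keep taking the caller's mutex by pointer, now as pipe_mutex *. The pointer is there so a pool implementation can sleep on a per-buffer condition variable, which atomically releases and re-acquires the caller-held lock; ws_dri_slabpool.c further down does exactly that. A reduced sketch, with hypothetical example_buf and example_waitIdle names:

#include "pipe/p_thread.h"

struct example_buf {
   int busy;                 /* guarded by the caller-held mutex */
   pipe_condvar event;
};

/* Hypothetical waitIdle-style callback: *mutex is held on entry and on
 * return, but is released while the thread sleeps inside the wait. */
static int
example_waitIdle(void *private, pipe_mutex *mutex, int lazy)
{
   struct example_buf *buf = (struct example_buf *) private;

   (void) lazy;                               /* unused in this sketch */
   while (buf->busy)
      pipe_condvar_wait(buf->event, *mutex);  /* drops *mutex while asleep */
   return 0;
}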
diff --git a/src/gallium/winsys/drm/intel/common/ws_dri_fencemgr.c b/src/gallium/winsys/drm/intel/common/ws_dri_fencemgr.c
index b56bc269da..831c75d30c 100644
--- a/src/gallium/winsys/drm/intel/common/ws_dri_fencemgr.c
+++ b/src/gallium/winsys/drm/intel/common/ws_dri_fencemgr.c
@@ -1,5 +1,5 @@
 #include "ws_dri_fencemgr.h"
-#include "glthread.h"
+#include "pipe/p_thread.h"
 #include
 #include
 #include
@@ -20,7 +20,7 @@ struct _DriFenceMgr {
    /*
     * These members are protected by this->mutex
    */
-   _glthread_Mutex mutex;
+   pipe_mutex mutex;
    int refCount;
    drmMMListHead *heads;
    int num_fences;
@@ -44,7 +44,7 @@ struct _DriFenceObject {
    /*
     * These members are protected by this->mutex.
    */
-   _glthread_Mutex mutex;
+   pipe_mutex mutex;
    uint32_t signaled_type;
    void *private;
 };
@@ -65,8 +65,8 @@ driFenceMgrCreate(const struct _DriFenceMgrCreateInfo *info)
    if (!tmp)
       return NULL;
 
-   _glthread_INIT_MUTEX(tmp->mutex);
-   _glthread_LOCK_MUTEX(tmp->mutex);
+   pipe_mutex_init(tmp->mutex);
+   pipe_mutex_lock(tmp->mutex);
    tmp->refCount = 1;
    tmp->info = *info;
    tmp->num_fences = 0;
@@ -77,7 +77,7 @@ driFenceMgrCreate(const struct _DriFenceMgrCreateInfo *info)
    for (i=0; i<tmp->info.num_classes; ++i) {
       DRMINITLISTHEAD(&tmp->heads[i]);
    }
-   _glthread_UNLOCK_MUTEX(tmp->mutex);
+   pipe_mutex_unlock(tmp->mutex);
    return tmp;
 
 out_err:
@@ -95,13 +95,13 @@ driFenceMgrUnrefUnlock(struct _DriFenceMgr **pMgr)
    if (--mgr->refCount == 0)
       free(mgr);
    else
-      _glthread_UNLOCK_MUTEX(mgr->mutex);
+      pipe_mutex_unlock(mgr->mutex);
 }
 
 void
 driFenceMgrUnReference(struct _DriFenceMgr **pMgr)
 {
-   _glthread_LOCK_MUTEX((*pMgr)->mutex);
+   pipe_mutex_lock((*pMgr)->mutex);
    driFenceMgrUnrefUnlock(pMgr);
 }
 
@@ -143,9 +143,9 @@ driSignalPreviousFencesLocked(struct _DriFenceMgr *mgr,
       */
      ++entry->refCount;
-      _glthread_UNLOCK_MUTEX(mgr->mutex);
-      _glthread_LOCK_MUTEX(entry->mutex);
-      _glthread_LOCK_MUTEX(mgr->mutex);
+      pipe_mutex_unlock(mgr->mutex);
+      pipe_mutex_lock(entry->mutex);
+      pipe_mutex_lock(mgr->mutex);
 
      prev = list->prev;
 
@@ -157,7 +157,7 @@ driSignalPreviousFencesLocked(struct _DriFenceMgr *mgr,
         * Somebody else removed the entry from the list.
         */
 
-        _glthread_UNLOCK_MUTEX(entry->mutex);
+        pipe_mutex_unlock(entry->mutex);
        driFenceUnReferenceLocked(&entry);
        return;
      }
@@ -167,7 +167,7 @@ driSignalPreviousFencesLocked(struct _DriFenceMgr *mgr,
        DRMLISTDELINIT(list);
        mgr->info.unreference(mgr, &entry->private);
      }
-     _glthread_UNLOCK_MUTEX(entry->mutex);
+     pipe_mutex_unlock(entry->mutex);
      driFenceUnReferenceLocked(&entry);
      list = prev;
   }
@@ -181,7 +181,7 @@ driFenceFinish(struct _DriFenceObject *fence, uint32_t fence_type,
   struct _DriFenceMgr *mgr = fence->mgr;
   int ret = 0;
 
-   _glthread_LOCK_MUTEX(fence->mutex);
+   pipe_mutex_lock(fence->mutex);
 
   if ((fence->signaled_type & fence_type) == fence_type)
      goto out0;
@@ -190,16 +190,16 @@ driFenceFinish(struct _DriFenceObject *fence, uint32_t fence_type,
   if (ret)
      goto out0;
 
-   _glthread_LOCK_MUTEX(mgr->mutex);
-   _glthread_UNLOCK_MUTEX(fence->mutex);
+   pipe_mutex_lock(mgr->mutex);
+   pipe_mutex_unlock(fence->mutex);
 
   driSignalPreviousFencesLocked(mgr, &fence->head, fence->fence_class,
                                 fence_type);
-   _glthread_UNLOCK_MUTEX(mgr->mutex);
+   pipe_mutex_unlock(mgr->mutex);
   return 0;
 
 out0:
-   _glthread_UNLOCK_MUTEX(fence->mutex);
+   pipe_mutex_unlock(fence->mutex);
   return ret;
 }
 
@@ -207,9 +207,9 @@ uint32_t driFenceSignaledTypeCached(struct _DriFenceObject *fence)
 {
   uint32_t ret;
 
-   _glthread_LOCK_MUTEX(fence->mutex);
+   pipe_mutex_lock(fence->mutex);
   ret = fence->signaled_type;
-   _glthread_UNLOCK_MUTEX(fence->mutex);
+   pipe_mutex_unlock(fence->mutex);
 
   return ret;
 }
@@ -221,7 +221,7 @@ driFenceSignaledType(struct _DriFenceObject *fence, uint32_t flush_type,
   int ret = 0;
   struct _DriFenceMgr *mgr;
 
-   _glthread_LOCK_MUTEX(fence->mutex);
+   pipe_mutex_lock(fence->mutex);
   mgr = fence->mgr;
   *signaled = fence->signaled_type;
   if ((fence->signaled_type & flush_type) == flush_type)
@@ -236,25 +236,25 @@ driFenceSignaledType(struct _DriFenceObject *fence, uint32_t flush_type,
   if ((fence->signaled_type | *signaled) == fence->signaled_type)
      goto out0;
 
-   _glthread_LOCK_MUTEX(mgr->mutex);
-   _glthread_UNLOCK_MUTEX(fence->mutex);
+   pipe_mutex_lock(mgr->mutex);
+   pipe_mutex_unlock(fence->mutex);
 
   driSignalPreviousFencesLocked(mgr, &fence->head, fence->fence_class,
                                 *signaled);
-   _glthread_UNLOCK_MUTEX(mgr->mutex);
+   pipe_mutex_unlock(mgr->mutex);
 
   return 0;
 out0:
-   _glthread_UNLOCK_MUTEX(fence->mutex);
+   pipe_mutex_unlock(fence->mutex);
   return ret;
 }
 
 struct _DriFenceObject *
 driFenceReference(struct _DriFenceObject *fence)
 {
-   _glthread_LOCK_MUTEX(fence->mgr->mutex);
+   pipe_mutex_lock(fence->mgr->mutex);
   ++fence->refCount;
-   _glthread_UNLOCK_MUTEX(fence->mgr->mutex);
+   pipe_mutex_unlock(fence->mgr->mutex);
   return fence;
 }
 
@@ -267,7 +267,7 @@ driFenceUnReference(struct _DriFenceObject **pFence)
      return;
 
   mgr = (*pFence)->mgr;
-   _glthread_LOCK_MUTEX(mgr->mutex);
+   pipe_mutex_lock(mgr->mutex);
   ++mgr->refCount;
   driFenceUnReferenceLocked(pFence);
   driFenceMgrUnrefUnlock(&mgr);
@@ -294,15 +294,15 @@ struct _DriFenceObject
      return NULL;
   }
 
-   _glthread_INIT_MUTEX(fence->mutex);
-   _glthread_LOCK_MUTEX(fence->mutex);
-   _glthread_LOCK_MUTEX(mgr->mutex);
+   pipe_mutex_init(fence->mutex);
+   pipe_mutex_lock(fence->mutex);
+   pipe_mutex_lock(mgr->mutex);
   fence->refCount = 1;
   DRMLISTADDTAIL(&fence->head, &mgr->heads[fence_class]);
   fence->mgr = mgr;
   ++mgr->refCount;
   ++mgr->num_fences;
-   _glthread_UNLOCK_MUTEX(mgr->mutex);
+   pipe_mutex_unlock(mgr->mutex);
   fence->fence_class = fence_class;
   fence->fence_type = fence_type;
   fence->signaled_type = 0;
@@ -312,7 +312,7 @@ struct _DriFenceObject
      memcpy(fence->private, private, private_size);
   }
 
-   _glthread_UNLOCK_MUTEX(fence->mutex);
+   pipe_mutex_unlock(fence->mutex);
   return fence;
 }
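The only non-trivial locking in this file is driSignalPreviousFencesLocked, visible above: it cannot take a fence mutex while holding the manager mutex, so it pins the fence with a reference, drops the manager lock, and re-takes both in fence-then-manager order before re-validating the list. A reduced sketch of that dance, with hypothetical obj and mgr types standing in for _DriFenceObject and _DriFenceMgr:

#include "pipe/p_thread.h"

struct obj { pipe_mutex mutex; int refCount; };
struct mgr { pipe_mutex mutex; };

/* Re-acquire locks in a fixed object-then-manager order while the
 * reference keeps the object alive across the unlocked window. */
static void
relock(struct mgr *m, struct obj *o)
{
   ++o->refCount;               /* pin o across the unlocked window */
   pipe_mutex_unlock(m->mutex);
   pipe_mutex_lock(o->mutex);   /* object lock first...             */
   pipe_mutex_lock(m->mutex);   /* ...then manager lock again       */
   /* caller must now re-validate anything read under the old lock  */
}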
diff --git a/src/gallium/winsys/drm/intel/common/ws_dri_mallocpool.c b/src/gallium/winsys/drm/intel/common/ws_dri_mallocpool.c
index a80555c9c7..60924eac9e 100644
--- a/src/gallium/winsys/drm/intel/common/ws_dri_mallocpool.c
+++ b/src/gallium/winsys/drm/intel/common/ws_dri_mallocpool.c
@@ -33,7 +33,7 @@
 #include
 #include
 #include "pipe/p_debug.h"
-#include "glthread.h"
+#include "pipe/p_thread.h"
 #include "ws_dri_bufpool.h"
 #include "ws_dri_bufmgr.h"
 
@@ -60,14 +60,14 @@ pool_destroy(struct _DriBufferPool *pool, void *private)
 
 static int
 pool_waitIdle(struct _DriBufferPool *pool, void *private,
-              _glthread_Mutex *mutex, int lazy)
+              pipe_mutex *mutex, int lazy)
 {
    return 0;
 }
 
 static int
 pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
-         int hint, _glthread_Mutex *mutex, void **virtual)
+         int hint, pipe_mutex *mutex, void **virtual)
 {
    *virtual = (void *)((unsigned long *)private + 2);
    return 0;
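pool_map in the malloc pool hands out storage two unsigned longs past the private pointer, which suggests the allocation carries a small two-word bookkeeping header in front of the user-visible data. A hypothetical sketch of that layout (the real allocation code lives elsewhere in ws_dri_mallocpool.c and is not shown in this patch; the field meanings below are assumptions):

#include <stdlib.h>

/* Hypothetical allocator matching the "+ 2" offset in pool_map above:
 * two bookkeeping words, then the data the mapped pointer refers to. */
static void *
example_malloc_buf(size_t size)
{
   unsigned long *header = malloc(2 * sizeof(unsigned long) + size);
   if (!header)
      return NULL;
   header[0] = 0;   /* bookkeeping word 0 (assumed: e.g. size)  */
   header[1] = 0;   /* bookkeeping word 1 (assumed: e.g. flags) */
   return header;   /* "private"; mapped data starts at header + 2 */
}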
diff --git a/src/gallium/winsys/drm/intel/common/ws_dri_slabpool.c b/src/gallium/winsys/drm/intel/common/ws_dri_slabpool.c
index dfcf6d6b19..391cea50a7 100644
--- a/src/gallium/winsys/drm/intel/common/ws_dri_slabpool.c
+++ b/src/gallium/winsys/drm/intel/common/ws_dri_slabpool.c
@@ -37,7 +37,7 @@
 #include "ws_dri_bufpool.h"
 #include "ws_dri_fencemgr.h"
 #include "ws_dri_bufmgr.h"
-#include "glthread.h"
+#include "pipe/p_thread.h"
 
 #define DRI_SLABPOOL_ALLOC_RETRIES 100
 
@@ -53,7 +53,7 @@ struct _DriSlabBuffer {
    uint32_t start;
    uint32_t fenceType;
    int unFenced;
-   _glthread_Cond event;
+   pipe_condvar event;
 };
 
 struct _DriKernelBO {
@@ -84,7 +84,7 @@ struct _DriSlabSizeHeader {
    uint32_t numDelayed;
    struct _DriSlabPool *slabPool;
    uint32_t bufSize;
-   _glthread_Mutex mutex;
+   pipe_mutex mutex;
 };
 
 struct _DriFreeSlabManager {
@@ -94,7 +94,7 @@ struct _DriFreeSlabManager {
    drmMMListHead timeoutList;
    drmMMListHead unCached;
    drmMMListHead cached;
-   _glthread_Mutex mutex;
+   pipe_mutex mutex;
 };
 
@@ -196,7 +196,7 @@ driSetKernelBOFree(struct _DriFreeSlabManager *fMan,
 {
    struct timeval time;
 
-   _glthread_LOCK_MUTEX(fMan->mutex);
+   pipe_mutex_lock(fMan->mutex);
    gettimeofday(&time, NULL);
    driTimeAdd(&time, &fMan->slabTimeout);
 
@@ -210,7 +210,7 @@ driSetKernelBOFree(struct _DriFreeSlabManager *fMan,
       DRMLISTADDTAIL(&kbo->timeoutHead, &fMan->timeoutList);
 
    driFreeTimeoutKBOsLocked(fMan, &time);
-   _glthread_UNLOCK_MUTEX(fMan->mutex);
+   pipe_mutex_unlock(fMan->mutex);
 }
 
 /*
@@ -237,7 +237,7 @@ driAllocKernelBO(struct _DriSlabSizeHeader *header)
    size = (size <= slabPool->maxSlabSize) ? size : slabPool->maxSlabSize;
    size = (size + slabPool->pageSize - 1) & ~(slabPool->pageSize - 1);
 
-   _glthread_LOCK_MUTEX(fMan->mutex);
+   pipe_mutex_lock(fMan->mutex);
 
    kbo = NULL;
 
@@ -269,7 +269,7 @@ driAllocKernelBO(struct _DriSlabSizeHeader *header)
       DRMLISTDELINIT(&kbo->timeoutHead);
    }
 
-   _glthread_UNLOCK_MUTEX(fMan->mutex);
+   pipe_mutex_unlock(fMan->mutex);
 
    if (kbo) {
       uint64_t new_mask = kbo->bo.proposedFlags ^ slabPool->proposedFlags;
@@ -360,7 +360,7 @@ driAllocSlab(struct _DriSlabSizeHeader *header)
       buf->start = i* header->bufSize;
       buf->mapCount = 0;
       buf->isSlabBuffer = 1;
-      _glthread_INIT_COND(buf->event);
+      pipe_condvar_init(buf->event);
       DRMLISTADDTAIL(&buf->head, &slab->freeBuffers);
       slab->numFree++;
       buf++;
@@ -494,23 +494,23 @@ driSlabAllocBuffer(struct _DriSlabSizeHeader *header)
    drmMMListHead *list;
    int count = DRI_SLABPOOL_ALLOC_RETRIES;
 
-   _glthread_LOCK_MUTEX(header->mutex);
+   pipe_mutex_lock(header->mutex);
    while(header->slabs.next == &header->slabs && count > 0) {
       driSlabCheckFreeLocked(header, 0);
       if (header->slabs.next != &header->slabs)
         break;
 
-      _glthread_UNLOCK_MUTEX(header->mutex);
+      pipe_mutex_unlock(header->mutex);
       if (count != DRI_SLABPOOL_ALLOC_RETRIES)
         usleep(1);
-      _glthread_LOCK_MUTEX(header->mutex);
+      pipe_mutex_lock(header->mutex);
      (void) driAllocSlab(header);
      count--;
    }
 
    list = header->slabs.next;
    if (list == &header->slabs) {
-      _glthread_UNLOCK_MUTEX(header->mutex);
+      pipe_mutex_unlock(header->mutex);
      return NULL;
    }
    slab = DRMLISTENTRY(struct _DriSlab, list, head);
@@ -520,7 +520,7 @@ driSlabAllocBuffer(struct _DriSlabSizeHeader *header)
 
    list = slab->freeBuffers.next;
    DRMLISTDELINIT(list);
 
-   _glthread_UNLOCK_MUTEX(header->mutex);
+   pipe_mutex_unlock(header->mutex);
    buf = DRMLISTENTRY(struct _DriSlabBuffer, list, head);
    return buf;
 }
@@ -618,7 +618,7 @@ pool_destroy(struct _DriBufferPool *driPool, void *private)
    slab = buf->parent;
    header = slab->header;
 
-   _glthread_LOCK_MUTEX(header->mutex);
+   pipe_mutex_lock(header->mutex);
    buf->unFenced = 0;
    buf->mapCount = 0;
 
@@ -631,18 +631,18 @@ pool_destroy(struct _DriBufferPool *driPool, void *private)
       driSlabFreeBufferLocked(buf);
    }
 
-   _glthread_UNLOCK_MUTEX(header->mutex);
+   pipe_mutex_unlock(header->mutex);
    return 0;
 }
 
 static int
 pool_waitIdle(struct _DriBufferPool *driPool, void *private,
-              _glthread_Mutex *mutex, int lazy)
+              pipe_mutex *mutex, int lazy)
 {
    struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
 
    while(buf->unFenced)
-      _glthread_COND_WAIT(buf->event, *mutex);
+      pipe_condvar_wait(buf->event, *mutex);
 
    if (!buf->fence)
      return 0;
@@ -655,7 +655,7 @@ pool_waitIdle(struct _DriBufferPool *driPool, void *private,
 
 static int
 pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
-         int hint, _glthread_Mutex *mutex, void **virtual)
+         int hint, pipe_mutex *mutex, void **virtual)
 {
    struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
    int busy;
@@ -689,7 +689,7 @@ pool_unmap(struct _DriBufferPool *pool, void *private)
    --buf->mapCount;
    if (buf->mapCount == 0 && buf->isSlabBuffer)
-      _glthread_COND_BROADCAST(buf->event);
+      pipe_condvar_broadcast(buf->event);
 
    return 0;
 }
@@ -760,7 +760,7 @@ pool_fence(struct _DriBufferPool *pool, void *private,
    buf->fenceType = bo->fenceFlags;
 
    buf->unFenced = 0;
-   _glthread_COND_BROADCAST(buf->event);
+   pipe_condvar_broadcast(buf->event);
 
    return 0;
 }
@@ -775,7 +775,7 @@ pool_kernel(struct _DriBufferPool *pool, void *private)
 
 static int
 pool_validate(struct _DriBufferPool *pool, void *private,
-              _glthread_Mutex *mutex)
+              pipe_mutex *mutex)
 {
    struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
 
@@ -783,7 +783,7 @@ pool_validate(struct _DriBufferPool *pool, void *private,
      return 0;
 
    while(buf->mapCount != 0)
-      _glthread_COND_WAIT(buf->event, *mutex);
+      pipe_condvar_wait(buf->event, *mutex);
 
    buf->unFenced = 1;
    return 0;
@@ -799,8 +799,8 @@ driInitFreeSlabManager(uint32_t checkIntervalMsec, uint32_t slabTimeoutMsec)
    if (!tmp)
      return NULL;
 
-   _glthread_INIT_MUTEX(tmp->mutex);
-   _glthread_LOCK_MUTEX(tmp->mutex);
+   pipe_mutex_init(tmp->mutex);
+   pipe_mutex_lock(tmp->mutex);
    tmp->slabTimeout.tv_usec = slabTimeoutMsec*1000;
    tmp->slabTimeout.tv_sec = tmp->slabTimeout.tv_usec / 1000000;
    tmp->slabTimeout.tv_usec -= tmp->slabTimeout.tv_sec*1000000;
@@ -814,7 +814,7 @@ driInitFreeSlabManager(uint32_t checkIntervalMsec, uint32_t slabTimeoutMsec)
    DRMINITLISTHEAD(&tmp->timeoutList);
    DRMINITLISTHEAD(&tmp->unCached);
    DRMINITLISTHEAD(&tmp->cached);
-   _glthread_UNLOCK_MUTEX(tmp->mutex);
+   pipe_mutex_unlock(tmp->mutex);
 
    return tmp;
 }
@@ -827,9 +827,9 @@ driFinishFreeSlabManager(struct _DriFreeSlabManager *fMan)
    time = fMan->nextCheck;
    driTimeAdd(&time, &fMan->checkInterval);
 
-   _glthread_LOCK_MUTEX(fMan->mutex);
+   pipe_mutex_lock(fMan->mutex);
    driFreeTimeoutKBOsLocked(fMan, &time);
-   _glthread_UNLOCK_MUTEX(fMan->mutex);
+   pipe_mutex_unlock(fMan->mutex);
 
    assert(fMan->timeoutList.next == &fMan->timeoutList);
    assert(fMan->unCached.next == &fMan->unCached);
@@ -842,8 +842,8 @@ static void
 driInitSizeHeader(struct _DriSlabPool *pool, uint32_t size,
                   struct _DriSlabSizeHeader *header)
 {
-   _glthread_INIT_MUTEX(header->mutex);
-   _glthread_LOCK_MUTEX(header->mutex);
+   pipe_mutex_init(header->mutex);
+   pipe_mutex_lock(header->mutex);
 
    DRMINITLISTHEAD(&header->slabs);
    DRMINITLISTHEAD(&header->freeSlabs);
@@ -853,7 +853,7 @@ driInitSizeHeader(struct _DriSlabPool *pool, uint32_t size,
    header->slabPool = pool;
    header->bufSize = size;
 
-   _glthread_UNLOCK_MUTEX(header->mutex);
+   pipe_mutex_unlock(header->mutex);
 }
 
 static void
@@ -862,7 +862,7 @@ driFinishSizeHeader(struct _DriSlabSizeHeader *header)
    drmMMListHead *list, *next;
    struct _DriSlabBuffer *buf;
 
-   _glthread_LOCK_MUTEX(header->mutex);
+   pipe_mutex_lock(header->mutex);
    for (list = header->delayedBuffers.next, next = list->next;
        list != &header->delayedBuffers;
        list = next, next = list->next) {
@@ -875,7 +875,7 @@ driFinishSizeHeader(struct _DriSlabSizeHeader *header)
      header->numDelayed--;
      driSlabFreeBufferLocked(buf);
    }
-   _glthread_UNLOCK_MUTEX(header->mutex);
+   pipe_mutex_unlock(header->mutex);
 }
 
 static void
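The buf->event condition variable above follows the classic pattern: waiters re-test their predicate in a loop around pipe_condvar_wait, and whoever changes the state broadcasts while the shared header mutex is held. A reduced sketch with a hypothetical slab_buf type standing in for _DriSlabBuffer:

#include "pipe/p_thread.h"

struct slab_buf {                /* hypothetical, reduced from _DriSlabBuffer */
   unsigned mapCount;            /* guarded by the pool's header mutex */
   pipe_condvar event;
};

/* Called with *mutex held: sleep until every mapping is released. */
static void
wait_unmapped(struct slab_buf *buf, pipe_mutex *mutex)
{
   while (buf->mapCount != 0)               /* re-test after every wakeup */
      pipe_condvar_wait(buf->event, *mutex);
}

/* Called with the same mutex held: drop one mapping, wake any waiters. */
static void
release_mapping(struct slab_buf *buf)
{
   if (--buf->mapCount == 0)
      pipe_condvar_broadcast(buf->event);
}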
diff --git a/src/gallium/winsys/drm/intel/dri/intel_lock.c b/src/gallium/winsys/drm/intel/dri/intel_lock.c
index 406284c98f..18c7bba0d5 100644
--- a/src/gallium/winsys/drm/intel/dri/intel_lock.c
+++ b/src/gallium/winsys/drm/intel/dri/intel_lock.c
@@ -27,7 +27,7 @@
 
 #include "main/glheader.h"
-#include "glapi/glthread.h"
+#include "pipe/p_thread.h"
 #include
 #include "state_tracker/st_public.h"
 #include "intel_context.h"
@@ -35,7 +35,7 @@
 
-_glthread_DECLARE_STATIC_MUTEX( lockMutex );
+pipe_static_mutex( lockMutex );
 
 static void
@@ -72,7 +72,7 @@
 void LOCK_HARDWARE( struct intel_context *intel )
 {
    char __ret = 0;
 
-   _glthread_LOCK_MUTEX(lockMutex);
+   pipe_mutex_lock(lockMutex);
    assert(!intel->locked);
 
    DRM_CAS(intel->driHwLock, intel->hHWContext,
@@ -96,7 +96,7 @@ void UNLOCK_HARDWARE( struct intel_context *intel )
 
    DRM_UNLOCK(intel->driFd, intel->driHwLock, intel->hHWContext);
 
-   _glthread_UNLOCK_MUTEX(lockMutex);
+   pipe_mutex_unlock(lockMutex);
 
    DBG(LOCK, "%s - unlocked\n", __progname);
 }
diff --git a/src/gallium/winsys/xlib/glxapi.c b/src/gallium/winsys/xlib/glxapi.c
index c2ccce6f52..c059fc3edb 100644
--- a/src/gallium/winsys/xlib/glxapi.c
+++ b/src/gallium/winsys/xlib/glxapi.c
@@ -37,6 +37,7 @@
 #include "main/glheader.h"
 #include "glapi/glapi.h"
 #include "glxapi.h"
+#include "pipe/p_thread.h"
 
 extern struct _glxapi_table *_real_GetGLXDispatchTable(void);
 
@@ -127,26 +128,13 @@ get_dispatch(Display *dpy)
 /**
  * GLX API current context.
  */
-#if defined(GLX_USE_TLS)
-PUBLIC __thread void * CurrentContext
-    __attribute__((tls_model("initial-exec")));
-#elif defined(THREADS)
-static _glthread_TSD ContextTSD;   /**< Per-thread context pointer */
-#else
-static GLXContext CurrentContext = 0;
-#endif
+pipe_tsd ContextTSD;
 
 static void
 SetCurrentContext(GLXContext c)
 {
-#if defined(GLX_USE_TLS)
-   CurrentContext = c;
-#elif defined(THREADS)
-   _glthread_SetTSD(&ContextTSD, c);
-#else
-   CurrentContext = c;
-#endif
+   pipe_tsd_set(&ContextTSD, c);
 }
 
@@ -238,13 +226,7 @@ glXGetConfig(Display *dpy, XVisualInfo *visinfo, int attrib, int *value)
 GLXContext PUBLIC
 glXGetCurrentContext(void)
 {
-#if defined(GLX_USE_TLS)
-   return CurrentContext;
-#elif defined(THREADS)
-   return (GLXContext) _glthread_GetTSD(&ContextTSD);
-#else
-   return CurrentContext;
-#endif
+   return (GLXContext) pipe_tsd_get(&ContextTSD);
 }
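intel_lock.c and glxapi.c exercise the two remaining primitives: pipe_static_mutex declares a statically initialized lock, and pipe_tsd collapses the old GLX_USE_TLS / THREADS / single-threaded #if ladder into a single thread-specific-data path. A sketch of both, assuming (as the p_thread.h wrappers appear to guarantee) that pipe_tsd_get and pipe_tsd_set create the underlying key on first use; all names here are illustrative:

#include "pipe/p_thread.h"

pipe_static_mutex(example_lock);   /* was: _glthread_DECLARE_STATIC_MUTEX */

static pipe_tsd context_tsd;       /* per-thread "current context" slot */

static void
set_current(void *ctx)
{
   pipe_tsd_set(&context_tsd, ctx);   /* was: _glthread_SetTSD */
}

static void *
get_current(void)
{
   return pipe_tsd_get(&context_tsd); /* was: _glthread_GetTSD */
}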
diff --git a/src/gallium/winsys/xlib/xm_api.c b/src/gallium/winsys/xlib/xm_api.c
index 7256340420..edcadff9c5 100644
--- a/src/gallium/winsys/xlib/xm_api.c
+++ b/src/gallium/winsys/xlib/xm_api.c
@@ -62,7 +62,6 @@
 #include "xmesaP.h"
 #include "main/context.h"
 #include "main/framebuffer.h"
-#include "glapi/glthread.h"
 #include "state_tracker/st_public.h"
 #include "state_tracker/st_context.h"
 
@@ -75,7 +74,7 @@
 /**
  * Global X driver lock
  */
-_glthread_Mutex _xmesa_lock;
+pipe_mutex _xmesa_lock;
 
 int xmesa_mode;
 
@@ -245,10 +244,10 @@ xmesa_get_window_size(XMesaDisplay *dpy, XMesaBuffer b,
 #else
    Status stat;
 
-   _glthread_LOCK_MUTEX(_xmesa_lock);
+   pipe_mutex_lock(_xmesa_lock);
    XSync(b->xm_visual->display, 0); /* added for Chromium */
    stat = get_drawable_size(dpy, b->drawable, width, height);
-   _glthread_UNLOCK_MUTEX(_xmesa_lock);
+   pipe_mutex_unlock(_xmesa_lock);
 
    if (!stat) {
      /* probably querying a window that's recently been destroyed */
@@ -779,7 +778,7 @@ XMesaContext XMesaCreateContext( XMesaVisual v, XMesaContext share_list )
    uint pf;
 
    if (firstTime) {
-      _glthread_INIT_MUTEX(_xmesa_lock);
+      pipe_mutex_init(_xmesa_lock);
       firstTime = GL_FALSE;
    }
 
diff --git a/src/gallium/winsys/xlib/xmesaP.h b/src/gallium/winsys/xlib/xmesaP.h
index 9b15b2ddf9..fcaeee52bc 100644
--- a/src/gallium/winsys/xlib/xmesaP.h
+++ b/src/gallium/winsys/xlib/xmesaP.h
@@ -35,9 +35,10 @@
 #include "state_tracker/st_context.h"
 #include "state_tracker/st_public.h"
+#include "pipe/p_thread.h"
 
-extern _glthread_Mutex _xmesa_lock;
+extern pipe_mutex _xmesa_lock;
 
 extern XMesaBuffer XMesaBufferList;
--
cgit v1.2.3