author     Eric Anholt <eric@anholt.net>    2007-11-30 14:14:11 -0800
committer  Eric Anholt <eric@anholt.net>    2007-11-30 17:28:48 -0800
commit     6f8dee03aaeab67532bbadef27f9e2a91e0022d8
tree       a3f22551fb5638cd546c5255f5abff4b9d9eeeb1  /src/mesa/drivers
parent     b0b882b666da0673b5f2aa7676e54438538c6a7f
[intel] intel_bufmgr_ttm style sanity
Diffstat (limited to 'src/mesa/drivers')
-rw-r--r--  src/mesa/drivers/dri/intel/intel_bufmgr_ttm.c | 651
1 file changed, 343 insertions(+), 308 deletions(-)
diff --git a/src/mesa/drivers/dri/intel/intel_bufmgr_ttm.c b/src/mesa/drivers/dri/intel/intel_bufmgr_ttm.c
index 32d9886091..8cd3374c5d 100644
--- a/src/mesa/drivers/dri/intel/intel_bufmgr_ttm.c
+++ b/src/mesa/drivers/dri/intel/intel_bufmgr_ttm.c
@@ -1,10 +1,10 @@
/**************************************************************************
- *
+ *
* Copyright © 2007 Red Hat Inc.
* Copyright © 2007 Intel Corporation
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
@@ -12,20 +12,20 @@
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
- *
- *
+ *
+ *
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
@@ -52,11 +52,11 @@
struct intel_reloc_info
{
- GLuint type;
- GLuint reloc;
- GLuint delta; /* not needed? */
- GLuint index;
- drm_handle_t handle;
+ GLuint type;
+ GLuint reloc;
+ GLuint delta; /* not needed? */
+ GLuint index;
+ drm_handle_t handle;
};
struct intel_bo_node
@@ -92,39 +92,40 @@ struct intel_bo_list {
};
typedef struct _dri_bufmgr_ttm {
- dri_bufmgr bufmgr;
+ dri_bufmgr bufmgr;
- int fd;
- _glthread_Mutex mutex;
- unsigned int fence_type;
- unsigned int fence_type_flush;
+ int fd;
+ _glthread_Mutex mutex;
+ unsigned int fence_type;
+ unsigned int fence_type_flush;
- uint32_t max_relocs;
- /** ttm relocation list */
- struct intel_bo_list list;
- struct intel_bo_list reloc_list;
+ uint32_t max_relocs;
+ /** ttm relocation list */
+ struct intel_bo_list list;
+ struct intel_bo_list reloc_list;
} dri_bufmgr_ttm;
typedef struct _dri_bo_ttm {
- dri_bo bo;
+ dri_bo bo;
- int refcount; /* Protected by bufmgr->mutex */
- drmBO drm_bo;
- const char *name;
+ int refcount; /* Protected by bufmgr->mutex */
+ drmBO drm_bo;
+ const char *name;
} dri_bo_ttm;
typedef struct _dri_fence_ttm
{
- dri_fence fence;
+ dri_fence fence;
- int refcount; /* Protected by bufmgr->mutex */
- const char *name;
- drmFence drm_fence;
+ int refcount; /* Protected by bufmgr->mutex */
+ const char *name;
+ drmFence drm_fence;
} dri_fence_ttm;
-static void intel_bo_free_list(struct intel_bo_list *list)
+static void
+intel_bo_free_list(struct intel_bo_list *list)
{
struct intel_bo_node *node;
drmMMListHead *l;
@@ -139,12 +140,15 @@ static void intel_bo_free_list(struct intel_bo_list *list)
}
}
-static void generic_destroy(void *nodep)
+static void
+generic_destroy(void *nodep)
{
free(nodep);
}
-static int intel_create_bo_list(int numTarget, struct intel_bo_list *list, void (*destroy)(void *))
+static int
+intel_create_bo_list(int numTarget, struct intel_bo_list *list,
+ void (*destroy)(void *))
{
DRMINITLISTHEAD(&list->list);
list->numCurrent = 0;
@@ -157,7 +161,8 @@ static int intel_create_bo_list(int numTarget, struct intel_bo_list *list, void
static struct drm_i915_op_arg *
-intel_setup_validate_list(int fd, struct intel_bo_list *list, struct intel_bo_list *reloc_list, GLuint *count_p)
+intel_setup_validate_list(int fd, struct intel_bo_list *list,
+ struct intel_bo_list *reloc_list, GLuint *count_p)
{
struct intel_bo_node *node;
struct intel_bo_reloc_node *rl_node;
@@ -191,7 +196,9 @@ intel_setup_validate_list(int fd, struct intel_bo_list *list, struct intel_bo_li
req->bo_req.fence_class = 0; /* Backwards compat. */
arg->reloc_handle = 0;
- for (rl = reloc_list->list.next; rl != &reloc_list->list; rl = rl->next) {
+ for (rl = reloc_list->list.next; rl != &reloc_list->list;
+ rl = rl->next)
+ {
rl_node = DRMLISTENTRY(struct intel_bo_reloc_node, rl, head);
if (rl_node->handle == node->buf->handle) {
@@ -208,13 +215,14 @@ intel_setup_validate_list(int fd, struct intel_bo_list *list, struct intel_bo_li
return first;
}
-static void intel_free_validate_list(int fd, struct intel_bo_list *list)
+static void
+intel_free_validate_list(int fd, struct intel_bo_list *list)
{
struct intel_bo_node *node;
drmMMListHead *l;
for (l = list->list.next; l != &list->list; l = l->next) {
- node = DRMLISTENTRY(struct intel_bo_node, l, head);
+ node = DRMLISTENTRY(struct intel_bo_node, l, head);
if (node->destroy)
(*node->destroy)(node->priv);
@@ -222,18 +230,20 @@ static void intel_free_validate_list(int fd, struct intel_bo_list *list)
}
}
-static void intel_free_reloc_list(int fd, struct intel_bo_list *reloc_list)
+static void
+intel_free_reloc_list(int fd, struct intel_bo_list *reloc_list)
{
struct intel_bo_reloc_node *reloc_node;
drmMMListHead *rl, *tmp;
- for (rl = reloc_list->list.next, tmp = rl->next; rl != &reloc_list->list; rl = tmp, tmp = rl->next) {
+ for (rl = reloc_list->list.next, tmp = rl->next; rl != &reloc_list->list;
+ rl = tmp, tmp = rl->next)
+ {
reloc_node = DRMLISTENTRY(struct intel_bo_reloc_node, rl, head);
DRMLISTDEL(rl);
if (reloc_node->nr_reloc_types > 1) {
-
/* TODO */
}
@@ -243,8 +253,10 @@ static void intel_free_reloc_list(int fd, struct intel_bo_list *reloc_list)
}
}
-static int intel_add_validate_buffer(struct intel_bo_list *list, dri_bo *buf, unsigned flags,
- unsigned mask, int *itemLoc, void (*destroy_cb)(void *))
+static int
+intel_add_validate_buffer(struct intel_bo_list *list, dri_bo *buf,
+ unsigned flags, unsigned mask,
+ int *itemLoc, void (*destroy_cb)(void *))
{
struct intel_bo_node *node, *cur;
drmMMListHead *l;
@@ -261,7 +273,7 @@ static int intel_add_validate_buffer(struct intel_bo_list *list, dri_bo *buf, un
}
count++;
}
-
+
if (!cur) {
cur = drmMalloc(sizeof(*cur));
if (!cur) {
@@ -275,11 +287,10 @@ static int intel_add_validate_buffer(struct intel_bo_list *list, dri_bo *buf, un
ret = 1;
DRMLISTADDTAIL(&cur->head, &list->list);
-
} else {
unsigned memMask = (cur->arg1 | mask) & DRM_BO_MASK_MEM;
unsigned memFlags = cur->arg0 & flags & memMask;
-
+
if (!memFlags) {
return -EINVAL;
}
@@ -287,7 +298,7 @@ static int intel_add_validate_buffer(struct intel_bo_list *list, dri_bo *buf, un
return -EINVAL;
}
cur->arg1 |= mask;
- cur->arg0 = memFlags | ((cur->arg0 | flags) &
+ cur->arg0 = memFlags | ((cur->arg0 | flags) &
cur->arg1 & ~DRM_BO_MASK_MEM);
}
*itemLoc = count;
@@ -295,28 +306,40 @@ static int intel_add_validate_buffer(struct intel_bo_list *list, dri_bo *buf, un
}
-#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * sizeof(uint32_t))
+#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
+ sizeof(uint32_t))
-static int intel_create_new_reloc_type_list(int fd, struct intel_bo_reloc_list *cur_type, int max_relocs)
+static int
+intel_create_new_reloc_type_list(int fd, struct intel_bo_reloc_list *cur_type,
+ int max_relocs)
{
int ret;
-
+
/* should allocate a drmBO here */
ret = drmBOCreate(fd, RELOC_BUF_SIZE(max_relocs), 0,
NULL,
- DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_CACHED,
+ DRM_BO_FLAG_MEM_LOCAL |
+ DRM_BO_FLAG_READ |
+ DRM_BO_FLAG_WRITE |
+ DRM_BO_FLAG_MAPPABLE |
+ DRM_BO_FLAG_CACHED,
0, &cur_type->buf);
if (ret)
return ret;
-
- ret = drmBOMap(fd, &cur_type->buf, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0, (void **)&cur_type->relocs);
+
+ ret = drmBOMap(fd, &cur_type->buf,
+ DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
+ 0, (void **)&cur_type->relocs);
if (ret)
return ret;
return 0;
}
-static int intel_add_validate_reloc(int fd, struct intel_bo_list *reloc_list, struct intel_reloc_info *reloc_info, uint32_t max_relocs)
+static int
+intel_add_validate_reloc(int fd, struct intel_bo_list *reloc_list,
+ struct intel_reloc_info *reloc_info,
+ uint32_t max_relocs)
{
struct intel_bo_reloc_node *rl_node, *cur;
drmMMListHead *rl, *l;
@@ -361,21 +384,23 @@ static int intel_add_validate_reloc(int fd, struct intel_bo_list *reloc_list, st
cur_type = &cur->type_list;
found = 1;
} else {
- for (l = cur->type_list.head.next; l != &cur->type_list.head; l = l->next) {
+ for (l = cur->type_list.head.next; l != &cur->type_list.head;
+ l = l->next)
+ {
cur_type = DRMLISTENTRY(struct intel_bo_reloc_list, l, head);
if (((cur_type->relocs[0] >> 16) & 0xffff) == reloc_info->type)
found = 1;
break;
}
}
-
+
/* didn't find the relocation type */
if (!found) {
cur_type = malloc(sizeof(*cur_type));
if (!cur_type) {
return -ENOMEM;
}
-
+
ret = intel_create_new_reloc_type_list(fd, cur_type, max_relocs);
DRMLISTADDTAIL(&cur_type->head, &cur->type_list.head);
@@ -390,9 +415,12 @@ static int intel_add_validate_reloc(int fd, struct intel_bo_list *reloc_list, st
num_relocs = (reloc_start[0] & 0xffff);
- reloc_start[num_relocs*I915_RELOC0_STRIDE + I915_RELOC_HEADER] = reloc_info->reloc;
- reloc_start[num_relocs*I915_RELOC0_STRIDE + I915_RELOC_HEADER+1] = reloc_info->delta;
- reloc_start[num_relocs*I915_RELOC0_STRIDE + I915_RELOC_HEADER+2] = reloc_info->index;
+ reloc_start[num_relocs * I915_RELOC0_STRIDE + I915_RELOC_HEADER] =
+ reloc_info->reloc;
+ reloc_start[num_relocs * I915_RELOC0_STRIDE + I915_RELOC_HEADER + 1] =
+ reloc_info->delta;
+ reloc_start[num_relocs * I915_RELOC0_STRIDE + I915_RELOC_HEADER + 2] =
+ reloc_info->index;
reloc_start[0]++;
if (((reloc_start[0] & 0xffff)) > (max_relocs)) {
return -ENOMEM;
@@ -405,17 +433,17 @@ static int intel_add_validate_reloc(int fd, struct intel_bo_list *reloc_list, st
int
driFenceSignaled(DriFenceObject * fence, unsigned type)
{
- int signaled;
- int ret;
+ int signaled;
+ int ret;
- if (fence == NULL)
- return GL_TRUE;
+ if (fence == NULL)
+ return GL_TRUE;
- _glthread_LOCK_MUTEX(fence->mutex);
- ret = drmFenceSignaled(bufmgr_ttm->fd, &fence->fence, type, &signaled);
- _glthread_UNLOCK_MUTEX(fence->mutex);
- BM_CKFATAL(ret);
- return signaled;
+ _glthread_LOCK_MUTEX(fence->mutex);
+ ret = drmFenceSignaled(bufmgr_ttm->fd, &fence->fence, type, &signaled);
+ _glthread_UNLOCK_MUTEX(fence->mutex);
+ BM_CKFATAL(ret);
+ return signaled;
}
#endif
@@ -424,45 +452,45 @@ dri_ttm_alloc(dri_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment,
unsigned int location_mask)
{
- dri_bufmgr_ttm *ttm_bufmgr;
- dri_bo_ttm *ttm_buf;
- unsigned int pageSize = getpagesize();
- int ret;
- unsigned int flags, hint;
-
- ttm_bufmgr = (dri_bufmgr_ttm *)bufmgr;
-
- ttm_buf = malloc(sizeof(*ttm_buf));
- if (!ttm_buf)
- return NULL;
-
- /* The mask argument doesn't do anything for us that we want other than
- * determine which pool (TTM or local) the buffer is allocated into, so just
- * pass all of the allocation class flags.
- */
- flags = location_mask | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
- DRM_BO_FLAG_EXE;
- /* No hints we want to use. */
- hint = 0;
-
- ret = drmBOCreate(ttm_bufmgr->fd, size, alignment / pageSize,
- NULL, flags, hint, &ttm_buf->drm_bo);
- if (ret != 0) {
- free(ttm_buf);
- return NULL;
- }
- ttm_buf->bo.size = ttm_buf->drm_bo.size;
- ttm_buf->bo.offset = ttm_buf->drm_bo.offset;
- ttm_buf->bo.virtual = NULL;
- ttm_buf->bo.bufmgr = bufmgr;
- ttm_buf->name = name;
- ttm_buf->refcount = 1;
+ dri_bufmgr_ttm *ttm_bufmgr;
+ dri_bo_ttm *ttm_buf;
+ unsigned int pageSize = getpagesize();
+ int ret;
+ unsigned int flags, hint;
+
+ ttm_bufmgr = (dri_bufmgr_ttm *)bufmgr;
+
+ ttm_buf = malloc(sizeof(*ttm_buf));
+ if (!ttm_buf)
+ return NULL;
+
+ /* The mask argument doesn't do anything for us that we want other than
+ * determine which pool (TTM or local) the buffer is allocated into, so
+ * just pass all of the allocation class flags.
+ */
+ flags = location_mask | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
+ DRM_BO_FLAG_EXE;
+ /* No hints we want to use. */
+ hint = 0;
+
+ ret = drmBOCreate(ttm_bufmgr->fd, size, alignment / pageSize,
+ NULL, flags, hint, &ttm_buf->drm_bo);
+ if (ret != 0) {
+ free(ttm_buf);
+ return NULL;
+ }
+ ttm_buf->bo.size = ttm_buf->drm_bo.size;
+ ttm_buf->bo.offset = ttm_buf->drm_bo.offset;
+ ttm_buf->bo.virtual = NULL;
+ ttm_buf->bo.bufmgr = bufmgr;
+ ttm_buf->name = name;
+ ttm_buf->refcount = 1;
#if BUFMGR_DEBUG
- fprintf(stderr, "bo_create: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
+ fprintf(stderr, "bo_create: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
#endif
- return &ttm_buf->bo;
+ return &ttm_buf->bo;
}
/* Our TTM backend doesn't allow creation of static buffers, as that requires
@@ -474,10 +502,11 @@ dri_ttm_alloc_static(dri_bufmgr *bufmgr, const char *name,
unsigned long offset, unsigned long size, void *virtual,
unsigned int location_mask)
{
- return NULL;
+ return NULL;
}
-/** Returns a dri_bo wrapping the given buffer object handle.
+/**
+ * Returns a dri_bo wrapping the given buffer object handle.
*
* This can be used when one application needs to pass a buffer object
* to another.
@@ -486,121 +515,122 @@ dri_bo *
intel_ttm_bo_create_from_handle(dri_bufmgr *bufmgr, const char *name,
unsigned int handle)
{
- dri_bufmgr_ttm *ttm_bufmgr;
- dri_bo_ttm *ttm_buf;
- int ret;
+ dri_bufmgr_ttm *ttm_bufmgr;
+ dri_bo_ttm *ttm_buf;
+ int ret;
- ttm_bufmgr = (dri_bufmgr_ttm *)bufmgr;
+ ttm_bufmgr = (dri_bufmgr_ttm *)bufmgr;
- ttm_buf = malloc(sizeof(*ttm_buf));
- if (!ttm_buf)
- return NULL;
+ ttm_buf = malloc(sizeof(*ttm_buf));
+ if (!ttm_buf)
+ return NULL;
- ret = drmBOReference(ttm_bufmgr->fd, handle, &ttm_buf->drm_bo);
- if (ret != 0) {
- free(ttm_buf);
- return NULL;
- }
- ttm_buf->bo.size = ttm_buf->drm_bo.size;
- ttm_buf->bo.offset = ttm_buf->drm_bo.offset;
- ttm_buf->bo.virtual = NULL;
- ttm_buf->bo.bufmgr = bufmgr;
- ttm_buf->name = name;
- ttm_buf->refcount = 1;
+ ret = drmBOReference(ttm_bufmgr->fd, handle, &ttm_buf->drm_bo);
+ if (ret != 0) {
+ free(ttm_buf);
+ return NULL;
+ }
+ ttm_buf->bo.size = ttm_buf->drm_bo.size;
+ ttm_buf->bo.offset = ttm_buf->drm_bo.offset;
+ ttm_buf->bo.virtual = NULL;
+ ttm_buf->bo.bufmgr = bufmgr;
+ ttm_buf->name = name;
+ ttm_buf->refcount = 1;
#if BUFMGR_DEBUG
- fprintf(stderr, "bo_create_from_handle: %p %08x (%s)\n", &ttm_buf->bo, handle,
- ttm_buf->name);
+ fprintf(stderr, "bo_create_from_handle: %p %08x (%s)\n",
+ &ttm_buf->bo, handle, ttm_buf->name);
#endif
- return &ttm_buf->bo;
+ return &ttm_buf->bo;
}
static void
dri_ttm_bo_reference(dri_bo *buf)
{
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
- dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
+ dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
- _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
- ttm_buf->refcount++;
- _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
+ _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
+ ttm_buf->refcount++;
+ _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
}
static void
dri_ttm_bo_unreference(dri_bo *buf)
{
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
- dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
+ dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
- if (!buf)
- return;
+ if (!buf)
+ return;
- _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
- if (--ttm_buf->refcount == 0) {
- int ret;
+ _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
+ if (--ttm_buf->refcount == 0) {
+ int ret;
- ret = drmBOUnreference(bufmgr_ttm->fd, &ttm_buf->drm_bo);
- if (ret != 0) {
- fprintf(stderr, "drmBOUnreference failed (%s): %s\n", ttm_buf->name,
- strerror(-ret));
- }
+ ret = drmBOUnreference(bufmgr_ttm->fd, &ttm_buf->drm_bo);
+ if (ret != 0) {
+ fprintf(stderr, "drmBOUnreference failed (%s): %s\n",
+ ttm_buf->name, strerror(-ret));
+ }
#if BUFMGR_DEBUG
- fprintf(stderr, "bo_unreference final: %p (%s)\n",
- &ttm_buf->bo, ttm_buf->name);
+ fprintf(stderr, "bo_unreference final: %p (%s)\n",
+ &ttm_buf->bo, ttm_buf->name);
#endif
- _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
- free(buf);
- return;
- }
- _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
+ _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
+ free(buf);
+ return;
+ }
+ _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
}
static int
dri_ttm_bo_map(dri_bo *buf, GLboolean write_enable)
{
- dri_bufmgr_ttm *bufmgr_ttm;
- dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
- unsigned int flags;
+ dri_bufmgr_ttm *bufmgr_ttm;
+ dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
+ unsigned int flags;
- bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
+ bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
- flags = DRM_BO_FLAG_READ;
- if (write_enable)
- flags |= DRM_BO_FLAG_WRITE;
+ flags = DRM_BO_FLAG_READ;
+ if (write_enable)
+ flags |= DRM_BO_FLAG_WRITE;
- assert(buf->virtual == NULL);
+ assert(buf->virtual == NULL);
#if BUFMGR_DEBUG
- fprintf(stderr, "bo_map: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
+ fprintf(stderr, "bo_map: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
#endif
- return drmBOMap(bufmgr_ttm->fd, &ttm_buf->drm_bo, flags, 0, &buf->virtual);
+ return drmBOMap(bufmgr_ttm->fd, &ttm_buf->drm_bo, flags, 0, &buf->virtual);
}
static int
dri_ttm_bo_unmap(dri_bo *buf)
{
- dri_bufmgr_ttm *bufmgr_ttm;
- dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
+ dri_bufmgr_ttm *bufmgr_ttm;
+ dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
- if (buf == NULL)
- return 0;
+ if (buf == NULL)
+ return 0;
- bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
+ bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
- assert(buf->virtual != NULL);
+ assert(buf->virtual != NULL);
- buf->virtual = NULL;
+ buf->virtual = NULL;
#if BUFMGR_DEBUG
- fprintf(stderr, "bo_unmap: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
+ fprintf(stderr, "bo_unmap: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
#endif
- return drmBOUnmap(bufmgr_ttm->fd, &ttm_buf->drm_bo);
+ return drmBOUnmap(bufmgr_ttm->fd, &ttm_buf->drm_bo);
}
-/* Returns a dri_bo wrapping the given buffer object handle.
+/**
+ * Returns a dri_bo wrapping the given buffer object handle.
*
* This can be used when one application needs to pass a buffer object
* to another.
@@ -609,183 +639,187 @@ dri_fence *
intel_ttm_fence_create_from_arg(dri_bufmgr *bufmgr, const char *name,
drm_fence_arg_t *arg)
{
- dri_bufmgr_ttm *ttm_bufmgr;
- dri_fence_ttm *ttm_fence;
+ dri_bufmgr_ttm *ttm_bufmgr;
+ dri_fence_ttm *ttm_fence;
- ttm_bufmgr = (dri_bufmgr_ttm *)bufmgr;
+ ttm_bufmgr = (dri_bufmgr_ttm *)bufmgr;
- ttm_fence = malloc(sizeof(*ttm_fence));
- if (!ttm_fence)
- return NULL;
+ ttm_fence = malloc(sizeof(*ttm_fence));
+ if (!ttm_fence)
+ return NULL;
- ttm_fence->drm_fence.handle = arg->handle;
- ttm_fence->drm_fence.fence_class = arg->fence_class;
- ttm_fence->drm_fence.type = arg->type;
- ttm_fence->drm_fence.flags = arg->flags;
- ttm_fence->drm_fence.signaled = 0;
- ttm_fence->drm_fence.sequence = arg->sequence;
+ ttm_fence->drm_fence.handle = arg->handle;
+ ttm_fence->drm_fence.fence_class = arg->fence_class;
+ ttm_fence->drm_fence.type = arg->type;
+ ttm_fence->drm_fence.flags = arg->flags;
+ ttm_fence->drm_fence.signaled = 0;
+ ttm_fence->drm_fence.sequence = arg->sequence;
- ttm_fence->fence.bufmgr = bufmgr;
- ttm_fence->name = name;
- ttm_fence->refcount = 1;
+ ttm_fence->fence.bufmgr = bufmgr;
+ ttm_fence->name = name;
+ ttm_fence->refcount = 1;
#if BUFMGR_DEBUG
- fprintf(stderr, "fence_create_from_handle: %p (%s)\n", &ttm_fence->fence,
- ttm_fence->name);
+ fprintf(stderr, "fence_create_from_handle: %p (%s)\n", &ttm_fence->fence,
+ ttm_fence->name);
#endif
- return &ttm_fence->fence;
+ return &ttm_fence->fence;
}
static void
dri_ttm_fence_reference(dri_fence *fence)
{
- dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;
+ dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;
- _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
- ++fence_ttm->refcount;
- _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
+ _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
+ ++fence_ttm->refcount;
+ _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
#if BUFMGR_DEBUG
- fprintf(stderr, "fence_reference: %p (%s)\n", &fence_ttm->fence,
- fence_ttm->name);
+ fprintf(stderr, "fence_reference: %p (%s)\n", &fence_ttm->fence,
+ fence_ttm->name);
#endif
}
static void
dri_ttm_fence_unreference(dri_fence *fence)
{
- dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;
+ dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;
- if (!fence)
- return;
+ if (!fence)
+ return;
#if BUFMGR_DEBUG
- fprintf(stderr, "fence_unreference: %p (%s)\n", &fence_ttm->fence,
- fence_ttm->name);
+ fprintf(stderr, "fence_unreference: %p (%s)\n", &fence_ttm->fence,
+ fence_ttm->name);
#endif
- _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
- if (--fence_ttm->refcount == 0) {
- int ret;
-
- ret = drmFenceUnreference(bufmgr_ttm->fd, &fence_ttm->drm_fence);
- if (ret != 0) {
- fprintf(stderr, "drmFenceUnreference failed (%s): %s\n",
- fence_ttm->name, strerror(-ret));
- }
-
- _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
- free(fence);
- return;
- }
- _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
+ _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
+ if (--fence_ttm->refcount == 0) {
+ int ret;
+
+ ret = drmFenceUnreference(bufmgr_ttm->fd, &fence_ttm->drm_fence);
+ if (ret != 0) {
+ fprintf(stderr, "drmFenceUnreference failed (%s): %s\n",
+ fence_ttm->name, strerror(-ret));
+ }
+
+ _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
+ free(fence);
+ return;
+ }
+ _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
}
static void
dri_ttm_fence_wait(dri_fence *fence)
{
- dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;
- int ret;
+ dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;
+ int ret;
- _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
- ret = drmFenceWait(bufmgr_ttm->fd, 0, &fence_ttm->drm_fence, 0);
- _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
- if (ret != 0) {
- _mesa_printf("%s:%d: Error %d waiting for fence %s.\n",
- __FILE__, __LINE__, ret, fence_ttm->name);
- abort();
- }
+ _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
+ ret = drmFenceWait(bufmgr_ttm->fd, 0, &fence_ttm->drm_fence, 0);
+ _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
+ if (ret != 0) {
+ _mesa_printf("%s:%d: Error %d waiting for fence %s.\n",
+ __FILE__, __LINE__, ret, fence_ttm->name);
+ abort();
+ }
#if BUFMGR_DEBUG
- fprintf(stderr, "fence_wait: %p (%s)\n", &fence_ttm->fence,
- fence_ttm->name);
+ fprintf(stderr, "fence_wait: %p (%s)\n", &fence_ttm->fence,
+ fence_ttm->name);
#endif
}
static void
dri_bufmgr_ttm_destroy(dri_bufmgr *bufmgr)
{
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)bufmgr;
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)bufmgr;
- intel_bo_free_list(&bufmgr_ttm->list);
- intel_bo_free_list(&bufmgr_ttm->reloc_list);
+ intel_bo_free_list(&bufmgr_ttm->list);
+ intel_bo_free_list(&bufmgr_ttm->reloc_list);
- _glthread_DESTROY_MUTEX(bufmgr_ttm->mutex);
- free(bufmgr);
+ _glthread_DESTROY_MUTEX(bufmgr_ttm->mutex);
+ free(bufmgr);
}
-static void intel_dribo_destroy_callback(void *priv)
+static void
+intel_dribo_destroy_callback(void *priv)
{
- dri_bo *dribo = priv;
-
- if (dribo) {
- dri_bo_unreference(dribo);
- }
+ dri_bo *dribo = priv;
+
+ if (dribo)
+ dri_bo_unreference(dribo);
}
static void
-dri_ttm_emit_reloc(dri_bo *batch_buf, GLuint flags, GLuint delta, GLuint offset,
- dri_bo *relocatee)
-{
- dri_bo_ttm *ttm_buf = (dri_bo_ttm *)batch_buf;
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)batch_buf->bufmgr;
- int newItem;
- struct intel_reloc_info reloc;
- int mask;
- int ret;
-
- mask = DRM_BO_MASK_MEM;
- mask |= flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE);
-
- ret = intel_add_validate_buffer(&bufmgr_ttm->list, relocatee, flags, mask, &newItem, intel_dribo_destroy_callback);
- if (ret < 0)
- return;
-
- if (ret == 1) {
- dri_bo_reference(relocatee);
- }
-
- reloc.type = I915_RELOC_TYPE_0;
- reloc.reloc = offset;
- reloc.delta = delta;
- reloc.index = newItem;
- reloc.handle = ttm_buf->drm_bo.handle;
-
- intel_add_validate_reloc(bufmgr_ttm->fd, &bufmgr_ttm->reloc_list, &reloc, bufmgr_ttm->max_relocs);
- return;
+dri_ttm_emit_reloc(dri_bo *reloc_buf, GLuint flags, GLuint delta,
+ GLuint offset, dri_bo *target_buf)
+{
+ dri_bo_ttm *ttm_buf = (dri_bo_ttm *)reloc_buf;
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)reloc_buf->bufmgr;
+ int newItem;
+ struct intel_reloc_info reloc;
+ int mask;
+ int ret;
+
+ mask = DRM_BO_MASK_MEM;
+ mask |= flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE);
+
+ ret = intel_add_validate_buffer(&bufmgr_ttm->list, target_buf, flags, mask,
+ &newItem, intel_dribo_destroy_callback);
+ if (ret < 0)
+ return;
+
+ if (ret == 1)
+ dri_bo_reference(target_buf);
+
+ reloc.type = I915_RELOC_TYPE_0;
+ reloc.reloc = offset;
+ reloc.delta = delta;
+ reloc.index = newItem;
+ reloc.handle = ttm_buf->drm_bo.handle;
+
+ intel_add_validate_reloc(bufmgr_ttm->fd, &bufmgr_ttm->reloc_list, &reloc,
+ bufmgr_ttm->max_relocs);
+ return;
}
static void *
dri_ttm_process_reloc(dri_bo *batch_buf, GLuint *count)
{
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)batch_buf->bufmgr;
- void *ptr;
- int itemLoc;
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)batch_buf->bufmgr;
+ void *ptr;
+ int itemLoc;
- dri_bo_unmap(batch_buf);
+ dri_bo_unmap(batch_buf);
- intel_add_validate_buffer(&bufmgr_ttm->list, batch_buf, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE,
- DRM_BO_MASK_MEM | DRM_BO_FLAG_EXE, &itemLoc, NULL);
+ intel_add_validate_buffer(&bufmgr_ttm->list, batch_buf,
+ DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE,
+ DRM_BO_MASK_MEM | DRM_BO_FLAG_EXE,
+ &itemLoc, NULL);
- ptr = intel_setup_validate_list(bufmgr_ttm->fd, &bufmgr_ttm->list, &bufmgr_ttm->reloc_list, count);
+ ptr = intel_setup_validate_list(bufmgr_ttm->fd, &bufmgr_ttm->list,
+ &bufmgr_ttm->reloc_list, count);
- return ptr;
+ return ptr;
}
static void
dri_ttm_post_submit(dri_bo *batch_buf, dri_fence **last_fence)
{
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)batch_buf->bufmgr;
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)batch_buf->bufmgr;
- intel_free_validate_list(bufmgr_ttm->fd, &bufmgr_ttm->list);
- intel_free_reloc_list(bufmgr_ttm->fd, &bufmgr_ttm->reloc_list);
+ intel_free_validate_list(bufmgr_ttm->fd, &bufmgr_ttm->list);
+ intel_free_reloc_list(bufmgr_ttm->fd, &bufmgr_ttm->reloc_list);
- intel_bo_free_list(&bufmgr_ttm->list);
+ intel_bo_free_list(&bufmgr_ttm->list);
}
/**
@@ -801,33 +835,34 @@ dri_bufmgr *
intel_bufmgr_ttm_init(int fd, unsigned int fence_type,
unsigned int fence_type_flush, int batch_size)
{
- dri_bufmgr_ttm *bufmgr_ttm;
-
- bufmgr_ttm = malloc(sizeof(*bufmgr_ttm));
- bufmgr_ttm->fd = fd;
- bufmgr_ttm->fence_type = fence_type;
- bufmgr_ttm->fence_type_flush = fence_type_flush;
- _glthread_INIT_MUTEX(bufmgr_ttm->mutex);
-
- /* lets go with one relocation per every four dwords - purely heuristic */
- bufmgr_ttm->max_relocs = batch_size / sizeof(uint32_t) / 4;
-
- intel_create_bo_list(10, &bufmgr_ttm->list, NULL);
- intel_create_bo_list(1, &bufmgr_ttm->reloc_list, NULL);
-
- bufmgr_ttm->bufmgr.bo_alloc = dri_ttm_alloc;
- bufmgr_ttm->bufmgr.bo_alloc_static = dri_ttm_alloc_static;
- bufmgr_ttm->bufmgr.bo_reference = dri_ttm_bo_reference;
- bufmgr_ttm->bufmgr.bo_unreference = dri_ttm_bo_unreference;
- bufmgr_ttm->bufmgr.bo_map = dri_ttm_bo_map;
- bufmgr_ttm->bufmgr.bo_unmap = dri_ttm_bo_unmap;
- bufmgr_ttm->bufmgr.fence_reference = dri_ttm_fence_reference;
- bufmgr_ttm->bufmgr.fence_unreference = dri_ttm_fence_unreference;
- bufmgr_ttm->bufmgr.fence_wait = dri_ttm_fence_wait;
- bufmgr_ttm->bufmgr.destroy = dri_bufmgr_ttm_destroy;
- bufmgr_ttm->bufmgr.emit_reloc = dri_ttm_emit_reloc;
- bufmgr_ttm->bufmgr.process_relocs = dri_ttm_process_reloc;
- bufmgr_ttm->bufmgr.post_submit = dri_ttm_post_submit;
- return &bufmgr_ttm->bufmgr;
+ dri_bufmgr_ttm *bufmgr_ttm;
+
+ bufmgr_ttm = malloc(sizeof(*bufmgr_ttm));
+ bufmgr_ttm->fd = fd;
+ bufmgr_ttm->fence_type = fence_type;
+ bufmgr_ttm->fence_type_flush = fence_type_flush;
+ _glthread_INIT_MUTEX(bufmgr_ttm->mutex);
+
+ /* lets go with one relocation per every four dwords - purely heuristic */
+ bufmgr_ttm->max_relocs = batch_size / sizeof(uint32_t) / 4;
+
+ intel_create_bo_list(10, &bufmgr_ttm->list, NULL);
+ intel_create_bo_list(1, &bufmgr_ttm->reloc_list, NULL);
+
+ bufmgr_ttm->bufmgr.bo_alloc = dri_ttm_alloc;
+ bufmgr_ttm->bufmgr.bo_alloc_static = dri_ttm_alloc_static;
+ bufmgr_ttm->bufmgr.bo_reference = dri_ttm_bo_reference;
+ bufmgr_ttm->bufmgr.bo_unreference = dri_ttm_bo_unreference;
+ bufmgr_ttm->bufmgr.bo_map = dri_ttm_bo_map;
+ bufmgr_ttm->bufmgr.bo_unmap = dri_ttm_bo_unmap;
+ bufmgr_ttm->bufmgr.fence_reference = dri_ttm_fence_reference;
+ bufmgr_ttm->bufmgr.fence_unreference = dri_ttm_fence_unreference;
+ bufmgr_ttm->bufmgr.fence_wait = dri_ttm_fence_wait;
+ bufmgr_ttm->bufmgr.destroy = dri_bufmgr_ttm_destroy;
+ bufmgr_ttm->bufmgr.emit_reloc = dri_ttm_emit_reloc;
+ bufmgr_ttm->bufmgr.process_relocs = dri_ttm_process_reloc;
+ bufmgr_ttm->bufmgr.post_submit = dri_ttm_post_submit;
+
+ return &bufmgr_ttm->bufmgr;
}