author    Eric Anholt <eric@anholt.net>    2007-09-21 17:05:34 -0700
committer Eric Anholt <eric@anholt.net>    2007-09-21 17:13:20 -0700
commit    39c709c0f6a9b2c910265390b31ce233a991ab68 (patch)
tree      a4484723b11f36f822e3bfeeeaa3652da2753859 /src/mesa/drivers/dri/i915tex/intel_batchbuffer.c
parent    6016de689fb9f29fe148b5ff0daf0b34a8510e23 (diff)
parent    d0350dadaa52064f67e4e9001145af14478b2849 (diff)
Merge branch 'i915-unification'
This branch replaces the DRM pool interface used by i915tex with a "dri_bufmgr" interface in dri/common, which may be set up to use either TTM or traditional static memory management according to what is available. The i915tex TTM code now requires an updated DDX which provides proper buffer objects for the static front/back/depth, instead of using fake buffers. The driver is now built as i915_dri.so, and should replace the old i915 driver shortly.
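
For orientation, the buffer lifecycle under the new interface looks roughly as follows. This is a sketch assembled from the entry points used in the diff below, not code from this commit; the header name, the example_bo_lifecycle wrapper, and the flag choices are assumptions.

    #include <string.h>
    #include <GL/gl.h>
    #include "dri_bufmgr.h"   /* assumed: the new interface in dri/common */

    /* Sketch: allocate, fill, pin, fence, and release a buffer object. */
    static void
    example_bo_lifecycle(dri_bufmgr *bufmgr)
    {
       /* A 4 KiB buffer, 4 KiB-aligned, backed by TT memory. */
       dri_bo *bo = dri_bo_alloc(bufmgr, "example", 4096, 4096,
                                 DRM_BO_FLAG_MEM_TT);

       dri_bo_map(bo, GL_TRUE);             /* GL_TRUE: map for writing */
       memset(bo->virtual, 0, 4096);        /* CPU access via bo->virtual */
       dri_bo_unmap(bo);

       dri_bo_validate(bo, DRM_BO_FLAG_MEM_TT);  /* pin for GPU use */

       /* Fence all buffers validated since the last fence, then CPU-wait. */
       dri_fence *fence = dri_fence_validated(bufmgr, "example fence",
                                              GL_TRUE);
       dri_fence_wait(fence);
       dri_fence_unreference(fence);

       dri_bo_unreference(bo);
    }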
Diffstat (limited to 'src/mesa/drivers/dri/i915tex/intel_batchbuffer.c')
-rw-r--r--  src/mesa/drivers/dri/i915tex/intel_batchbuffer.c  |  226
1 file changed, 113 insertions(+), 113 deletions(-)
diff --git a/src/mesa/drivers/dri/i915tex/intel_batchbuffer.c b/src/mesa/drivers/dri/i915tex/intel_batchbuffer.c
index c740c3d7f6..8ee48b5a68 100644
--- a/src/mesa/drivers/dri/i915tex/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/i915tex/intel_batchbuffer.c
@@ -71,62 +71,31 @@
void
intel_batchbuffer_reset(struct intel_batchbuffer *batch)
{
+ struct intel_context *intel = batch->intel;
- int i;
-
- /*
- * Get a new, free batchbuffer.
- */
-
- batch->size = batch->intel->intelScreen->maxBatchSize;
- driBOData(batch->buffer, batch->size, NULL, 0);
-
- driBOResetList(&batch->list);
-
- /*
- * Unreference buffers previously on the relocation list.
- */
-
- for (i = 0; i < batch->nr_relocs; i++) {
- struct buffer_reloc *r = &batch->reloc[i];
- driBOUnReference(r->buf);
+ if (batch->buf != NULL) {
+ dri_bo_unreference(batch->buf);
+ batch->buf = NULL;
}
- batch->list_count = 0;
- batch->nr_relocs = 0;
- batch->flags = 0;
-
- /*
- * We don't refcount the batchbuffer itself since we can't destroy it
- * while it's on the list.
- */
-
-
- driBOAddListItem(&batch->list, batch->buffer,
- DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE,
- DRM_BO_MASK_MEM | DRM_BO_FLAG_EXE);
-
-
- batch->map = driBOMap(batch->buffer, DRM_BO_FLAG_WRITE, 0);
+ batch->buf = dri_bo_alloc(intel->intelScreen->bufmgr, "batchbuffer",
+ intel->intelScreen->maxBatchSize, 4096,
+ DRM_BO_FLAG_MEM_TT);
+ dri_bo_map(batch->buf, GL_TRUE);
+ batch->map = batch->buf->virtual;
+ batch->size = intel->intelScreen->maxBatchSize;
batch->ptr = batch->map;
}
-/*======================================================================
- * Public functions
- */
struct intel_batchbuffer *
intel_batchbuffer_alloc(struct intel_context *intel)
{
struct intel_batchbuffer *batch = calloc(sizeof(*batch), 1);
batch->intel = intel;
-
- driGenBuffers(intel->intelScreen->batchPool, "batchbuffer", 1,
- &batch->buffer, 4096,
- DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE, 0);
batch->last_fence = NULL;
- driBOCreateList(20, &batch->list);
intel_batchbuffer_reset(batch);
+
return batch;
}
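
After reset, batch->map points at the freshly mapped buffer object and batch->ptr serves as a byte-granular write cursor; the rest of the driver emits commands by bumping that cursor a dword at a time (compare the batch->ptr += 4 in intel_batchbuffer_emit_reloc() at the end of this diff). A hypothetical helper, not part of this commit, makes the idiom explicit:

    /* Hypothetical sketch: append one command dword to the mapped batch,
     * advancing the cursor that intel_batchbuffer_reset() set up.
     */
    static void
    emit_dword(struct intel_batchbuffer *batch, GLuint dword)
    {
       assert(batch->ptr + 4 <= batch->map + batch->size);
       *(GLuint *) batch->ptr = dword;
       batch->ptr += 4;
    }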
@@ -134,20 +103,27 @@ void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
if (batch->last_fence) {
- driFenceFinish(batch->last_fence,
- DRM_FENCE_TYPE_EXE | DRM_I915_FENCE_TYPE_RW, GL_FALSE);
- driFenceUnReference(batch->last_fence);
+ dri_fence_wait(batch->last_fence);
+ dri_fence_unreference(batch->last_fence);
batch->last_fence = NULL;
}
if (batch->map) {
- driBOUnmap(batch->buffer);
+ dri_bo_unmap(batch->buf);
batch->map = NULL;
}
- driBOUnReference(batch->buffer);
- batch->buffer = NULL;
+ dri_bo_unreference(batch->buf);
+ batch->buf = NULL;
free(batch);
}
+static int
+relocation_sort(const void *a_in, const void *b_in) {
+ const struct buffer_reloc *a = a_in, *b = b_in;
+
+ return (intptr_t)a->buf < (intptr_t)b->buf ? -1 : 1;
+}
+
+
/* TODO: Push this whole function into bufmgr.
*/
static void
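
A nit in relocation_sort() above: the comparator never returns 0, so two relocations against the same buffer compare as greater in both directions. The flush loop below only needs equal pointers to end up adjacent, and real-world qsort() implementations tolerate the inconsistency, but a strictly conforming comparator would report equality:

    /* Strictly conforming variant (sketch): return 0 for equal buffers
     * instead of claiming each is greater than the other.
     */
    static int
    relocation_sort(const void *a_in, const void *b_in)
    {
       const struct buffer_reloc *a = a_in, *b = b_in;

       if (a->buf == b->buf)
          return 0;
       return (intptr_t) a->buf < (intptr_t) b->buf ? -1 : 1;
    }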
@@ -158,30 +134,68 @@ do_flush_locked(struct intel_batchbuffer *batch,
GLuint *ptr;
GLuint i;
struct intel_context *intel = batch->intel;
- unsigned fenceFlags;
- struct _DriFenceObject *fo;
+ dri_fence *fo;
+ GLboolean performed_rendering = GL_FALSE;
- driBOValidateList(batch->intel->driFd, &batch->list);
+ assert(batch->buf->virtual != NULL);
+ ptr = batch->buf->virtual;
- /* Apply the relocations. This nasty map indicates to me that the
- * whole task should be done internally by the memory manager, and
- * that dma buffers probably need to be pinned within agp space.
+ /* Sort our relocation list in terms of referenced buffer pointer.
+ * This lets us uniquely validate the buffers with the sum of all the flags,
+ * while avoiding O(n^2) on number of relocations.
*/
- ptr = (GLuint *) driBOMap(batch->buffer, DRM_BO_FLAG_WRITE,
- DRM_BO_HINT_ALLOW_UNFENCED_MAP);
-
+ qsort(batch->reloc, batch->nr_relocs, sizeof(batch->reloc[0]),
+ relocation_sort);
+ /* Perform the necessary validations of buffers, and enter the relocations
+ * in the batchbuffer.
+ */
for (i = 0; i < batch->nr_relocs; i++) {
struct buffer_reloc *r = &batch->reloc[i];
- ptr[r->offset / 4] = driBOOffset(r->buf) + r->delta;
+ if (r->validate_flags & DRM_BO_FLAG_WRITE)
+ performed_rendering = GL_TRUE;
+
+ /* If this is the first time we've seen this buffer in the relocation
+ * list, figure out our flags and validate it.
+ */
+ if (i == 0 || batch->reloc[i - 1].buf != r->buf) {
+ uint32_t validate_flags;
+ int j, ret;
+
+ /* Accumulate the flags we need for validating this buffer. */
+ validate_flags = r->validate_flags;
+ for (j = i + 1; j < batch->nr_relocs; j++) {
+ if (batch->reloc[j].buf != r->buf)
+ break;
+ validate_flags |= batch->reloc[j].validate_flags;
+ }
+
+ /* Validate. If we fail, fence to clear the unfenced list and bail
+ * out.
+ */
+ ret = dri_bo_validate(r->buf, validate_flags);
+ if (ret != 0) {
+ dri_bo_unmap(batch->buf);
+ fo = dri_fence_validated(intel->intelScreen->bufmgr,
+ "batchbuffer failure fence", GL_TRUE);
+ dri_fence_unreference(fo);
+ goto done;
+ }
+ }
+ ptr[r->offset / 4] = r->buf->offset + r->delta;
+ dri_bo_unreference(r->buf);
}
- if (INTEL_DEBUG & DEBUG_BATCH)
- intel_decode(ptr, used / 4, 0);
-
- driBOUnmap(batch->buffer);
+ dri_bo_unmap(batch->buf);
batch->map = NULL;
+ batch->ptr = NULL;
+
+ dri_bo_validate(batch->buf, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE);
+
+ batch->list_count = 0;
+ batch->nr_relocs = 0;
+ batch->flags = 0;
/* Throw away non-effective packets. Won't work once we have
* hardware contexts which would preserve statechanges beyond a
@@ -190,55 +204,49 @@ do_flush_locked(struct intel_batchbuffer *batch,
if (!(intel->numClipRects == 0 && !ignore_cliprects)) {
intel_batch_ioctl(batch->intel,
- driBOOffset(batch->buffer),
+ batch->buf->offset,
used, ignore_cliprects, allow_unlock);
}
-
- /*
- * Kernel fencing. The flags tells the kernel that we've
- * programmed an MI_FLUSH.
+ /* Associate a fence with the validated buffers, and note that we included
+ * a flush at the end.
*/
-
- fenceFlags = DRM_I915_FENCE_FLAG_FLUSHED;
- fo = driFenceBuffers(batch->intel->driFd,
- "Batch fence", fenceFlags);
-
- /*
- * User space fencing.
- */
-
- driBOFence(batch->buffer, fo);
-
- if (driFenceType(fo) == DRM_FENCE_TYPE_EXE) {
+ fo = dri_fence_validated(intel->intelScreen->bufmgr,
+ "Batch fence", GL_TRUE);
- /*
- * Oops. We only validated a batch buffer. This means we
- * didn't do any proper rendering. Discard this fence object.
- */
-
- driFenceUnReference(fo);
- } else {
- driFenceUnReference(batch->last_fence);
+ if (performed_rendering) {
+ dri_fence_unreference(batch->last_fence);
batch->last_fence = fo;
- for (i = 0; i < batch->nr_relocs; i++) {
- struct buffer_reloc *r = &batch->reloc[i];
- driBOFence(r->buf, fo);
- }
+ } else {
+ /* If we didn't validate any buffers for writing by the card, we don't
+ * need to track the fence for glFinish().
+ */
+ dri_fence_unreference(fo);
}
if (intel->numClipRects == 0 && !ignore_cliprects) {
if (allow_unlock) {
+ /* If we are not doing any actual user-visible rendering,
+ * do a sched_yield to keep the app from pegging the cpu while
+ * achieving nothing.
+ */
UNLOCK_HARDWARE(intel);
sched_yield();
LOCK_HARDWARE(intel);
}
intel->vtbl.lost_hardware(intel);
}
+
+done:
+ if (INTEL_DEBUG & DEBUG_BATCH) {
+ dri_bo_map(batch->buf, GL_FALSE);
+ intel_decode(ptr, used / 4, batch->buf->offset);
+ dri_bo_unmap(batch->buf);
+ }
}
-struct _DriFenceObject *
+void
intel_batchbuffer_flush(struct intel_batchbuffer *batch)
{
struct intel_context *intel = batch->intel;
@@ -246,7 +254,7 @@ intel_batchbuffer_flush(struct intel_batchbuffer *batch)
GLboolean was_locked = intel->locked;
if (used == 0)
- return batch->last_fence;
+ return;
/* Add the MI_BATCH_BUFFER_END. Always add an MI_FLUSH - this is a
* performance drain that we would like to avoid.
@@ -263,10 +271,6 @@ intel_batchbuffer_flush(struct intel_batchbuffer *batch)
used += 8;
}
- driBOUnmap(batch->buffer);
- batch->ptr = NULL;
- batch->map = NULL;
-
/* TODO: Just pass the relocation list and dma buffer up to the
* kernel.
*/
@@ -282,16 +286,14 @@ intel_batchbuffer_flush(struct intel_batchbuffer *batch)
/* Reset the buffer:
*/
intel_batchbuffer_reset(batch);
- return batch->last_fence;
}
void
intel_batchbuffer_finish(struct intel_batchbuffer *batch)
{
- struct _DriFenceObject *fence = intel_batchbuffer_flush(batch);
- driFenceReference(fence);
- driFenceFinish(fence, 3, GL_FALSE);
- driFenceUnReference(fence);
+ intel_batchbuffer_flush(batch);
+ if (batch->last_fence != NULL)
+ dri_fence_wait(batch->last_fence);
}
@@ -299,20 +301,18 @@ intel_batchbuffer_finish(struct intel_batchbuffer *batch)
*/
GLboolean
intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
- struct _DriBufferObject *buffer,
- GLuint flags, GLuint mask, GLuint delta)
+ dri_bo *buffer,
+ GLuint flags, GLuint delta)
{
- assert(batch->nr_relocs < MAX_RELOCS);
+ struct buffer_reloc *r = &batch->reloc[batch->nr_relocs++];
- driBOAddListItem(&batch->list, buffer, flags, mask);
+ assert(batch->nr_relocs <= MAX_RELOCS);
- {
- struct buffer_reloc *r = &batch->reloc[batch->nr_relocs++];
- driBOReference(buffer);
- r->buf = buffer;
- r->offset = batch->ptr - batch->map;
- r->delta = delta;
- }
+ dri_bo_reference(buffer);
+ r->buf = buffer;
+ r->offset = batch->ptr - batch->map;
+ r->delta = delta;
+ r->validate_flags = flags;
batch->ptr += 4;
return GL_TRUE;
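
For reference, a call site under the new signature passes the buffer's validate flags directly, with no separate mask argument. Something like the following, where the target buffer and flag choice are illustrative rather than taken from this commit:

    /* Illustrative call site: relocate to a color buffer that the GPU
     * will render into from TT memory.  The dword written here is patched
     * to target_bo->offset + delta at flush time by do_flush_locked().
     */
    intel_batchbuffer_emit_reloc(batch, target_bo,
                                 DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
                                 0 /* delta */);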