author    Eric Anholt <eric@anholt.net>  2008-05-02 14:11:19 -0700
committer Eric Anholt <eric@anholt.net>  2008-05-02 14:11:19 -0700
commit    eb10cdc838fc31ea2cf59f556f6f7d8b072f5bae (patch)
tree      828e7a4bc08bb1565f2f9f0b0669e8f458ac80d1 /src/mesa/drivers/dri/intel/intel_batchbuffer.c
parent    89bba44e969f15bf20da6d700c493237b095a588 (diff)
[intel] Fix build for GEM. TTM is now disabled, and fencing is gone.
Fencing was used in two places: ensuring that we didn't get too many frames ahead of ourselves, and glFinish. glFinish will now be satisfied by waiting on buffers, just as we would for CPU access to them. The "don't get too far ahead" throttling is now the responsibility of the execution manager (kernel).
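For illustration of the glFinish point, here is a minimal sketch of waiting on the last batch buffer instead of a fence. This is not code from this commit: intel_finish_by_buffer_wait is a hypothetical helper, and it assumes that mapping a buffer object for CPU access blocks until the GPU has finished with it.

/* Hypothetical sketch, not part of this patch: glFinish semantics
 * without fences.  Assumes dri_bo_map() stalls until outstanding
 * rendering to the buffer object has completed. */
static void
intel_finish_by_buffer_wait(struct intel_context *intel)
{
   dri_bo *bo = intel->batch->buf;

   dri_bo_reference(bo);                  /* keep the BO across the reset */
   intel_batchbuffer_flush(intel->batch); /* submit pending commands */

   /* Mapping for read blocks until the GPU is done with the BO. */
   dri_bo_map(bo, GL_FALSE);
   dri_bo_unmap(bo);
   dri_bo_unreference(bo);
}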
Diffstat (limited to 'src/mesa/drivers/dri/intel/intel_batchbuffer.c')
-rw-r--r--  src/mesa/drivers/dri/intel/intel_batchbuffer.c | 28
1 file changed, 9 insertions(+), 19 deletions(-)
diff --git a/src/mesa/drivers/dri/intel/intel_batchbuffer.c b/src/mesa/drivers/dri/intel/intel_batchbuffer.c
index c5b0f531d4..683d06a552 100644
--- a/src/mesa/drivers/dri/intel/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/intel/intel_batchbuffer.c
@@ -99,7 +99,6 @@ intel_batchbuffer_alloc(struct intel_context *intel)
struct intel_batchbuffer *batch = calloc(sizeof(*batch), 1);
batch->intel = intel;
- batch->last_fence = NULL;
intel_batchbuffer_reset(batch);
return batch;
@@ -108,11 +107,6 @@ intel_batchbuffer_alloc(struct intel_context *intel)
void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
- if (batch->last_fence) {
- dri_fence_wait(batch->last_fence);
- dri_fence_unreference(batch->last_fence);
- batch->last_fence = NULL;
- }
if (batch->map) {
dri_bo_unmap(batch->buf);
batch->map = NULL;
@@ -152,7 +146,7 @@ do_flush_locked(struct intel_batchbuffer *batch,
used,
batch->cliprect_mode != LOOP_CLIPRECTS,
allow_unlock,
- execbuf, &batch->last_fence);
+ execbuf);
} else {
dri_process_relocs(batch->buf);
intel_batch_ioctl(batch->intel,
@@ -162,8 +156,8 @@ do_flush_locked(struct intel_batchbuffer *batch,
allow_unlock);
}
}
-
- dri_post_submit(batch->buf, &batch->last_fence);
+
+ dri_post_submit(batch->buf);
if (intel->numClipRects == 0 &&
batch->cliprect_mode == LOOP_CLIPRECTS) {
@@ -243,9 +237,13 @@ _intel_batchbuffer_flush(struct intel_batchbuffer *batch, const char *file,
UNLOCK_HARDWARE(intel);
if (INTEL_DEBUG & DEBUG_SYNC) {
+ int irq;
+
fprintf(stderr, "waiting for idle\n");
- if (batch->last_fence != NULL)
- dri_fence_wait(batch->last_fence);
+ LOCK_HARDWARE(intel);
+ irq = intelEmitIrqLocked(intel);
+ UNLOCK_HARDWARE(intel);
+ intelWaitIrq(intel, irq);
}
/* Reset the buffer:
@@ -253,14 +251,6 @@ _intel_batchbuffer_flush(struct intel_batchbuffer *batch, const char *file,
intel_batchbuffer_reset(batch);
}
-void
-intel_batchbuffer_finish(struct intel_batchbuffer *batch)
-{
- intel_batchbuffer_flush(batch);
- if (batch->last_fence != NULL)
- dri_fence_wait(batch->last_fence);
-}
-
/* This is the only way buffers get added to the validate list.
*/