author    Eric Anholt <eric@anholt.net>  2008-01-10 10:48:05 -0800
committer Eric Anholt <eric@anholt.net>  2008-01-10 12:34:08 -0800
commit    a04b632350e5d0e9994fc667afc59407a39da0ba (patch)
tree      8907f56e859999fe2448530e4f98fc29656a65b9  /src/mesa/drivers/dri/intel/intel_batchbuffer.c
parent    7086df58688dc375ffd4c0fb9a9884eae05a6e46 (diff)
[intel] Add more cliprect modes to cover other meanings for batch emits.
The previous change gave us only two modes, one which looped over the batch per cliprect (3d drawing) and one that didn't (state updates). However, we really want 4:

- Batch doesn't care about cliprects (state updates)
- Batch needs DRAWING_RECTANGLE looping per cliprect (3d drawing)
- Batch needs to be executed just once (region fills, copies, etc.)
- Batch already includes cliprect handling, and must be flushed by unlock time (copybuffers, clears)

All callers should now be fixed to use one of these states for any batchbuffer emits. Thanks to Keith Whitwell for pointing out the failure.
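For reference, a minimal sketch of what the four-value enum could look like. Only IGNORE_CLIPRECTS and LOOP_CLIPRECTS appear in the hunks below; the other two names (NO_LOOP_CLIPRECTS, REFERENCES_CLIPRECTS) are assumptions filled in from the commit message, not taken from this diff.

enum cliprect_mode {
   /* Batch contents do not depend on cliprects (state updates). */
   IGNORE_CLIPRECTS,
   /* Execute the batch once per cliprect, updating DRAWING_RECTANGLE
    * each time (3d drawing).
    */
   LOOP_CLIPRECTS,
   /* Execute the batch exactly once (region fills, copies, etc.).
    * Name assumed; not shown in this diff.
    */
   NO_LOOP_CLIPRECTS,
   /* Batch emits its own cliprect handling and must be flushed before
    * the lock is released (copybuffers, clears).  Name assumed.
    */
   REFERENCES_CLIPRECTS
};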
Diffstat (limited to 'src/mesa/drivers/dri/intel/intel_batchbuffer.c')
-rw-r--r--  src/mesa/drivers/dri/intel/intel_batchbuffer.c  14
1 file changed, 7 insertions, 7 deletions
diff --git a/src/mesa/drivers/dri/intel/intel_batchbuffer.c b/src/mesa/drivers/dri/intel/intel_batchbuffer.c
index a6f19916d8..ff2045dabe 100644
--- a/src/mesa/drivers/dri/intel/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/intel/intel_batchbuffer.c
@@ -87,7 +87,7 @@ intel_batchbuffer_reset(struct intel_batchbuffer *batch)
    batch->ptr = batch->map;
    batch->dirty_state = ~0;
    batch->id = batch->intel->batch_id++;
-   batch->cliprects_enable = INTEL_BATCH_NO_CLIPRECTS;
+   batch->cliprect_mode = IGNORE_CLIPRECTS;
 }

 struct intel_batchbuffer *
@@ -143,18 +143,18 @@ do_flush_locked(struct intel_batchbuffer *batch,
     */
    if (!(intel->numClipRects == 0 &&
-         batch->cliprects_enable == INTEL_BATCH_CLIPRECTS)) {
+         batch->cliprect_mode == LOOP_CLIPRECTS)) {
       if (intel->ttm == GL_TRUE) {
          intel_exec_ioctl(batch->intel,
                           used,
-                          batch->cliprects_enable == INTEL_BATCH_NO_CLIPRECTS,
+                          batch->cliprect_mode != LOOP_CLIPRECTS,
                           allow_unlock,
                           start, count, &batch->last_fence);
       } else {
          intel_batch_ioctl(batch->intel,
                            batch->buf->offset,
                            used,
-                           batch->cliprects_enable == INTEL_BATCH_NO_CLIPRECTS,
+                           batch->cliprect_mode != LOOP_CLIPRECTS,
                            allow_unlock);
       }
    }
@@ -162,7 +162,7 @@ do_flush_locked(struct intel_batchbuffer *batch,
    dri_post_submit(batch->buf, &batch->last_fence);

    if (intel->numClipRects == 0 &&
-       batch->cliprects_enable == INTEL_BATCH_CLIPRECTS) {
+       batch->cliprect_mode == LOOP_CLIPRECTS) {
       if (allow_unlock) {
          /* If we are not doing any actual user-visible rendering,
           * do a sched_yield to keep the app from pegging the cpu while
@@ -264,10 +264,10 @@ intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
 void
 intel_batchbuffer_data(struct intel_batchbuffer *batch,
                        const void *data, GLuint bytes,
-                       enum cliprects_enable cliprects_enable)
+                       enum cliprect_mode cliprect_mode)
 {
    assert((bytes & 3) == 0);
-   intel_batchbuffer_require_space(batch, bytes, cliprects_enable);
+   intel_batchbuffer_require_space(batch, bytes, cliprect_mode);
    __memcpy(batch->ptr, data, bytes);
    batch->ptr += bytes;
 }
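To illustrate the "All callers should now be fixed" note in the commit message, here is a hypothetical caller sketch under the new interface. Only the intel_batchbuffer_data() signature comes from this diff; the helper name, the placeholder dwords, and the NO_LOOP_CLIPRECTS mode are assumptions for illustration.

/* Hypothetical emit path for a blit-style operation that should be
 * executed exactly once rather than looped per cliprect.  The packet
 * contents are placeholders; only intel_batchbuffer_data()'s signature
 * is taken from the diff above.
 */
static void
emit_example_blit_packet(struct intel_batchbuffer *batch)
{
   GLuint packet[2] = { 0, 0 };   /* placeholder command dwords */

   intel_batchbuffer_data(batch, packet, sizeof(packet),
                          NO_LOOP_CLIPRECTS);
}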