path: root/src/mesa/drivers/dri/i915
author     Eric Anholt <eric@anholt.net>  2008-01-14 11:03:05 -0800
committer  Eric Anholt <eric@anholt.net>  2008-01-17 13:55:42 -0800
commit     8517079cbcbbf31291b05420f3b776df712dfd47 (patch)
tree       5bc8f3c51b30e62ed07ebed389a9125b1d4045f5 /src/mesa/drivers/dri/i915
parent     faeb1bc9f9c5935ecbd32c17d81507d5061a6270 (diff)
[i915] Fix driver from cliprects changes, and clean up state emission.
The fix for pageflipping with cliprects ended up causing a batch flush at an inopportune time, which is fixed by moving it up. Additionally, the recovery code for handling batch wraps at bad times is replaced by just checking for the required space up front, and using a no_batch_wrap assert, like on 965, to make sure the space estimate wasn't wrong.
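
The replacement pattern the message describes (reserve the whole emit up front instead of recovering after a wrap) is shown in a minimal sketch below. The struct batch type and the require_space()/emit() helpers are illustrative stand-ins, not the driver's real intel_batchbuffer API; only the idea of reserving state size plus the 8-byte primitive slot in one call mirrors the actual change in this diff.

#include <assert.h>
#include <stddef.h>

/* Illustrative stand-in for the driver's batchbuffer bookkeeping. */
struct batch {
   size_t used;    /* bytes already emitted into the current buffer */
   size_t total;   /* capacity of one batchbuffer */
};

/* Start a fresh buffer; stands in for intel_batchbuffer_flush(). */
static void flush_batch(struct batch *b)
{
   b->used = 0;
}

/* Rough analogue of intel_batchbuffer_require_space(): if the whole
 * upcoming emit cannot fit, flush now, so no flush happens mid-emit. */
static void require_space(struct batch *b, size_t bytes)
{
   assert(bytes <= b->total);
   if (b->total - b->used < bytes)
      flush_batch(b);
}

static void emit(struct batch *b, size_t bytes)
{
   /* require_space() already guaranteed this fits. */
   assert(b->total - b->used >= bytes);
   b->used += bytes;
}

/* Reserve state plus the 8-byte primitive slot in one call, the same way
 * the patch passes get_state_size(state) + 8, then emit with no risk of
 * wrapping between the state and the primitive header. */
static void emit_state_then_primitive_header(struct batch *b, size_t state_size)
{
   require_space(b, state_size + 8);
   emit(b, state_size);
   emit(b, 8);
}
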
Diffstat (limited to 'src/mesa/drivers/dri/i915')
-rw-r--r--   src/mesa/drivers/dri/i915/i830_vtbl.c     |  53
-rw-r--r--   src/mesa/drivers/dri/i915/i915_vtbl.c     |  54
-rw-r--r--   src/mesa/drivers/dri/i915/intel_context.h |   3
-rw-r--r--   src/mesa/drivers/dri/i915/intel_tris.c    |  17
4 files changed, 26 insertions(+), 101 deletions(-)
diff --git a/src/mesa/drivers/dri/i915/i830_vtbl.c b/src/mesa/drivers/dri/i915/i830_vtbl.c
index be4fdff7d6..9bc868b043 100644
--- a/src/mesa/drivers/dri/i915/i830_vtbl.c
+++ b/src/mesa/drivers/dri/i915/i830_vtbl.c
@@ -414,7 +414,7 @@ get_state_size(struct i830_hw_state *state)
/* Push the state into the sarea and/or texture memory.
*/
static void
-i830_do_emit_state(struct intel_context *intel)
+i830_emit_state(struct intel_context *intel)
{
struct i830_context *i830 = i830_context(&intel->ctx);
struct i830_hw_state *state = i830->current;
@@ -423,28 +423,18 @@ i830_do_emit_state(struct intel_context *intel)
BATCH_LOCALS;
/* We don't hold the lock at this point, so want to make sure that
- * there won't be a buffer wrap.
+ * there won't be a buffer wrap between the state emits and the primitive
+ * emit header.
*
* It might be better to talk about explicit places where
* scheduling is allowed, rather than assume that it is whenever a
* batchbuffer fills up.
- */
- intel_batchbuffer_require_space(intel->batch, get_state_size(state), 0);
-
- /* Workaround. There are cases I haven't been able to track down
- * where we aren't emitting a full state at the start of a new
- * batchbuffer. This code spots that we are on a new batchbuffer
- * and forces a full state emit no matter what.
*
- * In the normal case state->emitted is already zero, this code is
- * another set of checks to make sure it really is.
+ * Set the space as LOOP_CLIPRECTS now, since that's what our primitives
+ * will be emitted under.
*/
- if (intel->batch->id != intel->last_state_batch_id ||
- intel->batch->map == intel->batch->ptr)
- {
- state->emitted = 0;
- intel_batchbuffer_require_space(intel->batch, get_state_size(state), 0);
- }
+ intel_batchbuffer_require_space(intel->batch, get_state_size(state) + 8,
+ LOOP_CLIPRECTS);
/* Do this here as we may have flushed the batchbuffer above,
* causing more state to be dirty!
@@ -453,11 +443,6 @@ i830_do_emit_state(struct intel_context *intel)
state->emitted |= dirty;
assert(get_dirty(state) == 0);
- if (intel->batch->id != intel->last_state_batch_id) {
- assert(dirty & I830_UPLOAD_CTX);
- intel->last_state_batch_id = intel->batch->id;
- }
-
if (dirty & I830_UPLOAD_INVARIENT) {
DBG("I830_UPLOAD_INVARIENT:\n");
i830_emit_invarient_state(intel);
@@ -537,27 +522,6 @@ i830_do_emit_state(struct intel_context *intel)
intel->batch->dirty_state &= ~dirty;
assert(get_dirty(state) == 0);
-}
-
-static void
-i830_emit_state(struct intel_context *intel)
-{
- struct i830_context *i830 = i830_context(&intel->ctx);
-
- i830_do_emit_state( intel );
-
- /* Second chance - catch batchbuffer wrap in the middle of state
- * emit. This shouldn't happen but it has been observed in
- * testing.
- */
- if (get_dirty( i830->current )) {
- /* Force a full re-emit if this happens.
- */
- i830->current->emitted = 0;
- i830_do_emit_state( intel );
- }
-
- assert(get_dirty(i830->current) == 0);
assert((intel->batch->dirty_state & (1<<1)) == 0);
}
@@ -679,6 +643,9 @@ i830_new_batch(struct intel_context *intel)
{
struct i830_context *i830 = i830_context(&intel->ctx);
i830->state.emitted = 0;
+
+ /* Check that we didn't just wrap our batchbuffer at a bad time. */
+ assert(!intel->no_batch_wrap);
}
diff --git a/src/mesa/drivers/dri/i915/i915_vtbl.c b/src/mesa/drivers/dri/i915/i915_vtbl.c
index 8e5dd453ab..c856a8627d 100644
--- a/src/mesa/drivers/dri/i915/i915_vtbl.c
+++ b/src/mesa/drivers/dri/i915/i915_vtbl.c
@@ -287,11 +287,10 @@ get_state_size(struct i915_hw_state *state)
return sz;
}
-
/* Push the state into the sarea and/or texture memory.
*/
static void
-i915_do_emit_state(struct intel_context *intel)
+i915_emit_state(struct intel_context *intel)
{
struct i915_context *i915 = i915_context(&intel->ctx);
struct i915_hw_state *state = i915->current;
@@ -300,28 +299,18 @@ i915_do_emit_state(struct intel_context *intel)
BATCH_LOCALS;
/* We don't hold the lock at this point, so want to make sure that
- * there won't be a buffer wrap.
+ * there won't be a buffer wrap between the state emits and the primitive
+ * emit header.
*
* It might be better to talk about explicit places where
* scheduling is allowed, rather than assume that it is whenever a
* batchbuffer fills up.
- */
- intel_batchbuffer_require_space(intel->batch, get_state_size(state), 0);
-
- /* Workaround. There are cases I haven't been able to track down
- * where we aren't emitting a full state at the start of a new
- * batchbuffer. This code spots that we are on a new batchbuffer
- * and forces a full state emit no matter what.
*
- * In the normal case state->emitted is already zero, this code is
- * another set of checks to make sure it really is.
+ * Set the space as LOOP_CLIPRECTS now, since that's what our primitives
+ * will be emitted under.
*/
- if (intel->batch->id != intel->last_state_batch_id ||
- intel->batch->map == intel->batch->ptr)
- {
- state->emitted = 0;
- intel_batchbuffer_require_space(intel->batch, get_state_size(state), 0);
- }
+ intel_batchbuffer_require_space(intel->batch, get_state_size(state) + 8,
+ LOOP_CLIPRECTS);
/* Do this here as we may have flushed the batchbuffer above,
* causing more state to be dirty!
@@ -330,11 +319,6 @@ i915_do_emit_state(struct intel_context *intel)
state->emitted |= dirty;
assert(get_dirty(state) == 0);
- if (intel->batch->id != intel->last_state_batch_id) {
- assert(dirty & I915_UPLOAD_CTX);
- intel->last_state_batch_id = intel->batch->id;
- }
-
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "%s dirty: %x\n", __FUNCTION__, dirty);
@@ -457,27 +441,6 @@ i915_do_emit_state(struct intel_context *intel)
intel->batch->dirty_state &= ~dirty;
assert(get_dirty(state) == 0);
-}
-
-static void
-i915_emit_state(struct intel_context *intel)
-{
- struct i915_context *i915 = i915_context(&intel->ctx);
-
- i915_do_emit_state( intel );
-
- /* Second chance - catch batchbuffer wrap in the middle of state
- * emit. This shouldn't happen but it has been observed in
- * testing.
- */
- if (get_dirty( i915->current )) {
- /* Force a full re-emit if this happens.
- */
- i915->current->emitted = 0;
- i915_do_emit_state( intel );
- }
-
- assert(get_dirty(i915->current) == 0);
assert((intel->batch->dirty_state & (1<<1)) == 0);
}
@@ -588,6 +551,9 @@ i915_new_batch(struct intel_context *intel)
* difficulties associated with them (physical address requirements).
*/
i915->state.emitted = 0;
+
+ /* Check that we didn't just wrap our batchbuffer at a bad time. */
+ assert(!intel->no_batch_wrap);
}
static GLuint
diff --git a/src/mesa/drivers/dri/i915/intel_context.h b/src/mesa/drivers/dri/i915/intel_context.h
index b1648cd85d..be78b475b4 100644
--- a/src/mesa/drivers/dri/i915/intel_context.h
+++ b/src/mesa/drivers/dri/i915/intel_context.h
@@ -169,8 +169,7 @@ struct intel_context
dri_fence *first_swap_fence;
struct intel_batchbuffer *batch;
- unsigned batch_id;
- GLuint last_state_batch_id;
+ GLboolean no_batch_wrap;
struct
{
diff --git a/src/mesa/drivers/dri/i915/intel_tris.c b/src/mesa/drivers/dri/i915/intel_tris.c
index 6ed54b072a..9d93636900 100644
--- a/src/mesa/drivers/dri/i915/intel_tris.c
+++ b/src/mesa/drivers/dri/i915/intel_tris.c
@@ -89,29 +89,20 @@ intelStartInlinePrimitive(struct intel_context *intel,
{
BATCH_LOCALS;
+ intel_wait_flips(intel);
+
intel->vtbl.emit_state(intel);
- /* Need to make sure at the very least that we don't wrap
- * batchbuffers in BEGIN_BATCH below, otherwise the primitive will
- * be emitted to a batchbuffer missing the required full-state
- * preamble.
- */
- if (intel_batchbuffer_space(intel->batch) < 100) {
- intel_batchbuffer_flush(intel->batch);
- intel->vtbl.emit_state(intel);
- }
+ intel->no_batch_wrap = GL_TRUE;
/* _mesa_printf("%s *", __progname); */
- intel_wait_flips(intel);
-
/* Emit a slot which will be filled with the inline primitive
* command later.
*/
BEGIN_BATCH(2, batch_flags);
OUT_BATCH(0);
- assert(intel->batch->id == intel->last_state_batch_id);
assert((intel->batch->dirty_state & (1<<1)) == 0);
intel->prim.start_ptr = intel->batch->ptr;
@@ -121,6 +112,8 @@ intelStartInlinePrimitive(struct intel_context *intel,
OUT_BATCH(0);
ADVANCE_BATCH();
+ intel->no_batch_wrap = GL_FALSE;
+
/* _mesa_printf(">"); */
}
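
For reference, the no_batch_wrap bracket added in intel_tris.c and the matching assert in i830_new_batch()/i915_new_batch() are condensed into a self-contained sketch below. The struct ctx type and the helpers are simplified assumptions, not the real driver entry points; the flag handling is what mirrors the diff above.

#include <assert.h>
#include <stdbool.h>

/* Simplified stand-in for struct intel_context. */
struct ctx {
   bool no_batch_wrap;   /* true while a wrap would lose the state preamble */
   int  space;           /* dwords left in the current batch */
};

/* Stands in for i830_new_batch()/i915_new_batch(): starting a new batch
 * while the flag is set means we wrapped at a bad time, so trap it. */
static void new_batch(struct ctx *c)
{
   assert(!c->no_batch_wrap);
   c->space = 1024;
}

static void out_batch(struct ctx *c, int dwords)
{
   if (c->space < dwords)
      new_batch(c);      /* fires the assert if we are inside the bracket */
   c->space -= dwords;
}

/* Shape of intelStartInlinePrimitive() after the patch: state emission has
 * already reserved room for the 2-dword primitive slot, so the bracketed
 * emits below can never trigger new_batch(). */
static void start_inline_primitive(struct ctx *c)
{
   c->no_batch_wrap = true;
   out_batch(c, 1);       /* slot later filled with the primitive command */
   out_batch(c, 1);
   c->no_batch_wrap = false;
}
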