Diffstat (limited to 'src/mesa/drivers/dri/i915/i915_vtbl.c')
-rw-r--r-- src/mesa/drivers/dri/i915/i915_vtbl.c | 54 ++++++++++--------------------------------------------
1 file changed, 10 insertions(+), 44 deletions(-)
diff --git a/src/mesa/drivers/dri/i915/i915_vtbl.c b/src/mesa/drivers/dri/i915/i915_vtbl.c
index 8e5dd453ab..c856a8627d 100644
--- a/src/mesa/drivers/dri/i915/i915_vtbl.c
+++ b/src/mesa/drivers/dri/i915/i915_vtbl.c
@@ -287,11 +287,10 @@ get_state_size(struct i915_hw_state *state)
return sz;
}
-
/* Push the state into the sarea and/or texture memory.
*/
static void
-i915_do_emit_state(struct intel_context *intel)
+i915_emit_state(struct intel_context *intel)
{
struct i915_context *i915 = i915_context(&intel->ctx);
struct i915_hw_state *state = i915->current;
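For context, a minimal sketch of how the renamed function is hooked up through the vtbl this file populates; i915InitVtbl and the intel.vtbl.emit_state field follow the usual i915 driver pattern and are assumptions, not shown in this hunk:

   void
   i915InitVtbl(struct i915_context *i915)
   {
      i915->intel.vtbl.emit_state = i915_emit_state;
      /* ... remaining hooks elided ... */
   }

With the i915_do_emit_state wrapper gone (see the later hunk), the vtbl entry points directly at the one remaining emit function.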
@@ -300,28 +299,18 @@ i915_do_emit_state(struct intel_context *intel)
BATCH_LOCALS;
/* We don't hold the lock at this point, so want to make sure that
- * there won't be a buffer wrap.
+ * there won't be a buffer wrap between the state emits and the primitive
+ * emit header.
*
* It might be better to talk about explicit places where
* scheduling is allowed, rather than assume that it is whenever a
* batchbuffer fills up.
- */
- intel_batchbuffer_require_space(intel->batch, get_state_size(state), 0);
-
- /* Workaround. There are cases I haven't been able to track down
- * where we aren't emitting a full state at the start of a new
- * batchbuffer. This code spots that we are on a new batchbuffer
- * and forces a full state emit no matter what.
*
- * In the normal case state->emitted is already zero, this code is
- * another set of checks to make sure it really is.
+ * Set the space as LOOP_CLIPRECTS now, since that's what our primitives
+ * will be emitted under.
*/
- if (intel->batch->id != intel->last_state_batch_id ||
- intel->batch->map == intel->batch->ptr)
- {
- state->emitted = 0;
- intel_batchbuffer_require_space(intel->batch, get_state_size(state), 0);
- }
+ intel_batchbuffer_require_space(intel->batch, get_state_size(state) + 8,
+ LOOP_CLIPRECTS);
/* Do this here as we may have flushed the batchbuffer above,
* causing more state to be dirty!
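A minimal sketch of the reservation semantics the new call relies on. batch->map and batch->ptr appear elsewhere in this diff; the size and cliprect_mode fields and the sketch itself are assumptions, not Mesa's actual implementation:

   static void
   require_space_sketch(struct intel_batchbuffer *batch, GLuint sz,
                        GLuint cliprect_mode)
   {
      /* Flush now if sz more bytes cannot fit, so any wrap happens
       * before the caller starts emitting, never in the middle of it. */
      if (batch->ptr + sz > batch->map + batch->size)
         intel_batchbuffer_flush(batch);
      batch->cliprect_mode = cliprect_mode;   /* e.g. LOOP_CLIPRECTS */
   }

Reserving get_state_size(state) + 8 bytes up front guarantees the state packets and the 8-byte primitive header that follows them land in the same batchbuffer.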
@@ -330,11 +319,6 @@ i915_do_emit_state(struct intel_context *intel)
state->emitted |= dirty;
assert(get_dirty(state) == 0);
- if (intel->batch->id != intel->last_state_batch_id) {
- assert(dirty & I915_UPLOAD_CTX);
- intel->last_state_batch_id = intel->batch->id;
- }
-
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "%s dirty: %x\n", __FUNCTION__, dirty);
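The asserts above rely on a simple dirty-bit model. A hypothetical sketch of what get_dirty computes, with the active field inferred from usage (emitted appears in this diff):

   static GLuint
   get_dirty_sketch(struct i915_hw_state *state)
   {
      /* A state unit needs (re-)emitting when it is in use but has
       * not yet been written into the current batchbuffer. */
      return state->active & ~state->emitted;
   }

Clearing state->emitted, as i915_new_batch does in the last hunk, therefore marks every active unit dirty and forces a full state emit into the fresh batch.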
@@ -457,27 +441,6 @@ i915_do_emit_state(struct intel_context *intel)
intel->batch->dirty_state &= ~dirty;
assert(get_dirty(state) == 0);
-}
-
-static void
-i915_emit_state(struct intel_context *intel)
-{
- struct i915_context *i915 = i915_context(&intel->ctx);
-
- i915_do_emit_state( intel );
-
- /* Second chance - catch batchbuffer wrap in the middle of state
- * emit. This shouldn't happen but it has been observed in
- * testing.
- */
- if (get_dirty( i915->current )) {
- /* Force a full re-emit if this happens.
- */
- i915->current->emitted = 0;
- i915_do_emit_state( intel );
- }
-
- assert(get_dirty(i915->current) == 0);
assert((intel->batch->dirty_state & (1<<1)) == 0);
}
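The deleted wrapper was a second-chance retry for a batch wrap in the middle of a state emit; the up-front reservation makes that case impossible, so the caller-side pattern reduces to the sketch below. start_prim_sketch and prim_header are hypothetical; the BATCH macros are the usual intel batchbuffer ones:

   static void
   start_prim_sketch(struct intel_context *intel, GLuint prim_header)
   {
      BATCH_LOCALS;

      /* Reserves get_state_size() + 8 bytes under LOOP_CLIPRECTS. */
      intel->vtbl.emit_state(intel);

      /* These two header dwords fit in the 8 bytes reserved above,
       * so no batch wrap can separate them from the state packets. */
      BEGIN_BATCH(2, LOOP_CLIPRECTS);
      OUT_BATCH(prim_header);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }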
@@ -588,6 +551,9 @@ i915_new_batch(struct intel_context *intel)
* difficulties associated with them (physical address requirements).
*/
i915->state.emitted = 0;
+
+ /* Check that we didn't just wrap our batchbuffer at a bad time. */
+ assert(!intel->no_batch_wrap);
}
static GLuint
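The new assert implies no_batch_wrap is a guard flag set by callers around spans that must stay within one batch. A sketch of the intended usage, assuming a GLboolean flag; emit_prim_header is hypothetical:

   intel->no_batch_wrap = GL_TRUE;

   intel->vtbl.emit_state(intel);   /* if either of these wraps,      */
   emit_prim_header(intel);         /* i915_new_batch hits the assert */

   intel->no_batch_wrap = GL_FALSE;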