summaryrefslogtreecommitdiff
path: root/src/mesa/drivers/dri/i965/brw_draw.c
diff options
context:
space:
mode:
authorEric Anholt <eric@anholt.net>2008-01-09 14:21:52 -0800
committerEric Anholt <eric@anholt.net>2008-01-09 14:41:54 -0800
commit7da98d7ebaf4475812f2ce44062d50bee393faf7 (patch)
tree39ecd24abfc831ef7b19933b2ec1e5d26c53f3a7 /src/mesa/drivers/dri/i965/brw_draw.c
parentdc1608ae9d90a490ce32aa005488e3591a6d8369 (diff)
[965] Allow more than one draw_prims per batchbuffer.
The comment about (vbo)_exec_api.c appeared to be stale, as the VBO code seems to only use non-named VBOs (not actual VBOs) or freshly-allocated VBO data. This brings a 2x speedup to openarena, because we can submit nearly-full batchbuffers instead of many 450-byte ones.
Diffstat (limited to 'src/mesa/drivers/dri/i965/brw_draw.c')
-rw-r--r--src/mesa/drivers/dri/i965/brw_draw.c20
1 file changed, 13 insertions, 7 deletions
diff --git a/src/mesa/drivers/dri/i965/brw_draw.c b/src/mesa/drivers/dri/i965/brw_draw.c
index 9225748aaa..887eebe475 100644
--- a/src/mesa/drivers/dri/i965/brw_draw.c
+++ b/src/mesa/drivers/dri/i965/brw_draw.c
@@ -270,12 +270,23 @@ static GLboolean brw_try_draw_prims( GLcontext *ctx,
LOCK_HARDWARE(intel);
if (brw->intel.numClipRects == 0) {
- assert(intel->batch->ptr == intel->batch->map);
UNLOCK_HARDWARE(intel);
return GL_TRUE;
}
{
+ /* Flush the batch if it's approaching full, so that we don't wrap while
+ * we've got validated state that needs to be in the same batch as the
+ * primitives. This fraction is just a guess (minimal full state plus
+ * a primitive is around 512 bytes), and would be better if we had
+ * an upper bound of how much we might emit in a single
+ * brw_try_draw_prims().
+ */
+ if (intel->batch->ptr - intel->batch->map > intel->batch->size * 3 / 4)
+ intel_batchbuffer_flush(intel->batch);
+
+ brw->no_batch_wrap = GL_TRUE;
+
/* Set the first primitive early, ahead of validate_state:
*/
brw_set_prim(brw, prim[0].mode);
@@ -310,12 +321,7 @@ static GLboolean brw_try_draw_prims( GLcontext *ctx,
out:
- /* Currently have to do this to synchronize with the map/unmap of
- * the vertex buffer in brw_exec_api.c. Not sure if there is any
- * way around this, as not every flush is due to a buffer filling
- * up.
- */
- intel_batchbuffer_flush( brw->intel.batch );
+ brw->no_batch_wrap = GL_FALSE;
/* Free any old data so it doesn't clog up texture memory - we
* won't be referencing it again.