author     Chris Wilson <chris@chris-wilson.co.uk>    2011-02-10 20:25:51 +0000
committer  Chris Wilson <chris@chris-wilson.co.uk>    2011-02-21 12:59:35 +0000
commit     8d68a90e225d831a395ba788e425cb717eec1f9a (patch)
tree       1c46eb19125ebbb205625783dd184e0489231d01 /src/mesa/drivers/dri/i965/brw_state_batch.c
parent     3f55683927278e57f3ef8a151d15f4cffdc060dc (diff)
intel: use pwrite for batch
It's faster. Not only is the memcpy performed more efficiently in the kernel (making up for the system call overhead), but by not using mmap we remove the greater overhead of tracking the vma of every batch. And it means we can read back from the batch buffer without incurring the cost of an uncached read through the GTT.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
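For context, here is a minimal sketch (not the Mesa code itself) of the two upload strategies the message contrasts, written against the public libdrm bufmgr API. The helper names are invented for illustration; drm_intel_bo_subdata() is the libdrm entry point implemented with the I915_GEM_PWRITE ioctl, while drm_intel_gem_bo_map_gtt() maps the buffer object through the GTT.

/* Illustrative only: uploading a CPU-side copy of the batch into its BO.
 * Helper names are invented; the libdrm calls are real. */
#include <string.h>
#include <stdint.h>
#include <intel_bufmgr.h>

/* mmap path: map the BO through the GTT and memcpy into it.  Every batch
 * acquires a vma, and reading the mapping back is uncached. */
static int
upload_batch_gtt(drm_intel_bo *bo, const uint32_t *map, unsigned used_dwords)
{
   int ret = drm_intel_gem_bo_map_gtt(bo);
   if (ret != 0)
      return ret;
   memcpy(bo->virtual, map, 4 * used_dwords);
   drm_intel_gem_bo_unmap_gtt(bo);
   return 0;
}

/* pwrite path: hand the kernel the cached CPU copy directly.  No mapping
 * of the batch BO is created, and the CPU copy remains cheap to read. */
static int
upload_batch_pwrite(drm_intel_bo *bo, const uint32_t *map, unsigned used_dwords)
{
   return drm_intel_bo_subdata(bo, 0, 4 * used_dwords, map);
}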
Diffstat (limited to 'src/mesa/drivers/dri/i965/brw_state_batch.c')
-rw-r--r-- | src/mesa/drivers/dri/i965/brw_state_batch.c | 14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/src/mesa/drivers/dri/i965/brw_state_batch.c b/src/mesa/drivers/dri/i965/brw_state_batch.c
index 6d7b6a429d..f363a922c1 100644
--- a/src/mesa/drivers/dri/i965/brw_state_batch.c
+++ b/src/mesa/drivers/dri/i965/brw_state_batch.c
@@ -48,7 +48,7 @@ GLboolean brw_cached_batch_struct( struct brw_context *brw,
struct header *newheader = (struct header *)data;
if (brw->emit_state_always) {
- intel_batchbuffer_data(brw->intel.batch, data, sz, false);
+ intel_batchbuffer_data(&brw->intel, data, sz, false);
return GL_TRUE;
}
@@ -75,7 +75,7 @@ GLboolean brw_cached_batch_struct( struct brw_context *brw,
emit:
memcpy(item->header, newheader, sz);
- intel_batchbuffer_data(brw->intel.batch, data, sz, false);
+ intel_batchbuffer_data(&brw->intel, data, sz, false);
return GL_TRUE;
}
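Both hunks above change the callers to pass &brw->intel instead of a batchbuffer pointer, which matches a batchbuffer that is now embedded in the intel_context and accumulated in a cached, dword-indexed CPU map rather than written through a mapping of the BO. A rough sketch of such a helper under those assumptions (the struct stand-ins and the body are illustrative, not the verbatim Mesa implementation):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Minimal stand-ins for the driver structs, just enough for the sketch. */
struct intel_batchbuffer {
   uint32_t *map;     /* cached CPU copy of the batch, indexed in dwords */
   unsigned used;     /* dwords of commands emitted so far */
};

struct intel_context {
   struct intel_batchbuffer batch;   /* embedded, hence &brw->intel.batch */
};

/* Append 'bytes' of command data to the cached map; the BO itself is only
 * written (via pwrite) when the batch is flushed. */
static void
intel_batchbuffer_data(struct intel_context *intel,
                       const void *data, unsigned bytes, bool is_blit)
{
   struct intel_batchbuffer *batch = &intel->batch;

   (void) is_blit;
   assert((bytes & 3) == 0);          /* the batch is dword-granular */
   memcpy(batch->map + batch->used, data, bytes);
   batch->used += bytes >> 2;         /* 'used' counts dwords, not bytes */
}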
@@ -118,10 +118,10 @@ brw_state_batch(struct brw_context *brw,
int alignment,
uint32_t *out_offset)
{
- struct intel_batchbuffer *batch = brw->intel.batch;
+ struct intel_batchbuffer *batch = &brw->intel.batch;
uint32_t offset;
- assert(size < batch->buf->size);
+ assert(size < batch->bo->size);
offset = ROUND_DOWN_TO(batch->state_batch_offset - size, alignment);
/* If allocating from the top would wrap below the batchbuffer, or
@@ -129,13 +129,13 @@ brw_state_batch(struct brw_context *brw,
* space, then flush and try again.
*/
if (batch->state_batch_offset < size ||
- offset < batch->ptr - batch->map + batch->reserved_space) {
- intel_batchbuffer_flush(batch);
+ offset < 4*batch->used + batch->reserved_space) {
+ intel_batchbuffer_flush(&brw->intel);
offset = ROUND_DOWN_TO(batch->state_batch_offset - size, alignment);
}
batch->state_batch_offset = offset;
*out_offset = offset;
- return batch->map + offset;
+ return batch->map + (offset>>2);
}
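The arithmetic changes in this function follow from the same layout: batch->map is now a uint32_t * over the cached CPU copy, batch->used counts dwords already emitted from the bottom of the batch (hence the wrap check against 4*batch->used bytes), and the byte offset handed back through *out_offset has to be shifted down by two before indexing the dword map. A hypothetical caller, for illustration only:

/* Hypothetical caller (not from this commit): allocate 64-byte-aligned
 * space for four state dwords at the top of the batch and fill it in
 * through the returned CPU pointer. */
static void
emit_some_state(struct brw_context *brw)
{
   uint32_t offset;
   uint32_t *state = brw_state_batch(brw, 4 * sizeof(uint32_t), 64, &offset);

   state[0] = 0;      /* ... state dwords ... */
   state[1] = 0;
   state[2] = 0;
   state[3] = 0;

   /* 'offset' is the byte offset within the batch BO; it would be emitted
    * into a command with a relocation pointing at this state. */
}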