Diffstat (limited to 'src/mesa/drivers/dri/intel/intel_batchbuffer.c')
-rw-r--r--   src/mesa/drivers/dri/intel/intel_batchbuffer.c   39
1 file changed, 35 insertions(+), 4 deletions(-)
diff --git a/src/mesa/drivers/dri/intel/intel_batchbuffer.c b/src/mesa/drivers/dri/intel/intel_batchbuffer.c
index ae0f8a16f9..9768b0deee 100644
--- a/src/mesa/drivers/dri/intel/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/intel/intel_batchbuffer.c
@@ -104,7 +104,8 @@ do_flush_locked(struct intel_batchbuffer *batch, GLuint used)
batch->map = NULL;
batch->ptr = NULL;
- dri_bo_exec(batch->buf, used, NULL, 0, (x_off & 0xffff) | (y_off << 16));
+ if (!intel->no_hw)
+ dri_bo_exec(batch->buf, used, NULL, 0, (x_off & 0xffff) | (y_off << 16));
if (INTEL_DEBUG & DEBUG_BATCH) {
dri_bo_map(batch->buf, GL_FALSE);
@@ -158,9 +159,10 @@ _intel_batchbuffer_flush(struct intel_batchbuffer *batch, const char *file,
}
/* Mark the end of the buffer. */
- *(GLuint *) (batch->ptr) = MI_BATCH_BUFFER_END; /* noop */
+ *(GLuint *) (batch->ptr) = MI_BATCH_BUFFER_END;
batch->ptr += 4;
used = batch->ptr - batch->map;
+ assert (used <= batch->buf->size);
/* Workaround for recursive batchbuffer flushing: If the window is
* moved, we can get into a case where we try to flush during a
@@ -208,9 +210,11 @@ intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
{
int ret;
+ assert(delta < buffer->size);
+
if (batch->ptr - batch->map > batch->buf->size)
- _mesa_printf ("bad relocation ptr %p map %p offset %d size %d\n",
- batch->ptr, batch->map, batch->ptr - batch->map, batch->buf->size);
+ printf ("bad relocation ptr %p map %p offset %d size %lu\n",
+ batch->ptr, batch->map, batch->ptr - batch->map, batch->buf->size);
ret = dri_bo_emit_reloc(batch->buf, read_domains, write_domain,
delta, batch->ptr - batch->map, buffer);
@@ -224,6 +228,33 @@ intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
return GL_TRUE;
}
+GLboolean
+intel_batchbuffer_emit_reloc_fenced(struct intel_batchbuffer *batch,
+ drm_intel_bo *buffer,
+ uint32_t read_domains, uint32_t write_domain,
+ uint32_t delta)
+{
+ int ret;
+
+ assert(delta < buffer->size);
+
+ if (batch->ptr - batch->map > batch->buf->size)
+ printf ("bad relocation ptr %p map %p offset %d size %lu\n",
+ batch->ptr, batch->map, batch->ptr - batch->map, batch->buf->size);
+ ret = drm_intel_bo_emit_reloc_fence(batch->buf, batch->ptr - batch->map,
+ buffer, delta,
+ read_domains, write_domain);
+
+ /*
+ * Using the old buffer offset, write in what the right data would
+ * be, in case the buffer doesn't move and we can short-circuit the
+ * relocation processing in the kernel
+ */
+ intel_batchbuffer_emit_dword (batch, buffer->offset + delta);
+
+ return GL_TRUE;
+}
+
void
intel_batchbuffer_data(struct intel_batchbuffer *batch,
const void *data, GLuint bytes)
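
The new intel_batchbuffer_emit_reloc_fenced() mirrors intel_batchbuffer_emit_reloc(), but routes the relocation through drm_intel_bo_emit_reloc_fence() so libdrm knows the target object needs a fence register, while still pre-writing the presumed offset into the batch so the kernel can skip relocation processing if the buffer does not move. A minimal sketch of how a call site could choose between the two helpers; the emit_render_target_reloc() wrapper, the tiled flag, and the specific domains are illustrative assumptions, not call sites from this commit:

/* Hypothetical call site, assuming the declarations from
 * intel_batchbuffer.h and the I915_GEM_DOMAIN_* flags from i915_drm.h
 * (via intel_bufmgr.h) are in scope.
 */
static void
emit_render_target_reloc(struct intel_batchbuffer *batch,
                         drm_intel_bo *bo, GLboolean tiled,
                         uint32_t delta)
{
   if (tiled) {
      /* Tiled access through fixed-function units needs a fence
       * register, so emit the fenced relocation variant.
       */
      intel_batchbuffer_emit_reloc_fenced(batch, bo,
                                          I915_GEM_DOMAIN_RENDER,
                                          I915_GEM_DOMAIN_RENDER,
                                          delta);
   } else {
      /* Linear buffers keep using the plain relocation path. */
      intel_batchbuffer_emit_reloc(batch, bo,
                                   I915_GEM_DOMAIN_RENDER,
                                   I915_GEM_DOMAIN_RENDER,
                                   delta);
   }
}

Both helpers end by calling intel_batchbuffer_emit_dword(batch, bo->offset + delta), so the batch already contains a plausible address for the common case where the kernel leaves the buffer in place.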