Diffstat (limited to 'src/mesa/drivers/dri/i965/brw_draw_upload.c')
-rw-r--r--  src/mesa/drivers/dri/i965/brw_draw_upload.c  92
1 file changed, 69 insertions(+), 23 deletions(-)
diff --git a/src/mesa/drivers/dri/i965/brw_draw_upload.c b/src/mesa/drivers/dri/i965/brw_draw_upload.c
index fd9c3915c4..4aa17fa02d 100644
--- a/src/mesa/drivers/dri/i965/brw_draw_upload.c
+++ b/src/mesa/drivers/dri/i965/brw_draw_upload.c
@@ -383,7 +383,6 @@ static void brw_prepare_vertices(struct brw_context *brw)
struct brw_vertex_element *input = brw->vb.enabled[i];
input->element_size = get_size(input->glarray->Type) * input->glarray->Size;
- input->count = input->glarray->StrideB ? max_index + 1 - min_index : 1;
if (input->glarray->BufferObj->Name != 0) {
struct intel_buffer_object *intel_buffer =
@@ -396,6 +395,7 @@ static void brw_prepare_vertices(struct brw_context *brw)
dri_bo_reference(input->bo);
input->offset = (unsigned long)input->glarray->Ptr;
input->stride = input->glarray->StrideB;
+ input->count = input->glarray->_MaxElement;
/* This is a common place to reach if the user mistakenly supplies
* a pointer in place of a VBO offset. If we just let it go through,
@@ -411,6 +411,7 @@ static void brw_prepare_vertices(struct brw_context *brw)
*/
assert(input->offset < input->bo->size);
} else {
+ input->count = input->glarray->StrideB ? max_index + 1 - min_index : 1;
if (input->bo != NULL) {
/* Already-uploaded vertex data is present from a previous
* prepare_vertices, but we had to re-validate state due to
@@ -534,7 +535,19 @@ static void brw_emit_vertices(struct brw_context *brw)
OUT_RELOC(input->bo,
I915_GEM_DOMAIN_VERTEX, 0,
input->offset);
- OUT_BATCH(brw->vb.max_index);
+ if (BRW_IS_IGDNG(brw)) {
+ if (input->stride) {
+ OUT_RELOC(input->bo,
+ I915_GEM_DOMAIN_VERTEX, 0,
+ input->offset + input->stride * input->count);
+ } else {
+ assert(input->count == 1);
+ OUT_RELOC(input->bo,
+ I915_GEM_DOMAIN_VERTEX, 0,
+ input->offset + input->element_size);
+ }
+ } else
+ OUT_BATCH(input->stride ? input->count : 0);
OUT_BATCH(0); /* Instance data step rate */
}
ADVANCE_BATCH();
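
On IGDNG the vertex buffer packet takes an end address for the buffer rather than a maximum index, which is why the hunk above emits a second relocation there. A minimal sketch of the end-offset arithmetic that path encodes (vertex_buffer_end_offset is a hypothetical helper, not part of the patch):

/* Hypothetical helper mirroring the arithmetic above: with a non-zero
 * stride the hardware may read `count` strided elements starting at
 * `offset`; with a zero stride only a single element of `element_size`
 * bytes is fetched (the patch asserts count == 1 in that case).
 */
static unsigned
vertex_buffer_end_offset(unsigned offset, unsigned stride,
                         unsigned count, unsigned element_size)
{
   if (stride)
      return offset + stride * count;
   return offset + element_size;
}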
@@ -564,11 +577,18 @@ static void brw_emit_vertices(struct brw_context *brw)
BRW_VE0_VALID |
(format << BRW_VE0_FORMAT_SHIFT) |
(0 << BRW_VE0_SRC_OFFSET_SHIFT));
- OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
- (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
- (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
- (comp3 << BRW_VE1_COMPONENT_3_SHIFT) |
- ((i * 4) << BRW_VE1_DST_OFFSET_SHIFT));
+
+ if (BRW_IS_IGDNG(brw))
+ OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
+ (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
+ (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
+ (comp3 << BRW_VE1_COMPONENT_3_SHIFT));
+ else
+ OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
+ (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
+ (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
+ (comp3 << BRW_VE1_COMPONENT_3_SHIFT) |
+ ((i * 4) << BRW_VE1_DST_OFFSET_SHIFT));
}
ADVANCE_BATCH();
}
@@ -592,17 +612,20 @@ static void brw_prepare_indices(struct brw_context *brw)
dri_bo *bo = NULL;
struct gl_buffer_object *bufferobj;
GLuint offset;
+ GLuint ib_type_size;
if (index_buffer == NULL)
return;
- ib_size = get_size(index_buffer->type) * index_buffer->count;
+ ib_type_size = get_size(index_buffer->type);
+ ib_size = ib_type_size * index_buffer->count;
bufferobj = index_buffer->obj;
/* Turn into a proper VBO:
*/
if (!bufferobj->Name) {
-
+ brw->ib.start_vertex_offset = 0;
+
/* Get new bufferobj, offset:
*/
get_space(brw, ib_size, &bo, &offset);
@@ -618,6 +641,7 @@ static void brw_prepare_indices(struct brw_context *brw)
}
} else {
offset = (GLuint) (unsigned long) index_buffer->ptr;
+ brw->ib.start_vertex_offset = 0;
/* If the index buffer isn't aligned to its element size, we have to
* rebase it into a temporary.
@@ -638,39 +662,62 @@ static void brw_prepare_indices(struct brw_context *brw)
bo = intel_bufferobj_buffer(intel, intel_buffer_object(bufferobj),
INTEL_READ);
dri_bo_reference(bo);
+
+ /* Use CMD_3D_PRIM's start_vertex_offset to avoid re-uploading
+ * the index buffer state when we're just moving the start index
+ * of our drawing.
+ */
+ brw->ib.start_vertex_offset = offset / ib_type_size;
+ offset = 0;
+ ib_size = bo->size;
}
}
- dri_bo_unreference(brw->ib.bo);
- brw->ib.bo = bo;
- brw->ib.offset = offset;
+ if (brw->ib.bo != bo ||
+ brw->ib.offset != offset ||
+ brw->ib.size != ib_size)
+ {
+ drm_intel_bo_unreference(brw->ib.bo);
+ brw->ib.bo = bo;
+ brw->ib.offset = offset;
+ brw->ib.size = ib_size;
+
+ brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
+ } else {
+ drm_intel_bo_unreference(bo);
+ }
brw_add_validated_bo(brw, brw->ib.bo);
}
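
The start_vertex_offset path above turns the byte offset of the indices inside the bound element array buffer into an element index, so drawing from a different position in the same index buffer only changes the primitive packet instead of re-emitting the index buffer state. A minimal sketch of that conversion, assuming the names used in the hunk (the helper itself is illustrative, not part of the patch):

#include <assert.h>

/* Sketch: convert a byte offset within a bound element array buffer
 * into an element index usable as CMD_3D_PRIM's start_vertex_offset.
 */
static unsigned
index_offset_to_start_vertex(unsigned byte_offset, unsigned ib_type_size)
{
   /* brw_prepare_indices only takes this path when the offset is
    * aligned to the index type size; otherwise the indices are first
    * rebased into a temporary buffer.
    */
   assert(byte_offset % ib_type_size == 0);
   return byte_offset / ib_type_size;
}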
-static void brw_emit_indices(struct brw_context *brw)
+const struct brw_tracked_state brw_indices = {
+ .dirty = {
+ .mesa = 0,
+ .brw = BRW_NEW_INDICES,
+ .cache = 0,
+ },
+ .prepare = brw_prepare_indices,
+};
+
+static void brw_emit_index_buffer(struct brw_context *brw)
{
struct intel_context *intel = &brw->intel;
const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
- GLuint ib_size;
if (index_buffer == NULL)
return;
- ib_size = get_size(index_buffer->type) * index_buffer->count - 1;
-
/* Emit the indexbuffer packet:
*/
{
struct brw_indexbuffer ib;
memset(&ib, 0, sizeof(ib));
-
+
ib.header.bits.opcode = CMD_INDEX_BUFFER;
ib.header.bits.length = sizeof(ib)/4 - 2;
ib.header.bits.index_format = get_index_type(index_buffer->type);
ib.header.bits.cut_index_enable = 0;
-
BEGIN_BATCH(4, IGNORE_CLIPRECTS);
OUT_BATCH( ib.header.dword );
@@ -679,18 +726,17 @@ static void brw_emit_indices(struct brw_context *brw)
brw->ib.offset);
OUT_RELOC(brw->ib.bo,
I915_GEM_DOMAIN_VERTEX, 0,
- brw->ib.offset + ib_size);
+ brw->ib.offset + brw->ib.size);
OUT_BATCH( 0 );
ADVANCE_BATCH();
}
}
-const struct brw_tracked_state brw_indices = {
+const struct brw_tracked_state brw_index_buffer = {
.dirty = {
.mesa = 0,
- .brw = BRW_NEW_BATCH | BRW_NEW_INDICES,
+ .brw = BRW_NEW_BATCH | BRW_NEW_INDEX_BUFFER,
.cache = 0,
},
- .prepare = brw_prepare_indices,
- .emit = brw_emit_indices,
+ .emit = brw_emit_index_buffer,
};
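
With this change the single brw_indices atom is split in two: brw_indices keeps only the .prepare hook and flags BRW_NEW_INDEX_BUFFER when the buffer, offset, or size actually changed, while the new brw_index_buffer atom carries the .emit hook that writes the index buffer packet on BRW_NEW_BATCH or BRW_NEW_INDEX_BUFFER. A rough, self-contained sketch of how a state-upload loop might drive such prepare/emit atoms (the types and the run_atoms helper are illustrative stand-ins, not the actual brw_state_upload.c code):

#include <stdbool.h>

/* Illustrative stand-ins for the tracked-state machinery; the real
 * definitions live in brw_context.h and brw_state_upload.c.
 */
struct state_flags { unsigned mesa, brw, cache; };

struct tracked_state {
   struct state_flags dirty;
   void (*prepare)(void *brw);   /* runs before batch space is reserved */
   void (*emit)(void *brw);      /* runs while the batch is being built */
};

static bool
check_state(const struct state_flags *a, const struct state_flags *b)
{
   return (a->mesa & b->mesa) || (a->brw & b->brw) || (a->cache & b->cache);
}

/* Run either the prepare or the emit hook of every atom whose dirty
 * bits intersect the currently dirty state.
 */
static void
run_atoms(void *brw, const struct tracked_state *const *atoms, int count,
          const struct state_flags *dirty, bool emitting)
{
   for (int i = 0; i < count; i++) {
      const struct tracked_state *atom = atoms[i];

      if (!check_state(&atom->dirty, dirty))
         continue;
      if (emitting) {
         if (atom->emit)
            atom->emit(brw);
      } else if (atom->prepare) {
         atom->prepare(brw);
      }
   }
}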