summary refs log tree commit diff
path: root/src/mesa/pipe/cell/ppu/cell_vbuf.c
diff options
context:
space:
mode:
Diffstat (limited to 'src/mesa/pipe/cell/ppu/cell_vbuf.c')
-rw-r--r-- | src/mesa/pipe/cell/ppu/cell_vbuf.c | 113
1 files changed, 80 insertions, 33 deletions
diff --git a/src/mesa/pipe/cell/ppu/cell_vbuf.c b/src/mesa/pipe/cell/ppu/cell_vbuf.c
index ee572b3a51..6e12e16fe0 100644
--- a/src/mesa/pipe/cell/ppu/cell_vbuf.c
+++ b/src/mesa/pipe/cell/ppu/cell_vbuf.c
@@ -40,8 +40,8 @@
/** Allow prim indexes, verts to be inlined after RENDER command */
-#define ALLOW_INLINE_INDEXES 1
-#define ALLOW_INLINE_VERTS 1
+#define ALLOW_INLINE_INDEXES 01
+#define ALLOW_INLINE_VERTS 0
/**
@@ -55,6 +55,9 @@ struct cell_vbuf_render
uint prim;
uint vertex_size;
void *vertex_buffer;
+#if 1
+ uint vertex_buf;
+#endif
};
@@ -81,14 +84,53 @@ cell_vbuf_allocate_vertices(struct vbuf_render *vbr,
{
struct cell_vbuf_render *cvbr = cell_vbuf_render(vbr);
/*printf("Alloc verts %u * %u\n", vertex_size, nr_vertices);*/
+#if 0
assert(!cvbr->vertex_buffer);
cvbr->vertex_buffer = align_malloc(vertex_size * nr_vertices, 16);
+#else
+ assert(cvbr->vertex_buf == ~0);
+ cvbr->vertex_buf = cell_get_empty_buffer(cvbr->cell);
+ cvbr->vertex_buffer = cvbr->cell->buffer[cvbr->vertex_buf];
+ printf("%s vertex_buf = %u\n", __FUNCTION__, cvbr->vertex_buf);
+#endif
cvbr->vertex_size = vertex_size;
return cvbr->vertex_buffer;
}
static void
+cell_vbuf_release_vertices(struct vbuf_render *vbr, void *vertices,
+ unsigned vertex_size, unsigned vertices_used)
+{
+ struct cell_vbuf_render *cvbr = cell_vbuf_render(vbr);
+ struct cell_context *cell = cvbr->cell;
+
+ /*printf("Free verts %u * %u\n", vertex_size, vertices_used);*/
+#if 0
+ align_free(vertices);
+#else
+ printf("%s vertex_buf = %u count = %u\n",
+ __FUNCTION__, cvbr->vertex_buf, vertices_used);
+
+ {
+ struct cell_command_release_verts *release
+ = (struct cell_command_release_verts *)
+ cell_batch_alloc(cell, sizeof(struct cell_command_release_verts));
+ release->opcode = CELL_CMD_RELEASE_VERTS;
+ release->vertex_buf = cvbr->vertex_buf;
+ }
+
+ cvbr->vertex_buf = ~0;
+ cell_flush_int(&cell->pipe, 0x0);/*NEW*/
+#endif
+
+ assert(vertices == cvbr->vertex_buffer);
+ cvbr->vertex_buffer = NULL;
+}
+
+
+
+static void
cell_vbuf_set_primitive(struct vbuf_render *vbr, unsigned prim)
{
struct cell_vbuf_render *cvbr = cell_vbuf_render(vbr);
@@ -124,7 +166,7 @@ cell_vbuf_draw(struct vbuf_render *vbr,
printf("%u %u %u, ", indices[i+0], indices[i+1], indices[i+2]);
}
printf("\n");
-#elif 0
+#elif 01
printf("cell_vbuf_draw() nr_indices = %u nr_verts = %u indexes = [%u %u %u ...]\n",
nr_indices, nr_vertices,
indices[0], indices[1], indices[2]);
@@ -157,28 +199,26 @@ cell_vbuf_draw(struct vbuf_render *vbr,
const uint index_bytes = ROUNDUP4(nr_indices * 2);
const uint vertex_bytes = nr_vertices * 4 * cell->vertex_info.size;
+ const uint batch_size = sizeof(struct cell_command_render)
+ + index_bytes;
+
struct cell_command_render *render
= (struct cell_command_render *)
- cell_batch_alloc(cell, sizeof(*render));
+ cell_batch_alloc(cell, batch_size);
+
render->opcode = CELL_CMD_RENDER;
render->prim_type = cvbr->prim;
render->num_indexes = nr_indices;
- if (ALLOW_INLINE_INDEXES &&
- index_bytes <= cell_batch_free_space(cell)) {
- /* indices inlined, right after render cmd */
- void *dst = cell_batch_alloc(cell, index_bytes);
- memcpy(dst, indices, nr_indices * 2);
- render->inline_indexes = TRUE;
- render->index_data = NULL;
- }
- else {
- /* indices in separate buffer */
- render->inline_indexes = FALSE;
- render->index_data = indices;
- ASSERT_ALIGN16(render->index_data);
- }
+ /* append indices after render command */
+ memcpy(render + 1, indices, nr_indices * 2);
+ render->inline_indexes = TRUE;
+ render->index_data = NULL;
+
+ /* if there's room, append vertices after the indices, else leave
+ * vertices in the original/separate buffer.
+ */
render->vertex_size = 4 * cell->vertex_info.size;
render->num_verts = nr_vertices;
if (ALLOW_INLINE_VERTS &&
@@ -188,12 +228,21 @@ cell_vbuf_draw(struct vbuf_render *vbr,
void *dst = cell_batch_alloc(cell, vertex_bytes);
memcpy(dst, vertices, vertex_bytes);
render->inline_verts = TRUE;
+#if 0
render->vertex_data = NULL;
+#else
+ render->vertex_buf = ~0;
+#endif
}
else {
render->inline_verts = FALSE;
+#if 0
render->vertex_data = vertices;
ASSERT_ALIGN16(render->vertex_data);
+#else
+ ASSERT(cvbr->vertex_buf >= 0);
+ render->vertex_buf = cvbr->vertex_buf;
+#endif
}
@@ -203,7 +252,7 @@ cell_vbuf_draw(struct vbuf_render *vbr,
render->ymax = ymax;
}
-#if 01
+#if 0
/* XXX this is temporary */
cell_flush_int(&cell->pipe, PIPE_FLUSH_WAIT);
#endif
@@ -211,20 +260,6 @@ cell_vbuf_draw(struct vbuf_render *vbr,
static void
-cell_vbuf_release_vertices(struct vbuf_render *vbr, void *vertices,
- unsigned vertex_size, unsigned vertices_used)
-{
- struct cell_vbuf_render *cvbr = cell_vbuf_render(vbr);
-
- /*printf("Free verts %u * %u\n", vertex_size, vertices_used);*/
- align_free(vertices);
-
- assert(vertices == cvbr->vertex_buffer);
- cvbr->vertex_buffer = NULL;
-}
-
-
-static void
cell_vbuf_destroy(struct vbuf_render *vbr)
{
struct cell_vbuf_render *cvbr = cell_vbuf_render(vbr);
@@ -244,8 +279,17 @@ cell_init_vbuf(struct cell_context *cell)
cell->vbuf_render = CALLOC_STRUCT(cell_vbuf_render);
+#if 0
cell->vbuf_render->base.max_indices = CELL_MAX_VBUF_INDEXES;
cell->vbuf_render->base.max_vertex_buffer_bytes = CELL_MAX_VBUF_SIZE;
+#else
+ cell->vbuf_render->base.max_indices
+ = (CELL_BUFFER_SIZE
+ - sizeof(struct cell_command_render)
+ - sizeof(struct cell_command_release_verts))
+ / sizeof(ushort);
+ cell->vbuf_render->base.max_vertex_buffer_bytes = CELL_BUFFER_SIZE;
+#endif
cell->vbuf_render->base.get_vertex_info = cell_vbuf_get_vertex_info;
cell->vbuf_render->base.allocate_vertices = cell_vbuf_allocate_vertices;
@@ -255,6 +299,9 @@ cell_init_vbuf(struct cell_context *cell)
cell->vbuf_render->base.destroy = cell_vbuf_destroy;
cell->vbuf_render->cell = cell;
+#if 1
+ cell->vbuf_render->vertex_buf = ~0;
+#endif
cell->vbuf = draw_vbuf_stage(cell->draw, &cell->vbuf_render->base);
}