author    Brian <brian.paul@tungstengraphics.com>    2008-01-28 10:41:27 -0700
committer Brian <brian.paul@tungstengraphics.com>    2008-01-28 11:31:57 -0700
commit    5b5ec94663d566b4840975c4ef4740abb138bb12 (patch)
tree      a4da3d95390f9e9bd09d28e645119f62cb1a771c /src/mesa/pipe/cell/ppu
parent    7024019d4e6e2a1618e910a127bea8c3b7661a54 (diff)
Cell: clean-up of render path
Finally removed a number of unneeded flush commands. Vertex buffers are allocated from the general buffer pool, freed by SPUs when done. Still an occasional failed assertion (invalid batch buffer command)...
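For context, a condensed sketch of the new allocate/release path described above, distilled from the hunks below. The cell_command_release_verts field names and the CELL_CMD_RELEASE_VERTS opcode are assumptions for illustration; they are not shown on this page.

    /* allocate: take a buffer from the general pool instead of align_malloc() */
    cvbr->vertex_buf = cell_get_empty_buffer(cvbr->cell);
    cvbr->vertex_buffer = cvbr->cell->buffer[cvbr->vertex_buf];

    /* release: queue a command so the SPUs free the buffer when they are done */
    if (cvbr->vertex_buf != ~0U) {
       struct cell_command_release_verts *release
          = (struct cell_command_release_verts *)
          cell_batch_alloc(cell, sizeof(struct cell_command_release_verts));
       release->opcode = CELL_CMD_RELEASE_VERTS;   /* assumed opcode name */
       release->vertex_buf = cvbr->vertex_buf;     /* assumed field name */
    }
    cvbr->vertex_buf = ~0;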
Diffstat (limited to 'src/mesa/pipe/cell/ppu')
-rw-r--r--  src/mesa/pipe/cell/ppu/cell_vbuf.c  |  60
1 file changed, 18 insertions(+), 42 deletions(-)
diff --git a/src/mesa/pipe/cell/ppu/cell_vbuf.c b/src/mesa/pipe/cell/ppu/cell_vbuf.c
index 6e12e16fe0..b2a25d767b 100644
--- a/src/mesa/pipe/cell/ppu/cell_vbuf.c
+++ b/src/mesa/pipe/cell/ppu/cell_vbuf.c
@@ -39,9 +39,8 @@
#include "pipe/draw/draw_vbuf.h"
-/** Allow prim indexes, verts to be inlined after RENDER command */
-#define ALLOW_INLINE_INDEXES 01
-#define ALLOW_INLINE_VERTS 0
+/** Allow vertex data to be inlined after RENDER command */
+#define ALLOW_INLINE_VERTS 1
/**
@@ -52,12 +51,10 @@ struct cell_vbuf_render
{
struct vbuf_render base;
struct cell_context *cell;
- uint prim;
- uint vertex_size;
- void *vertex_buffer;
-#if 1
- uint vertex_buf;
-#endif
+ uint prim; /**< PIPE_PRIM_x */
+ uint vertex_size; /**< in bytes */
+ void *vertex_buffer; /**< just for debug, really */
+ uint vertex_buf; /**< in [0, CELL_NUM_BUFFERS-1] */
};
@@ -84,15 +81,10 @@ cell_vbuf_allocate_vertices(struct vbuf_render *vbr,
{
struct cell_vbuf_render *cvbr = cell_vbuf_render(vbr);
/*printf("Alloc verts %u * %u\n", vertex_size, nr_vertices);*/
-#if 0
- assert(!cvbr->vertex_buffer);
- cvbr->vertex_buffer = align_malloc(vertex_size * nr_vertices, 16);
-#else
+
assert(cvbr->vertex_buf == ~0);
cvbr->vertex_buf = cell_get_empty_buffer(cvbr->cell);
cvbr->vertex_buffer = cvbr->cell->buffer[cvbr->vertex_buf];
- printf("%s vertex_buf = %u\n", __FUNCTION__, cvbr->vertex_buf);
-#endif
cvbr->vertex_size = vertex_size;
return cvbr->vertex_buffer;
}
@@ -105,14 +97,13 @@ cell_vbuf_release_vertices(struct vbuf_render *vbr, void *vertices,
struct cell_vbuf_render *cvbr = cell_vbuf_render(vbr);
struct cell_context *cell = cvbr->cell;
- /*printf("Free verts %u * %u\n", vertex_size, vertices_used);*/
-#if 0
- align_free(vertices);
-#else
+ /*
printf("%s vertex_buf = %u count = %u\n",
__FUNCTION__, cvbr->vertex_buf, vertices_used);
+ */
- {
+ /* Tell SPUs they can release the vert buf */
+ if (cvbr->vertex_buf != ~0U) {
struct cell_command_release_verts *release
= (struct cell_command_release_verts *)
cell_batch_alloc(cell, sizeof(struct cell_command_release_verts));
@@ -121,8 +112,7 @@ cell_vbuf_release_vertices(struct vbuf_render *vbr, void *vertices,
}
cvbr->vertex_buf = ~0;
- cell_flush_int(&cell->pipe, 0x0);/*NEW*/
-#endif
+ cell_flush_int(&cell->pipe, 0x0);
assert(vertices == cvbr->vertex_buffer);
cvbr->vertex_buffer = NULL;
@@ -166,7 +156,7 @@ cell_vbuf_draw(struct vbuf_render *vbr,
printf("%u %u %u, ", indices[i+0], indices[i+1], indices[i+2]);
}
printf("\n");
-#elif 01
+#elif 0
printf("cell_vbuf_draw() nr_indices = %u nr_verts = %u indexes = [%u %u %u ...]\n",
nr_indices, nr_vertices,
indices[0], indices[1], indices[2]);
@@ -213,8 +203,6 @@ cell_vbuf_draw(struct vbuf_render *vbr,
/* append indices after render command */
memcpy(render + 1, indices, nr_indices * 2);
- render->inline_indexes = TRUE;
- render->index_data = NULL;
/* if there's room, append vertices after the indices, else leave
* vertices in the original/separate buffer.
@@ -222,30 +210,20 @@ cell_vbuf_draw(struct vbuf_render *vbr,
render->vertex_size = 4 * cell->vertex_info.size;
render->num_verts = nr_vertices;
if (ALLOW_INLINE_VERTS &&
- render->inline_indexes &&
vertex_bytes <= cell_batch_free_space(cell)) {
/* vertex data inlined, after indices */
void *dst = cell_batch_alloc(cell, vertex_bytes);
memcpy(dst, vertices, vertex_bytes);
render->inline_verts = TRUE;
-#if 0
- render->vertex_data = NULL;
-#else
render->vertex_buf = ~0;
-#endif
}
else {
+ /* vertex data in separate buffer */
render->inline_verts = FALSE;
-#if 0
- render->vertex_data = vertices;
- ASSERT_ALIGN16(render->vertex_data);
-#else
ASSERT(cvbr->vertex_buf >= 0);
render->vertex_buf = cvbr->vertex_buf;
-#endif
}
-
render->xmin = xmin;
render->ymin = ymin;
render->xmax = xmax;
@@ -253,7 +231,7 @@ cell_vbuf_draw(struct vbuf_render *vbr,
}
#if 0
- /* XXX this is temporary */
+ /* helpful for debug */
cell_flush_int(&cell->pipe, PIPE_FLUSH_WAIT);
#endif
}
@@ -279,17 +257,15 @@ cell_init_vbuf(struct cell_context *cell)
cell->vbuf_render = CALLOC_STRUCT(cell_vbuf_render);
-#if 0
- cell->vbuf_render->base.max_indices = CELL_MAX_VBUF_INDEXES;
- cell->vbuf_render->base.max_vertex_buffer_bytes = CELL_MAX_VBUF_SIZE;
-#else
+ /* The max number of indexes is what can fit into a batch buffer,
+ * minus the render and release-verts commands.
+ */
cell->vbuf_render->base.max_indices
= (CELL_BUFFER_SIZE
- sizeof(struct cell_command_render)
- sizeof(struct cell_command_release_verts))
/ sizeof(ushort);
cell->vbuf_render->base.max_vertex_buffer_bytes = CELL_BUFFER_SIZE;
-#endif
cell->vbuf_render->base.get_vertex_info = cell_vbuf_get_vertex_info;
cell->vbuf_render->base.allocate_vertices = cell_vbuf_allocate_vertices;
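A quick sanity check of the new max_indices computation in cell_init_vbuf(), using made-up sizes since CELL_BUFFER_SIZE and the command struct sizes are defined elsewhere in the driver; all three constants below are assumptions for illustration only.

    #include <stdio.h>

    #define CELL_BUFFER_SIZE  4096   /* assumed batch buffer size in bytes */
    #define RENDER_CMD_SIZE     48   /* assumed sizeof(struct cell_command_render) */
    #define RELEASE_CMD_SIZE    16   /* assumed sizeof(struct cell_command_release_verts) */

    int main(void)
    {
       /* same formula as the patch: reserve room for the render and
        * release-verts commands, then divide by the index size */
       unsigned max_indices =
          (CELL_BUFFER_SIZE - RENDER_CMD_SIZE - RELEASE_CMD_SIZE) / sizeof(unsigned short);
       printf("max_indices = %u\n", max_indices);   /* 2016 with these assumed sizes */
       return 0;
    }

With these hypothetical numbers, one batch buffer holds roughly two thousand ushort indexes after reserving space for the two commands.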