path: root/src/mesa/pipe/cell/spu/spu_main.c
author     Brian <brian.paul@tungstengraphics.com>  2008-01-28 10:41:27 -0700
committer  Brian <brian.paul@tungstengraphics.com>  2008-01-28 11:31:57 -0700
commit     5b5ec94663d566b4840975c4ef4740abb138bb12 (patch)
tree       a4da3d95390f9e9bd09d28e645119f62cb1a771c /src/mesa/pipe/cell/spu/spu_main.c
parent     7024019d4e6e2a1618e910a127bea8c3b7661a54 (diff)
Cell: clean-up of render path
Finally removed a number of unneeded flush commands. Vertex buffers are now allocated from the general buffer pool and freed by the SPUs when they're done with them. There's still an occasional failed assertion (invalid batch buffer command)...
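
The gist of the new layout, as seen in the diff below: a render command carries its ushort indexes immediately after the command struct in the batch buffer, and the vertex data either follows the indexes inline or sits in a general-pool buffer that the SPU fetches with mfc_get() and later frees when a release-verts command arrives. The following is a minimal host-side sketch of that parsing logic, not the actual SPU code; the trimmed struct, parse_render(), and main() are illustrative stand-ins, and only the pointer arithmetic mirrors cmd_render().

/*
 * Sketch of the batch-buffer layout used by the reworked render path.
 * The struct below is trimmed to the fields this example needs.
 */
#include <stdio.h>

typedef unsigned char  ubyte;
typedef unsigned short ushort;
typedef unsigned int   uint;

struct cell_command_render {      /* simplified stand-in */
   uint prim_type;
   uint num_verts;
   uint num_indexes;
   uint vertex_size;              /* in bytes */
   uint vertex_buf;               /* general-pool buffer id, if not inline */
   uint inline_verts;             /* boolean */
};

/* Locate the indexes and (inline) vertices for a render command and
 * return the number of 32-bit words consumed after the command header,
 * mirroring the *pos_incr arithmetic in cmd_render() below.
 */
static uint
parse_render(const struct cell_command_render *render,
             const ushort **indexes, const ubyte **vertices)
{
   uint pos_incr;

   /* indexes are right after the render command in the batch buffer */
   *indexes = (const ushort *) (render + 1);
   pos_incr = (render->num_indexes * 2 + 3) / 4;

   if (render->inline_verts) {
      /* vertices are right after the indexes */
      *vertices = (const ubyte *) (render + 1) + pos_incr * 4;
      pos_incr += render->num_verts * render->vertex_size / 4;
   }
   else {
      /* the real SPU code DMAs from spu.init.buffers[render->vertex_buf]
       * here and frees that buffer when the release-verts command arrives
       * (see cmd_release_verts() in the diff) */
      *vertices = NULL;
   }
   return pos_incr;
}

int main(void)
{
   uint batch[64] = {0};          /* stand-in for an aligned batch buffer */
   struct cell_command_render *cmd = (struct cell_command_render *) batch;
   ushort *ind = (ushort *) (cmd + 1);
   const ushort *indexes;
   const ubyte *vertices;
   uint used;

   cmd->num_verts    = 3;
   cmd->num_indexes  = 3;
   cmd->vertex_size  = 16;        /* e.g. one float4 position per vertex */
   cmd->inline_verts = 1;
   ind[0] = 0;  ind[1] = 1;  ind[2] = 2;

   used = parse_render(cmd, &indexes, &vertices);
   printf("consumed %u words; verts start %u bytes after the command\n",
          used, (uint) (vertices - (const ubyte *) (cmd + 1)));
   return 0;
}

Always inlining the indexes is what lets the patch drop the separate index-buffer DMA (TAG_INDEX_BUFFER) and the combined wait_on_mask_all() at the end of the old code path.
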
Diffstat (limited to 'src/mesa/pipe/cell/spu/spu_main.c')
-rw-r--r--  src/mesa/pipe/cell/spu/spu_main.c  | 112
1 file changed, 19 insertions(+), 93 deletions(-)
diff --git a/src/mesa/pipe/cell/spu/spu_main.c b/src/mesa/pipe/cell/spu/spu_main.c
index eb979718f8..5b50ec6953 100644
--- a/src/mesa/pipe/cell/spu/spu_main.c
+++ b/src/mesa/pipe/cell/spu/spu_main.c
@@ -239,59 +239,45 @@ static void
cmd_render(const struct cell_command_render *render, uint *pos_incr)
{
/* we'll DMA into these buffers */
- ubyte vertex_data[CELL_MAX_VBUF_SIZE] ALIGN16_ATTRIB;
- ushort index_data[CELL_MAX_VBUF_INDEXES] ALIGN16_ATTRIB;
+ ubyte vertex_data[CELL_BUFFER_SIZE] ALIGN16_ATTRIB;
const uint vertex_size = render->vertex_size; /* in bytes */
const uint total_vertex_bytes = render->num_verts * vertex_size;
const ubyte *vertices;
const ushort *indexes;
- uint mask;
uint i, j;
if (Debug) {
printf("SPU %u: RENDER prim %u, num_vert=%u num_ind=%u "
- "inline_vert=%u inline_ind=%u\n",
+ "inline_vert=%u\n",
spu.init.id,
render->prim_type,
render->num_verts,
render->num_indexes,
- render->inline_verts,
- render->inline_indexes);
+ render->inline_verts);
/*
printf(" bound: %g, %g .. %g, %g\n",
render->xmin, render->ymin, render->xmax, render->ymax);
*/
- /*
- printf("SPU %u: indices at %p vertices at %p\n",
- spu.init.id,
- render->index_data, render->vertex_data);
- */
}
ASSERT(sizeof(*render) % 4 == 0);
-#if 0
- ASSERT_ALIGN16(render->vertex_data);
-#else
-#endif
- ASSERT_ALIGN16(render->index_data);
+ ASSERT(total_vertex_bytes % 16 == 0);
+ /* indexes are right after the render command in the batch buffer */
+ indexes = (const ushort *) (render + 1);
+ *pos_incr = (render->num_indexes * 2 + 3) / 4;
- /**
- ** Get vertex, index buffers if not inlined
- **/
- if (!render->inline_verts) {
- void *src;
- ASSERT(total_vertex_bytes % 16 == 0);
-
-#if 0
- src = render->vertex_data;
-#else
- spu.cur_vertex_buf = render->vertex_buf;
- src = spu.init.buffers[render->vertex_buf];
-#endif
+ if (render->inline_verts) {
+ /* Vertices are right after indexes in batch buffer */
+ vertices = (const ubyte *) (render + 1) + *pos_incr * 4;
+ *pos_incr = *pos_incr + total_vertex_bytes / 4;
+ }
+ else {
+ /* Begin DMA fetch of vertex buffer */
+ void *src = spu.init.buffers[render->vertex_buf];
mfc_get(vertex_data, /* dest */
(unsigned int) src,
total_vertex_bytes, /* size */
@@ -300,63 +286,11 @@ cmd_render(const struct cell_command_render *render, uint *pos_incr)
0 /* rid */);
vertices = vertex_data;
- }
-
- if (!render->inline_indexes) {
- uint total_index_bytes;
-
- *pos_incr = 0;
-
- total_index_bytes = render->num_indexes * sizeof(ushort);
- if (total_index_bytes < 16)
- total_index_bytes = 16;
- else
- total_index_bytes = ROUNDUP16(total_index_bytes);
- indexes = index_data;
-
- /* get index data from main memory */
- mfc_get(index_data, /* dest */
- (unsigned int) render->index_data, /* src */
- total_index_bytes,
- TAG_INDEX_BUFFER,
- 0, /* tid */
- 0 /* rid */);
- }
-
-
- /**
- ** Get pointers to inlined indexes, verts, if present
- **/
- if (render->inline_indexes) {
- /* indexes are right after the render command in the batch buffer */
- indexes = (ushort *) (render + 1);
- *pos_incr = (render->num_indexes * 2 + 3) / 4;
-
- if (render->inline_verts) {
- /* vertices are after indexes, if inlined */
- vertices = (const ubyte *) (render + 1) + *pos_incr * 4;
- *pos_incr = *pos_incr + total_vertex_bytes / 4;
- spu.cur_vertex_buf = ~0;
- }
+ wait_on_mask(1 << TAG_VERTEX_BUFFER);
}
- /* wait for vertex and/or index buffers if not inlined */
- mask = 0x0;
- if (!render->inline_verts)
- mask |= (1 << TAG_VERTEX_BUFFER);
- if (!render->inline_indexes)
- mask |= (1 << TAG_INDEX_BUFFER);
- wait_on_mask_all(mask);
-
-#if 0
- if (!render->inline_verts) {
- printf("SPU %u: release vbuf %u\n", spu.init.id, render->vertex_buf);
- release_buffer(render->vertex_buf);
- }
-#endif
-
/**
** find tiles which intersect the prim bounding box
**/
@@ -372,7 +306,7 @@ cmd_render(const struct cell_command_render *render, uint *pos_incr)
#endif
/* make sure any pending clears have completed */
- wait_on_mask(1 << TAG_SURFACE_CLEAR);
+ wait_on_mask(1 << TAG_SURFACE_CLEAR); /* XXX temporary */
/**
@@ -405,14 +339,6 @@ cmd_render(const struct cell_command_render *render, uint *pos_incr)
for (j = 0; j < render->num_indexes; j += 3) {
const float *v0, *v1, *v2;
- if (indexes[j] == 0xffff) {
- printf("index[%u] = 0xffff\n", j);
- }
-
- ASSERT(indexes[j] != 0xffff);
- ASSERT(indexes[j+1] != 0xffff);
- ASSERT(indexes[j+2] != 0xffff);
-
v0 = (const float *) (vertices + indexes[j+0] * vertex_size);
v1 = (const float *) (vertices + indexes[j+1] * vertex_size);
v2 = (const float *) (vertices + indexes[j+2] * vertex_size);
@@ -450,8 +376,8 @@ cmd_release_verts(const struct cell_command_release_verts *release)
{
if (Debug)
printf("SPU %u: RELEASE VERTS %u\n",
- spu.init.id, spu.cur_vertex_buf);
- ASSERT(spu.cur_vertex_buf == release->vertex_buf);
+ spu.init.id, release->vertex_buf);
+ ASSERT(release->vertex_buf != ~0U);
release_buffer(release->vertex_buf);
}