Diffstat (limited to 'src/gallium/drivers/cell/ppu/cell_vbuf.c')
-rw-r--r--   src/gallium/drivers/cell/ppu/cell_vbuf.c   55
1 files changed, 43 insertions, 12 deletions
diff --git a/src/gallium/drivers/cell/ppu/cell_vbuf.c b/src/gallium/drivers/cell/ppu/cell_vbuf.c
index aa63435b93..cfaffb52a8 100644
--- a/src/gallium/drivers/cell/ppu/cell_vbuf.c
+++ b/src/gallium/drivers/cell/ppu/cell_vbuf.c
@@ -38,6 +38,7 @@
#include "cell_batch.h"
#include "cell_context.h"
+#include "cell_fence.h"
#include "cell_flush.h"
#include "cell_spu.h"
#include "cell_vbuf.h"
@@ -61,6 +62,7 @@ struct cell_vbuf_render
    uint vertex_size;  /**< in bytes */
    void *vertex_buffer;  /**< just for debug, really */
    uint vertex_buf;  /**< in [0, CELL_NUM_BUFFERS-1] */
+   uint vertex_buffer_size;  /**< size in bytes */
 };
@@ -81,24 +83,26 @@ cell_vbuf_get_vertex_info(struct vbuf_render *vbr)
 }
-static void *
+static boolean
 cell_vbuf_allocate_vertices(struct vbuf_render *vbr,
                             ushort vertex_size, ushort nr_vertices)
 {
    struct cell_vbuf_render *cvbr = cell_vbuf_render(vbr);
+   unsigned size = vertex_size * nr_vertices;
    /*printf("Alloc verts %u * %u\n", vertex_size, nr_vertices);*/
    assert(cvbr->vertex_buf == ~0);
    cvbr->vertex_buf = cell_get_empty_buffer(cvbr->cell);
    cvbr->vertex_buffer = cvbr->cell->buffer[cvbr->vertex_buf];
+   cvbr->vertex_buffer_size = size;
    cvbr->vertex_size = vertex_size;
-   return cvbr->vertex_buffer;
+
+   return cvbr->vertex_buffer != NULL;
 }
 static void
-cell_vbuf_release_vertices(struct vbuf_render *vbr, void *vertices,
-                           unsigned vertex_size, unsigned vertices_used)
+cell_vbuf_release_vertices(struct vbuf_render *vbr)
 {
    struct cell_vbuf_render *cvbr = cell_vbuf_render(vbr);
    struct cell_context *cell = cvbr->cell;
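
(Aside: the vbuf_render callback shapes implied by the new implementations in this patch look roughly like the following. This is a sketch reconstructed from the signatures above, not copied from draw/draw_vbuf.h, which may differ in detail.)

   boolean (*allocate_vertices)(struct vbuf_render *vbr,
                                ushort vertex_size, ushort nr_vertices);
   void *(*map_vertices)(struct vbuf_render *vbr);
   void (*unmap_vertices)(struct vbuf_render *vbr,
                          ushort min_index, ushort max_index);
   void (*release_vertices)(struct vbuf_render *vbr);
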
@@ -108,23 +112,47 @@ cell_vbuf_release_vertices(struct vbuf_render *vbr, void *vertices,
           __FUNCTION__, cvbr->vertex_buf, vertices_used);
    */
+   /* Make sure texture buffers aren't released until we're done rendering
+    * with them.
+    */
+   cell_add_fenced_textures(cell);
+
    /* Tell SPUs they can release the vert buf */
    if (cvbr->vertex_buf != ~0U) {
+      STATIC_ASSERT(sizeof(struct cell_command_release_verts) % 16 == 0);
       struct cell_command_release_verts *release
          = (struct cell_command_release_verts *)
-         cell_batch_alloc(cell, sizeof(struct cell_command_release_verts));
-      release->opcode = CELL_CMD_RELEASE_VERTS;
+         cell_batch_alloc16(cell, sizeof(struct cell_command_release_verts));
+      release->opcode[0] = CELL_CMD_RELEASE_VERTS;
       release->vertex_buf = cvbr->vertex_buf;
    }
    cvbr->vertex_buf = ~0;
    cell_flush_int(cell, 0x0);
-   assert(vertices == cvbr->vertex_buffer);
    cvbr->vertex_buffer = NULL;
 }
+static void *
+cell_vbuf_map_vertices(struct vbuf_render *vbr)
+{
+   struct cell_vbuf_render *cvbr = cell_vbuf_render(vbr);
+   return cvbr->vertex_buffer;
+}
+
+
+static void
+cell_vbuf_unmap_vertices(struct vbuf_render *vbr,
+                         ushort min_index,
+                         ushort max_index )
+{
+   struct cell_vbuf_render *cvbr = cell_vbuf_render(vbr);
+   assert( cvbr->vertex_buffer_size >= (max_index+1) * cvbr->vertex_size );
+   /* do nothing */
+}
+
+
 static boolean
 cell_vbuf_set_primitive(struct vbuf_render *vbr, unsigned prim)
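
(Aside: with these callbacks in place, the draw module's vbuf stage is expected to drive the renderer in the order allocate -> map -> fill -> unmap -> set_primitive/draw -> release. A minimal sketch of that flow; the caller name and vertex sizes are illustrative, not from this file.)

   static void
   example_emit(struct vbuf_render *r)
   {
      const ushort vertex_size = 32, nr_verts = 4;
      void *verts;

      if (!r->allocate_vertices(r, vertex_size, nr_verts))
         return;                        /* no buffer available */

      verts = r->map_vertices(r);
      /* ... write nr_verts * vertex_size bytes of vertex data ... */
      (void) verts;
      r->unmap_vertices(r, 0, nr_verts - 1);

      /* r->set_primitive() and r->draw() would be called here */

      r->release_vertices(r);           /* note: no pointer/size args anymore */
   }
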
@@ -204,15 +232,16 @@ cell_vbuf_draw(struct vbuf_render *vbr,
    /* build/insert batch RENDER command */
    {
-      const uint index_bytes = ROUNDUP8(nr_indices * 2);
-      const uint vertex_bytes = nr_vertices * 4 * cell->vertex_info.size;
+      const uint index_bytes = ROUNDUP16(nr_indices * 2);
+      const uint vertex_bytes = ROUNDUP16(nr_vertices * 4 * cell->vertex_info.size);
+      STATIC_ASSERT(sizeof(struct cell_command_render) % 16 == 0);
       const uint batch_size = sizeof(struct cell_command_render) + index_bytes;
       struct cell_command_render *render
          = (struct cell_command_render *)
-         cell_batch_alloc(cell, batch_size);
+         cell_batch_alloc16(cell, batch_size);
-      render->opcode = CELL_CMD_RENDER;
+      render->opcode[0] = CELL_CMD_RENDER;
       render->prim_type = cvbr->prim;
       render->num_indexes = nr_indices;
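
(Aside: the move from ROUNDUP8/cell_batch_alloc to ROUNDUP16/cell_batch_alloc16, together with the STATIC_ASSERTs, keeps each command and its payload a multiple of 16 bytes, which is what the SPU-side DMA expects. ROUNDUP16 presumably has the usual power-of-two round-up form; a sketch, not the driver's actual definition:)

   /* round x up to the next multiple of 16 */
   #define ROUNDUP16(x)  (((x) + 15) & ~15)

   /* e.g. 90 indices * 2 bytes = 180 -> ROUNDUP16(180) = 192 */
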
@@ -230,7 +259,7 @@ cell_vbuf_draw(struct vbuf_render *vbr,
           min_index == 0 &&
           vertex_bytes + 16 <= cell_batch_free_space(cell)) {
          /* vertex data inlined, after indices, at 16-byte boundary */
-         void *dst = cell_batch_alloc_aligned(cell, vertex_bytes, 16);
+         void *dst = cell_batch_alloc16(cell, vertex_bytes);
          memcpy(dst, vertices, vertex_bytes);
          render->inline_verts = TRUE;
          render->vertex_buf = ~0;
@@ -287,6 +316,8 @@ cell_init_vbuf(struct cell_context *cell)
    cell->vbuf_render->base.get_vertex_info = cell_vbuf_get_vertex_info;
    cell->vbuf_render->base.allocate_vertices = cell_vbuf_allocate_vertices;
+   cell->vbuf_render->base.map_vertices = cell_vbuf_map_vertices;
+   cell->vbuf_render->base.unmap_vertices = cell_vbuf_unmap_vertices;
    cell->vbuf_render->base.set_primitive = cell_vbuf_set_primitive;
    cell->vbuf_render->base.draw = cell_vbuf_draw;
    cell->vbuf_render->base.release_vertices = cell_vbuf_release_vertices;