Diffstat (limited to 'src/gallium/drivers/r300/r300_render.c')
-rw-r--r--  src/gallium/drivers/r300/r300_render.c | 494
1 file changed, 369 insertions(+), 125 deletions(-)
diff --git a/src/gallium/drivers/r300/r300_render.c b/src/gallium/drivers/r300/r300_render.c
index 6c891029a5..23b61df89c 100644
--- a/src/gallium/drivers/r300/r300_render.c
+++ b/src/gallium/drivers/r300/r300_render.c
@@ -30,22 +30,21 @@
#include "util/u_format.h"
#include "util/u_memory.h"
+#include "util/u_upload_mgr.h"
#include "util/u_prim.h"
#include "r300_cs.h"
#include "r300_context.h"
+#include "r300_screen_buffer.h"
#include "r300_emit.h"
#include "r300_reg.h"
#include "r300_render.h"
#include "r300_state_derived.h"
-/* r300_render: Vertex and index buffer primitive emission. */
-#define R300_MAX_VBO_SIZE (1024 * 1024)
-
/* XXX The DRM rejects VAP_ALT_NUM_VERTICES.. */
//#define ENABLE_ALT_NUM_VERTS
-uint32_t r300_translate_primitive(unsigned prim)
+static uint32_t r300_translate_primitive(unsigned prim)
{
switch (prim) {
case PIPE_PRIM_POINTS:
@@ -123,7 +122,7 @@ static uint32_t r300_provoking_vertex_fixes(struct r300_context *r300,
static boolean r300_reserve_cs_space(struct r300_context *r300,
unsigned dwords)
{
- if (!r300->winsys->check_cs(r300->winsys, dwords)) {
+ if (!r300->rws->check_cs(r300->rws, dwords)) {
r300->context.flush(&r300->context, 0, NULL);
return TRUE;
}
@@ -131,19 +130,53 @@ static boolean r300_reserve_cs_space(struct r300_context *r300,
}
static boolean immd_is_good_idea(struct r300_context *r300,
- unsigned count)
+ unsigned count)
{
- return count <= 4;
+ struct pipe_vertex_element* velem;
+ struct pipe_vertex_buffer* vbuf;
+ boolean checked[PIPE_MAX_ATTRIBS] = {0};
+ unsigned vertex_element_count = r300->velems->count;
+ unsigned i, vbi;
+
+ if (count > 10 || DBG_ON(r300, DBG_NO_IMMD)) {
+ return FALSE;
+ }
+
+ /* We shouldn't map buffers referenced by CS, busy buffers,
+ * and ones placed in VRAM. */
+ /* XXX Check for VRAM buffers. */
+ for (i = 0; i < vertex_element_count; i++) {
+ velem = &r300->velems->velem[i];
+ vbi = velem->vertex_buffer_index;
+
+ if (!checked[vbi]) {
+ vbuf = &r300->vertex_buffer[vbi];
+
+ if (r300_buffer_is_referenced(&r300->context,
+ vbuf->buffer,
+ R300_REF_CS | R300_REF_HW)) {
+ /* It's a very bad idea to map it... */
+ return FALSE;
+ }
+ checked[vbi] = TRUE;
+ }
+ }
+ return TRUE;
}
-static void r300_emit_draw_arrays_immediate(struct r300_context *r300,
- unsigned mode,
- unsigned start,
- unsigned count)
+/*****************************************************************************
+ * The emission of draw packets for r500. Older GPUs may use these functions *
+ * after resolving fallback issues (e.g. stencil ref two-sided). *
+ ****************************************************************************/
+
+void r500_emit_draw_arrays_immediate(struct r300_context *r300,
+ unsigned mode,
+ unsigned start,
+ unsigned count)
{
struct pipe_vertex_element* velem;
struct pipe_vertex_buffer* vbuf;
- unsigned vertex_element_count = r300->vertex_element_count;
+ unsigned vertex_element_count = r300->velems->count;
unsigned i, v, vbi, dw, elem_offset, dwords;
/* Size of the vertex, in dwords. */
@@ -161,12 +194,13 @@ static void r300_emit_draw_arrays_immediate(struct r300_context *r300,
/* Mapped vertex buffers. */
uint32_t* map[PIPE_MAX_ATTRIBS] = {0};
+ struct pipe_transfer* transfer[PIPE_MAX_ATTRIBS] = {NULL};
CS_LOCALS(r300);
/* Calculate the vertex size, offsets, strides etc. and map the buffers. */
for (i = 0; i < vertex_element_count; i++) {
- velem = &r300->vertex_element[i];
+ velem = &r300->velems->velem[i];
offset[i] = velem->src_offset / 4;
size[i] = util_format_get_blocksize(velem->src_format) / 4;
vertex_size += size[i];
@@ -175,26 +209,28 @@ static void r300_emit_draw_arrays_immediate(struct r300_context *r300,
/* Map the buffer. */
if (!map[vbi]) {
vbuf = &r300->vertex_buffer[vbi];
- map[vbi] = (uint32_t*)pipe_buffer_map(r300->context.screen,
+ map[vbi] = (uint32_t*)pipe_buffer_map(&r300->context,
vbuf->buffer,
- PIPE_BUFFER_USAGE_CPU_READ);
+ PIPE_TRANSFER_READ,
+ &transfer[vbi]);
map[vbi] += vbuf->buffer_offset / 4;
stride[vbi] = vbuf->stride / 4;
}
}
- dwords = 10 + count * vertex_size;
+ dwords = 9 + count * vertex_size;
r300_reserve_cs_space(r300, r300_get_num_dirty_dwords(r300) + dwords);
- r300_emit_buffer_validate(r300, FALSE, 0);
+ r300_emit_buffer_validate(r300, FALSE, NULL);
r300_emit_dirty_state(r300);
BEGIN_CS(dwords);
OUT_CS_REG(R300_GA_COLOR_CONTROL,
r300_provoking_vertex_fixes(r300, mode));
OUT_CS_REG(R300_VAP_VTX_SIZE, vertex_size);
- OUT_CS_REG(R300_VAP_VF_MIN_VTX_INDX, 0);
- OUT_CS_REG(R300_VAP_VF_MAX_VTX_INDX, count - 1);
+ OUT_CS_REG_SEQ(R300_VAP_VF_MAX_VTX_INDX, 2);
+ OUT_CS(count - 1);
+ OUT_CS(0);
OUT_CS_PKT3(R300_PACKET3_3D_DRAW_IMMD_2, count * vertex_size);
OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED | (count << 16) |
r300_translate_primitive(mode));
@@ -202,7 +238,7 @@ static void r300_emit_draw_arrays_immediate(struct r300_context *r300,
/* Emit vertices. */
for (v = 0; v < count; v++) {
for (i = 0; i < vertex_element_count; i++) {
- velem = &r300->vertex_element[i];
+ velem = &r300->velems->velem[i];
vbi = velem->vertex_buffer_index;
elem_offset = offset[i] + stride[vbi] * (v + start);
@@ -215,19 +251,19 @@ static void r300_emit_draw_arrays_immediate(struct r300_context *r300,
/* Unmap buffers. */
for (i = 0; i < vertex_element_count; i++) {
- vbi = r300->vertex_element[i].vertex_buffer_index;
+ vbi = r300->velems->velem[i].vertex_buffer_index;
if (map[vbi]) {
vbuf = &r300->vertex_buffer[vbi];
- pipe_buffer_unmap(r300->context.screen, vbuf->buffer);
+ pipe_buffer_unmap(&r300->context, vbuf->buffer, transfer[vbi]);
map[vbi] = NULL;
}
}
}
-static void r300_emit_draw_arrays(struct r300_context *r300,
- unsigned mode,
- unsigned count)
+void r500_emit_draw_arrays(struct r300_context *r300,
+ unsigned mode,
+ unsigned count)
{
#if defined(ENABLE_ALT_NUM_VERTS)
boolean alt_num_verts = count > 65535;
@@ -237,16 +273,21 @@ static void r300_emit_draw_arrays(struct r300_context *r300,
CS_LOCALS(r300);
if (alt_num_verts) {
- assert(count < (1 << 24));
- BEGIN_CS(10);
+ if (count >= (1 << 24)) {
+ fprintf(stderr, "r300: Got a huge number of vertices: %i, "
+ "refusing to render.\n", count);
+ return;
+ }
+ BEGIN_CS(9);
OUT_CS_REG(R500_VAP_ALT_NUM_VERTICES, count);
} else {
- BEGIN_CS(8);
+ BEGIN_CS(7);
}
OUT_CS_REG(R300_GA_COLOR_CONTROL,
r300_provoking_vertex_fixes(r300, mode));
- OUT_CS_REG(R300_VAP_VF_MIN_VTX_INDX, 0);
- OUT_CS_REG(R300_VAP_VF_MAX_VTX_INDX, count - 1);
+ OUT_CS_REG_SEQ(R300_VAP_VF_MAX_VTX_INDX, 2);
+ OUT_CS(count - 1);
+ OUT_CS(0);
OUT_CS_PKT3(R300_PACKET3_3D_DRAW_VBUF_2, 0);
OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST | (count << 16) |
r300_translate_primitive(mode) |
@@ -254,14 +295,15 @@ static void r300_emit_draw_arrays(struct r300_context *r300,
END_CS;
}
-static void r300_emit_draw_elements(struct r300_context *r300,
- struct pipe_buffer* indexBuffer,
- unsigned indexSize,
- unsigned minIndex,
- unsigned maxIndex,
- unsigned mode,
- unsigned start,
- unsigned count)
+void r500_emit_draw_elements(struct r300_context *r300,
+ struct pipe_resource* indexBuffer,
+ unsigned indexSize,
+ int indexBias,
+ unsigned minIndex,
+ unsigned maxIndex,
+ unsigned mode,
+ unsigned start,
+ unsigned count)
{
uint32_t count_dwords;
uint32_t offset_dwords = indexSize * start / sizeof(uint32_t);
@@ -272,24 +314,30 @@ static void r300_emit_draw_elements(struct r300_context *r300,
#endif
CS_LOCALS(r300);
- assert((start * indexSize) % 4 == 0);
- assert(count < (1 << 24));
+ if (count >= (1 << 24)) {
+ fprintf(stderr, "r300: Got a huge number of vertices: %i, "
+ "refusing to render.\n", count);
+ return;
+ }
+
+ assert(indexBias == 0);
+
+ maxIndex = MIN2(maxIndex, r300->vertex_buffer_max_index);
DBG(r300, DBG_DRAW, "r300: Indexbuf of %u indices, min %u max %u\n",
count, minIndex, maxIndex);
- maxIndex = MIN2(maxIndex, ((1 << 24) - 1));
-
if (alt_num_verts) {
- BEGIN_CS(16);
+ BEGIN_CS(15);
OUT_CS_REG(R500_VAP_ALT_NUM_VERTICES, count);
} else {
- BEGIN_CS(14);
+ BEGIN_CS(13);
}
OUT_CS_REG(R300_GA_COLOR_CONTROL,
r300_provoking_vertex_fixes(r300, mode));
- OUT_CS_REG(R300_VAP_VF_MIN_VTX_INDX, minIndex);
- OUT_CS_REG(R300_VAP_VF_MAX_VTX_INDX, maxIndex);
+ OUT_CS_REG_SEQ(R300_VAP_VF_MAX_VTX_INDX, 2);
+ OUT_CS(maxIndex);
+ OUT_CS(minIndex);
OUT_CS_PKT3(R300_PACKET3_3D_DRAW_INDX_2, 0);
if (indexSize == 4) {
count_dwords = count;
@@ -313,30 +361,132 @@ static void r300_emit_draw_elements(struct r300_context *r300,
OUT_CS(R300_INDX_BUFFER_ONE_REG_WR | (R300_VAP_PORT_IDX0 >> 2) |
(0 << R300_INDX_BUFFER_SKIP_SHIFT));
OUT_CS(offset_dwords << 2);
- OUT_CS_RELOC(indexBuffer, count_dwords,
- RADEON_GEM_DOMAIN_GTT, 0, 0);
+ OUT_CS_BUF_RELOC(indexBuffer, count_dwords,
+ RADEON_GEM_DOMAIN_GTT, 0, 0);
END_CS;
}
+/*****************************************************************************
+ * The emission of draw packets for r300 which take care of the two-sided *
+ * stencil ref fallback and call r500's functions. *
+ ****************************************************************************/
+
+/* Set drawing for front faces. */
+static void r300_begin_stencil_ref_fallback(struct r300_context *r300)
+{
+ struct r300_rs_state *rs = (struct r300_rs_state*)r300->rs_state.state;
+ CS_LOCALS(r300);
+
+ BEGIN_CS(2);
+ OUT_CS_REG(R300_SU_CULL_MODE, rs->cull_mode | R300_CULL_BACK);
+ END_CS;
+}
+
+/* Set drawing for back faces. */
+static void r300_switch_stencil_ref_side(struct r300_context *r300)
+{
+ struct r300_rs_state *rs = (struct r300_rs_state*)r300->rs_state.state;
+ struct r300_dsa_state *dsa = (struct r300_dsa_state*)r300->dsa_state.state;
+ CS_LOCALS(r300);
+
+ BEGIN_CS(4);
+ OUT_CS_REG(R300_SU_CULL_MODE, rs->cull_mode | R300_CULL_FRONT);
+ OUT_CS_REG(R300_ZB_STENCILREFMASK,
+ dsa->stencil_ref_bf | r300->stencil_ref.ref_value[1]);
+ END_CS;
+}
+
+/* Restore the original state. */
+static void r300_end_stencil_ref_fallback(struct r300_context *r300)
+{
+ struct r300_rs_state *rs = (struct r300_rs_state*)r300->rs_state.state;
+ struct r300_dsa_state *dsa = (struct r300_dsa_state*)r300->dsa_state.state;
+ CS_LOCALS(r300);
+
+ BEGIN_CS(4);
+ OUT_CS_REG(R300_SU_CULL_MODE, rs->cull_mode);
+ OUT_CS_REG(R300_ZB_STENCILREFMASK,
+ dsa->stencil_ref_mask | r300->stencil_ref.ref_value[0]);
+ END_CS;
+}
+
+void r300_emit_draw_arrays_immediate(struct r300_context *r300,
+ unsigned mode,
+ unsigned start,
+ unsigned count)
+{
+ if (!r300->stencil_ref_bf_fallback) {
+ r500_emit_draw_arrays_immediate(r300, mode, start, count);
+ } else {
+ r300_begin_stencil_ref_fallback(r300);
+ r500_emit_draw_arrays_immediate(r300, mode, start, count);
+ r300_switch_stencil_ref_side(r300);
+ r500_emit_draw_arrays_immediate(r300, mode, start, count);
+ r300_end_stencil_ref_fallback(r300);
+ }
+}
+
+void r300_emit_draw_arrays(struct r300_context *r300,
+ unsigned mode,
+ unsigned count)
+{
+ if (!r300->stencil_ref_bf_fallback) {
+ r500_emit_draw_arrays(r300, mode, count);
+ } else {
+ r300_begin_stencil_ref_fallback(r300);
+ r500_emit_draw_arrays(r300, mode, count);
+ r300_switch_stencil_ref_side(r300);
+ r500_emit_draw_arrays(r300, mode, count);
+ r300_end_stencil_ref_fallback(r300);
+ }
+}
+
+void r300_emit_draw_elements(struct r300_context *r300,
+ struct pipe_resource* indexBuffer,
+ unsigned indexSize,
+ int indexBias,
+ unsigned minIndex,
+ unsigned maxIndex,
+ unsigned mode,
+ unsigned start,
+ unsigned count)
+{
+ if (!r300->stencil_ref_bf_fallback) {
+ r500_emit_draw_elements(r300, indexBuffer, indexSize, indexBias,
+ minIndex, maxIndex, mode, start, count);
+ } else {
+ r300_begin_stencil_ref_fallback(r300);
+ r500_emit_draw_elements(r300, indexBuffer, indexSize, indexBias,
+ minIndex, maxIndex, mode, start, count);
+ r300_switch_stencil_ref_side(r300);
+ r500_emit_draw_elements(r300, indexBuffer, indexSize, indexBias,
+ minIndex, maxIndex, mode, start, count);
+ r300_end_stencil_ref_fallback(r300);
+ }
+}
+
static void r300_shorten_ubyte_elts(struct r300_context* r300,
- struct pipe_buffer** elts,
+ struct pipe_resource** elts,
+ unsigned start,
unsigned count)
{
+ struct pipe_context* context = &r300->context;
struct pipe_screen* screen = r300->context.screen;
- struct pipe_buffer* new_elts;
+ struct pipe_resource* new_elts;
unsigned char *in_map;
unsigned short *out_map;
+ struct pipe_transfer *src_transfer, *dst_transfer;
unsigned i;
- new_elts = screen->buffer_create(screen, 32,
- PIPE_BUFFER_USAGE_INDEX |
- PIPE_BUFFER_USAGE_CPU_WRITE |
- PIPE_BUFFER_USAGE_GPU_READ,
- 2 * count);
+ new_elts = pipe_buffer_create(screen,
+ PIPE_BIND_INDEX_BUFFER,
+ 2 * count);
+
+ in_map = pipe_buffer_map(context, *elts, PIPE_TRANSFER_READ, &src_transfer);
+ out_map = pipe_buffer_map(context, new_elts, PIPE_TRANSFER_WRITE, &dst_transfer);
- in_map = pipe_buffer_map(screen, *elts, PIPE_BUFFER_USAGE_CPU_READ);
- out_map = pipe_buffer_map(screen, new_elts, PIPE_BUFFER_USAGE_CPU_WRITE);
+ in_map += start;
for (i = 0; i < count; i++) {
*out_map = (unsigned short)*in_map;
@@ -344,16 +494,45 @@ static void r300_shorten_ubyte_elts(struct r300_context* r300,
out_map++;
}
- pipe_buffer_unmap(screen, *elts);
- pipe_buffer_unmap(screen, new_elts);
+ pipe_buffer_unmap(context, *elts, src_transfer);
+ pipe_buffer_unmap(context, new_elts, dst_transfer);
+
+ *elts = new_elts;
+}
+
+static void r300_align_ushort_elts(struct r300_context *r300,
+ struct pipe_resource **elts,
+ unsigned start, unsigned count)
+{
+ struct pipe_context* context = &r300->context;
+ struct pipe_transfer *in_transfer = NULL;
+ struct pipe_transfer *out_transfer = NULL;
+ struct pipe_resource* new_elts;
+ unsigned short *in_map;
+ unsigned short *out_map;
+
+ new_elts = pipe_buffer_create(context->screen,
+ PIPE_BIND_INDEX_BUFFER,
+ 2 * count);
+
+ in_map = pipe_buffer_map(context, *elts,
+ PIPE_TRANSFER_READ, &in_transfer);
+ out_map = pipe_buffer_map(context, new_elts,
+ PIPE_TRANSFER_WRITE, &out_transfer);
+
+ memcpy(out_map, in_map+start, 2 * count);
+
+ pipe_buffer_unmap(context, *elts, in_transfer);
+ pipe_buffer_unmap(context, new_elts, out_transfer);
*elts = new_elts;
}
/* This is the fast-path drawing & emission for HW TCL. */
void r300_draw_range_elements(struct pipe_context* pipe,
- struct pipe_buffer* indexBuffer,
+ struct pipe_resource* indexBuffer,
unsigned indexSize,
+ int indexBias,
unsigned minIndex,
unsigned maxIndex,
unsigned mode,
@@ -361,40 +540,53 @@ void r300_draw_range_elements(struct pipe_context* pipe,
unsigned count)
{
struct r300_context* r300 = r300_context(pipe);
- struct pipe_buffer* orgIndexBuffer = indexBuffer;
+ struct pipe_resource* orgIndexBuffer = indexBuffer;
#if defined(ENABLE_ALT_NUM_VERTS)
- boolean alt_num_verts = r300_screen(pipe->screen)->caps->is_r500 &&
+ boolean alt_num_verts = r300->screen->caps.is_r500 &&
count > 65536;
#else
boolean alt_num_verts = FALSE;
#endif
unsigned short_count;
+ if (r300->skip_rendering) {
+ return;
+ }
+
if (!u_trim_pipe_prim(mode, &count)) {
return;
}
if (indexSize == 1) {
- r300_shorten_ubyte_elts(r300, &indexBuffer, count);
+ r300_shorten_ubyte_elts(r300, &indexBuffer, start, count);
indexSize = 2;
+ start = 0;
+ } else if (indexSize == 2 && start % 2 != 0) {
+ r300_align_ushort_elts(r300, &indexBuffer, start, count);
+ start = 0;
}
r300_update_derived_state(r300);
+ r300_upload_index_buffer(r300, &indexBuffer, indexSize, start, count);
+
/* 128 dwords for emit_aos and emit_draw_elements */
r300_reserve_cs_space(r300, r300_get_num_dirty_dwords(r300) + 128);
r300_emit_buffer_validate(r300, TRUE, indexBuffer);
r300_emit_dirty_state(r300);
r300_emit_aos(r300, 0);
+ u_upload_flush(r300->upload_vb);
+ u_upload_flush(r300->upload_ib);
if (alt_num_verts || count <= 65535) {
- r300_emit_draw_elements(r300, indexBuffer, indexSize, minIndex,
- maxIndex, mode, start, count);
+ r300->emit_draw_elements(r300, indexBuffer, indexSize, indexBias,
+ minIndex, maxIndex, mode, start, count);
} else {
do {
short_count = MIN2(count, 65534);
- r300_emit_draw_elements(r300, indexBuffer, indexSize, minIndex,
- maxIndex, mode, start, short_count);
+ r300->emit_draw_elements(r300, indexBuffer, indexSize, indexBias,
+ minIndex, maxIndex,
+ mode, start, short_count);
start += short_count;
count -= short_count;
@@ -409,20 +601,20 @@ void r300_draw_range_elements(struct pipe_context* pipe,
}
if (indexBuffer != orgIndexBuffer) {
- pipe->screen->buffer_destroy(indexBuffer);
+ pipe_resource_reference( &indexBuffer, NULL );
}
}
/* Simple helpers for context setup. Should probably be moved to util. */
void r300_draw_elements(struct pipe_context* pipe,
- struct pipe_buffer* indexBuffer,
- unsigned indexSize, unsigned mode,
+ struct pipe_resource* indexBuffer,
+ unsigned indexSize, int indexBias, unsigned mode,
unsigned start, unsigned count)
{
struct r300_context *r300 = r300_context(pipe);
- pipe->draw_range_elements(pipe, indexBuffer, indexSize, 0,
- r300->vertex_buffer_max_index,
+ pipe->draw_range_elements(pipe, indexBuffer, indexSize, indexBias,
+ 0, r300->vertex_buffer_max_index,
mode, start, count);
}
@@ -431,13 +623,17 @@ void r300_draw_arrays(struct pipe_context* pipe, unsigned mode,
{
struct r300_context* r300 = r300_context(pipe);
#if defined(ENABLE_ALT_NUM_VERTS)
- boolean alt_num_verts = r300_screen(pipe->screen)->caps->is_r500 &&
+ boolean alt_num_verts = r300->screen->caps.is_r500 &&
count > 65536;
#else
boolean alt_num_verts = FALSE;
#endif
unsigned short_count;
+ if (r300->skip_rendering) {
+ return;
+ }
+
if (!u_trim_pipe_prim(mode, &count)) {
return;
}
@@ -445,22 +641,22 @@ void r300_draw_arrays(struct pipe_context* pipe, unsigned mode,
r300_update_derived_state(r300);
if (immd_is_good_idea(r300, count)) {
- r300_emit_draw_arrays_immediate(r300, mode, start, count);
+ r300->emit_draw_arrays_immediate(r300, mode, start, count);
} else {
/* Make sure there are at least 128 spare dwords in the command buffer.
* (most of it being consumed by emit_aos) */
r300_reserve_cs_space(r300, r300_get_num_dirty_dwords(r300) + 128);
- r300_emit_buffer_validate(r300, TRUE, 0);
+ r300_emit_buffer_validate(r300, TRUE, NULL);
r300_emit_dirty_state(r300);
if (alt_num_verts || count <= 65535) {
r300_emit_aos(r300, start);
- r300_emit_draw_arrays(r300, mode, count);
+ r300->emit_draw_arrays(r300, mode, count);
} else {
do {
short_count = MIN2(count, 65535);
r300_emit_aos(r300, start);
- r300_emit_draw_arrays(r300, mode, short_count);
+ r300->emit_draw_arrays(r300, mode, short_count);
start += short_count;
count -= short_count;
@@ -468,11 +664,12 @@ void r300_draw_arrays(struct pipe_context* pipe, unsigned mode,
/* Again, we emit both AOS and draw_arrays so there should be
* at least 128 spare dwords. */
if (count && r300_reserve_cs_space(r300, 128)) {
- r300_emit_buffer_validate(r300, TRUE, 0);
+ r300_emit_buffer_validate(r300, TRUE, NULL);
r300_emit_dirty_state(r300);
}
} while (count);
}
+ u_upload_flush(r300->upload_vb);
}
}
@@ -488,40 +685,41 @@ void r300_swtcl_draw_arrays(struct pipe_context* pipe,
unsigned count)
{
struct r300_context* r300 = r300_context(pipe);
+ struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS];
int i;
+ if (r300->skip_rendering) {
+ return;
+ }
+
if (!u_trim_pipe_prim(mode, &count)) {
return;
}
for (i = 0; i < r300->vertex_buffer_count; i++) {
- void* buf = pipe_buffer_map(pipe->screen,
+ void* buf = pipe_buffer_map(pipe,
r300->vertex_buffer[i].buffer,
- PIPE_BUFFER_USAGE_CPU_READ);
+ PIPE_TRANSFER_READ,
+ &vb_transfer[i]);
draw_set_mapped_vertex_buffer(r300->draw, i, buf);
}
- draw_set_mapped_element_buffer(r300->draw, 0, NULL);
-
- draw_set_mapped_constant_buffer(r300->draw,
- PIPE_SHADER_VERTEX,
- 0,
- r300->shader_constants[PIPE_SHADER_VERTEX].constants,
- r300->shader_constants[PIPE_SHADER_VERTEX].count *
- (sizeof(float) * 4));
+ draw_set_mapped_element_buffer(r300->draw, 0, 0, NULL);
draw_arrays(r300->draw, mode, start, count);
for (i = 0; i < r300->vertex_buffer_count; i++) {
- pipe_buffer_unmap(pipe->screen, r300->vertex_buffer[i].buffer);
+ pipe_buffer_unmap(pipe, r300->vertex_buffer[i].buffer,
+ vb_transfer[i]);
draw_set_mapped_vertex_buffer(r300->draw, i, NULL);
}
}
/* SW TCL elements, using Draw. */
void r300_swtcl_draw_range_elements(struct pipe_context* pipe,
- struct pipe_buffer* indexBuffer,
+ struct pipe_resource* indexBuffer,
unsigned indexSize,
+ int indexBias,
unsigned minIndex,
unsigned maxIndex,
unsigned mode,
@@ -529,42 +727,45 @@ void r300_swtcl_draw_range_elements(struct pipe_context* pipe,
unsigned count)
{
struct r300_context* r300 = r300_context(pipe);
+ struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS];
+ struct pipe_transfer *ib_transfer;
int i;
void* indices;
+ if (r300->skip_rendering) {
+ return;
+ }
+
if (!u_trim_pipe_prim(mode, &count)) {
return;
}
for (i = 0; i < r300->vertex_buffer_count; i++) {
- void* buf = pipe_buffer_map(pipe->screen,
+ void* buf = pipe_buffer_map(pipe,
r300->vertex_buffer[i].buffer,
- PIPE_BUFFER_USAGE_CPU_READ);
+ PIPE_TRANSFER_READ,
+ &vb_transfer[i]);
draw_set_mapped_vertex_buffer(r300->draw, i, buf);
}
- indices = pipe_buffer_map(pipe->screen, indexBuffer,
- PIPE_BUFFER_USAGE_CPU_READ);
- draw_set_mapped_element_buffer_range(r300->draw, indexSize,
+ indices = pipe_buffer_map(pipe, indexBuffer,
+ PIPE_TRANSFER_READ, &ib_transfer);
+ draw_set_mapped_element_buffer_range(r300->draw, indexSize, indexBias,
minIndex, maxIndex, indices);
- draw_set_mapped_constant_buffer(r300->draw,
- PIPE_SHADER_VERTEX,
- 0,
- r300->shader_constants[PIPE_SHADER_VERTEX].constants,
- r300->shader_constants[PIPE_SHADER_VERTEX].count *
- (sizeof(float) * 4));
-
draw_arrays(r300->draw, mode, start, count);
for (i = 0; i < r300->vertex_buffer_count; i++) {
- pipe_buffer_unmap(pipe->screen, r300->vertex_buffer[i].buffer);
+ pipe_buffer_unmap(pipe, r300->vertex_buffer[i].buffer,
+ vb_transfer[i]);
draw_set_mapped_vertex_buffer(r300->draw, i, NULL);
}
- pipe_buffer_unmap(pipe->screen, indexBuffer);
- draw_set_mapped_element_buffer_range(r300->draw, 0, start,
- start + count - 1, NULL);
+ pipe_buffer_unmap(pipe, indexBuffer,
+ ib_transfer);
+ draw_set_mapped_element_buffer_range(r300->draw, 0, 0,
+ start, start + count - 1,
+ NULL);
}
/* Object for rendering using Draw. */
@@ -581,11 +782,13 @@ struct r300_render {
unsigned hwprim;
/* VBO */
- struct pipe_buffer* vbo;
+ struct pipe_resource* vbo;
size_t vbo_size;
size_t vbo_offset;
size_t vbo_max_used;
void * vbo_ptr;
+
+ struct pipe_transfer *vbo_transfer;
};
static INLINE struct r300_render*
@@ -616,13 +819,12 @@ static boolean r300_render_allocate_vertices(struct vbuf_render* render,
if (size + r300render->vbo_offset > r300render->vbo_size)
{
- pipe_buffer_reference(&r300->vbo, NULL);
+ pipe_resource_reference(&r300->vbo, NULL);
r300render->vbo = pipe_buffer_create(screen,
- 64,
- PIPE_BUFFER_USAGE_VERTEX,
- R300_MAX_VBO_SIZE);
+ PIPE_BIND_VERTEX_BUFFER,
+ R300_MAX_DRAW_VBO_SIZE);
r300render->vbo_offset = 0;
- r300render->vbo_size = R300_MAX_VBO_SIZE;
+ r300render->vbo_size = R300_MAX_DRAW_VBO_SIZE;
}
r300render->vertex_size = vertex_size;
@@ -635,10 +837,11 @@ static boolean r300_render_allocate_vertices(struct vbuf_render* render,
static void* r300_render_map_vertices(struct vbuf_render* render)
{
struct r300_render* r300render = r300_render(render);
- struct pipe_screen* screen = r300render->r300->context.screen;
- r300render->vbo_ptr = pipe_buffer_map(screen, r300render->vbo,
- PIPE_BUFFER_USAGE_CPU_WRITE);
+ r300render->vbo_ptr = pipe_buffer_map(&r300render->r300->context,
+ r300render->vbo,
+ PIPE_TRANSFER_WRITE,
+ &r300render->vbo_transfer);
return ((uint8_t*)r300render->vbo_ptr + r300render->vbo_offset);
}
@@ -648,7 +851,7 @@ static void r300_render_unmap_vertices(struct vbuf_render* render,
ushort max)
{
struct r300_render* r300render = r300_render(render);
- struct pipe_screen* screen = r300render->r300->context.screen;
+ struct pipe_context* context = &r300render->r300->context;
CS_LOCALS(r300render->r300);
BEGIN_CS(2);
OUT_CS_REG(R300_VAP_VF_MAX_VTX_INDX, max);
@@ -656,7 +859,7 @@ static void r300_render_unmap_vertices(struct vbuf_render* render,
r300render->vbo_max_used = MAX2(r300render->vbo_max_used,
r300render->vertex_size * (max + 1));
- pipe_buffer_unmap(screen, r300render->vbo);
+ pipe_buffer_unmap(context, r300render->vbo, r300render->vbo_transfer);
}
static void r300_render_release_vertices(struct vbuf_render* render)
@@ -678,9 +881,9 @@ static boolean r300_render_set_primitive(struct vbuf_render* render,
return TRUE;
}
-static void r300_render_draw_arrays(struct vbuf_render* render,
- unsigned start,
- unsigned count)
+static void r500_render_draw_arrays(struct vbuf_render* render,
+ unsigned start,
+ unsigned count)
{
struct r300_render* r300render = r300_render(render);
struct r300_context* r300 = r300render->r300;
@@ -688,6 +891,7 @@ static void r300_render_draw_arrays(struct vbuf_render* render,
CS_LOCALS(r300);
r300_reserve_cs_space(r300, r300_get_num_dirty_dwords(r300) + 2);
+ r300_emit_buffer_validate(r300, FALSE, NULL);
r300_emit_dirty_state(r300);
DBG(r300, DBG_DRAW, "r300: Doing vbuf render, count %d\n", count);
@@ -699,9 +903,9 @@ static void r300_render_draw_arrays(struct vbuf_render* render,
END_CS;
}
-static void r300_render_draw(struct vbuf_render* render,
- const ushort* indices,
- uint count)
+static void r500_render_draw(struct vbuf_render* render,
+ const ushort* indices,
+ uint count)
{
struct r300_render* r300render = r300_render(render);
struct r300_context* r300 = r300render->r300;
@@ -711,6 +915,7 @@ static void r300_render_draw(struct vbuf_render* render,
CS_LOCALS(r300);
r300_reserve_cs_space(r300, r300_get_num_dirty_dwords(r300) + dwords);
+ r300_emit_buffer_validate(r300, FALSE, NULL);
r300_emit_dirty_state(r300);
BEGIN_CS(dwords);
@@ -726,6 +931,40 @@ static void r300_render_draw(struct vbuf_render* render,
END_CS;
}
+static void r300_render_draw_arrays(struct vbuf_render* render,
+ unsigned start,
+ unsigned count)
+{
+ struct r300_context* r300 = r300_render(render)->r300;
+
+ if (!r300->stencil_ref_bf_fallback) {
+ r500_render_draw_arrays(render, start, count);
+ } else {
+ r300_begin_stencil_ref_fallback(r300);
+ r500_render_draw_arrays(render, start, count);
+ r300_switch_stencil_ref_side(r300);
+ r500_render_draw_arrays(render, start, count);
+ r300_end_stencil_ref_fallback(r300);
+ }
+}
+
+static void r300_render_draw(struct vbuf_render* render,
+ const ushort* indices,
+ uint count)
+{
+ struct r300_context* r300 = r300_render(render)->r300;
+
+ if (!r300->stencil_ref_bf_fallback) {
+ r500_render_draw(render, indices, count);
+ } else {
+ r300_begin_stencil_ref_fallback(r300);
+ r500_render_draw(render, indices, count);
+ r300_switch_stencil_ref_side(r300);
+ r500_render_draw(render, indices, count);
+ r300_end_stencil_ref_fallback(r300);
+ }
+}
+
static void r300_render_destroy(struct vbuf_render* render)
{
FREE(render);
@@ -746,8 +985,13 @@ static struct vbuf_render* r300_render_create(struct r300_context* r300)
r300render->base.map_vertices = r300_render_map_vertices;
r300render->base.unmap_vertices = r300_render_unmap_vertices;
r300render->base.set_primitive = r300_render_set_primitive;
- r300render->base.draw = r300_render_draw;
- r300render->base.draw_arrays = r300_render_draw_arrays;
+ if (r300->screen->caps.is_r500) {
+ r300render->base.draw = r500_render_draw;
+ r300render->base.draw_arrays = r500_render_draw_arrays;
+ } else {
+ r300render->base.draw = r300_render_draw;
+ r300render->base.draw_arrays = r300_render_draw_arrays;
+ }
r300render->base.release_vertices = r300_render_release_vertices;
r300render->base.destroy = r300_render_destroy;
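
/* The sketch below is not part of the commit; it is a minimal, self-contained
 * illustration of the two-sided stencil-ref fallback pattern the diff adds
 * (r300_begin_stencil_ref_fallback / r300_switch_stencil_ref_side /
 * r300_end_stencil_ref_fallback wrapped around the r500_* emit functions).
 * All names here (fake_ctx, set_cull_and_ref, emit_draw) are hypothetical
 * stand-ins for the driver's register writes and draw emission. */

#include <stdbool.h>
#include <stdio.h>

struct fake_ctx {
    bool stencil_ref_bf_fallback;   /* back-face ref differs from front-face ref */
    unsigned front_ref, back_ref;   /* per-face stencil reference values */
};

/* Stand-in for OUT_CS_REG(R300_SU_CULL_MODE, ...) plus
 * OUT_CS_REG(R300_ZB_STENCILREFMASK, ...). */
static void set_cull_and_ref(struct fake_ctx *c, const char *cull, unsigned ref)
{
    printf("cull=%s stencil_ref=%u\n", cull, ref);
}

/* Stand-in for one of the r500_emit_draw_* functions. */
static void emit_draw(struct fake_ctx *c, unsigned start, unsigned count)
{
    printf("draw vertices %u..%u\n", start, start + count - 1);
}

/* Mirrors the shape of r300_emit_draw_arrays() in the diff: a single pass when
 * no fallback is needed, otherwise a front-face pass followed by a back-face
 * pass with the back-face stencil ref, then state restoration. */
static void draw_arrays_with_fallback(struct fake_ctx *c,
                                      unsigned start, unsigned count)
{
    if (!c->stencil_ref_bf_fallback) {
        emit_draw(c, start, count);
        return;
    }
    set_cull_and_ref(c, "back",  c->front_ref);  /* cull back: draw front faces */
    emit_draw(c, start, count);
    set_cull_and_ref(c, "front", c->back_ref);   /* cull front: draw back faces */
    emit_draw(c, start, count);
    set_cull_and_ref(c, "none",  c->front_ref);  /* restore original cull/ref */
}

int main(void)
{
    struct fake_ctx c = { .stencil_ref_bf_fallback = true,
                          .front_ref = 1, .back_ref = 2 };
    draw_arrays_with_fallback(&c, 0, 36);
    return 0;
}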