Diffstat (limited to 'src/gallium/drivers/r600/r600_state_common.c')
-rw-r--r--  src/gallium/drivers/r600/r600_state_common.c  358
1 file changed, 297 insertions, 61 deletions
diff --git a/src/gallium/drivers/r600/r600_state_common.c b/src/gallium/drivers/r600/r600_state_common.c
index c647e77b37..43dad0c802 100644
--- a/src/gallium/drivers/r600/r600_state_common.c
+++ b/src/gallium/drivers/r600/r600_state_common.c
@@ -27,7 +27,9 @@
#include <util/u_memory.h>
#include <util/u_format.h>
#include <pipebuffer/pb_buffer.h>
+#include "pipe/p_shader_tokens.h"
#include "r600_pipe.h"
+#include "r600d.h"
/* common state between evergreen and r600 */
void r600_bind_blend_state(struct pipe_context *ctx, void *state)
@@ -121,17 +123,11 @@ void r600_bind_vertex_elements(struct pipe_context *ctx, void *state)
rctx->vertex_elements = v;
if (v) {
+ u_vbuf_mgr_bind_vertex_elements(rctx->vbuf_mgr, state,
+ v->vmgr_elements);
+
rctx->states[v->rstate.id] = &v->rstate;
r600_context_pipe_state_set(&rctx->ctx, &v->rstate);
- if (rctx->family >= CHIP_CEDAR) {
- evergreen_vertex_buffer_update(rctx);
- } else {
- r600_vertex_buffer_update(rctx);
- }
- }
-
- if (v) {
-// rctx->vs_rebuild = TRUE;
}
}
@@ -147,6 +143,7 @@ void r600_delete_vertex_element(struct pipe_context *ctx, void *state)
rctx->vertex_elements = NULL;
r600_bo_reference(rctx->radeon, &v->fetch_shader, NULL);
+ u_vbuf_mgr_destroy_vertex_elements(rctx->vbuf_mgr, v->vmgr_elements);
FREE(state);
}
@@ -171,42 +168,28 @@ void r600_set_vertex_buffers(struct pipe_context *ctx, unsigned count,
const struct pipe_vertex_buffer *buffers)
{
struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
- struct pipe_vertex_buffer *vbo;
- unsigned max_index = (unsigned)-1;
-
- for (int i = 0; i < rctx->nvertex_buffer; i++) {
- pipe_resource_reference(&rctx->vertex_buffer[i].buffer, NULL);
- }
- memcpy(rctx->vertex_buffer, buffers, sizeof(struct pipe_vertex_buffer) * count);
-
- for (int i = 0; i < count; i++) {
- vbo = (struct pipe_vertex_buffer*)&buffers[i];
-
- rctx->vertex_buffer[i].buffer = NULL;
- if (r600_buffer_is_user_buffer(buffers[i].buffer))
- rctx->any_user_vbs = TRUE;
- pipe_resource_reference(&rctx->vertex_buffer[i].buffer, buffers[i].buffer);
+ int i;
- if (vbo->max_index == ~0) {
- if (!vbo->stride)
- vbo->max_index = 1;
- else
- vbo->max_index = (vbo->buffer->width0 - vbo->buffer_offset) / vbo->stride;
+ /* Unbind the fetch resources for slots with a NULL buffer and for trailing slots that are no longer in use. */
+ for (i = 0; i < count; i++) {
+ if (!buffers[i].buffer) {
+ if (rctx->family >= CHIP_CEDAR) {
+ evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
+ } else {
+ r600_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
+ }
}
- max_index = MIN2(vbo->max_index, max_index);
}
- rctx->nvertex_buffer = count;
- rctx->vb_max_index = max_index;
- if (rctx->family >= CHIP_CEDAR) {
- evergreen_vertex_buffer_update(rctx);
- } else {
- r600_vertex_buffer_update(rctx);
+ for (; i < rctx->vbuf_mgr->nr_real_vertex_buffers; i++) {
+ if (rctx->family >= CHIP_CEDAR) {
+ evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
+ } else {
+ r600_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
+ }
}
-}
-
-#define FORMAT_REPLACE(what, withwhat) \
- case PIPE_FORMAT_##what: *format = PIPE_FORMAT_##withwhat; break
+ u_vbuf_mgr_set_vertex_buffers(rctx->vbuf_mgr, count, buffers);
+}
void *r600_create_vertex_elements(struct pipe_context *ctx,
unsigned count,
@@ -214,33 +197,15 @@ void *r600_create_vertex_elements(struct pipe_context *ctx,
{
struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
struct r600_vertex_element *v = CALLOC_STRUCT(r600_vertex_element);
- enum pipe_format *format;
- int i;
assert(count < 32);
if (!v)
return NULL;
v->count = count;
- memcpy(v->elements, elements, count * sizeof(struct pipe_vertex_element));
-
- for (i = 0; i < count; i++) {
- v->hw_format[i] = v->elements[i].src_format;
- format = &v->hw_format[i];
-
- switch (*format) {
- FORMAT_REPLACE(R64_FLOAT, R32_FLOAT);
- FORMAT_REPLACE(R64G64_FLOAT, R32G32_FLOAT);
- FORMAT_REPLACE(R64G64B64_FLOAT, R32G32B32_FLOAT);
- FORMAT_REPLACE(R64G64B64A64_FLOAT, R32G32B32A32_FLOAT);
- default:;
- }
- v->incompatible_layout =
- v->incompatible_layout ||
- v->elements[i].src_format != v->hw_format[i];
-
- v->hw_format_size[i] = align(util_format_get_blocksize(v->hw_format[i]), 4);
- }
+ v->vmgr_elements =
+ u_vbuf_mgr_create_vertex_elements(rctx->vbuf_mgr, count,
+ elements, v->elements);
if (r600_vertex_elements_build_fetch_shader(rctx, v)) {
FREE(v);
@@ -310,3 +275,274 @@ void r600_delete_vs_shader(struct pipe_context *ctx, void *state)
r600_pipe_shader_destroy(ctx, shader);
free(shader);
}
+
+/* FIXME: optimize away the SPI update when it is not needed */
+void r600_spi_update(struct r600_pipe_context *rctx)
+{
+ struct r600_pipe_shader *shader = rctx->ps_shader;
+ struct r600_pipe_state rstate;
+ struct r600_shader *rshader = &shader->shader;
+ unsigned i, tmp;
+
+ rstate.nregs = 0;
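+ /* Program one SPI_PS_INPUT_CNTL register per pixel shader input, mapping it to the matching vertex shader output and applying the flat-shade and point-sprite overrides. */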
+ for (i = 0; i < rshader->ninput; i++) {
+ tmp = S_028644_SEMANTIC(r600_find_vs_semantic_index(&rctx->vs_shader->shader, rshader, i));
+
+ if (rshader->input[i].name == TGSI_SEMANTIC_COLOR ||
+ rshader->input[i].name == TGSI_SEMANTIC_BCOLOR ||
+ rshader->input[i].name == TGSI_SEMANTIC_POSITION) {
+ tmp |= S_028644_FLAT_SHADE(rctx->flatshade);
+ }
+
+ if (rshader->input[i].name == TGSI_SEMANTIC_GENERIC &&
+ rctx->sprite_coord_enable & (1 << rshader->input[i].sid)) {
+ tmp |= S_028644_PT_SPRITE_TEX(1);
+ }
+
+ if (rctx->family < CHIP_CEDAR) {
+ if (rshader->input[i].centroid)
+ tmp |= S_028644_SEL_CENTROID(1);
+
+ if (rshader->input[i].interpolate == TGSI_INTERPOLATE_LINEAR)
+ tmp |= S_028644_SEL_LINEAR(1);
+ }
+
+ r600_pipe_state_add_reg(&rstate, R_028644_SPI_PS_INPUT_CNTL_0 + i * 4, tmp, 0xFFFFFFFF, NULL);
+ }
+ r600_context_pipe_state_set(&rctx->ctx, &rstate);
+}
+
+void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
+ struct pipe_resource *buffer)
+{
+ struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
+ struct r600_resource_buffer *rbuffer = r600_buffer(buffer);
+ struct r600_pipe_state *rstate;
+ uint32_t offset;
+
+ /* Note that the state tracker can unbind constant buffers by
+ * passing NULL here.
+ */
+ if (buffer == NULL) {
+ return;
+ }
+
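+ /* Make sure the constant data lives in a GPU-accessible buffer and compute its offset within the underlying bo. */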
+ r600_upload_const_buffer(rctx, &rbuffer, &offset);
+ offset += r600_bo_offset(rbuffer->r.bo);
+
+ switch (shader) {
+ case PIPE_SHADER_VERTEX:
+ rctx->vs_const_buffer.nregs = 0;
+ r600_pipe_state_add_reg(&rctx->vs_const_buffer,
+ R_028180_ALU_CONST_BUFFER_SIZE_VS_0,
+ ALIGN_DIVUP(buffer->width0 >> 4, 16),
+ 0xFFFFFFFF, NULL);
+ r600_pipe_state_add_reg(&rctx->vs_const_buffer,
+ R_028980_ALU_CONST_CACHE_VS_0,
+ offset >> 8, 0xFFFFFFFF, rbuffer->r.bo);
+ r600_context_pipe_state_set(&rctx->ctx, &rctx->vs_const_buffer);
+
+ rstate = &rctx->vs_const_buffer_resource[index];
+ rstate->id = R600_PIPE_STATE_RESOURCE;
+ rstate->nregs = 0;
+ if (rctx->family >= CHIP_CEDAR) {
+ evergreen_pipe_set_buffer_resource(rctx, rstate, &rbuffer->r, offset, 16);
+ evergreen_context_pipe_state_set_vs_resource(&rctx->ctx, rstate, index);
+ } else {
+ r600_pipe_set_buffer_resource(rctx, rstate, &rbuffer->r, offset, 16);
+ r600_context_pipe_state_set_vs_resource(&rctx->ctx, rstate, index);
+ }
+ break;
+ case PIPE_SHADER_FRAGMENT:
+ rctx->ps_const_buffer.nregs = 0;
+ r600_pipe_state_add_reg(&rctx->ps_const_buffer,
+ R_028140_ALU_CONST_BUFFER_SIZE_PS_0,
+ ALIGN_DIVUP(buffer->width0 >> 4, 16),
+ 0xFFFFFFFF, NULL);
+ r600_pipe_state_add_reg(&rctx->ps_const_buffer,
+ R_028940_ALU_CONST_CACHE_PS_0,
+ offset >> 8, 0xFFFFFFFF, rbuffer->r.bo);
+ r600_context_pipe_state_set(&rctx->ctx, &rctx->ps_const_buffer);
+
+ rstate = &rctx->ps_const_buffer_resource[index];
+ rstate->id = R600_PIPE_STATE_RESOURCE;
+ rstate->nregs = 0;
+ if (rctx->family >= CHIP_CEDAR) {
+ evergreen_pipe_set_buffer_resource(rctx, rstate, &rbuffer->r, offset, 16);
+ evergreen_context_pipe_state_set_ps_resource(&rctx->ctx, rstate, index);
+ } else {
+ r600_pipe_set_buffer_resource(rctx, rstate, &rbuffer->r, offset, 16);
+ r600_context_pipe_state_set_ps_resource(&rctx->ctx, rstate, index);
+ }
+ break;
+ default:
+ R600_ERR("unsupported %d\n", shader);
+ return;
+ }
+
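+ /* r600_upload_const_buffer may have replaced rbuffer with an upload buffer; drop that temporary reference. */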
+ if (buffer != &rbuffer->r.b.b.b)
+ pipe_resource_reference((struct pipe_resource**)&rbuffer, NULL);
+}
+
+static void r600_vertex_buffer_update(struct r600_pipe_context *rctx)
+{
+ struct r600_pipe_state *rstate;
+ struct r600_resource *rbuffer;
+ struct pipe_vertex_buffer *vertex_buffer;
+ unsigned i, count, offset;
+
+ if (rctx->vertex_elements->vbuffer_need_offset) {
+ /* one resource per vertex element */
+ count = rctx->vertex_elements->count;
+ } else {
+ /* bind vertex buffer once */
+ count = rctx->vbuf_mgr->nr_real_vertex_buffers;
+ }
+
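+ /* Bind each vertex buffer as a hardware fetch resource: one per vertex element when per-element offsets are needed, otherwise one per buffer slot. */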
+ for (i = 0 ; i < count; i++) {
+ rstate = &rctx->fs_resource[i];
+ rstate->id = R600_PIPE_STATE_RESOURCE;
+ rstate->nregs = 0;
+
+ if (rctx->vertex_elements->vbuffer_need_offset) {
+ /* one resource per vertex element */
+ unsigned vbuffer_index;
+ vbuffer_index = rctx->vertex_elements->elements[i].vertex_buffer_index;
+ vertex_buffer = &rctx->vbuf_mgr->vertex_buffer[vbuffer_index];
+ rbuffer = (struct r600_resource*)rctx->vbuf_mgr->real_vertex_buffer[vbuffer_index];
+ offset = rctx->vertex_elements->vbuffer_offset[i];
+ } else {
+ /* bind vertex buffer once */
+ vertex_buffer = &rctx->vbuf_mgr->vertex_buffer[i];
+ rbuffer = (struct r600_resource*)rctx->vbuf_mgr->real_vertex_buffer[i];
+ offset = 0;
+ }
+ if (vertex_buffer == NULL || rbuffer == NULL)
+ continue;
+ offset += vertex_buffer->buffer_offset + r600_bo_offset(rbuffer->bo);
+
+ if (rctx->family >= CHIP_CEDAR) {
+ evergreen_pipe_set_buffer_resource(rctx, rstate, rbuffer, offset, vertex_buffer->stride);
+ evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, rstate, i);
+ } else {
+ r600_pipe_set_buffer_resource(rctx, rstate, rbuffer, offset, vertex_buffer->stride);
+ r600_context_pipe_state_set_fs_resource(&rctx->ctx, rstate, i);
+ }
+ }
+}
+
+void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
+{
+ struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
+ struct r600_resource *rbuffer;
+ u32 vgt_dma_index_type, vgt_draw_initiator, mask;
+ struct r600_draw rdraw;
+ struct r600_pipe_state vgt;
+ struct r600_drawl draw = {};
+ unsigned prim;
+
+ r600_flush_depth_textures(rctx);
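+ /* u_vbuf_mgr uploads user vertex buffers and translates vertex formats the hardware cannot fetch before the fetch resources are programmed. */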
+ u_vbuf_mgr_draw_begin(rctx->vbuf_mgr, info, NULL, NULL);
+ r600_vertex_buffer_update(rctx);
+
+ draw.info = *info;
+ draw.ctx = ctx;
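+ /* Indexed path: fold the index buffer offset into the start index, translate index formats the hardware cannot fetch, and upload user-memory index buffers. */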
+ if (info->indexed && rctx->index_buffer.buffer) {
+ draw.info.start += rctx->index_buffer.offset / rctx->index_buffer.index_size;
+ pipe_resource_reference(&draw.index_buffer, rctx->index_buffer.buffer);
+
+ r600_translate_index_buffer(rctx, &draw.index_buffer,
+ &rctx->index_buffer.index_size,
+ &draw.info.start,
+ info->count);
+
+ draw.index_size = rctx->index_buffer.index_size;
+ draw.index_buffer_offset = draw.info.start * draw.index_size;
+ draw.info.start = 0;
+
+ if (u_vbuf_resource(draw.index_buffer)->user_ptr) {
+ r600_upload_index_buffer(rctx, &draw);
+ }
+ } else {
+ draw.info.index_bias = info->start;
+ }
+
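+ /* Pick the VGT index type and draw initiator from the index size; index_size 0 is the non-indexed (auto-index) path. */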
+ switch (draw.index_size) {
+ case 2:
+ vgt_draw_initiator = 0;
+ vgt_dma_index_type = 0;
+ break;
+ case 4:
+ vgt_draw_initiator = 0;
+ vgt_dma_index_type = 1;
+ break;
+ case 0:
+ vgt_draw_initiator = 2;
+ vgt_dma_index_type = 0;
+ break;
+ default:
+ R600_ERR("unsupported index size %d\n", draw.index_size);
+ return;
+ }
+ if (r600_conv_pipe_prim(draw.info.mode, &prim))
+ return;
+ if (unlikely(rctx->ps_shader == NULL)) {
+ R600_ERR("missing vertex shader\n");
+ return;
+ }
+ if (unlikely(rctx->vs_shader == NULL)) {
+ R600_ERR("missing vertex shader\n");
+ return;
+ }
+ /* the vertex elements must provide at least as many fetch resources as the vertex shader expects */
+ if (rctx->vertex_elements->count < rctx->vs_shader->shader.bc.nresource) {
+ R600_ERR("%d resources provided, expecting %d\n",
+ rctx->vertex_elements->count, rctx->vs_shader->shader.bc.nresource);
+ return;
+ }
+
+ r600_spi_update(rctx);
+
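+ /* Restrict CB_TARGET_MASK to the channels of the colorbuffers that are actually bound. */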
+ mask = 0;
+ for (int i = 0; i < rctx->framebuffer.nr_cbufs; i++) {
+ mask |= (0xF << (i * 4));
+ }
+
+ vgt.id = R600_PIPE_STATE_VGT;
+ vgt.nregs = 0;
+ r600_pipe_state_add_reg(&vgt, R_008958_VGT_PRIMITIVE_TYPE, prim, 0xFFFFFFFF, NULL);
+ r600_pipe_state_add_reg(&vgt, R_028408_VGT_INDX_OFFSET, draw.info.index_bias, 0xFFFFFFFF, NULL);
+ r600_pipe_state_add_reg(&vgt, R_028400_VGT_MAX_VTX_INDX, draw.info.max_index, 0xFFFFFFFF, NULL);
+ r600_pipe_state_add_reg(&vgt, R_028404_VGT_MIN_VTX_INDX, draw.info.min_index, 0xFFFFFFFF, NULL);
+ r600_pipe_state_add_reg(&vgt, R_028238_CB_TARGET_MASK, rctx->cb_target_mask & mask, 0xFFFFFFFF, NULL);
+ r600_pipe_state_add_reg(&vgt, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0, 0xFFFFFFFF, NULL);
+ r600_pipe_state_add_reg(&vgt, R_03CFF4_SQ_VTX_START_INST_LOC, draw.info.start_instance, 0xFFFFFFFF, NULL);
+ r600_context_pipe_state_set(&rctx->ctx, &vgt);
+
+ rdraw.vgt_num_indices = draw.info.count;
+ rdraw.vgt_num_instances = draw.info.instance_count;
+ rdraw.vgt_index_type = vgt_dma_index_type;
+ rdraw.vgt_draw_initiator = vgt_draw_initiator;
+ rdraw.indices = NULL;
+ if (draw.index_buffer) {
+ rbuffer = (struct r600_resource*)draw.index_buffer;
+ rdraw.indices = rbuffer->bo;
+ rdraw.indices_bo_offset = draw.index_buffer_offset;
+ }
+
+ if (rctx->family >= CHIP_CEDAR) {
+ evergreen_context_draw(&rctx->ctx, &rdraw);
+ } else {
+ r600_context_draw(&rctx->ctx, &rdraw);
+ }
+
+ if (rctx->framebuffer.zsbuf) {
+ struct pipe_resource *tex = rctx->framebuffer.zsbuf->texture;
+ ((struct r600_resource_texture *)tex)->dirty_db = TRUE;
+ }
+
+ pipe_resource_reference(&draw.index_buffer, NULL);
+
+ u_vbuf_mgr_draw_end(rctx->vbuf_mgr);
+}