Diffstat (limited to 'src/mesa/drivers/dri/r300/r300_render.c')
-rw-r--r--   src/mesa/drivers/dri/r300/r300_render.c   255
1 file changed, 161 insertions(+), 94 deletions(-)
diff --git a/src/mesa/drivers/dri/r300/r300_render.c b/src/mesa/drivers/dri/r300/r300_render.c
index 16ce4a1199..f87fee4af6 100644
--- a/src/mesa/drivers/dri/r300/r300_render.c
+++ b/src/mesa/drivers/dri/r300/r300_render.c
@@ -66,15 +66,14 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
#include "tnl/t_vp_build.h"
#include "radeon_reg.h"
#include "radeon_macros.h"
-#include "radeon_ioctl.h"
-#include "radeon_state.h"
#include "r300_context.h"
#include "r300_ioctl.h"
#include "r300_state.h"
#include "r300_reg.h"
#include "r300_tex.h"
#include "r300_emit.h"
-#include "r300_fragprog.h"
+#include "r300_fragprog_common.h"
+
extern int future_hw_tcl_on;
/**
@@ -175,85 +174,164 @@ int r300NumVerts(r300ContextPtr rmesa, int num_verts, int prim)
static void r300EmitElts(GLcontext * ctx, void *elts, unsigned long n_elts)
{
r300ContextPtr rmesa = R300_CONTEXT(ctx);
- struct r300_dma_region *rvb = &rmesa->state.elt_dma;
void *out;
- if (r300IsGartMemory(rmesa, elts, n_elts * 4)) {
- rvb->address = rmesa->radeon.radeonScreen->gartTextures.map;
- rvb->start = ((char *)elts) - rvb->address;
- rvb->aos_offset =
- rmesa->radeon.radeonScreen->gart_texture_offset +
- rvb->start;
- return;
- } else if (r300IsGartMemory(rmesa, elts, 1)) {
- WARN_ONCE("Pointer not within GART memory!\n");
- _mesa_exit(-1);
- }
-
- r300AllocDmaRegion(rmesa, rvb, n_elts * 4, 4);
- rvb->aos_offset = GET_START(rvb);
-
- out = rvb->address + rvb->start;
+ radeonAllocDmaRegion(&rmesa->radeon, &rmesa->radeon.tcl.elt_dma_bo,
+ &rmesa->radeon.tcl.elt_dma_offset, n_elts * 4, 4);
+ radeon_bo_map(rmesa->radeon.tcl.elt_dma_bo, 1);
+ out = rmesa->radeon.tcl.elt_dma_bo->ptr + rmesa->radeon.tcl.elt_dma_offset;
memcpy(out, elts, n_elts * 4);
+ radeon_bo_unmap(rmesa->radeon.tcl.elt_dma_bo);
}
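
Note: the hunk above drops the old GART-pointer fast path and always copies the indices into a freshly allocated DMA buffer object. The sketch below mirrors that map/copy/unmap pattern only; scratch_map(), scratch_unmap() and the static array are hypothetical stand-ins for radeon_bo_map()/radeon_bo_unmap() and the buffer object, used so the example stays self-contained.

/* Hedged sketch of the r300EmitElts upload pattern; the helpers below are
 * stand-ins, not the libdrm_radeon API. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t scratch[256];			/* stand-in for the mapped BO */

static void *scratch_map(void)   { return scratch; }
static void  scratch_unmap(void) { /* no-op in this mock */ }

static void emit_elts(const uint32_t *elts, unsigned long n_elts)
{
	void *out = scratch_map();		/* radeon_bo_map(bo, 1) in the driver */
	memcpy(out, elts, n_elts * 4);		/* indices are emitted as 32-bit values */
	scratch_unmap();			/* radeon_bo_unmap(bo) */
}

int main(void)
{
	uint32_t indices[3] = { 0, 1, 2 };
	emit_elts(indices, 3);
	printf("staged %u %u %u\n", (unsigned)scratch[0],
	       (unsigned)scratch[1], (unsigned)scratch[2]);
	return 0;
}
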
-static void r300FireEB(r300ContextPtr rmesa, unsigned long addr,
- int vertex_count, int type)
+static void r300FireEB(r300ContextPtr rmesa, int vertex_count, int type)
{
- int cmd_reserved = 0;
- int cmd_written = 0;
- drm_radeon_cmd_header_t *cmd = NULL;
-
- start_packet3(CP_PACKET3(R300_PACKET3_3D_DRAW_INDX_2, 0), 0);
- e32(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (vertex_count << 16) | type | R300_VAP_VF_CNTL__INDEX_SIZE_32bit);
-
- start_packet3(CP_PACKET3(R300_PACKET3_INDX_BUFFER, 2), 2);
- e32(R300_INDX_BUFFER_ONE_REG_WR | (0 << R300_INDX_BUFFER_SKIP_SHIFT) |
- (R300_VAP_PORT_IDX0 >> 2));
- e32(addr);
- e32(vertex_count);
+ BATCH_LOCALS(&rmesa->radeon);
+
+ if (vertex_count > 0) {
+ BEGIN_BATCH(10);
+ OUT_BATCH_PACKET3(R300_PACKET3_3D_DRAW_INDX_2, 0);
+ OUT_BATCH(R300_VAP_VF_CNTL__PRIM_WALK_INDICES |
+ ((vertex_count + 0) << 16) |
+ type |
+ R300_VAP_VF_CNTL__INDEX_SIZE_32bit);
+
+ if (!rmesa->radeon.radeonScreen->kernel_mm) {
+ OUT_BATCH_PACKET3(R300_PACKET3_INDX_BUFFER, 2);
+ OUT_BATCH(R300_INDX_BUFFER_ONE_REG_WR | (0 << R300_INDX_BUFFER_SKIP_SHIFT) |
+ (R300_VAP_PORT_IDX0 >> 2));
+ OUT_BATCH_RELOC(rmesa->radeon.tcl.elt_dma_offset,
+ rmesa->radeon.tcl.elt_dma_bo,
+ rmesa->radeon.tcl.elt_dma_offset,
+ RADEON_GEM_DOMAIN_GTT, 0, 0);
+ OUT_BATCH(vertex_count);
+ } else {
+ OUT_BATCH_PACKET3(R300_PACKET3_INDX_BUFFER, 2);
+ OUT_BATCH(R300_INDX_BUFFER_ONE_REG_WR | (0 << R300_INDX_BUFFER_SKIP_SHIFT) |
+ (R300_VAP_PORT_IDX0 >> 2));
+ OUT_BATCH(rmesa->radeon.tcl.elt_dma_offset);
+ OUT_BATCH(vertex_count);
+ radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
+ rmesa->radeon.tcl.elt_dma_bo,
+ RADEON_GEM_DOMAIN_GTT, 0, 0);
+ }
+ END_BATCH();
+ }
}
static void r300EmitAOS(r300ContextPtr rmesa, GLuint nr, GLuint offset)
{
+ BATCH_LOCALS(&rmesa->radeon);
+ uint32_t voffset;
int sz = 1 + (nr >> 1) * 3 + (nr & 1) * 2;
int i;
- int cmd_reserved = 0;
- int cmd_written = 0;
- drm_radeon_cmd_header_t *cmd = NULL;
if (RADEON_DEBUG & DEBUG_VERTS)
fprintf(stderr, "%s: nr=%d, ofs=0x%08x\n", __FUNCTION__, nr,
offset);
- start_packet3(CP_PACKET3(R300_PACKET3_3D_LOAD_VBPNTR, sz - 1), sz - 1);
- e32(nr);
- for (i = 0; i + 1 < nr; i += 2) {
- e32((rmesa->state.aos[i].aos_size << 0) |
- (rmesa->state.aos[i].aos_stride << 8) |
- (rmesa->state.aos[i + 1].aos_size << 16) |
- (rmesa->state.aos[i + 1].aos_stride << 24));
+ if (!rmesa->radeon.radeonScreen->kernel_mm) {
+ BEGIN_BATCH(sz+2+(nr * 2));
+ OUT_BATCH_PACKET3(R300_PACKET3_3D_LOAD_VBPNTR, sz - 1);
+ OUT_BATCH(nr);
+
+ for (i = 0; i + 1 < nr; i += 2) {
+ OUT_BATCH((rmesa->radeon.tcl.aos[i].components << 0) |
+ (rmesa->radeon.tcl.aos[i].stride << 8) |
+ (rmesa->radeon.tcl.aos[i + 1].components << 16) |
+ (rmesa->radeon.tcl.aos[i + 1].stride << 24));
+
+ voffset = rmesa->radeon.tcl.aos[i + 0].offset +
+ offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
+ OUT_BATCH_RELOC(voffset,
+ rmesa->radeon.tcl.aos[i].bo,
+ voffset,
+ RADEON_GEM_DOMAIN_GTT,
+ 0, 0);
+ voffset = rmesa->radeon.tcl.aos[i + 1].offset +
+ offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
+ OUT_BATCH_RELOC(voffset,
+ rmesa->radeon.tcl.aos[i+1].bo,
+ voffset,
+ RADEON_GEM_DOMAIN_GTT,
+ 0, 0);
+ }
- e32(rmesa->state.aos[i].aos_offset + offset * 4 * rmesa->state.aos[i].aos_stride);
- e32(rmesa->state.aos[i + 1].aos_offset + offset * 4 * rmesa->state.aos[i + 1].aos_stride);
- }
+ if (nr & 1) {
+ OUT_BATCH((rmesa->radeon.tcl.aos[nr - 1].components << 0) |
+ (rmesa->radeon.tcl.aos[nr - 1].stride << 8));
+ voffset = rmesa->radeon.tcl.aos[nr - 1].offset +
+ offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
+ OUT_BATCH_RELOC(voffset,
+ rmesa->radeon.tcl.aos[nr - 1].bo,
+ voffset,
+ RADEON_GEM_DOMAIN_GTT,
+ 0, 0);
+ }
+ END_BATCH();
+ } else {
- if (nr & 1) {
- e32((rmesa->state.aos[nr - 1].aos_size << 0) |
- (rmesa->state.aos[nr - 1].aos_stride << 8));
- e32(rmesa->state.aos[nr - 1].aos_offset + offset * 4 * rmesa->state.aos[nr - 1].aos_stride);
+ BEGIN_BATCH(sz+2+(nr * 2));
+ OUT_BATCH_PACKET3(R300_PACKET3_3D_LOAD_VBPNTR, sz - 1);
+ OUT_BATCH(nr);
+
+ for (i = 0; i + 1 < nr; i += 2) {
+ OUT_BATCH((rmesa->radeon.tcl.aos[i].components << 0) |
+ (rmesa->radeon.tcl.aos[i].stride << 8) |
+ (rmesa->radeon.tcl.aos[i + 1].components << 16) |
+ (rmesa->radeon.tcl.aos[i + 1].stride << 24));
+
+ voffset = rmesa->radeon.tcl.aos[i + 0].offset +
+ offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
+ OUT_BATCH(voffset);
+ voffset = rmesa->radeon.tcl.aos[i + 1].offset +
+ offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
+ OUT_BATCH(voffset);
+ }
+
+ if (nr & 1) {
+ OUT_BATCH((rmesa->radeon.tcl.aos[nr - 1].components << 0) |
+ (rmesa->radeon.tcl.aos[nr - 1].stride << 8));
+ voffset = rmesa->radeon.tcl.aos[nr - 1].offset +
+ offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
+ OUT_BATCH(voffset);
+ }
+ for (i = 0; i + 1 < nr; i += 2) {
+ voffset = rmesa->radeon.tcl.aos[i + 0].offset +
+ offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
+ radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
+ rmesa->radeon.tcl.aos[i+0].bo,
+ RADEON_GEM_DOMAIN_GTT,
+ 0, 0);
+ voffset = rmesa->radeon.tcl.aos[i + 1].offset +
+ offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
+ radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
+ rmesa->radeon.tcl.aos[i+1].bo,
+ RADEON_GEM_DOMAIN_GTT,
+ 0, 0);
+ }
+ if (nr & 1) {
+ voffset = rmesa->radeon.tcl.aos[nr - 1].offset +
+ offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
+ radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
+ rmesa->radeon.tcl.aos[nr-1].bo,
+ RADEON_GEM_DOMAIN_GTT,
+ 0, 0);
+ }
+ END_BATCH();
}
+
}
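
Note: the sz value computed at the top of r300EmitAOS sizes the 3D_LOAD_VBPNTR payload: one dword for the array count, three dwords per pair of arrays (a packed components/stride dword plus two vertex offsets), and two dwords for an unpaired trailing array. The self-contained sketch below only evaluates that formula; vbpntr_payload_dwords() is an illustrative name, not a driver function.

/* Worked example of the 3D_LOAD_VBPNTR payload-size formula from r300EmitAOS. */
#include <stdio.h>

static int vbpntr_payload_dwords(int nr)
{
	/* 1 count dword + 3 dwords per array pair + 2 dwords for a leftover array */
	return 1 + (nr >> 1) * 3 + (nr & 1) * 2;
}

int main(void)
{
	int nr;
	for (nr = 1; nr <= 6; nr++)
		printf("nr=%d -> %d payload dwords\n", nr, vbpntr_payload_dwords(nr));
	/* prints 3, 4, 6, 7, 9, 10 */
	return 0;
}
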
static void r300FireAOS(r300ContextPtr rmesa, int vertex_count, int type)
{
- int cmd_reserved = 0;
- int cmd_written = 0;
- drm_radeon_cmd_header_t *cmd = NULL;
+ BATCH_LOCALS(&rmesa->radeon);
- start_packet3(CP_PACKET3(R300_PACKET3_3D_DRAW_VBUF_2, 0), 0);
- e32(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST | (vertex_count << 16) | type);
+ BEGIN_BATCH(3);
+ OUT_BATCH_PACKET3(R300_PACKET3_3D_DRAW_VBUF_2, 0);
+ OUT_BATCH(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST | (vertex_count << 16) | type);
+ END_BATCH();
}
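
Note: r300FireAOS is the simplest user of the BEGIN_BATCH/OUT_BATCH/END_BATCH macros this patch adopts in place of the old start_packet3()/e32() helpers. The mock below is a deliberately simplified, hypothetical version of that contract (the struct and macros are not the radeon definitions): reserve a dword count, write exactly that many dwords, and verify the count when the packet is closed.

/* Hypothetical mock of the BEGIN_BATCH/OUT_BATCH/END_BATCH contract. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct batch {
	uint32_t buf[64];
	int ndw;	/* dwords written so far */
	int reserved;	/* dwords reserved by BEGIN_BATCH */
};

#define BEGIN_BATCH(b, n)	((b)->reserved = (b)->ndw + (n))
#define OUT_BATCH(b, dw)	do { assert((b)->ndw < (b)->reserved); \
				     (b)->buf[(b)->ndw++] = (dw); } while (0)
#define END_BATCH(b)		assert((b)->ndw == (b)->reserved)

int main(void)
{
	struct batch b = { .ndw = 0, .reserved = 0 };

	BEGIN_BATCH(&b, 3);			/* reserve three dwords, as r300FireAOS does */
	OUT_BATCH(&b, 0xC0001234u);		/* made-up packet header */
	OUT_BATCH(&b, (4u << 16) | 0x2u);	/* made-up vertex count / primitive type */
	OUT_BATCH(&b, 0u);
	END_BATCH(&b);				/* asserts if the dword count is off */

	printf("emitted %d dwords\n", b.ndw);
	return 0;
}
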
static void r300RunRenderPrimitive(r300ContextPtr rmesa, GLcontext * ctx,
@@ -269,6 +347,12 @@ static void r300RunRenderPrimitive(r300ContextPtr rmesa, GLcontext * ctx,
if (type < 0 || num_verts <= 0)
return;
+ /* Make space for at least 64 dwords.
+ * This is supposed to ensure that we can get all rendering
+ * commands into a single command buffer.
+ */
+ rcommonEnsureCmdBufSpace(&rmesa->radeon, 64, __FUNCTION__);
+
if (vb->Elts) {
if (num_verts > 65535) {
/* not implemented yet */
@@ -287,12 +371,13 @@ static void r300RunRenderPrimitive(r300ContextPtr rmesa, GLcontext * ctx,
* arrays. *sigh*
*/
r300EmitElts(ctx, vb->Elts, num_verts);
- r300EmitAOS(rmesa, rmesa->state.aos_count, start);
- r300FireEB(rmesa, rmesa->state.elt_dma.aos_offset, num_verts, type);
+ r300EmitAOS(rmesa, rmesa->radeon.tcl.aos_count, start);
+ r300FireEB(rmesa, num_verts, type);
} else {
- r300EmitAOS(rmesa, rmesa->state.aos_count, start);
+ r300EmitAOS(rmesa, rmesa->radeon.tcl.aos_count, start);
r300FireAOS(rmesa, num_verts, type);
}
+ COMMIT_BATCH();
}
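
Note: the new rcommonEnsureCmdBufSpace() call above reserves room for at least 64 dwords before the primitive packets are emitted, so the state, AOS and draw packets land in one command buffer instead of straddling a flush. The sketch below illustrates only that flush-if-short idea; the cmdbuf struct, ensure_space() and flush() are invented for the example, not the radeon implementation.

/* Sketch of the "ensure space or flush first" idea behind rcommonEnsureCmdBufSpace(). */
#include <stdint.h>
#include <stdio.h>

#define CMDBUF_DWORDS 256

struct cmdbuf {
	uint32_t buf[CMDBUF_DWORDS];
	int ndw;	/* dwords already written */
};

static void flush(struct cmdbuf *cs)
{
	printf("flushing %d dwords\n", cs->ndw);	/* hand the buffer to the hardware */
	cs->ndw = 0;
}

static void ensure_space(struct cmdbuf *cs, int dwords)
{
	/* Flush now if the remaining room cannot hold the whole primitive,
	 * so the upcoming packets land in a single command buffer. */
	if (cs->ndw + dwords > CMDBUF_DWORDS)
		flush(cs);
}

int main(void)
{
	struct cmdbuf cs = { .ndw = 250 };
	ensure_space(&cs, 64);		/* 250 + 64 > 256, so this flushes first */
	printf("%d dwords free\n", CMDBUF_DWORDS - cs.ndw);
	return 0;
}
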
static GLboolean r300RunRender(GLcontext * ctx,
@@ -303,7 +388,6 @@ static GLboolean r300RunRender(GLcontext * ctx,
TNLcontext *tnl = TNL_CONTEXT(ctx);
struct vertex_buffer *vb = &tnl->vb;
-
if (RADEON_DEBUG & DEBUG_PRIMS)
fprintf(stderr, "%s\n", __FUNCTION__);
@@ -314,7 +398,7 @@ static GLboolean r300RunRender(GLcontext * ctx,
r300UpdateShaderStates(rmesa);
r300EmitCacheFlush(rmesa);
- r300EmitState(rmesa);
+ radeonEmitState(&rmesa->radeon);
for (i = 0; i < vb->PrimitiveCount; i++) {
GLuint prim = _tnl_translate_prim(&vb->Primitive[i]);
@@ -325,11 +409,7 @@ static GLboolean r300RunRender(GLcontext * ctx,
r300EmitCacheFlush(rmesa);
-#ifdef USER_BUFFERS
- r300UseArrays(ctx);
-#endif
-
- r300ReleaseArrays(ctx);
+ radeonReleaseArrays(ctx, ~0);
return GL_FALSE;
}
@@ -349,38 +429,19 @@ static int r300Fallback(GLcontext * ctx)
r300ContextPtr r300 = R300_CONTEXT(ctx);
const unsigned back = ctx->Stencil._BackFace;
- /* Do we need to use new-style shaders?
- * Also is there a better way to do this? */
- if (r300->radeon.radeonScreen->chip_family >= CHIP_FAMILY_RV515) {
- struct r500_fragment_program *fp = (struct r500_fragment_program *)
- (char *)ctx->FragmentProgram._Current;
- if (fp) {
- if (!fp->translated) {
- r500TranslateFragmentShader(r300, fp);
- FALLBACK_IF(!fp->translated);
- }
- }
- } else {
- struct r300_fragment_program *fp = (struct r300_fragment_program *)
- (char *)ctx->FragmentProgram._Current;
- if (fp) {
- if (!fp->translated) {
- r300TranslateFragmentShader(r300, fp);
- FALLBACK_IF(!fp->translated);
- }
- }
+ FALLBACK_IF(r300->radeon.Fallback);
+
+ struct r300_fragment_program *fp = (struct r300_fragment_program *) ctx->FragmentProgram._Current;
+ if (fp && !fp->translated) {
+ r300TranslateFragmentShader(ctx, ctx->FragmentProgram._Current);
+ FALLBACK_IF(fp->error);
}
FALLBACK_IF(ctx->RenderMode != GL_RENDER);
- /* If GL_EXT_stencil_two_side is disabled, this fallback check can
- * be removed.
- */
- FALLBACK_IF(ctx->Stencil.Ref[0] != ctx->Stencil.Ref[back]
- || ctx->Stencil.ValueMask[0] !=
- ctx->Stencil.ValueMask[back]
- || ctx->Stencil.WriteMask[0] !=
- ctx->Stencil.WriteMask[back]);
+ FALLBACK_IF(ctx->Stencil.Enabled && (ctx->Stencil.Ref[0] != ctx->Stencil.Ref[back]
+ || ctx->Stencil.ValueMask[0] != ctx->Stencil.ValueMask[back]
+ || ctx->Stencil.WriteMask[0] != ctx->Stencil.WriteMask[back]));
if (ctx->Extensions.NV_point_sprite || ctx->Extensions.ARB_point_sprite)
FALLBACK_IF(ctx->Point.PointSprite);
@@ -410,6 +471,9 @@ static GLboolean r300RunNonTCLRender(GLcontext * ctx,
if (!(rmesa->radeon.radeonScreen->chip_flags & RADEON_CHIPSET_TCL))
return GL_TRUE;
+ if (!r300ValidateBuffers(ctx))
+ return GL_TRUE;
+
return r300RunRender(ctx, stage);
}
@@ -432,6 +496,9 @@ static GLboolean r300RunTCLRender(GLcontext * ctx,
return GL_TRUE;
}
+ if (!r300ValidateBuffers(ctx))
+ return GL_TRUE;
+
r300UpdateShaders(rmesa);
vp = (struct r300_vertex_program *)CURRENT_VERTEX_SHADER(ctx);