Diffstat (limited to 'src/mesa/drivers/dri/r300/r300_render.c')
 src/mesa/drivers/dri/r300/r300_render.c | 165
 1 file changed, 28 insertions(+), 137 deletions(-)
diff --git a/src/mesa/drivers/dri/r300/r300_render.c b/src/mesa/drivers/dri/r300/r300_render.c
index 0864558e8d..63b21b9379 100644
--- a/src/mesa/drivers/dri/r300/r300_render.c
+++ b/src/mesa/drivers/dri/r300/r300_render.c
@@ -202,23 +202,23 @@ static void inline fire_EB(r300ContextPtr rmesa, unsigned long addr, int vertex_
unsigned long t_addr;
unsigned long magic_1, magic_2;
GLcontext *ctx;
- ctx = rmesa->radeon.glCtx;
-
+ ctx = rmesa->radeon.glCtx;
+
assert(elt_size == 2 || elt_size == 4);
-
+
if(addr & (elt_size-1)){
WARN_ONCE("Badly aligned buffer\n");
return ;
}
#ifdef OPTIMIZE_ELTS
addr_a = 0;
-
+
magic_1 = (addr % 32) / 4;
t_addr = addr & (~0x1d);
magic_2 = (vertex_count + 1 + (t_addr & 0x2)) / 2 + magic_1;
-
+
check_space(6);
-
+
start_packet3(RADEON_CP_PACKET3_3D_DRAW_INDX_2, 0);
if(elt_size == 4){
e32(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (vertex_count<<16) | type | R300_VAP_VF_CNTL__INDEX_SIZE_32bit);
@@ -234,7 +234,7 @@ static void inline fire_EB(r300ContextPtr rmesa, unsigned long addr, int vertex_
e32(R300_EB_UNK1 | (magic_1 << 16) | R300_EB_UNK2);
e32(t_addr);
}
-
+
if(elt_size == 4){
e32(vertex_count /*+ addr_a/4*/); /* Total number of dwords needed? */
} else {
@@ -249,11 +249,11 @@ static void inline fire_EB(r300ContextPtr rmesa, unsigned long addr, int vertex_
#endif
#else
(void)magic_2, (void)magic_1, (void)t_addr;
-
+
addr_a = 0;
-
+
check_space(6);
-
+
start_packet3(RADEON_CP_PACKET3_3D_DRAW_INDX_2, 0);
if(elt_size == 4){
e32(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (vertex_count<<16) | type | R300_VAP_VF_CNTL__INDEX_SIZE_32bit);
@@ -264,14 +264,14 @@ static void inline fire_EB(r300ContextPtr rmesa, unsigned long addr, int vertex_
start_packet3(RADEON_CP_PACKET3_INDX_BUFFER, 2);
e32(R300_EB_UNK1 | (0 << 16) | R300_EB_UNK2);
e32(addr /*& 0xffffffe3*/);
-
+
if(elt_size == 4){
e32(vertex_count /*+ addr_a/4*/); /* Total number of dwords needed? */
} else {
e32((vertex_count+1)/2 /*+ addr_a/4*/); /* Total number of dwords needed? */
}
//cp_delay(rmesa, 1);
-#endif
+#endif
}
static void r300_render_vb_primitive(r300ContextPtr rmesa,
@@ -303,12 +303,12 @@ static void r300_render_vb_primitive(r300ContextPtr rmesa,
//e32(rmesa->state.Elts[start]);
return;
}
-
+
if(num_verts > 65535){ /* not implemented yet */
WARN_ONCE("Too many elts\n");
return;
}
-
+
r300EmitElts(ctx, rmesa->state.VB.Elts, num_verts, rmesa->state.VB.elt_size);
fire_EB(rmesa, rmesa->state.elt_dma.aos_offset, num_verts, type, rmesa->state.VB.elt_size);
#endif
@@ -328,7 +328,7 @@ GLboolean r300_run_vb_render(GLcontext *ctx,
int cmd_written = 0;
drm_radeon_cmd_header_t *cmd = NULL;
-
+
if (RADEON_DEBUG & DEBUG_PRIMS)
fprintf(stderr, "%s\n", __FUNCTION__);
@@ -336,26 +336,26 @@ GLboolean r300_run_vb_render(GLcontext *ctx,
TNLcontext *tnl = TNL_CONTEXT(ctx);
radeon_vb_to_rvb(rmesa, VB, &tnl->vb);
}
-
+
r300UpdateShaders(rmesa);
if (r300EmitArrays(ctx))
return GL_TRUE;
r300UpdateShaderStates(rmesa);
-
+
reg_start(R300_RB3D_DSTCACHE_CTLSTAT,0);
e32(R300_RB3D_DSTCACHE_UNKNOWN_0A);
reg_start(R300_RB3D_ZCACHE_CTLSTAT,0);
e32(R300_RB3D_ZCACHE_UNKNOWN_03);
-
+
r300EmitState(rmesa);
-
+
for(i=0; i < VB->PrimitiveCount; i++){
GLuint prim = _tnl_translate_prim(&VB->Primitive[i]);
GLuint start = VB->Primitive[i].start;
GLuint length = VB->Primitive[i].count;
-
+
r300_render_vb_primitive(rmesa, ctx, start, start + length, prim);
}
@@ -453,11 +453,6 @@ int r300Fallback(GLcontext *ctx)
/* GL_POINT_SPRITE_NV */
FALLBACK_IF(ctx->Point.PointSprite);
- /* Fallback for rectangular texture */
- for (i = 0; i < ctx->Const.MaxTextureUnits; i++)
- if (ctx->Texture.Unit[i]._ReallyEnabled & TEXTURE_RECT_BIT)
- return R300_FALLBACK_TCL;
-
return R300_FALLBACK_NONE;
}
@@ -494,19 +489,19 @@ static GLboolean r300_run_tcl_render(GLcontext *ctx,
{
r300ContextPtr rmesa = R300_CONTEXT(ctx);
struct r300_vertex_program *vp;
-
+
hw_tcl_on=future_hw_tcl_on;
-
+
if (RADEON_DEBUG & DEBUG_PRIMS)
fprintf(stderr, "%s\n", __FUNCTION__);
if(hw_tcl_on == GL_FALSE)
return GL_TRUE;
-
+
if (r300Fallback(ctx) >= R300_FALLBACK_TCL) {
hw_tcl_on = GL_FALSE;
return GL_TRUE;
}
-
+
r300UpdateShaders(rmesa);
vp = (struct r300_vertex_program *)CURRENT_VERTEX_SHADER(ctx);
@@ -520,13 +515,13 @@ static GLboolean r300_run_tcl_render(GLcontext *ctx,
TNLcontext *tnl = TNL_CONTEXT(ctx);
struct tnl_cache *cache;
struct tnl_cache_item *c;
-
+
cache = tnl->vp_cache;
c = cache->items[0xc000cc0e % cache->size];
-
+
if(c && c->data == vp)
vp->native = GL_FALSE;
-
+
#endif
#if 0
vp->native = GL_FALSE;
@@ -536,7 +531,7 @@ static GLboolean r300_run_tcl_render(GLcontext *ctx,
return GL_TRUE;
}
//r300UpdateShaderStates(rmesa);
-
+
return r300_run_vb_render(ctx, stage);
}
@@ -549,107 +544,3 @@ const struct tnl_pipeline_stage _r300_tcl_stage = {
r300_run_tcl_render /* run */
};
-/* R300 texture rectangle expects coords in 0..1 range, not 0..dimension
- * as in the extension spec. Need to translate here.
- *
- * Note that swrast expects 0..dimension, so if a fallback is active,
- * don't do anything. (Maybe need to configure swrast to match hw)
- */
-struct texrect_stage_data {
- GLvector4f texcoord[MAX_TEXTURE_UNITS];
-};
-
-#define TEXRECT_STAGE_DATA(stage) ((struct texrect_stage_data *)stage->privatePtr)
-
-
-static GLboolean run_texrect_stage( GLcontext *ctx,
- struct tnl_pipeline_stage *stage )
-{
- struct texrect_stage_data *store = TEXRECT_STAGE_DATA(stage);
- r300ContextPtr rmesa = R300_CONTEXT(ctx);
- TNLcontext *tnl = TNL_CONTEXT(ctx);
- struct vertex_buffer *VB = &tnl->vb;
- GLuint i;
-
- if (rmesa->radeon.Fallback)
- return GL_TRUE;
-
- for (i = 0 ; i < ctx->Const.MaxTextureUnits ; i++) {
- if (ctx->Texture.Unit[i]._ReallyEnabled & TEXTURE_RECT_BIT) {
- struct gl_texture_object *texObj = ctx->Texture.Unit[i].CurrentRect;
- struct gl_texture_image *texImage = texObj->Image[0][texObj->BaseLevel];
- const GLfloat iw = 1.0/texImage->Width;
- const GLfloat ih = 1.0/texImage->Height;
- GLfloat *in = (GLfloat *)VB->TexCoordPtr[i]->data;
- GLint instride = VB->TexCoordPtr[i]->stride;
- GLfloat (*out)[4] = store->texcoord[i].data;
- GLint j;
-
- store->texcoord[i].size = VB->TexCoordPtr[i]->size;
- for (j = 0 ; j < VB->Count ; j++) {
- switch (VB->TexCoordPtr[i]->size) {
- case 4:
- out[j][3] = in[3];
- /* fallthrough */
- case 3:
- out[j][2] = in[2];
- /* fallthrough */
- default:
- out[j][0] = in[0] * iw;
- out[j][1] = in[1] * ih;
- }
- in = (GLfloat *)((GLubyte *)in + instride);
- }
-
- VB->AttribPtr[VERT_ATTRIB_TEX0+i] = VB->TexCoordPtr[i] = &store->texcoord[i];
- }
- }
-
- return GL_TRUE;
-}
-
-
-/* Called the first time stage->run() is invoked.
- */
-static GLboolean alloc_texrect_data( GLcontext *ctx,
- struct tnl_pipeline_stage *stage )
-{
- struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb;
- struct texrect_stage_data *store;
- GLuint i;
-
- stage->privatePtr = CALLOC(sizeof(*store));
- store = TEXRECT_STAGE_DATA(stage);
- if (!store)
- return GL_FALSE;
-
- for (i = 0 ; i < ctx->Const.MaxTextureUnits ; i++)
- _mesa_vector4f_alloc( &store->texcoord[i], 0, VB->Size, 32 );
-
- return GL_TRUE;
-}
-
-static void free_texrect_data( struct tnl_pipeline_stage *stage )
-{
- struct texrect_stage_data *store = TEXRECT_STAGE_DATA(stage);
- GLuint i;
-
- if (store) {
- for (i = 0 ; i < MAX_TEXTURE_UNITS ; i++)
- if (store->texcoord[i].data)
- _mesa_vector4f_free( &store->texcoord[i] );
- FREE( store );
- stage->privatePtr = NULL;
- }
-}
-
-const struct tnl_pipeline_stage _r300_texrect_stage =
-{
- "r300 texrect stage", /* name */
- NULL,
- alloc_texrect_data,
- free_texrect_data,
- NULL,
- run_texrect_stage
-};
-
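For reference, the coordinate translation that the removed run_texrect_stage performed amounts to scaling GL_TEXTURE_RECTANGLE coordinates from the 0..width / 0..height range of the extension spec into the 0..1 range. Below is a minimal standalone sketch of that scaling, not part of the patch; the helper name and the flat float arrays stand in for the TNL vertex-buffer vectors and are illustrative only.

/* Minimal sketch (not part of the patch): scale ARB_texture_rectangle
 * coordinates from the 0..dimension range of the spec into the 0..1
 * range, as the removed run_texrect_stage did for each enabled unit.
 * Helper name and plain float arrays are hypothetical.
 */
static void scale_rect_texcoords(float (*out)[4], const float (*in)[4],
                                 int count, int width, int height)
{
	const float iw = 1.0f / (float)width;
	const float ih = 1.0f / (float)height;
	int j;

	for (j = 0; j < count; j++) {
		out[j][0] = in[j][0] * iw;	/* s / width  */
		out[j][1] = in[j][1] * ih;	/* t / height */
		out[j][2] = in[j][2];		/* r unchanged */
		out[j][3] = in[j][3];		/* q unchanged */
	}
}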