Diffstat (limited to 'src/mesa/drivers/dri/r600/r700_chip.c')
-rw-r--r--  src/mesa/drivers/dri/r600/r700_chip.c  755
1 file changed, 428 insertions, 327 deletions
diff --git a/src/mesa/drivers/dri/r600/r700_chip.c b/src/mesa/drivers/dri/r600/r700_chip.c
index 0fb355a0b6..2d68f021df 100644
--- a/src/mesa/drivers/dri/r600/r700_chip.c
+++ b/src/mesa/drivers/dri/r600/r700_chip.c
@@ -27,6 +27,7 @@
#include "main/imports.h"
#include "main/glheader.h"
+#include "main/simple_list.h"
#include "r600_context.h"
#include "r600_cmdbuf.h"
@@ -40,222 +41,6 @@
#include "radeon_mipmap_tree.h"
-#define LINK_STATES(reg) \
-do \
-{ \
- pStateListWork->puiValue = (unsigned int*)&(r700->reg); \
- pStateListWork->unOffset = mm##reg - ASIC_CONTEXT_BASE_INDEX; \
- pStateListWork->pNext = pStateListWork + 1; \
- pStateListWork++; \
-}while(0)
-
-GLboolean r700InitChipObject(context_t *context)
-{
- ContextState * pStateListWork;
-
- R700_CHIP_CONTEXT *r700 = &context->hw;
-
- /* init state list */
- r700->pStateList = (ContextState*) MALLOC (sizeof(ContextState)*sizeof(R700_CHIP_CONTEXT)/sizeof(unsigned int));
- pStateListWork = r700->pStateList;
-
- // misc
- LINK_STATES(TA_CNTL_AUX);
- LINK_STATES(VC_ENHANCE);
- LINK_STATES(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ);
- LINK_STATES(DB_DEBUG);
- LINK_STATES(DB_WATERMARKS);
-
- // SC
- LINK_STATES(PA_SC_SCREEN_SCISSOR_TL);
- LINK_STATES(PA_SC_SCREEN_SCISSOR_BR);
- LINK_STATES(PA_SC_WINDOW_OFFSET);
- LINK_STATES(PA_SC_WINDOW_SCISSOR_TL);
- LINK_STATES(PA_SC_WINDOW_SCISSOR_BR);
- LINK_STATES(PA_SC_CLIPRECT_RULE);
- LINK_STATES(PA_SC_CLIPRECT_0_TL);
- LINK_STATES(PA_SC_CLIPRECT_0_BR);
- LINK_STATES(PA_SC_CLIPRECT_1_TL);
- LINK_STATES(PA_SC_CLIPRECT_1_BR);
- LINK_STATES(PA_SC_CLIPRECT_2_TL);
- LINK_STATES(PA_SC_CLIPRECT_2_BR);
- LINK_STATES(PA_SC_CLIPRECT_3_TL);
- LINK_STATES(PA_SC_CLIPRECT_3_BR);
- LINK_STATES(PA_SC_EDGERULE);
- LINK_STATES(PA_SC_GENERIC_SCISSOR_TL);
- LINK_STATES(PA_SC_GENERIC_SCISSOR_BR);
- LINK_STATES(PA_SC_LINE_STIPPLE);
- LINK_STATES(PA_SC_MPASS_PS_CNTL);
- LINK_STATES(PA_SC_MODE_CNTL);
- LINK_STATES(PA_SC_LINE_CNTL);
- LINK_STATES(PA_SC_AA_CONFIG);
- LINK_STATES(PA_SC_AA_SAMPLE_LOCS_MCTX);
- LINK_STATES(PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX);
- LINK_STATES(PA_SC_AA_MASK);
-
- // SU
- LINK_STATES(PA_SU_POINT_SIZE);
- LINK_STATES(PA_SU_POINT_MINMAX);
- LINK_STATES(PA_SU_LINE_CNTL);
- LINK_STATES(PA_SU_SC_MODE_CNTL);
- LINK_STATES(PA_SU_VTX_CNTL);
- LINK_STATES(PA_SU_POLY_OFFSET_DB_FMT_CNTL);
- LINK_STATES(PA_SU_POLY_OFFSET_CLAMP);
- LINK_STATES(PA_SU_POLY_OFFSET_FRONT_SCALE);
- LINK_STATES(PA_SU_POLY_OFFSET_FRONT_OFFSET);
- LINK_STATES(PA_SU_POLY_OFFSET_BACK_SCALE);
- LINK_STATES(PA_SU_POLY_OFFSET_BACK_OFFSET);
-
- // CL
- LINK_STATES(PA_CL_CLIP_CNTL);
- LINK_STATES(PA_CL_VTE_CNTL);
- LINK_STATES(PA_CL_VS_OUT_CNTL);
- LINK_STATES(PA_CL_NANINF_CNTL);
- LINK_STATES(PA_CL_GB_VERT_CLIP_ADJ);
- LINK_STATES(PA_CL_GB_VERT_DISC_ADJ);
- LINK_STATES(PA_CL_GB_HORZ_CLIP_ADJ);
- LINK_STATES(PA_CL_GB_HORZ_DISC_ADJ);
-
- // CB
- LINK_STATES(CB_CLEAR_RED_R6XX);
- LINK_STATES(CB_CLEAR_GREEN_R6XX);
- LINK_STATES(CB_CLEAR_BLUE_R6XX);
- LINK_STATES(CB_CLEAR_ALPHA_R6XX);
- LINK_STATES(CB_TARGET_MASK);
- LINK_STATES(CB_SHADER_MASK);
- LINK_STATES(CB_BLEND_RED);
- LINK_STATES(CB_BLEND_GREEN);
- LINK_STATES(CB_BLEND_BLUE);
- LINK_STATES(CB_BLEND_ALPHA);
- LINK_STATES(CB_FOG_RED_R6XX);
- LINK_STATES(CB_FOG_GREEN_R6XX);
- LINK_STATES(CB_FOG_BLUE_R6XX);
- LINK_STATES(CB_SHADER_CONTROL);
- LINK_STATES(CB_COLOR_CONTROL);
- LINK_STATES(CB_CLRCMP_CONTROL);
- LINK_STATES(CB_CLRCMP_SRC);
- LINK_STATES(CB_CLRCMP_DST);
- LINK_STATES(CB_CLRCMP_MSK);
- LINK_STATES(CB_BLEND_CONTROL);
-
- //DB
- LINK_STATES(DB_HTILE_DATA_BASE);
- LINK_STATES(DB_STENCIL_CLEAR);
- LINK_STATES(DB_DEPTH_CLEAR);
- LINK_STATES(DB_STENCILREFMASK);
- LINK_STATES(DB_STENCILREFMASK_BF);
- LINK_STATES(DB_DEPTH_CONTROL);
- LINK_STATES(DB_SHADER_CONTROL);
- LINK_STATES(DB_RENDER_CONTROL);
- LINK_STATES(DB_RENDER_OVERRIDE);
- LINK_STATES(DB_HTILE_SURFACE);
- LINK_STATES(DB_ALPHA_TO_MASK);
-
- // SX
- LINK_STATES(SX_MISC);
- LINK_STATES(SX_ALPHA_TEST_CONTROL);
- LINK_STATES(SX_ALPHA_REF);
-
- // VGT
- LINK_STATES(VGT_MAX_VTX_INDX);
- LINK_STATES(VGT_MIN_VTX_INDX);
- LINK_STATES(VGT_INDX_OFFSET);
- LINK_STATES(VGT_MULTI_PRIM_IB_RESET_INDX);
- LINK_STATES(VGT_OUTPUT_PATH_CNTL);
- LINK_STATES(VGT_HOS_CNTL);
- LINK_STATES(VGT_HOS_MAX_TESS_LEVEL);
- LINK_STATES(VGT_HOS_MIN_TESS_LEVEL);
- LINK_STATES(VGT_HOS_REUSE_DEPTH);
- LINK_STATES(VGT_GROUP_PRIM_TYPE);
- LINK_STATES(VGT_GROUP_FIRST_DECR);
- LINK_STATES(VGT_GROUP_DECR);
- LINK_STATES(VGT_GROUP_VECT_0_CNTL);
- LINK_STATES(VGT_GROUP_VECT_1_CNTL);
- LINK_STATES(VGT_GROUP_VECT_0_FMT_CNTL);
- LINK_STATES(VGT_GROUP_VECT_1_FMT_CNTL);
- LINK_STATES(VGT_GS_MODE);
- LINK_STATES(VGT_PRIMITIVEID_EN);
- LINK_STATES(VGT_MULTI_PRIM_IB_RESET_EN);
- LINK_STATES(VGT_INSTANCE_STEP_RATE_0);
- LINK_STATES(VGT_INSTANCE_STEP_RATE_1);
- LINK_STATES(VGT_STRMOUT_EN);
- LINK_STATES(VGT_REUSE_OFF);
- LINK_STATES(VGT_VTX_CNT_EN);
- LINK_STATES(VGT_STRMOUT_BUFFER_EN);
-
- LINK_STATES(SQ_VTX_SEMANTIC_0);
- LINK_STATES(SQ_VTX_SEMANTIC_1);
- LINK_STATES(SQ_VTX_SEMANTIC_2);
- LINK_STATES(SQ_VTX_SEMANTIC_3);
- LINK_STATES(SQ_VTX_SEMANTIC_4);
- LINK_STATES(SQ_VTX_SEMANTIC_5);
- LINK_STATES(SQ_VTX_SEMANTIC_6);
- LINK_STATES(SQ_VTX_SEMANTIC_7);
- LINK_STATES(SQ_VTX_SEMANTIC_8);
- LINK_STATES(SQ_VTX_SEMANTIC_9);
- LINK_STATES(SQ_VTX_SEMANTIC_10);
- LINK_STATES(SQ_VTX_SEMANTIC_11);
- LINK_STATES(SQ_VTX_SEMANTIC_12);
- LINK_STATES(SQ_VTX_SEMANTIC_13);
- LINK_STATES(SQ_VTX_SEMANTIC_14);
- LINK_STATES(SQ_VTX_SEMANTIC_15);
- LINK_STATES(SQ_VTX_SEMANTIC_16);
- LINK_STATES(SQ_VTX_SEMANTIC_17);
- LINK_STATES(SQ_VTX_SEMANTIC_18);
- LINK_STATES(SQ_VTX_SEMANTIC_19);
- LINK_STATES(SQ_VTX_SEMANTIC_20);
- LINK_STATES(SQ_VTX_SEMANTIC_21);
- LINK_STATES(SQ_VTX_SEMANTIC_22);
- LINK_STATES(SQ_VTX_SEMANTIC_23);
- LINK_STATES(SQ_VTX_SEMANTIC_24);
- LINK_STATES(SQ_VTX_SEMANTIC_25);
- LINK_STATES(SQ_VTX_SEMANTIC_26);
- LINK_STATES(SQ_VTX_SEMANTIC_27);
- LINK_STATES(SQ_VTX_SEMANTIC_28);
- LINK_STATES(SQ_VTX_SEMANTIC_29);
- LINK_STATES(SQ_VTX_SEMANTIC_30);
- LINK_STATES(SQ_VTX_SEMANTIC_31);
-
- // SPI
- LINK_STATES(SPI_VS_OUT_ID_0);
- LINK_STATES(SPI_VS_OUT_ID_1);
- LINK_STATES(SPI_VS_OUT_ID_2);
- LINK_STATES(SPI_VS_OUT_ID_3);
- LINK_STATES(SPI_VS_OUT_ID_4);
- LINK_STATES(SPI_VS_OUT_ID_5);
- LINK_STATES(SPI_VS_OUT_ID_6);
- LINK_STATES(SPI_VS_OUT_ID_7);
- LINK_STATES(SPI_VS_OUT_ID_8);
- LINK_STATES(SPI_VS_OUT_ID_9);
-
- LINK_STATES(SPI_VS_OUT_CONFIG);
- LINK_STATES(SPI_THREAD_GROUPING);
- LINK_STATES(SPI_PS_IN_CONTROL_0);
- LINK_STATES(SPI_PS_IN_CONTROL_1);
- LINK_STATES(SPI_INTERP_CONTROL_0);
- LINK_STATES(SPI_INPUT_Z);
- LINK_STATES(SPI_FOG_CNTL);
- LINK_STATES(SPI_FOG_FUNC_SCALE);
- LINK_STATES(SPI_FOG_FUNC_BIAS);
-
- // SQ
- LINK_STATES(SQ_ESGS_RING_ITEMSIZE);
- LINK_STATES(SQ_GSVS_RING_ITEMSIZE);
- LINK_STATES(SQ_ESTMP_RING_ITEMSIZE);
- LINK_STATES(SQ_GSTMP_RING_ITEMSIZE);
- LINK_STATES(SQ_VSTMP_RING_ITEMSIZE);
- LINK_STATES(SQ_PSTMP_RING_ITEMSIZE);
- LINK_STATES(SQ_FBUF_RING_ITEMSIZE);
- LINK_STATES(SQ_REDUC_RING_ITEMSIZE);
- //LINK_STATES(SQ_GS_VERT_ITEMSIZE);
-
- pStateListWork->puiValue = (unsigned int*)&(r700->SQ_GS_VERT_ITEMSIZE);
- pStateListWork->unOffset = mmSQ_GS_VERT_ITEMSIZE - ASIC_CONTEXT_BASE_INDEX;
- pStateListWork->pNext = NULL; /* END OF STATE LIST */
-
- return GL_TRUE;
-}
-
GLboolean r700SendTextureState(context_t *context)
{
unsigned int i;
@@ -276,11 +61,16 @@ GLboolean r700SendTextureState(context_t *context)
RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM,
0, TC_ACTION_ENA_bit);
- BEGIN_BATCH_NO_AUTOSTATE(9);
+ BEGIN_BATCH_NO_AUTOSTATE(9 + 4);
R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
R600_OUT_BATCH(i * 7);
R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE0);
R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE1);
+ R600_OUT_BATCH(0); /* r700->textures[i]->SQ_TEX_RESOURCE2 */
+ R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE3);
+ R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE4);
+ R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE5);
+ R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE6);
R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE2,
bo,
0,
@@ -289,9 +79,6 @@ GLboolean r700SendTextureState(context_t *context)
bo,
r700->textures[i]->SQ_TEX_RESOURCE3,
RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
- R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE4);
- R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE5);
- R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE6);
END_BATCH();
BEGIN_BATCH_NO_AUTOSTATE(5);
@@ -362,22 +149,21 @@ void r700SetupVTXConstants(GLcontext * ctx,
SETfield(uSQ_VTX_CONSTANT_WORD6_0, SQ_TEX_VTX_VALID_BUFFER,
SQ_TEX_RESOURCE_WORD6_0__TYPE_shift, SQ_TEX_RESOURCE_WORD6_0__TYPE_mask);
- BEGIN_BATCH_NO_AUTOSTATE(9);
+ BEGIN_BATCH_NO_AUTOSTATE(9 + 2);
R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
R600_OUT_BATCH((nStreamID + SQ_FETCH_RESOURCE_VS_OFFSET) * FETCH_RESOURCE_STRIDE);
-
- R600_OUT_BATCH_RELOC(uSQ_VTX_CONSTANT_WORD0_0,
- paos->bo,
- uSQ_VTX_CONSTANT_WORD0_0,
- RADEON_GEM_DOMAIN_GTT, 0, 0);
+ R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD0_0);
R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD1_0);
R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD2_0);
R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD3_0);
R600_OUT_BATCH(0);
R600_OUT_BATCH(0);
R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD6_0);
-
+ R600_OUT_BATCH_RELOC(uSQ_VTX_CONSTANT_WORD0_0,
+ paos->bo,
+ uSQ_VTX_CONSTANT_WORD0_0,
+ RADEON_GEM_DOMAIN_GTT, 0, 0);
END_BATCH();
COMMIT_BATCH();
@@ -408,7 +194,6 @@ int r700SetupStreams(GLcontext * ctx)
END_BATCH();
COMMIT_BATCH();
- context->radeon.tcl.aos_count = 0;
for(i=0; i<VERT_ATTRIB_MAX; i++)
{
unBit = 1 << i;
@@ -429,83 +214,16 @@ int r700SetupStreams(GLcontext * ctx)
(unsigned int)context->radeon.tcl.aos[j].stride * 4,
(unsigned int)context->radeon.tcl.aos[j].count);
j++;
- context->radeon.tcl.aos_count++;
}
}
+ context->radeon.tcl.aos_count = j;
return R600_FALLBACK_NONE;
}
-GLboolean r700SendContextStates(context_t *context)
-{
- BATCH_LOCALS(&context->radeon);
-
- R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
-
- ContextState * pState = r700->pStateList;
- ContextState * pInit;
- unsigned int toSend;
- unsigned int ui;
-
- while(NULL != pState)
- {
- toSend = 1;
-
- pInit = pState;
-
- while(NULL != pState->pNext)
- {
- if ((pState->pNext->unOffset - pState->unOffset) > 1)
- {
- break;
- }
- else
- {
- pState = pState->pNext;
- toSend++;
- }
- }
-
- pState = pState->pNext;
-
- BEGIN_BATCH_NO_AUTOSTATE(toSend + 2);
- R600_OUT_BATCH_REGSEQ(((pInit->unOffset + ASIC_CONTEXT_BASE_INDEX)<<2), toSend);
- for(ui=0; ui<toSend; ui++)
- {
- R600_OUT_BATCH(*(pInit->puiValue));
- pInit = pInit->pNext;
- };
- END_BATCH();
- };
-
- /* todo:
- * - split this into a separate function?
- * - only emit the ones we use
- */
- BEGIN_BATCH_NO_AUTOSTATE(2 + R700_MAX_SHADER_EXPORTS);
- R600_OUT_BATCH_REGSEQ(SPI_PS_INPUT_CNTL_0, R700_MAX_SHADER_EXPORTS);
- for(ui = 0; ui < R700_MAX_SHADER_EXPORTS; ui++)
- R600_OUT_BATCH(r700->SPI_PS_INPUT_CNTL[ui].u32All);
- END_BATCH();
-
- if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
- for (ui = 0; ui < R700_MAX_RENDER_TARGETS; ui++) {
- if (r700->render_target[ui].enabled) {
- BEGIN_BATCH_NO_AUTOSTATE(3);
- R600_OUT_BATCH_REGVAL(CB_BLEND0_CONTROL + (4 * ui),
- r700->render_target[ui].CB_BLEND0_CONTROL.u32All);
- END_BATCH();
- }
- }
- }
-
- COMMIT_BATCH();
-
- return GL_TRUE;
-}
-
-GLboolean r700SendDepthTargetState(context_t *context)
+static void r700SendDepthTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
+ context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
struct radeon_renderbuffer *rrb;
BATCH_LOCALS(&context->radeon);
@@ -513,19 +231,20 @@ GLboolean r700SendDepthTargetState(context_t *context)
rrb = radeon_get_depthbuffer(&context->radeon);
if (!rrb || !rrb->bo) {
fprintf(stderr, "no rrb\n");
- return GL_FALSE;
+ return;
}
- BEGIN_BATCH_NO_AUTOSTATE(8);
+ BEGIN_BATCH_NO_AUTOSTATE(8 + 2);
R600_OUT_BATCH_REGSEQ(DB_DEPTH_SIZE, 2);
R600_OUT_BATCH(r700->DB_DEPTH_SIZE.u32All);
R600_OUT_BATCH(r700->DB_DEPTH_VIEW.u32All);
R600_OUT_BATCH_REGSEQ(DB_DEPTH_BASE, 2);
+ R600_OUT_BATCH(r700->DB_DEPTH_BASE.u32All);
+ R600_OUT_BATCH(r700->DB_DEPTH_INFO.u32All);
R600_OUT_BATCH_RELOC(r700->DB_DEPTH_BASE.u32All,
rrb->bo,
r700->DB_DEPTH_BASE.u32All,
0, RADEON_GEM_DOMAIN_VRAM, 0);
- R600_OUT_BATCH(r700->DB_DEPTH_INFO.u32All);
END_BATCH();
if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
@@ -538,32 +257,31 @@ GLboolean r700SendDepthTargetState(context_t *context)
COMMIT_BATCH();
- r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
- DB_ACTION_ENA_bit | DB_DEST_BASE_ENA_bit);
-
- return GL_TRUE;
}
-GLboolean r700SendRenderTargetState(context_t *context, int id)
+static void r700SendRenderTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
+ context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
struct radeon_renderbuffer *rrb;
BATCH_LOCALS(&context->radeon);
+ int id = 0;
rrb = radeon_get_colorbuffer(&context->radeon);
if (!rrb || !rrb->bo) {
fprintf(stderr, "no rrb\n");
- return GL_FALSE;
+ return;
}
if (id > R700_MAX_RENDER_TARGETS)
- return GL_FALSE;
+ return;
if (!r700->render_target[id].enabled)
- return GL_FALSE;
+ return;
- BEGIN_BATCH_NO_AUTOSTATE(3);
+ BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
R600_OUT_BATCH_REGSEQ(CB_COLOR0_BASE + (4 * id), 1);
+ R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_BASE.u32All);
R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_BASE.u32All,
rrb->bo,
r700->render_target[id].CB_COLOR0_BASE.u32All,
@@ -589,10 +307,6 @@ GLboolean r700SendRenderTargetState(context_t *context, int id)
COMMIT_BATCH();
- r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
- CB_ACTION_ENA_bit | (1 << (id + 6)));
-
- return GL_TRUE;
}
GLboolean r700SendPSState(context_t *context)
@@ -608,8 +322,9 @@ GLboolean r700SendPSState(context_t *context)
r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);
- BEGIN_BATCH_NO_AUTOSTATE(3);
+ BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
R600_OUT_BATCH_REGSEQ(SQ_PGM_START_PS, 1);
+ R600_OUT_BATCH(r700->ps.SQ_PGM_START_PS.u32All);
R600_OUT_BATCH_RELOC(r700->ps.SQ_PGM_START_PS.u32All,
pbo,
r700->ps.SQ_PGM_START_PS.u32All,
@@ -624,6 +339,8 @@ GLboolean r700SendPSState(context_t *context)
COMMIT_BATCH();
+ r700->ps.dirty = GL_FALSE;
+
return GL_TRUE;
}
@@ -640,8 +357,9 @@ GLboolean r700SendVSState(context_t *context)
r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);
- BEGIN_BATCH_NO_AUTOSTATE(3);
+ BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
R600_OUT_BATCH_REGSEQ(SQ_PGM_START_VS, 1);
+ R600_OUT_BATCH(r700->vs.SQ_PGM_START_VS.u32All);
R600_OUT_BATCH_RELOC(r700->vs.SQ_PGM_START_VS.u32All,
pbo,
r700->vs.SQ_PGM_START_VS.u32All,
@@ -655,6 +373,8 @@ GLboolean r700SendVSState(context_t *context)
COMMIT_BATCH();
+ r700->vs.dirty = GL_FALSE;
+
return GL_TRUE;
}
@@ -680,8 +400,9 @@ GLboolean r700SendFSState(context_t *context)
r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);
- BEGIN_BATCH_NO_AUTOSTATE(3);
+ BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
R600_OUT_BATCH_REGSEQ(SQ_PGM_START_FS, 1);
+ R600_OUT_BATCH(r700->fs.SQ_PGM_START_FS.u32All);
R600_OUT_BATCH_RELOC(r700->fs.SQ_PGM_START_FS.u32All,
pbo,
r700->fs.SQ_PGM_START_FS.u32All,
@@ -695,19 +416,23 @@ GLboolean r700SendFSState(context_t *context)
COMMIT_BATCH();
+ r700->fs.dirty = GL_FALSE;
+
return GL_TRUE;
}
-GLboolean r700SendViewportState(context_t *context, int id)
+static void r700SendViewportState(GLcontext *ctx, struct radeon_state_atom *atom)
{
+ context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
BATCH_LOCALS(&context->radeon);
+ int id = 0;
if (id > R700_MAX_VIEWPORTS)
- return GL_FALSE;
+ return;
if (!r700->viewport[id].enabled)
- return GL_FALSE;
+ return;
BEGIN_BATCH_NO_AUTOSTATE(16);
R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_SCISSOR_0_TL + (8 * id), 2);
@@ -727,15 +452,15 @@ GLboolean r700SendViewportState(context_t *context, int id)
COMMIT_BATCH();
- return GL_TRUE;
}
-GLboolean r700SendSQConfig(context_t *context)
+static void r700SendSQConfig(GLcontext *ctx, struct radeon_state_atom *atom)
{
+ context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
BATCH_LOCALS(&context->radeon);
- BEGIN_BATCH_NO_AUTOSTATE(8);
+ BEGIN_BATCH_NO_AUTOSTATE(34);
R600_OUT_BATCH_REGSEQ(SQ_CONFIG, 6);
R600_OUT_BATCH(r700->sq_config.SQ_CONFIG.u32All);
R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_1.u32All);
@@ -743,14 +468,31 @@ GLboolean r700SendSQConfig(context_t *context)
R600_OUT_BATCH(r700->sq_config.SQ_THREAD_RESOURCE_MGMT.u32All);
R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_1.u32All);
R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_2.u32All);
+
+ R600_OUT_BATCH_REGVAL(TA_CNTL_AUX, r700->TA_CNTL_AUX.u32All);
+ R600_OUT_BATCH_REGVAL(VC_ENHANCE, r700->VC_ENHANCE.u32All);
+ R600_OUT_BATCH_REGVAL(R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, r700->SQ_DYN_GPR_CNTL_PS_FLUSH_REQ.u32All);
+ R600_OUT_BATCH_REGVAL(DB_DEBUG, r700->DB_DEBUG.u32All);
+ R600_OUT_BATCH_REGVAL(DB_WATERMARKS, r700->DB_WATERMARKS.u32All);
+
+ R600_OUT_BATCH_REGSEQ(SQ_ESGS_RING_ITEMSIZE, 9);
+ R600_OUT_BATCH(r700->SQ_ESGS_RING_ITEMSIZE.u32All);
+ R600_OUT_BATCH(r700->SQ_GSVS_RING_ITEMSIZE.u32All);
+ R600_OUT_BATCH(r700->SQ_ESTMP_RING_ITEMSIZE.u32All);
+ R600_OUT_BATCH(r700->SQ_GSTMP_RING_ITEMSIZE.u32All);
+ R600_OUT_BATCH(r700->SQ_VSTMP_RING_ITEMSIZE.u32All);
+ R600_OUT_BATCH(r700->SQ_PSTMP_RING_ITEMSIZE.u32All);
+ R600_OUT_BATCH(r700->SQ_FBUF_RING_ITEMSIZE.u32All);
+ R600_OUT_BATCH(r700->SQ_REDUC_RING_ITEMSIZE.u32All);
+ R600_OUT_BATCH(r700->SQ_GS_VERT_ITEMSIZE.u32All);
END_BATCH();
- COMMIT_BATCH();
- return GL_TRUE;
+ COMMIT_BATCH();
}
-GLboolean r700SendUCPState(context_t *context)
+static void r700SendUCPState(GLcontext *ctx, struct radeon_state_atom *atom)
{
+ context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
BATCH_LOCALS(&context->radeon);
int i;
@@ -767,7 +509,366 @@ GLboolean r700SendUCPState(context_t *context)
COMMIT_BATCH();
}
}
+}
- return GL_TRUE;
+static void r700SendSPIState(GLcontext *ctx, struct radeon_state_atom *atom)
+{
+ context_t *context = R700_CONTEXT(ctx);
+ R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
+ BATCH_LOCALS(&context->radeon);
+ unsigned int ui;
+
+ BEGIN_BATCH_NO_AUTOSTATE(59 + R700_MAX_SHADER_EXPORTS);
+
+ R600_OUT_BATCH_REGSEQ(SQ_VTX_SEMANTIC_0, 32);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_0.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_1.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_2.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_3.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_4.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_5.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_6.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_7.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_8.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_9.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_10.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_11.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_12.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_13.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_14.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_15.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_16.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_17.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_18.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_19.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_20.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_21.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_22.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_23.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_24.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_25.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_26.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_27.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_28.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_29.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_30.u32All);
+ R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_31.u32All);
+
+ R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_ID_0, 10);
+ R600_OUT_BATCH(r700->SPI_VS_OUT_ID_0.u32All);
+ R600_OUT_BATCH(r700->SPI_VS_OUT_ID_1.u32All);
+ R600_OUT_BATCH(r700->SPI_VS_OUT_ID_2.u32All);
+ R600_OUT_BATCH(r700->SPI_VS_OUT_ID_3.u32All);
+ R600_OUT_BATCH(r700->SPI_VS_OUT_ID_4.u32All);
+ R600_OUT_BATCH(r700->SPI_VS_OUT_ID_5.u32All);
+ R600_OUT_BATCH(r700->SPI_VS_OUT_ID_6.u32All);
+ R600_OUT_BATCH(r700->SPI_VS_OUT_ID_7.u32All);
+ R600_OUT_BATCH(r700->SPI_VS_OUT_ID_8.u32All);
+ R600_OUT_BATCH(r700->SPI_VS_OUT_ID_9.u32All);
+
+ R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_CONFIG, 9);
+ R600_OUT_BATCH(r700->SPI_VS_OUT_CONFIG.u32All);
+ R600_OUT_BATCH(r700->SPI_THREAD_GROUPING.u32All);
+ R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_0.u32All);
+ R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_1.u32All);
+ R600_OUT_BATCH(r700->SPI_INTERP_CONTROL_0.u32All);
+ R600_OUT_BATCH(r700->SPI_INPUT_Z.u32All);
+ R600_OUT_BATCH(r700->SPI_FOG_CNTL.u32All);
+ R600_OUT_BATCH(r700->SPI_FOG_FUNC_SCALE.u32All);
+ R600_OUT_BATCH(r700->SPI_FOG_FUNC_BIAS.u32All);
+
+ R600_OUT_BATCH_REGSEQ(SPI_PS_INPUT_CNTL_0, R700_MAX_SHADER_EXPORTS);
+ for(ui = 0; ui < R700_MAX_SHADER_EXPORTS; ui++)
+ R600_OUT_BATCH(r700->SPI_PS_INPUT_CNTL[ui].u32All);
+
+ END_BATCH();
+ COMMIT_BATCH();
}
+static void r700SendVGTState(GLcontext *ctx, struct radeon_state_atom *atom)
+{
+ context_t *context = R700_CONTEXT(ctx);
+ R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
+ BATCH_LOCALS(&context->radeon);
+
+ BEGIN_BATCH_NO_AUTOSTATE(41);
+
+ R600_OUT_BATCH_REGSEQ(VGT_MAX_VTX_INDX, 4);
+ R600_OUT_BATCH(r700->VGT_MAX_VTX_INDX.u32All);
+ R600_OUT_BATCH(r700->VGT_MIN_VTX_INDX.u32All);
+ R600_OUT_BATCH(r700->VGT_INDX_OFFSET.u32All);
+ R600_OUT_BATCH(r700->VGT_MULTI_PRIM_IB_RESET_INDX.u32All);
+
+ R600_OUT_BATCH_REGSEQ(VGT_OUTPUT_PATH_CNTL, 13);
+ R600_OUT_BATCH(r700->VGT_OUTPUT_PATH_CNTL.u32All);
+ R600_OUT_BATCH(r700->VGT_HOS_CNTL.u32All);
+ R600_OUT_BATCH(r700->VGT_HOS_MAX_TESS_LEVEL.u32All);
+ R600_OUT_BATCH(r700->VGT_HOS_MIN_TESS_LEVEL.u32All);
+ R600_OUT_BATCH(r700->VGT_HOS_REUSE_DEPTH.u32All);
+ R600_OUT_BATCH(r700->VGT_GROUP_PRIM_TYPE.u32All);
+ R600_OUT_BATCH(r700->VGT_GROUP_FIRST_DECR.u32All);
+ R600_OUT_BATCH(r700->VGT_GROUP_DECR.u32All);
+ R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_CNTL.u32All);
+ R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_CNTL.u32All);
+ R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_FMT_CNTL.u32All);
+ R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_FMT_CNTL.u32All);
+ R600_OUT_BATCH(r700->VGT_GS_MODE.u32All);
+
+ R600_OUT_BATCH_REGVAL(VGT_PRIMITIVEID_EN, r700->VGT_PRIMITIVEID_EN.u32All);
+ R600_OUT_BATCH_REGVAL(VGT_MULTI_PRIM_IB_RESET_EN, r700->VGT_MULTI_PRIM_IB_RESET_EN.u32All);
+ R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_0, r700->VGT_INSTANCE_STEP_RATE_0.u32All);
+ R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_1, r700->VGT_INSTANCE_STEP_RATE_1.u32All);
+
+ R600_OUT_BATCH_REGSEQ(VGT_STRMOUT_EN, 3);
+ R600_OUT_BATCH(r700->VGT_STRMOUT_EN.u32All);
+ R600_OUT_BATCH(r700->VGT_REUSE_OFF.u32All);
+ R600_OUT_BATCH(r700->VGT_VTX_CNT_EN.u32All);
+
+ R600_OUT_BATCH_REGVAL(VGT_STRMOUT_BUFFER_EN, r700->VGT_STRMOUT_BUFFER_EN.u32All);
+
+ END_BATCH();
+ COMMIT_BATCH();
+}
+
+static void r700SendSXState(GLcontext *ctx, struct radeon_state_atom *atom)
+{
+ context_t *context = R700_CONTEXT(ctx);
+ R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
+ BATCH_LOCALS(&context->radeon);
+
+ BEGIN_BATCH_NO_AUTOSTATE(9);
+ R600_OUT_BATCH_REGVAL(SX_MISC, r700->SX_MISC.u32All);
+ R600_OUT_BATCH_REGVAL(SX_ALPHA_TEST_CONTROL, r700->SX_ALPHA_TEST_CONTROL.u32All);
+ R600_OUT_BATCH_REGVAL(SX_ALPHA_REF, r700->SX_ALPHA_REF.u32All);
+ END_BATCH();
+ COMMIT_BATCH();
+}
+
+static void r700SendDBState(GLcontext *ctx, struct radeon_state_atom *atom)
+{
+ context_t *context = R700_CONTEXT(ctx);
+ R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
+ BATCH_LOCALS(&context->radeon);
+
+ BEGIN_BATCH_NO_AUTOSTATE(27);
+ R600_OUT_BATCH_REGVAL(DB_HTILE_DATA_BASE, r700->DB_HTILE_DATA_BASE.u32All);
+
+ R600_OUT_BATCH_REGSEQ(DB_STENCIL_CLEAR, 2);
+ R600_OUT_BATCH(r700->DB_STENCIL_CLEAR.u32All);
+ R600_OUT_BATCH(r700->DB_DEPTH_CLEAR.u32All);
+
+ R600_OUT_BATCH_REGSEQ(DB_STENCILREFMASK, 2);
+ R600_OUT_BATCH(r700->DB_STENCILREFMASK.u32All);
+ R600_OUT_BATCH(r700->DB_STENCILREFMASK_BF.u32All);
+
+ R600_OUT_BATCH_REGVAL(DB_DEPTH_CONTROL, r700->DB_DEPTH_CONTROL.u32All);
+ R600_OUT_BATCH_REGVAL(DB_SHADER_CONTROL, r700->DB_SHADER_CONTROL.u32All);
+
+ R600_OUT_BATCH_REGSEQ(DB_RENDER_CONTROL, 2);
+ R600_OUT_BATCH(r700->DB_RENDER_CONTROL.u32All);
+ R600_OUT_BATCH(r700->DB_RENDER_OVERRIDE.u32All);
+
+ R600_OUT_BATCH_REGVAL(DB_HTILE_SURFACE, r700->DB_HTILE_SURFACE.u32All);
+ R600_OUT_BATCH_REGVAL(DB_ALPHA_TO_MASK, r700->DB_ALPHA_TO_MASK.u32All);
+
+ END_BATCH();
+ COMMIT_BATCH();
+}
+
+static void r700SendCBState(GLcontext *ctx, struct radeon_state_atom *atom)
+{
+ context_t *context = R700_CONTEXT(ctx);
+ R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
+ BATCH_LOCALS(&context->radeon);
+ unsigned int ui;
+
+ if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
+ BEGIN_BATCH_NO_AUTOSTATE(14);
+ R600_OUT_BATCH_REGSEQ(CB_CLEAR_RED, 4);
+ R600_OUT_BATCH(r700->CB_CLEAR_RED_R6XX.u32All);
+ R600_OUT_BATCH(r700->CB_CLEAR_GREEN_R6XX.u32All);
+ R600_OUT_BATCH(r700->CB_CLEAR_BLUE_R6XX.u32All);
+ R600_OUT_BATCH(r700->CB_CLEAR_ALPHA_R6XX.u32All);
+ R600_OUT_BATCH_REGSEQ(CB_FOG_RED, 3);
+ R600_OUT_BATCH(r700->CB_FOG_RED_R6XX.u32All);
+ R600_OUT_BATCH(r700->CB_FOG_GREEN_R6XX.u32All);
+ R600_OUT_BATCH(r700->CB_FOG_BLUE_R6XX.u32All);
+ /* R600 does not have per-MRT blend */
+ R600_OUT_BATCH_REGVAL(CB_BLEND_CONTROL, r700->CB_BLEND_CONTROL.u32All);
+ END_BATCH();
+ }
+
+ BEGIN_BATCH_NO_AUTOSTATE(22);
+ R600_OUT_BATCH_REGSEQ(CB_TARGET_MASK, 2);
+ R600_OUT_BATCH(r700->CB_TARGET_MASK.u32All);
+ R600_OUT_BATCH(r700->CB_SHADER_MASK.u32All);
+
+ R600_OUT_BATCH_REGSEQ(CB_BLEND_RED, 4);
+ R600_OUT_BATCH(r700->CB_BLEND_RED.u32All);
+ R600_OUT_BATCH(r700->CB_BLEND_GREEN.u32All);
+ R600_OUT_BATCH(r700->CB_BLEND_BLUE.u32All);
+ R600_OUT_BATCH(r700->CB_BLEND_ALPHA.u32All);
+
+ R600_OUT_BATCH_REGVAL(R7xx_CB_SHADER_CONTROL, r700->CB_SHADER_CONTROL.u32All);
+ R600_OUT_BATCH_REGVAL(CB_COLOR_CONTROL, r700->CB_COLOR_CONTROL.u32All);
+
+ R600_OUT_BATCH_REGSEQ(CB_CLRCMP_CONTROL, 4);
+ R600_OUT_BATCH(r700->CB_CLRCMP_CONTROL.u32All);
+ R600_OUT_BATCH(r700->CB_CLRCMP_SRC.u32All);
+ R600_OUT_BATCH(r700->CB_CLRCMP_DST.u32All);
+ R600_OUT_BATCH(r700->CB_CLRCMP_MSK.u32All);
+ END_BATCH();
+
+ if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
+ for (ui = 0; ui < R700_MAX_RENDER_TARGETS; ui++) {
+ if (r700->render_target[ui].enabled) {
+ BEGIN_BATCH_NO_AUTOSTATE(3);
+ R600_OUT_BATCH_REGVAL(CB_BLEND0_CONTROL + (4 * ui),
+ r700->render_target[ui].CB_BLEND0_CONTROL.u32All);
+ END_BATCH();
+ }
+ }
+ }
+
+ COMMIT_BATCH();
+
+}
+
+static void r700SendSUState(GLcontext *ctx, struct radeon_state_atom *atom)
+{
+ context_t *context = R700_CONTEXT(ctx);
+ R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
+ BATCH_LOCALS(&context->radeon);
+
+ BEGIN_BATCH_NO_AUTOSTATE(19);
+ R600_OUT_BATCH_REGVAL(PA_SU_SC_MODE_CNTL, r700->PA_SU_SC_MODE_CNTL.u32All);
+
+ R600_OUT_BATCH_REGSEQ(PA_SU_POINT_SIZE, 4);
+ R600_OUT_BATCH(r700->PA_SU_POINT_SIZE.u32All);
+ R600_OUT_BATCH(r700->PA_SU_POINT_MINMAX.u32All);
+ R600_OUT_BATCH(r700->PA_SU_LINE_CNTL.u32All);
+ R600_OUT_BATCH(r700->PA_SU_VTX_CNTL.u32All);
+
+ R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_DB_FMT_CNTL, 2);
+ R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_DB_FMT_CNTL.u32All);
+ R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_CLAMP.u32All);
+
+ R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
+ R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_SCALE.u32All);
+ R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_OFFSET.u32All);
+ R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_SCALE.u32All);
+ R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_OFFSET.u32All);
+
+ END_BATCH();
+ COMMIT_BATCH();
+
+}
+
+static void r700SendCLState(GLcontext *ctx, struct radeon_state_atom *atom)
+{
+ context_t *context = R700_CONTEXT(ctx);
+ R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
+ BATCH_LOCALS(&context->radeon);
+
+ BEGIN_BATCH_NO_AUTOSTATE(18);
+ R600_OUT_BATCH_REGVAL(PA_CL_CLIP_CNTL, r700->PA_CL_CLIP_CNTL.u32All);
+ R600_OUT_BATCH_REGVAL(PA_CL_VTE_CNTL, r700->PA_CL_VTE_CNTL.u32All);
+ R600_OUT_BATCH_REGVAL(PA_CL_VS_OUT_CNTL, r700->PA_CL_VS_OUT_CNTL.u32All);
+ R600_OUT_BATCH_REGVAL(PA_CL_NANINF_CNTL, r700->PA_CL_NANINF_CNTL.u32All);
+
+ R600_OUT_BATCH_REGSEQ(PA_CL_GB_VERT_CLIP_ADJ, 4);
+ R600_OUT_BATCH(r700->PA_CL_GB_VERT_CLIP_ADJ.u32All);
+ R600_OUT_BATCH(r700->PA_CL_GB_VERT_DISC_ADJ.u32All);
+ R600_OUT_BATCH(r700->PA_CL_GB_HORZ_CLIP_ADJ.u32All);
+ R600_OUT_BATCH(r700->PA_CL_GB_HORZ_DISC_ADJ.u32All);
+
+ END_BATCH();
+ COMMIT_BATCH();
+}
+
+// XXX need to split this up
+static void r700SendSCState(GLcontext *ctx, struct radeon_state_atom *atom)
+{
+ context_t *context = R700_CONTEXT(ctx);
+ R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
+ BATCH_LOCALS(&context->radeon);
+
+ BEGIN_BATCH_NO_AUTOSTATE(47);
+ R600_OUT_BATCH_REGSEQ(PA_SC_SCREEN_SCISSOR_TL, 2);
+ R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_TL.u32All);
+ R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_BR.u32All);
+
+ R600_OUT_BATCH_REGSEQ(PA_SC_WINDOW_OFFSET, 13);
+ R600_OUT_BATCH(r700->PA_SC_WINDOW_OFFSET.u32All);
+ R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_TL.u32All);
+ R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_BR.u32All);
+ R600_OUT_BATCH(r700->PA_SC_CLIPRECT_RULE.u32All);
+ R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_TL.u32All);
+ R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_BR.u32All);
+ R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_TL.u32All);
+ R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_BR.u32All);
+ R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_TL.u32All);
+ R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_BR.u32All);
+ R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_TL.u32All);
+ R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_BR.u32All);
+ R600_OUT_BATCH(r700->PA_SC_EDGERULE.u32All);
+
+ R600_OUT_BATCH_REGSEQ(PA_SC_GENERIC_SCISSOR_TL, 2);
+ R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_TL.u32All);
+ R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_BR.u32All);
+
+ R600_OUT_BATCH_REGVAL(PA_SC_LINE_STIPPLE, r700->PA_SC_LINE_STIPPLE.u32All);
+ R600_OUT_BATCH_REGVAL(PA_SC_MPASS_PS_CNTL, r700->PA_SC_MPASS_PS_CNTL.u32All);
+ R600_OUT_BATCH_REGVAL(PA_SC_MODE_CNTL, r700->PA_SC_MODE_CNTL.u32All);
+ R600_OUT_BATCH_REGVAL(PA_SC_LINE_CNTL, r700->PA_SC_LINE_CNTL.u32All);
+ R600_OUT_BATCH_REGVAL(PA_SC_AA_CONFIG, r700->PA_SC_AA_CONFIG.u32All);
+ R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_MCTX.u32All);
+ R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX.u32All);
+ R600_OUT_BATCH_REGVAL(PA_SC_AA_MASK, r700->PA_SC_AA_MASK.u32All);
+
+ END_BATCH();
+ COMMIT_BATCH();
+}
+
+static int check_always(GLcontext *ctx, struct radeon_state_atom *atom)
+{
+ return atom->cmd_size;
+}
+
+#define ALLOC_STATE( ATOM, SZ, EMIT ) \
+do { \
+ context->atoms.ATOM.cmd_size = (SZ); \
+ context->atoms.ATOM.cmd = NULL; \
+ context->atoms.ATOM.name = #ATOM; \
+ context->atoms.ATOM.idx = 0; \
+ context->atoms.ATOM.check = check_always; \
+ context->atoms.ATOM.dirty = GL_FALSE; \
+ context->atoms.ATOM.emit = (EMIT); \
+ context->radeon.hw.max_state_size += (SZ); \
+ insert_at_tail(&context->radeon.hw.atomlist, &context->atoms.ATOM); \
+} while (0)
+
+void r600InitAtoms(context_t *context)
+{
+
+ /* Setup the atom linked list */
+ make_empty_list(&context->radeon.hw.atomlist);
+ context->radeon.hw.atomlist.name = "atom-list";
+
+ ALLOC_STATE(sq, 34, r700SendSQConfig);
+ ALLOC_STATE(db, 27, r700SendDBState);
+ ALLOC_STATE(db_target, 19, r700SendDepthTargetState);
+ ALLOC_STATE(sc, 47, r700SendSCState);
+ ALLOC_STATE(cl, 18, r700SendCLState);
+ ALLOC_STATE(ucp, 36, r700SendUCPState);
+ ALLOC_STATE(su, 19, r700SendSUState);
+ ALLOC_STATE(cb, 39, r700SendCBState);
+ ALLOC_STATE(cb_target, 32, r700SendRenderTargetState);
+ ALLOC_STATE(sx, 9, r700SendSXState);
+ ALLOC_STATE(vgt, 41, r700SendVGTState);
+ ALLOC_STATE(spi, (59 + R700_MAX_SHADER_EXPORTS), r700SendSPIState);
+ ALLOC_STATE(vpt, 16, r700SendViewportState);
+
+ context->radeon.hw.is_dirty = GL_TRUE;
+ context->radeon.hw.all_dirty = GL_TRUE;
+}
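
For context on the state-atom pattern set up by r600InitAtoms() above, the following is a minimal illustrative sketch, not code from this patch, of how such an atom list is typically flushed: each atom's check callback (check_always() here simply returns cmd_size) decides whether its emit callback runs. The helper name emit_atoms_sketch and the flush loop itself are assumptions about the surrounding driver code; the field names (check, emit, dirty, cmd_size, is_dirty) come from the ALLOC_STATE macro and r600InitAtoms() above, and the foreach iterator is assumed to be provided by main/simple_list.h, which this patch starts including.

/* Sketch only: an assumed flush loop over context->radeon.hw.atomlist.
 * Everything below is illustrative; it is not part of r700_chip.c.
 */
static void emit_atoms_sketch(GLcontext *ctx)
{
    context_t *context = R700_CONTEXT(ctx);
    struct radeon_state_atom *atom;

    foreach(atom, &context->radeon.hw.atomlist) {
        if (atom->check(ctx, atom))    /* check_always() returns a non-zero cmd_size */
            atom->emit(ctx, atom);     /* e.g. r700SendSQConfig(), r700SendDBState() */
        atom->dirty = GL_FALSE;        /* mark the atom clean once it has been emitted */
    }
    context->radeon.hw.is_dirty = GL_FALSE;
}

A fuller version would presumably also honor the all_dirty flag that r600InitAtoms() sets, re-emitting every atom on a full flush and otherwise only those whose dirty flag is raised; the sketch omits that to stay short.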