/*
 * Copyright 2008 Corbin Simpson
 * Copyright 2009 Marek Olšák
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE. */

#include "draw/draw_context.h"

#include "util/u_math.h"
#include "util/u_memory.h"

#include "r300_context.h"
#include "r300_fs.h"
#include "r300_screen.h"
#include "r300_shader_semantics.h"
#include "r300_state_derived.h"
#include "r300_state_inlines.h"
#include "r300_vs.h"

/* r300_state_derived: Various bits of state which are dependent upon
 * currently bound CSO data. */

static void r300_rs_col(struct r300_rs_block* rs, int id, int ptr,
                        boolean swizzle_0001)
{
    rs->ip[id] |= R300_RS_COL_PTR(ptr);
    if (swizzle_0001) {
        rs->ip[id] |= R300_RS_COL_FMT(R300_RS_COL_FMT_0001);
    } else {
        rs->ip[id] |= R300_RS_COL_FMT(R300_RS_COL_FMT_RGBA);
    }
    rs->inst[id] |= R300_RS_INST_COL_ID(id);
}

static void r300_rs_col_write(struct r300_rs_block* rs, int id, int fp_offset)
{
    rs->inst[id] |= R300_RS_INST_COL_CN_WRITE |
                    R300_RS_INST_COL_ADDR(fp_offset);
}

static void r300_rs_tex(struct r300_rs_block* rs, int id, int ptr,
                        boolean swizzle_X001)
{
    if (swizzle_X001) {
        rs->ip[id] |= R300_RS_TEX_PTR(ptr*4) |
                      R300_RS_SEL_S(R300_RS_SEL_C0) |
                      R300_RS_SEL_T(R300_RS_SEL_K0) |
                      R300_RS_SEL_R(R300_RS_SEL_K0) |
                      R300_RS_SEL_Q(R300_RS_SEL_K1);
    } else {
        rs->ip[id] |= R300_RS_TEX_PTR(ptr*4) |
                      R300_RS_SEL_S(R300_RS_SEL_C0) |
                      R300_RS_SEL_T(R300_RS_SEL_C1) |
                      R300_RS_SEL_R(R300_RS_SEL_C2) |
                      R300_RS_SEL_Q(R300_RS_SEL_C3);
    }
    rs->inst[id] |= R300_RS_INST_TEX_ID(id);
}

static void r300_rs_tex_write(struct r300_rs_block* rs, int id, int fp_offset)
{
    rs->inst[id] |= R300_RS_INST_TEX_CN_WRITE |
                    R300_RS_INST_TEX_ADDR(fp_offset);
}

static void r500_rs_col(struct r300_rs_block* rs, int id, int ptr,
                        boolean swizzle_0001)
{
    rs->ip[id] |= R500_RS_COL_PTR(ptr);
    if (swizzle_0001) {
        rs->ip[id] |= R500_RS_COL_FMT(R300_RS_COL_FMT_0001);
    } else {
        rs->ip[id] |= R500_RS_COL_FMT(R300_RS_COL_FMT_RGBA);
    }
    rs->inst[id] |= R500_RS_INST_COL_ID(id);
}

static void r500_rs_col_write(struct r300_rs_block* rs, int id, int fp_offset)
{
    rs->inst[id] |= R500_RS_INST_COL_CN_WRITE |
                    R500_RS_INST_COL_ADDR(fp_offset);
}
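
/* Select the texcoord components to interpolate on r500. Unlike the color
 * path, texcoords are addressed per dword of texcoord memory, hence the
 * ptr*4 base. For the X001 swizzle (used for the fog coordinate), the K0/K1
 * selects substitute the constants 0.0 and 1.0 for the missing components. */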
static void r500_rs_tex(struct r300_rs_block* rs, int id, int ptr,
                        boolean swizzle_X001)
{
    int rs_tex_comp = ptr*4;

    if (swizzle_X001) {
        rs->ip[id] |= R500_RS_SEL_S(rs_tex_comp) |
                      R500_RS_SEL_T(R500_RS_IP_PTR_K0) |
                      R500_RS_SEL_R(R500_RS_IP_PTR_K0) |
                      R500_RS_SEL_Q(R500_RS_IP_PTR_K1);
    } else {
        rs->ip[id] |= R500_RS_SEL_S(rs_tex_comp) |
                      R500_RS_SEL_T(rs_tex_comp + 1) |
                      R500_RS_SEL_R(rs_tex_comp + 2) |
                      R500_RS_SEL_Q(rs_tex_comp + 3);
    }
    rs->inst[id] |= R500_RS_INST_TEX_ID(id);
}

static void r500_rs_tex_write(struct r300_rs_block* rs, int id, int fp_offset)
{
    rs->inst[id] |= R500_RS_INST_TEX_CN_WRITE |
                    R500_RS_INST_TEX_ADDR(fp_offset);
}

/* Set up the RS block.
 *
 * This is the part of the chipset that actually does the rasterization
 * of vertices into fragments. This is also the part of the chipset that
 * locks up if any part of it is even slightly wrong. */
static void r300_update_rs_block(struct r300_context* r300,
                                 struct r300_shader_semantics* vs_outputs,
                                 struct r300_shader_semantics* fs_inputs)
{
    struct r300_rs_block rs = { { 0 } };
    int i, col_count = 0, tex_count = 0, fp_offset = 0, count;
    void (*rX00_rs_col)(struct r300_rs_block*, int, int, boolean);
    void (*rX00_rs_col_write)(struct r300_rs_block*, int, int);
    void (*rX00_rs_tex)(struct r300_rs_block*, int, int, boolean);
    void (*rX00_rs_tex_write)(struct r300_rs_block*, int, int);
    boolean any_bcolor_used = vs_outputs->bcolor[0] != ATTR_UNUSED ||
                              vs_outputs->bcolor[1] != ATTR_UNUSED;

    if (r300_screen(r300->context.screen)->caps->is_r500) {
        rX00_rs_col = r500_rs_col;
        rX00_rs_col_write = r500_rs_col_write;
        rX00_rs_tex = r500_rs_tex;
        rX00_rs_tex_write = r500_rs_tex_write;
    } else {
        rX00_rs_col = r300_rs_col;
        rX00_rs_col_write = r300_rs_col_write;
        rX00_rs_tex = r300_rs_tex;
        rX00_rs_tex_write = r300_rs_tex_write;
    }

    /* Rasterize colors. */
    for (i = 0; i < ATTR_COLOR_COUNT; i++) {
        if (vs_outputs->color[i] != ATTR_UNUSED || any_bcolor_used ||
            vs_outputs->color[1] != ATTR_UNUSED) {
            /* Always rasterize if it's written by the VS,
             * otherwise it locks up. */
            rX00_rs_col(&rs, col_count, i, FALSE);

            /* Write it to the FS input register if it's used by the FS. */
            if (fs_inputs->color[i] != ATTR_UNUSED) {
                rX00_rs_col_write(&rs, col_count, fp_offset);
                fp_offset++;
            }
            col_count++;
        } else {
            /* Skip the FS input register, leave it uninitialized. */
            /* If we try to set it to (0,0,0,1), it will lock up. */
            if (fs_inputs->color[i] != ATTR_UNUSED) {
                fp_offset++;
            }
        }
    }

    /* Rasterize texture coordinates. */
    for (i = 0; i < ATTR_GENERIC_COUNT; i++) {
        if (vs_outputs->generic[i] != ATTR_UNUSED) {
            /* Always rasterize if it's written by the VS,
             * otherwise it locks up. */
            rX00_rs_tex(&rs, tex_count, tex_count, FALSE);

            /* Write it to the FS input register if it's used by the FS. */
            if (fs_inputs->generic[i] != ATTR_UNUSED) {
                rX00_rs_tex_write(&rs, tex_count, fp_offset);
                fp_offset++;
            }
            tex_count++;
        } else {
            /* Skip the FS input register, leave it uninitialized. */
            /* If we try to set it to (0,0,0,1), it will lock up. */
            if (fs_inputs->generic[i] != ATTR_UNUSED) {
                fp_offset++;
            }
        }
    }

    /* Rasterize fog coordinates. */
    if (vs_outputs->fog != ATTR_UNUSED) {
        /* Always rasterize if it's written by the VS,
         * otherwise it locks up. */
        rX00_rs_tex(&rs, tex_count, tex_count, TRUE);

        /* Write it to the FS input register if it's used by the FS. */
        if (fs_inputs->fog != ATTR_UNUSED) {
            rX00_rs_tex_write(&rs, tex_count, fp_offset);
            fp_offset++;
        }
        tex_count++;
    } else {
        /* Skip the FS input register, leave it uninitialized. */
        /* If we try to set it to (0,0,0,1), it will lock up. */
        if (fs_inputs->fog != ATTR_UNUSED) {
            fp_offset++;
        }
    }

    /* Rasterize WPOS. */
    /* If the FS doesn't need it, it's not written by the VS. */
    if (fs_inputs->wpos != ATTR_UNUSED) {
        rX00_rs_tex(&rs, tex_count, tex_count, FALSE);
        rX00_rs_tex_write(&rs, tex_count, fp_offset);
        fp_offset++;
        tex_count++;
    }

    /* Rasterize at least one color, or bad things happen. */
    if (col_count == 0 && tex_count == 0) {
        rX00_rs_col(&rs, 0, 0, TRUE);
        col_count++;
    }
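
    /* Pack the interpolator counts: the low bits hold the number of texcoord
     * dwords to interpolate (4 per texcoord), IC_COUNT holds the number of
     * color interpolators, and inst_count is one less than the number of RS
     * instructions in use. */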
    rs.count = (tex_count*4) | (col_count << R300_IC_COUNT_SHIFT) |
        R300_HIRES_EN;

    count = MAX3(col_count, tex_count, 1);
    rs.inst_count = count - 1;

    /* Now, after all that, see if we actually need to update the state. */
    if (memcmp(r300->rs_block_state.state, &rs, sizeof(struct r300_rs_block))) {
        memcpy(r300->rs_block_state.state, &rs, sizeof(struct r300_rs_block));
        r300->rs_block_state.size = 5 + count*2;
    }
}

/* Update the shader-dependent states. */
static void r300_update_derived_shader_state(struct r300_context* r300)
{
    struct r300_vertex_shader* vs = r300->vs_state.state;

    r300_update_rs_block(r300, &vs->outputs, &r300->fs->inputs);
}

static boolean r300_dsa_writes_depth_stencil(struct r300_dsa_state* dsa)
{
    /* We are interested only in the cases when a new depth or stencil value
     * can be written and changed. */

    /* We might optionally check for [Z func: never] and inspect the stencil
     * state in a similar fashion, but it's not terribly important. */
    return (dsa->z_buffer_control & R300_Z_WRITE_ENABLE) ||
           (dsa->stencil_ref_mask & R300_STENCILWRITEMASK_MASK) ||
           ((dsa->z_buffer_control & R500_STENCIL_REFMASK_FRONT_BACK) &&
            (dsa->stencil_ref_bf & R300_STENCILWRITEMASK_MASK));
}

static boolean r300_dsa_alpha_test_enabled(struct r300_dsa_state* dsa)
{
    /* We are interested only in the cases when alpha testing can kill
     * a fragment. */
    uint32_t af = dsa->alpha_function;

    return (af & R300_FG_ALPHA_FUNC_ENABLE) &&
           (af & R300_FG_ALPHA_FUNC_ALWAYS) != R300_FG_ALPHA_FUNC_ALWAYS;
}
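
/* ZTOP controls whether the depth/stencil test runs at the top of the pipe,
 * before the fragment shader (the fast path), or at the bottom, after it.
 * The cases listed in the comment inside are the ones that force it off. */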
static void r300_update_ztop(struct r300_context* r300)
{
    struct r300_ztop_state* ztop_state =
        (struct r300_ztop_state*)r300->ztop_state.state;

    /* This is important enough that I felt it warranted a comment.
     *
     * According to the docs, these are the conditions where ZTOP must be
     * disabled:
     * 1) Alpha testing enabled
     * 2) Texture kill instructions in fragment shader
     * 3) Chroma key culling enabled
     * 4) W-buffering enabled
     *
     * The docs claim that for the first three cases, if no ZS writes happen,
     * then ZTOP can be used.
     *
     * (3) will never apply since we do not support chroma-keyed operations.
     * (4) will need to be re-examined (and this comment updated) if/when
     * Hyper-Z becomes supported.
     *
     * Additionally, the following conditions require disabled ZTOP:
     * 5) Depth writes in fragment shader
     * 6) Outstanding occlusion queries
     *
     * This register causes stalls all the way from SC to CB when changed,
     * but it is buffered on-chip so it does not hurt to write it if it has
     * not changed.
     *
     * ~C. */

    /* ZS writes */
    if (r300_dsa_writes_depth_stencil(r300->dsa_state.state) &&
           (r300_dsa_alpha_test_enabled(r300->dsa_state.state) ||  /* (1) */
            r300->fs->info.uses_kill)) {                           /* (2) */
        ztop_state->z_buffer_top = R300_ZTOP_DISABLE;
    } else if (r300_fragment_shader_writes_depth(r300->fs)) {      /* (5) */
        ztop_state->z_buffer_top = R300_ZTOP_DISABLE;
    } else if (r300->query_current) {                              /* (6) */
        ztop_state->z_buffer_top = R300_ZTOP_DISABLE;
    } else {
        ztop_state->z_buffer_top = R300_ZTOP_ENABLE;
    }

    r300->ztop_state.dirty = TRUE;
}

static void r300_merge_textures_and_samplers(struct r300_context* r300)
{
    struct r300_textures_state *state =
        (struct r300_textures_state*)r300->textures_state.state;
    struct r300_texture_sampler_state *texstate;
    struct r300_sampler_state *sampler;
    struct r300_texture *tex;
    unsigned min_level, max_level, i, size;
    unsigned count = MIN2(state->texture_count,
                          state->sampler_count);

    state->tx_enable = 0;
    size = 2;

    for (i = 0; i < count; i++) {
        if (state->textures[i] && state->sampler_states[i]) {
            state->tx_enable |= 1 << i;

            tex = state->textures[i];
            sampler = state->sampler_states[i];
            texstate = &state->regs[i];

            memcpy(texstate->format, &tex->state, sizeof(uint32_t)*3);
            texstate->filter[0] = sampler->filter0;
            texstate->filter[1] = sampler->filter1;
            texstate->border_color = sampler->border_color;
            texstate->tile_config = R300_TXO_MACRO_TILE(tex->macrotile) |
                                    R300_TXO_MICRO_TILE(tex->microtile);

            /* to emulate 1D textures through 2D ones correctly */
            if (tex->tex.target == PIPE_TEXTURE_1D) {
                texstate->filter[0] &= ~R300_TX_WRAP_T_MASK;
                texstate->filter[0] |= R300_TX_WRAP_T(R300_TX_CLAMP_TO_EDGE);
            }

            if (tex->is_npot) {
                /* NPOT textures don't support mip filter, unfortunately.
                 * This prevents incorrect rendering. */
                texstate->filter[0] &= ~R300_TX_MIN_FILTER_MIP_MASK;
            } else {
                /* determine min/max levels */
                /* the MAX_MIP level is the largest (finest) one */
                max_level = MIN2(sampler->max_lod, tex->tex.last_level);
                min_level = MIN2(sampler->min_lod, max_level);
                texstate->format[0] |= R300_TX_NUM_LEVELS(max_level);
                texstate->filter[0] |= R300_TX_MAX_MIP_LEVEL(min_level);
            }

            texstate->filter[0] |= i << 28;

            size += 16;
            state->count = i+1;
        }
    }

    r300->textures_state.size = size;
}

void r300_update_derived_state(struct r300_context* r300)
{
    if (r300->rs_block_state.dirty) {
        r300_update_derived_shader_state(r300);
    }

    if (r300->textures_state.dirty) {
        r300_merge_textures_and_samplers(r300);
    }

    r300_update_ztop(r300);
}