Diffstat (limited to 'src/mesa/drivers/dri/i965/brw_vtbl.c')
-rw-r--r--   src/mesa/drivers/dri/i965/brw_vtbl.c   177
1 file changed, 117 insertions(+), 60 deletions(-)
diff --git a/src/mesa/drivers/dri/i965/brw_vtbl.c b/src/mesa/drivers/dri/i965/brw_vtbl.c
index 786f30e641..15ceac6ba1 100644
--- a/src/mesa/drivers/dri/i965/brw_vtbl.c
+++ b/src/mesa/drivers/dri/i965/brw_vtbl.c
@@ -23,20 +23,18 @@
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- **********************************************************************/
- /*
- * Authors:
- * Keith Whitwell <keith@tungstengraphics.com>
- */
-
-
+**********************************************************************/
+/*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
-#include "glheader.h"
-#include "mtypes.h"
-#include "imports.h"
-#include "macros.h"
-#include "colormac.h"
+#include "main/glheader.h"
+#include "main/mtypes.h"
+#include "main/imports.h"
+#include "main/macros.h"
+#include "main/colormac.h"
#include "intel_batchbuffer.h"
#include "intel_regions.h"
@@ -44,88 +42,149 @@
#include "brw_context.h"
#include "brw_defines.h"
#include "brw_state.h"
-
#include "brw_draw.h"
#include "brw_state.h"
-#include "brw_aub.h"
#include "brw_fallback.h"
#include "brw_vs.h"
+static void
+dri_bo_release(dri_bo **bo)
+{
+ dri_bo_unreference(*bo);
+ *bo = NULL;
+}
+
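
The new dri_bo_release() helper pairs dri_bo_unreference() with clearing the caller's pointer, so a stale handle cannot be used after teardown. Below is a minimal standalone sketch of that release-and-clear idiom, using a made-up refcounted type rather than the real dri_bo API:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted object standing in for dri_bo. */
struct ref_obj {
   int refcount;
};

static void ref_obj_unreference(struct ref_obj *obj)
{
   if (obj && --obj->refcount == 0) {
      printf("freeing object\n");
      free(obj);
   }
}

/* Release-and-clear: the caller's pointer is NULLed so it cannot dangle. */
static void ref_obj_release(struct ref_obj **obj)
{
   ref_obj_unreference(*obj);
   *obj = NULL;
}

int main(void)
{
   struct ref_obj *bo = malloc(sizeof(*bo));
   bo->refcount = 1;
   ref_obj_release(&bo);     /* drops the last reference and frees */
   ref_obj_release(&bo);     /* safe: unreferencing NULL is a no-op in this toy */
   return 0;
}
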
-/* called from intelDestroyContext()
+/**
+ * called from intelDestroyContext()
*/
static void brw_destroy_context( struct intel_context *intel )
{
- GLcontext *ctx = &intel->ctx;
struct brw_context *brw = brw_context(&intel->ctx);
+ int i;
- brw_aub_destroy(brw);
-
- brw_destroy_metaops(brw);
brw_destroy_state(brw);
brw_draw_destroy( brw );
- brw_ProgramCacheDestroy( ctx );
+ _mesa_free(brw->wm.compile_data);
+
brw_FrameBufferTexDestroy( brw );
+
+ for (i = 0; i < brw->state.nr_draw_regions; i++)
+ intel_region_release(&brw->state.draw_regions[i]);
+ brw->state.nr_draw_regions = 0;
+ intel_region_release(&brw->state.depth_region);
+
+ dri_bo_release(&brw->curbe.curbe_bo);
+ dri_bo_release(&brw->vs.prog_bo);
+ dri_bo_release(&brw->vs.state_bo);
+ dri_bo_release(&brw->gs.prog_bo);
+ dri_bo_release(&brw->gs.state_bo);
+ dri_bo_release(&brw->clip.prog_bo);
+ dri_bo_release(&brw->clip.state_bo);
+ dri_bo_release(&brw->clip.vp_bo);
+ dri_bo_release(&brw->sf.prog_bo);
+ dri_bo_release(&brw->sf.state_bo);
+ dri_bo_release(&brw->sf.vp_bo);
+ for (i = 0; i < BRW_MAX_TEX_UNIT; i++)
+ dri_bo_release(&brw->wm.sdc_bo[i]);
+ dri_bo_release(&brw->wm.bind_bo);
+ for (i = 0; i < BRW_WM_MAX_SURF; i++)
+ dri_bo_release(&brw->wm.surf_bo[i]);
+ dri_bo_release(&brw->wm.sampler_bo);
+ dri_bo_release(&brw->wm.prog_bo);
+ dri_bo_release(&brw->wm.state_bo);
+ dri_bo_release(&brw->cc.prog_bo);
+ dri_bo_release(&brw->cc.state_bo);
+ dri_bo_release(&brw->cc.vp_bo);
}
-/* called from intelDrawBuffer()
+
+/**
+ * called from intelDrawBuffer()
*/
static void brw_set_draw_region( struct intel_context *intel,
- struct intel_region *draw_region,
- struct intel_region *depth_region)
+ struct intel_region *draw_regions[],
+ struct intel_region *depth_region,
+ GLuint num_regions)
{
struct brw_context *brw = brw_context(&intel->ctx);
-
- intel_region_release(intel, &brw->state.draw_region);
- intel_region_release(intel, &brw->state.depth_region);
- intel_region_reference(&brw->state.draw_region, draw_region);
+ int i;
+
+ /* release old color/depth regions */
+ if (brw->state.depth_region != depth_region)
+ brw->state.dirty.brw |= BRW_NEW_DEPTH_BUFFER;
+ for (i = 0; i < brw->state.nr_draw_regions; i++)
+ intel_region_release(&brw->state.draw_regions[i]);
+ intel_region_release(&brw->state.depth_region);
+
+ /* reference new color/depth regions */
+ for (i = 0; i < num_regions; i++)
+ intel_region_reference(&brw->state.draw_regions[i], draw_regions[i]);
intel_region_reference(&brw->state.depth_region, depth_region);
+ brw->state.nr_draw_regions = num_regions;
}
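
brw_set_draw_region() now tracks an array of color regions plus a depth region: it drops the references held on the previous regions, takes references on the new set, and flags BRW_NEW_DEPTH_BUFFER only when the depth region actually changed. The sketch below illustrates that retargeting pattern with a hypothetical refcounted region type (region_reference/region_release and MAX_DRAW_REGIONS are stand-ins, not the driver's API); releasing before re-referencing is safe here on the assumption that the caller keeps its own references alive:

#include <assert.h>
#include <stdlib.h>

#define MAX_DRAW_REGIONS 4          /* hypothetical cap on color targets */

struct region { int refcount; };

static void region_reference(struct region **dst, struct region *src)
{
   assert(*dst == NULL);
   if (src)
      src->refcount++;
   *dst = src;
}

static void region_release(struct region **dst)
{
   struct region *r = *dst;
   if (r && --r->refcount == 0)
      free(r);
   *dst = NULL;
}

struct render_state {
   struct region *draw_regions[MAX_DRAW_REGIONS];
   unsigned nr_draw_regions;
};

/* Retarget rendering: drop references on the old color regions, then
 * take references on the new set, mirroring brw_set_draw_region().
 * The caller is assumed to hold its own references, so releasing
 * before referencing cannot free a region that is still in use. */
static void set_draw_regions(struct render_state *s,
                             struct region *new_regions[], unsigned n)
{
   unsigned i;

   for (i = 0; i < s->nr_draw_regions; i++)
      region_release(&s->draw_regions[i]);

   for (i = 0; i < n; i++)
      region_reference(&s->draw_regions[i], new_regions[i]);
   s->nr_draw_regions = n;
}

int main(void)
{
   struct render_state state = { { NULL }, 0 };
   struct region *fb_color = calloc(1, sizeof(*fb_color));
   fb_color->refcount = 1;              /* reference held by the "framebuffer" */

   struct region *targets[1] = { fb_color };
   set_draw_regions(&state, targets, 1);
   set_draw_regions(&state, NULL, 0);   /* unbind all color targets */

   region_release(&fb_color);           /* framebuffer drops its reference */
   return 0;
}
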
-/* called from intelFlushBatchLocked
+/**
+ * called from intel_batchbuffer_flush and children before sending a
+ * batchbuffer off.
*/
-static void brw_lost_hardware( struct intel_context *intel )
+static void brw_finish_batch(struct intel_context *intel)
{
struct brw_context *brw = brw_context(&intel->ctx);
+ brw_emit_query_end(brw);
+}
- /* Note that we effectively lose the context after this.
- *
- * Setting this flag provokes a state buffer wrap and also flushes
- * the hardware caches.
- */
- brw->state.dirty.brw |= BRW_NEW_CONTEXT;
- /* Which means there shouldn't be any commands already queued:
+/**
+ * called from intelFlushBatchLocked
+ */
+static void brw_new_batch( struct intel_context *intel )
+{
+ struct brw_context *brw = brw_context(&intel->ctx);
+
+ /* Check that we didn't just wrap our batchbuffer at a bad time. */
+ assert(!brw->no_batch_wrap);
+
+ brw->curbe.need_new_bo = GL_TRUE;
+
+ /* Mark all context state as needing to be re-emitted.
+ * This is probably not as severe as on 915, since almost all of our state
+ * is just in referenced buffers.
*/
- assert(intel->batch->ptr == intel->batch->map + intel->batch->offset);
+ brw->state.dirty.brw |= BRW_NEW_CONTEXT;
brw->state.dirty.mesa |= ~0;
brw->state.dirty.brw |= ~0;
brw->state.dirty.cache |= ~0;
+
+ /* Move to the end of the current upload buffer so that we'll force choosing
+ * a new buffer next time.
+ */
+ if (brw->vb.upload.bo != NULL) {
+ dri_bo_unreference(brw->vb.upload.bo);
+ brw->vb.upload.bo = NULL;
+ brw->vb.upload.offset = 0;
+ }
}
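
brw_new_batch() leans on the driver's dirty-bit scheme: once a fresh batchbuffer starts, nothing previously emitted is valid in it, so every mesa/brw/cache dirty bit is forced on and the next draw re-emits all state. A minimal sketch of that bookkeeping follows, with invented flag names rather than the driver's real state atoms:

#include <stdio.h>

/* Hypothetical dirty-flag bits standing in for the driver's state atoms. */
#define DIRTY_VERTEX_PROG   (1u << 0)
#define DIRTY_DEPTH_STATE   (1u << 1)
#define DIRTY_CONTEXT       (1u << 2)

struct state_tracker {
   unsigned dirty;
};

/* Starting a new batch: nothing emitted so far exists in the new buffer,
 * so mark everything dirty (cf. brw->state.dirty.* |= ~0 above). */
static void new_batch(struct state_tracker *st)
{
   st->dirty |= DIRTY_CONTEXT;
   st->dirty |= ~0u;
}

/* Emit only the state that is flagged, then clear the flags. */
static void emit_state(struct state_tracker *st)
{
   if (st->dirty & DIRTY_VERTEX_PROG)
      printf("re-emit vertex program state\n");
   if (st->dirty & DIRTY_DEPTH_STATE)
      printf("re-emit depth state\n");
   st->dirty = 0;
}

int main(void)
{
   struct state_tracker st = { 0 };
   new_batch(&st);    /* everything dirty */
   emit_state(&st);   /* re-emits all flagged state */
   emit_state(&st);   /* nothing left to do */
   return 0;
}
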
-static void brw_note_fence( struct intel_context *intel,
- GLuint fence )
+
+static void brw_note_fence( struct intel_context *intel, GLuint fence )
{
brw_context(&intel->ctx)->state.dirty.brw |= BRW_NEW_FENCE;
}
-
-static void brw_note_unlock( struct intel_context *intel )
-{
- struct brw_context *brw = brw_context(&intel->ctx);
- brw_pool_check_wrap(brw, &brw->pool[BRW_GS_POOL]);
- brw_pool_check_wrap(brw, &brw->pool[BRW_SS_POOL]);
- brw_context(&intel->ctx)->state.dirty.brw |= BRW_NEW_LOCK;
+static void brw_note_unlock( struct intel_context *intel )
+{
+ struct brw_context *brw = brw_context(&intel->ctx);
+ brw_state_cache_check_size(brw);
}
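
The per-pool wrap checks are gone; brw_note_unlock() instead calls brw_state_cache_check_size(). The underlying policy is a size budget on a cache of GPU objects: once it grows past a threshold, throw the whole cache away and let later draws repopulate it. The sketch below shows that check-and-evict policy in isolation; the budget value and structure fields are assumptions, not the actual cache implementation:

#include <stdio.h>

#define CACHE_BUDGET_BYTES (4 * 1024 * 1024)   /* hypothetical 4 MB budget */

struct state_cache {
   unsigned long bytes_used;
   unsigned      n_items;
};

static void cache_clear(struct state_cache *c)
{
   /* A real driver would unreference every cached object here. */
   printf("clearing %u cached items (%lu bytes)\n", c->n_items, c->bytes_used);
   c->bytes_used = 0;
   c->n_items = 0;
}

/* Called at a safe point (e.g. after a lock is released): if the cache
 * has grown past its budget, discard it and rebuild on demand. */
static void cache_check_size(struct state_cache *c)
{
   if (c->bytes_used > CACHE_BUDGET_BYTES)
      cache_clear(c);
}

int main(void)
{
   struct state_cache c = { 6 * 1024 * 1024, 120 };  /* pretend it grew too large */
   cache_check_size(&c);
   return 0;
}
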
-void brw_do_flush( struct brw_context *brw,
- GLuint flags )
+void brw_do_flush( struct brw_context *brw, GLuint flags )
{
struct brw_mi_flush flush;
memset(&flush, 0, sizeof(flush));
@@ -135,8 +194,7 @@ void brw_do_flush( struct brw_context *brw,
}
-static void brw_emit_flush( struct intel_context *intel,
- GLuint unused )
+static void brw_emit_flush( struct intel_context *intel, GLuint unused )
{
brw_do_flush(brw_context(&intel->ctx),
BRW_FLUSH_STATE_CACHE|BRW_FLUSH_READ_CACHE);
@@ -157,8 +215,6 @@ static GLuint brw_flush_cmd( void )
}
-
-
static void brw_invalidate_state( struct intel_context *intel, GLuint new_state )
{
/* nothing */
@@ -168,18 +224,19 @@ static void brw_invalidate_state( struct intel_context *intel, GLuint new_state
void brwInitVtbl( struct brw_context *brw )
{
brw->intel.vtbl.check_vertex_size = 0;
- brw->intel.vtbl.emit_state = 0;
- brw->intel.vtbl.reduced_primitive_state = 0;
+ brw->intel.vtbl.emit_state = 0;
+ brw->intel.vtbl.reduced_primitive_state = 0;
brw->intel.vtbl.render_start = 0;
- brw->intel.vtbl.update_texture_state = 0;
+ brw->intel.vtbl.update_texture_state = 0;
- brw->intel.vtbl.invalidate_state = brw_invalidate_state;
- brw->intel.vtbl.note_fence = brw_note_fence;
- brw->intel.vtbl.note_unlock = brw_note_unlock;
- brw->intel.vtbl.lost_hardware = brw_lost_hardware;
+ brw->intel.vtbl.invalidate_state = brw_invalidate_state;
+ brw->intel.vtbl.note_fence = brw_note_fence;
+ brw->intel.vtbl.note_unlock = brw_note_unlock;
+ brw->intel.vtbl.new_batch = brw_new_batch;
+ brw->intel.vtbl.finish_batch = brw_finish_batch;
brw->intel.vtbl.destroy = brw_destroy_context;
brw->intel.vtbl.set_draw_region = brw_set_draw_region;
brw->intel.vtbl.flush_cmd = brw_flush_cmd;
brw->intel.vtbl.emit_flush = brw_emit_flush;
+ brw->intel.vtbl.debug_batch = brw_debug_batch;
}
-
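
brwInitVtbl() fills in the table of function pointers that the chipset-independent intel_context code dispatches through, which is how hooks such as brw_new_batch and brw_finish_batch get wired up without the common layer knowing about i965 specifics. A small sketch of that vtable idiom, with hypothetical hook names and far fewer entries than the real struct:

#include <stdio.h>

struct context;

/* Per-generation hooks, filled in by the hardware-specific backend. */
struct driver_vtbl {
   void (*new_batch)(struct context *ctx);
   void (*finish_batch)(struct context *ctx);
   void (*destroy)(struct context *ctx);
};

struct context {
   struct driver_vtbl vtbl;
   const char *name;
};

/* Backend implementations (stand-ins for the brw_* functions above). */
static void backend_new_batch(struct context *ctx)    { printf("%s: new batch\n", ctx->name); }
static void backend_finish_batch(struct context *ctx) { printf("%s: finish batch\n", ctx->name); }
static void backend_destroy(struct context *ctx)      { printf("%s: destroy\n", ctx->name); }

/* Analogous to brwInitVtbl(): wire the common hooks to this backend. */
static void backend_init_vtbl(struct context *ctx)
{
   ctx->vtbl.new_batch    = backend_new_batch;
   ctx->vtbl.finish_batch = backend_finish_batch;
   ctx->vtbl.destroy      = backend_destroy;
}

int main(void)
{
   struct context ctx = { { NULL, NULL, NULL }, "i965" };
   backend_init_vtbl(&ctx);

   /* Generation-independent code dispatches through the table. */
   ctx.vtbl.new_batch(&ctx);
   ctx.vtbl.finish_batch(&ctx);
   ctx.vtbl.destroy(&ctx);
   return 0;
}
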