Diffstat (limited to 'src/gallium/winsys/r600/drm/r600_hw_context.c')
-rw-r--r--  src/gallium/winsys/r600/drm/r600_hw_context.c | 238
1 file changed, 164 insertions(+), 74 deletions(-)
diff --git a/src/gallium/winsys/r600/drm/r600_hw_context.c b/src/gallium/winsys/r600/drm/r600_hw_context.c
index 0f2724f61c..a7c21784e5 100644
--- a/src/gallium/winsys/r600/drm/r600_hw_context.c
+++ b/src/gallium/winsys/r600/drm/r600_hw_context.c
@@ -28,39 +28,25 @@
#include <string.h>
#include <stdlib.h>
#include <assert.h>
+#include <pipe/p_compiler.h>
+#include <util/u_inlines.h>
+#include <util/u_memory.h>
+#include <pipebuffer/pb_bufmgr.h>
#include "xf86drm.h"
-#include "r600.h"
-#include "r600d.h"
#include "radeon_drm.h"
-#include "bof.h"
-#include "pipe/p_compiler.h"
-#include "util/u_inlines.h"
-#include "util/u_memory.h"
-#include <pipebuffer/pb_bufmgr.h>
#include "r600_priv.h"
+#include "bof.h"
+#include "r600d.h"
#define GROUP_FORCE_NEW_BLOCK 0
-int r600_context_init_fence(struct r600_context *ctx)
-{
- ctx->fence = 1;
- ctx->fence_bo = r600_bo(ctx->radeon, 4096, 0, 0, 0);
- if (ctx->fence_bo == NULL) {
- return -ENOMEM;
- }
- ctx->cfence = r600_bo_map(ctx->radeon, ctx->fence_bo, PB_USAGE_UNSYNCHRONIZED, NULL);
- *ctx->cfence = 0;
- LIST_INITHEAD(&ctx->fenced_bo);
- return 0;
-}
-
static void INLINE r600_context_update_fenced_list(struct r600_context *ctx)
{
for (int i = 0; i < ctx->creloc; i++) {
if (!LIST_IS_EMPTY(&ctx->bo[i]->fencedlist))
LIST_DELINIT(&ctx->bo[i]->fencedlist);
LIST_ADDTAIL(&ctx->bo[i]->fencedlist, &ctx->fenced_bo);
- ctx->bo[i]->fence = ctx->fence;
+ ctx->bo[i]->fence = ctx->radeon->fence;
ctx->bo[i]->ctx = ctx;
}
}
@@ -71,7 +57,7 @@ static void INLINE r600_context_fence_wraparound(struct r600_context *ctx, unsig
struct radeon_bo *tmp;
LIST_FOR_EACH_ENTRY_SAFE(bo, tmp, &ctx->fenced_bo, fencedlist) {
- if (bo->fence <= *ctx->cfence) {
+ if (bo->fence <= *ctx->radeon->cfence) {
LIST_DELINIT(&bo->fencedlist);
bo->fence = 0;
} else {
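
The fence bookkeeping (counter, fence BO and its CPU mapping) moves from struct r600_context into the shared struct radeon, so every context created on the same device advances one common fence timeline. Judging only from how the patch uses them, the new fields in r600_priv.h are presumably along these lines (an assumption, not copied from the header):

/* Assumed additions to struct radeon (r600_priv.h), inferred from usage:
 *   unsigned        fence;     -- fence value emitted at flush, then incremented
 *   unsigned       *cfence;    -- CPU mapping of fence_bo, written back by the GPU
 *   struct r600_bo *fence_bo;  -- small BO that receives the fence writes
 */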
@@ -121,7 +107,7 @@ int r600_context_add_block(struct r600_context *ctx, const struct r600_reg *reg,
/* initialize block */
block->start_offset = reg[i].offset;
- block->pm4[block->pm4_ndwords++] = PKT3(reg[i].opcode, n);
+ block->pm4[block->pm4_ndwords++] = PKT3(reg[i].opcode, n, 0);
block->pm4[block->pm4_ndwords++] = (block->start_offset - reg[i].offset_base) >> 2;
block->reg = &block->pm4[block->pm4_ndwords];
block->pm4_ndwords += n;
@@ -133,7 +119,7 @@ int r600_context_add_block(struct r600_context *ctx, const struct r600_reg *reg,
block->nbo++;
assert(block->nbo < R600_BLOCK_MAX_BO);
block->pm4_bo_index[j] = block->nbo;
- block->pm4[block->pm4_ndwords++] = PKT3(PKT3_NOP, 0);
+ block->pm4[block->pm4_ndwords++] = PKT3(PKT3_NOP, 0, 0);
block->pm4[block->pm4_ndwords++] = 0x00000000;
block->reloc[block->nbo].flush_flags = reg[i+j].flush_flags;
block->reloc[block->nbo].flush_mask = reg[i+j].flush_mask;
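
Every PKT3() in the file now takes a third argument that feeds the predicate bit of the PM4 type-3 packet header, so state and draw packets can be skipped while predicated (conditional) rendering is active; ctx->predicate_drawing supplies that value on the draw path. A rough sketch of how the macro in r600d.h would encode it (the exact field helpers are assumptions here):

/* Sketch only -- the real macro lives in r600d.h. */
#define PKT_TYPE_S(x)          (((x) & 0x3) << 30)
#define PKT_COUNT_S(x)         (((x) & 0x3FFF) << 16)
#define PKT3_IT_OPCODE_S(x)    (((x) & 0xFF) << 8)
#define PKT3_PREDICATE(x)      ((x) & 0x1)   /* bit 0: drop the packet if the predicate fails */
#define PKT3(op, count, predicate) \
        (PKT_TYPE_S(3) | PKT_COUNT_S(count) | PKT3_IT_OPCODE_S(op) | PKT3_PREDICATE(predicate))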
@@ -632,9 +618,6 @@ void r600_context_fini(struct r600_context *ctx)
free(ctx->pm4);
r600_context_clear_fenced_bo(ctx);
- if (ctx->fence_bo) {
- r600_bo_reference(ctx->radeon, &ctx->fence_bo, NULL);
- }
memset(ctx, 0, sizeof(struct r600_context));
}
@@ -654,7 +637,8 @@ int r600_context_init(struct r600_context *ctx, struct radeon *radeon)
ctx->range[i].end_offset = ((i + 1) << ctx->hash_shift) - 1;
ctx->range[i].blocks = calloc(1 << ctx->hash_shift, sizeof(void*));
if (ctx->range[i].blocks == NULL) {
- return -ENOMEM;
+ r = -ENOMEM;
+ goto out_err;
}
}
@@ -763,19 +747,30 @@ int r600_context_init(struct r600_context *ctx, struct radeon *radeon)
/* save 16 dwords space for fence mechanism */
ctx->pm4_ndwords -= 16;
- r = r600_context_init_fence(ctx);
- if (r) {
- goto out_err;
- }
+ LIST_INITHEAD(&ctx->fenced_bo);
/* init dirty list */
LIST_INITHEAD(&ctx->dirty);
+
+ ctx->max_db = 4;
+
return 0;
out_err:
r600_context_fini(ctx);
return r;
}
+static void rv6xx_context_surface_base_update(struct r600_context *ctx,
+ unsigned base_update_flags)
+{
+ /* need to emit surface base update on rv6xx */
+ if ((ctx->radeon->family > CHIP_R600) &&
+ (ctx->radeon->family < CHIP_RV770)) {
+ ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0);
+ ctx->pm4[ctx->pm4_cdwords++] = base_update_flags;
+ }
+}
+
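
rv6xx parts (families strictly between CHIP_R600 and CHIP_RV770) need an explicit SURFACE_BASE_UPDATE packet whenever a color or depth base address is reprogrammed, which is what this helper emits. The flags accumulated in r600_context_draw() below presumably follow the usual r600d.h encoding (listed here as an assumption):

/* Assumed flag layout for base_update_flags (see r600d.h): */
#define SURFACE_BASE_UPDATE_DEPTH      (1 << 0)     /* depth/stencil buffer   */
#define SURFACE_BASE_UPDATE_COLOR(x)   (2 << (x))   /* color buffers CB0..CB7 */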
void r600_context_bo_flush(struct r600_context *ctx, unsigned flush_flags,
unsigned flush_mask, struct r600_bo *rbo)
{
@@ -787,12 +782,12 @@ void r600_context_bo_flush(struct r600_context *ctx, unsigned flush_flags,
bo->last_flush &= flush_mask;
return;
}
- ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_SURFACE_SYNC, 3);
+ ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_SURFACE_SYNC, 3, ctx->predicate_drawing);
ctx->pm4[ctx->pm4_cdwords++] = flush_flags;
ctx->pm4[ctx->pm4_cdwords++] = (bo->size + 255) >> 8;
ctx->pm4[ctx->pm4_cdwords++] = 0x00000000;
ctx->pm4[ctx->pm4_cdwords++] = 0x0000000A;
- ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_NOP, 0);
+ ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_NOP, 0, ctx->predicate_drawing);
ctx->pm4[ctx->pm4_cdwords++] = bo->reloc_id;
bo->last_flush = (bo->last_flush | flush_flags) & flush_mask;
}
@@ -814,6 +809,7 @@ void r600_context_bo_reloc(struct r600_context *ctx, u32 *pm4, struct r600_bo *r
ctx->reloc[ctx->creloc].write_domain = rbo->domains & (RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM);
ctx->reloc[ctx->creloc].flags = 0;
radeon_bo_reference(ctx->radeon, &ctx->bo[ctx->creloc], bo);
+ rbo->fence = ctx->radeon->fence;
ctx->creloc++;
/* set PKT3 to point to proper reloc */
*pm4 = bo->reloc_id;
@@ -836,6 +832,7 @@ void r600_context_pipe_state_set(struct r600_context *ctx, struct r600_pipe_stat
/* find relocation */
id = block->pm4_bo_index[id];
r600_bo_reference(ctx->radeon, &block->reloc[id].bo, state->regs[i].bo);
+ state->regs[i].bo->fence = ctx->radeon->fence;
}
if (!(block->status & R600_BLOCK_STATUS_DIRTY)) {
block->status |= R600_BLOCK_STATUS_ENABLED;
@@ -875,10 +872,13 @@ static inline void r600_context_pipe_state_set_resource(struct r600_context *ctx
*/
r600_bo_reference(ctx->radeon, &block->reloc[1].bo, state->regs[0].bo);
r600_bo_reference(ctx->radeon, &block->reloc[2].bo, state->regs[0].bo);
+ state->regs[0].bo->fence = ctx->radeon->fence;
} else {
/* TEXTURE RESOURCE */
r600_bo_reference(ctx->radeon, &block->reloc[1].bo, state->regs[2].bo);
r600_bo_reference(ctx->radeon, &block->reloc[2].bo, state->regs[3].bo);
+ state->regs[2].bo->fence = ctx->radeon->fence;
+ state->regs[3].bo->fence = ctx->radeon->fence;
}
if (!(block->status & R600_BLOCK_STATUS_DIRTY)) {
block->status |= R600_BLOCK_STATUS_ENABLED;
@@ -1002,6 +1002,7 @@ void r600_context_draw(struct r600_context *ctx, const struct r600_draw *draw)
unsigned ndwords = 9;
struct r600_block *dirty_block = NULL;
struct r600_block *next_block;
+ unsigned rv6xx_surface_base_update = 0;
if (draw->indices) {
ndwords = 13;
@@ -1024,10 +1025,14 @@ void r600_context_draw(struct r600_context *ctx, const struct r600_draw *draw)
for (int i = 0; i < 8; i++) {
if (cb[i]) {
ndwords += 7;
+ rv6xx_surface_base_update |= SURFACE_BASE_UPDATE_COLOR(i);
}
}
- if (db)
+ if (db) {
ndwords += 7;
+ rv6xx_surface_base_update |= SURFACE_BASE_UPDATE_DEPTH;
+ }
+ /* XXX also need to update SURFACE_BASE_UPDATE_STRMOUT when we support it */
/* queries need some special values */
if (ctx->num_query_running) {
@@ -1054,30 +1059,34 @@ void r600_context_draw(struct r600_context *ctx, const struct r600_draw *draw)
}
/* enough room to copy packet */
- LIST_FOR_EACH_ENTRY_SAFE(dirty_block, next_block, &ctx->dirty,list) {
+ LIST_FOR_EACH_ENTRY_SAFE(dirty_block, next_block, &ctx->dirty, list) {
r600_context_block_emit_dirty(ctx, dirty_block);
}
+ /* rv6xx surface base update */
+ if (rv6xx_surface_base_update)
+ rv6xx_context_surface_base_update(ctx, rv6xx_surface_base_update);
+
/* draw packet */
- ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_INDEX_TYPE, 0);
+ ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_INDEX_TYPE, 0, ctx->predicate_drawing);
ctx->pm4[ctx->pm4_cdwords++] = draw->vgt_index_type;
- ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_NUM_INSTANCES, 0);
+ ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_NUM_INSTANCES, 0, ctx->predicate_drawing);
ctx->pm4[ctx->pm4_cdwords++] = draw->vgt_num_instances;
if (draw->indices) {
- ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_DRAW_INDEX, 3);
+ ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_DRAW_INDEX, 3, ctx->predicate_drawing);
ctx->pm4[ctx->pm4_cdwords++] = draw->indices_bo_offset + r600_bo_offset(draw->indices);
ctx->pm4[ctx->pm4_cdwords++] = 0;
ctx->pm4[ctx->pm4_cdwords++] = draw->vgt_num_indices;
ctx->pm4[ctx->pm4_cdwords++] = draw->vgt_draw_initiator;
- ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_NOP, 0);
+ ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_NOP, 0, ctx->predicate_drawing);
ctx->pm4[ctx->pm4_cdwords++] = 0;
r600_context_bo_reloc(ctx, &ctx->pm4[ctx->pm4_cdwords - 1], draw->indices);
} else {
- ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_DRAW_INDEX_AUTO, 1);
+ ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_DRAW_INDEX_AUTO, 1, ctx->predicate_drawing);
ctx->pm4[ctx->pm4_cdwords++] = draw->vgt_num_indices;
ctx->pm4[ctx->pm4_cdwords++] = draw->vgt_draw_initiator;
}
- ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE, 0);
+ ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE, 0, ctx->predicate_drawing);
ctx->pm4[ctx->pm4_cdwords++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0);
/* flush color buffer */
@@ -1099,7 +1108,7 @@ void r600_context_draw(struct r600_context *ctx, const struct r600_draw *draw)
void r600_context_flush(struct r600_context *ctx)
{
- struct drm_radeon_cs drmib;
+ struct drm_radeon_cs drmib = {};
struct drm_radeon_cs_chunk chunks[2];
uint64_t chunk_array[2];
unsigned fence;
@@ -1112,17 +1121,17 @@ void r600_context_flush(struct r600_context *ctx)
r600_context_queries_suspend(ctx);
/* emit fence */
- ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE, 0);
+ ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
ctx->pm4[ctx->pm4_cdwords++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);
- ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE_EOP, 4);
+ ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
ctx->pm4[ctx->pm4_cdwords++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
ctx->pm4[ctx->pm4_cdwords++] = 0;
ctx->pm4[ctx->pm4_cdwords++] = (1 << 29) | (0 << 24);
- ctx->pm4[ctx->pm4_cdwords++] = ctx->fence;
+ ctx->pm4[ctx->pm4_cdwords++] = ctx->radeon->fence;
ctx->pm4[ctx->pm4_cdwords++] = 0;
- ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_NOP, 0);
+ ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_NOP, 0, 0);
ctx->pm4[ctx->pm4_cdwords++] = 0;
- r600_context_bo_reloc(ctx, &ctx->pm4[ctx->pm4_cdwords - 1], ctx->fence_bo);
+ r600_context_bo_reloc(ctx, &ctx->pm4[ctx->pm4_cdwords - 1], ctx->radeon->fence_bo);
#if 1
/* emit cs */
@@ -1139,18 +1148,18 @@ void r600_context_flush(struct r600_context *ctx)
r = drmCommandWriteRead(ctx->radeon->fd, DRM_RADEON_CS, &drmib,
sizeof(struct drm_radeon_cs));
#else
- *ctx->cfence = ctx->fence;
+ *ctx->radeon->cfence = ctx->radeon->fence;
#endif
r600_context_update_fenced_list(ctx);
- fence = ctx->fence + 1;
- if (fence < ctx->fence) {
+ fence = ctx->radeon->fence + 1;
+ if (fence < ctx->radeon->fence) {
/* wrap around */
fence = 1;
r600_context_fence_wraparound(ctx, fence);
}
- ctx->fence = fence;
+ ctx->radeon->fence = fence;
/* restart */
for (int i = 0; i < ctx->creloc; i++) {
@@ -1261,44 +1270,90 @@ out_err:
bof_decref(root);
}
-static void r600_query_result(struct r600_context *ctx, struct r600_query *query)
+static boolean r600_query_result(struct r600_context *ctx, struct r600_query *query, boolean wait)
{
u64 start, end;
u32 *results;
int i;
+ int size;
+
+ if (wait)
+ results = r600_bo_map(ctx->radeon, query->buffer, PB_USAGE_CPU_READ, NULL);
+ else
+ results = r600_bo_map(ctx->radeon, query->buffer, PB_USAGE_DONTBLOCK | PB_USAGE_CPU_READ, NULL);
+ if (!results)
+ return FALSE;
- results = r600_bo_map(ctx->radeon, query->buffer, 0, NULL);
- for (i = 0; i < query->num_results; i += 4) {
+ size = query->num_results * (query->type == PIPE_QUERY_OCCLUSION_COUNTER ? ctx->max_db : 1);
+ for (i = 0; i < size; i += 4) {
start = (u64)results[i] | (u64)results[i + 1] << 32;
end = (u64)results[i + 2] | (u64)results[i + 3] << 32;
- if ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL)) {
+ if (((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))
+ || query->type == PIPE_QUERY_TIME_ELAPSED) {
query->result += end - start;
}
}
r600_bo_unmap(ctx->radeon, query->buffer);
query->num_results = 0;
+
+ return TRUE;
}
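
r600_query_result() now maps the buffer without blocking when wait is FALSE and reports whether the map succeeded; for occlusion queries it sums one (begin, end) pair per DB. A rough sketch of the layout the loop walks, offered as an interpretation of the indexing above rather than a statement from the hardware docs:

/* Each sample is assumed to occupy four 32-bit words per DB slot:
 *   results[i + 0], results[i + 1]   begin counter (low, high dword)
 *   results[i + 2], results[i + 3]   end counter   (low, high dword)
 * Bit 63 of begin/end is set once the GPU has written the value, so a pair
 * only contributes (end - start) when both top bits are set -- except for
 * PIPE_QUERY_TIME_ELAPSED, whose timestamps are summed unconditionally.
 */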
void r600_query_begin(struct r600_context *ctx, struct r600_query *query)
{
- /* query request needs 6 dwords for begin + 6 dwords for end */
- if ((12 + ctx->pm4_cdwords) > ctx->pm4_ndwords) {
+ unsigned required_space;
+ int num_backends = r600_get_num_backends(ctx->radeon);
+
+ /* query request needs 6/8 dwords for begin + 6/8 dwords for end */
+ if (query->type == PIPE_QUERY_TIME_ELAPSED)
+ required_space = 16;
+ else
+ required_space = 12;
+
+ if ((required_space + ctx->pm4_cdwords) > ctx->pm4_ndwords) {
/* need to flush */
r600_context_flush(ctx);
}
/* if query buffer is full force a flush */
- if (query->num_results >= ((query->buffer_size >> 2) - 2)) {
+ if (query->num_results*4 >= query->buffer_size - 16) {
r600_context_flush(ctx);
- r600_query_result(ctx, query);
+ r600_query_result(ctx, query, TRUE);
}
+ if (query->type == PIPE_QUERY_OCCLUSION_COUNTER &&
+ num_backends > 0 && num_backends < ctx->max_db) {
+ /* as per info on ZPASS the driver must set the unused DB top bits */
+ u32 *results;
+ int i;
+
+ results = r600_bo_map(ctx->radeon, query->buffer, PB_USAGE_DONTBLOCK | PB_USAGE_CPU_WRITE, NULL);
+ if (results) {
+ memset(results + (query->num_results * 4), 0, ctx->max_db * 4 * 4);
+
+ for (i = num_backends; i < ctx->max_db; i++) {
+ results[(i * 4)+1] = 0x80000000;
+ results[(i * 4)+3] = 0x80000000;
+ }
+ r600_bo_unmap(ctx->radeon, query->buffer);
+ }
+ }
+
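When fewer backends (DBs) are active than ctx->max_db, the hardware never writes the result slots of the missing backends, so the block above pre-marks them as finished zero pairs. A small worked example under assumed values:

/* Example: ctx->max_db = 4, num_backends = 2, query->num_results = 0.
 * After the memset and loop, the slots for DB2 and DB3 read:
 *   results[ 8..11] = { 0x00000000, 0x80000000, 0x00000000, 0x80000000 }
 *   results[12..15] = { 0x00000000, 0x80000000, 0x00000000, 0x80000000 }
 * i.e. begin == end == 0 with bit 63 set, so r600_query_result() treats
 * them as valid pairs that add nothing to the count.
 */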
/* emit begin query */
- ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE, 2);
- ctx->pm4[ctx->pm4_cdwords++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
- ctx->pm4[ctx->pm4_cdwords++] = query->num_results + r600_bo_offset(query->buffer);
- ctx->pm4[ctx->pm4_cdwords++] = 0;
- ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_NOP, 0);
+ if (query->type == PIPE_QUERY_TIME_ELAPSED) {
+ ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
+ ctx->pm4[ctx->pm4_cdwords++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
+ ctx->pm4[ctx->pm4_cdwords++] = query->num_results*4 + r600_bo_offset(query->buffer);
+ ctx->pm4[ctx->pm4_cdwords++] = (3 << 29);
+ ctx->pm4[ctx->pm4_cdwords++] = 0;
+ ctx->pm4[ctx->pm4_cdwords++] = 0;
+ } else {
+ ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
+ ctx->pm4[ctx->pm4_cdwords++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
+ ctx->pm4[ctx->pm4_cdwords++] = query->num_results*4 + r600_bo_offset(query->buffer);
+ ctx->pm4[ctx->pm4_cdwords++] = 0;
+ }
+ ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_NOP, 0, 0);
ctx->pm4[ctx->pm4_cdwords++] = 0;
r600_context_bo_reloc(ctx, &ctx->pm4[ctx->pm4_cdwords - 1], query->buffer);
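
In the EVENT_WRITE_EOP packets, the dword written as (3 << 29) here (and (1 << 29) in the fence emit above) selects what the CP writes back when the event retires. A hedged reading of that field, based only on the values this patch uses:

/* DATA_SEL is assumed to live in bits [31:29] of the ADDRESS_HI dword:
 *   1 << 29  write a 32-bit data word   (used for the context fence)
 *   3 << 29  write the 64-bit GPU clock (used for TIME_ELAPSED samples)
 * INT_SEL in bits [25:24] stays 0, i.e. no interrupt on completion.
 */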
@@ -1310,25 +1365,56 @@ void r600_query_begin(struct r600_context *ctx, struct r600_query *query)
void r600_query_end(struct r600_context *ctx, struct r600_query *query)
{
/* emit begin query */
- ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE, 2);
- ctx->pm4[ctx->pm4_cdwords++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
- ctx->pm4[ctx->pm4_cdwords++] = query->num_results + 8 + r600_bo_offset(query->buffer);
- ctx->pm4[ctx->pm4_cdwords++] = 0;
- ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_NOP, 0);
+ if (query->type == PIPE_QUERY_TIME_ELAPSED) {
+ ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
+ ctx->pm4[ctx->pm4_cdwords++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
+ ctx->pm4[ctx->pm4_cdwords++] = query->num_results*4 + 8 + r600_bo_offset(query->buffer);
+ ctx->pm4[ctx->pm4_cdwords++] = (3 << 29);
+ ctx->pm4[ctx->pm4_cdwords++] = 0;
+ ctx->pm4[ctx->pm4_cdwords++] = 0;
+ } else {
+ ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
+ ctx->pm4[ctx->pm4_cdwords++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
+ ctx->pm4[ctx->pm4_cdwords++] = query->num_results*4 + 8 + r600_bo_offset(query->buffer);
+ ctx->pm4[ctx->pm4_cdwords++] = 0;
+ }
+ ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_NOP, 0, 0);
ctx->pm4[ctx->pm4_cdwords++] = 0;
r600_context_bo_reloc(ctx, &ctx->pm4[ctx->pm4_cdwords - 1], query->buffer);
- query->num_results += 16;
+ query->num_results += 4 * (query->type == PIPE_QUERY_OCCLUSION_COUNTER ? ctx->max_db : 1);
query->state ^= R600_QUERY_STATE_STARTED;
query->state |= R600_QUERY_STATE_ENDED;
ctx->num_query_running--;
}
+void r600_query_predication(struct r600_context *ctx, struct r600_query *query, int operation,
+ int flag_wait)
+{
+ ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_SET_PREDICATION, 1, 0);
+
+ if (operation == PREDICATION_OP_CLEAR) {
+ ctx->pm4[ctx->pm4_cdwords++] = 0;
+ ctx->pm4[ctx->pm4_cdwords++] = PRED_OP(PREDICATION_OP_CLEAR);
+ } else {
+ int results_base = query->num_results - (4 * ctx->max_db);
+
+ if (results_base < 0)
+ results_base = 0;
+
+ ctx->pm4[ctx->pm4_cdwords++] = results_base*4 + r600_bo_offset(query->buffer);
+ ctx->pm4[ctx->pm4_cdwords++] = PRED_OP(operation) | (flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW) | PREDICATION_DRAW_VISIBLE;
+ ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_NOP, 0, 0);
+ ctx->pm4[ctx->pm4_cdwords++] = 0;
+ r600_context_bo_reloc(ctx, &ctx->pm4[ctx->pm4_cdwords - 1], query->buffer);
+ }
+}
+
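r600_query_predication() gives the pipe driver a way to start and stop conditional rendering from an occlusion query. The caller is not part of this file; a hypothetical sketch of how a render-condition hook might drive it (the function name and the PREDICATION_OP_ZPASS constant are assumptions):

/* Hypothetical caller, assuming the r600_priv.h declarations used above: */
static void r600_set_render_condition_sketch(struct r600_context *ctx,
                                             struct r600_query *query,
                                             boolean wait)
{
        if (query == NULL) {
                /* turn predication back off */
                r600_query_predication(ctx, NULL, PREDICATION_OP_CLEAR, 1);
                ctx->predicate_drawing = 0;
        } else {
                /* predicate following draws on the occlusion results */
                r600_query_predication(ctx, query, PREDICATION_OP_ZPASS, wait);
                ctx->predicate_drawing = 1;
        }
}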
struct r600_query *r600_context_query_create(struct r600_context *ctx, unsigned query_type)
{
struct r600_query *query;
- if (query_type != PIPE_QUERY_OCCLUSION_COUNTER)
+ if (query_type != PIPE_QUERY_OCCLUSION_COUNTER && query_type != PIPE_QUERY_TIME_ELAPSED)
return NULL;
query = calloc(1, sizeof(struct r600_query));
@@ -1370,8 +1456,12 @@ boolean r600_context_query_result(struct r600_context *ctx,
if (query->num_results) {
r600_context_flush(ctx);
}
- r600_query_result(ctx, query);
- *result = query->result;
+ if (!r600_query_result(ctx, query, wait))
+ return FALSE;
+ if (query->type == PIPE_QUERY_TIME_ELAPSED)
+ *result = (1000000*query->result)/r600_get_clock_crystal_freq(ctx->radeon);
+ else
+ *result = query->result;
query->result = 0;
return TRUE;
}
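
The TIME_ELAPSED path assumes r600_get_clock_crystal_freq() reports the reference clock in kHz (27000 for the common 27 MHz crystal), so (1000000 * ticks) / freq yields nanoseconds, the unit PIPE_QUERY_TIME_ELAPSED is specified in. A worked example under that assumption:

/* freq = 27000 kHz, ticks = end - begin = 54000
 * result = (1000000 * 54000) / 27000 = 2000000 ns = 2 ms
 */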