Diffstat (limited to 'src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c')
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c  143
1 file changed, 118 insertions(+), 25 deletions(-)
diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
index f9e6226436..1bdf7a0b2d 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
@@ -36,14 +36,14 @@
#include "pipe/p_config.h"
-#if defined(PIPE_OS_LINUX)
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD)
#include <unistd.h>
#include <sched.h>
#endif
#include "pipe/p_compiler.h"
#include "pipe/p_error.h"
-#include "pipe/p_debug.h"
+#include "util/u_debug.h"
#include "pipe/p_thread.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"
@@ -66,8 +66,12 @@ struct fenced_buffer_list
struct pb_fence_ops *ops;
size_t numDelayed;
-
struct list_head delayed;
+
+#ifdef DEBUG
+ size_t numUnfenced;
+ struct list_head unfenced;
+#endif
};
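
The two DEBUG-only fields above pair with the existing delayed list so that, in debug builds, every live buffer is linked on exactly one list: unfenced from creation until a fence is attached, delayed afterwards. A minimal consistency check illustrating that invariant (this helper is hypothetical, not part of the patch, and assumes fenced_list->mutex is held):

#ifdef DEBUG
/* Hypothetical sanity check: the counters must match the actual
 * length of each list. Call with fenced_list->mutex held. */
static INLINE void
fenced_buffer_list_check(struct fenced_buffer_list *fenced_list)
{
   struct list_head *curr;
   size_t n;

   n = 0;
   for(curr = fenced_list->delayed.next;
       curr != &fenced_list->delayed;
       curr = curr->next)
      ++n;
   assert(n == fenced_list->numDelayed);

   n = 0;
   for(curr = fenced_list->unfenced.next;
       curr != &fenced_list->unfenced;
       curr = curr->next)
      ++n;
   assert(n == fenced_list->numUnfenced);
}
#endif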
@@ -111,12 +115,15 @@ _fenced_buffer_add(struct fenced_buffer *fenced_buf)
{
struct fenced_buffer_list *fenced_list = fenced_buf->list;
- assert(fenced_buf->base.base.refcount);
+ assert(p_atomic_read(&fenced_buf->base.base.reference.count));
assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
assert(fenced_buf->fence);
- assert(!fenced_buf->head.prev);
- assert(!fenced_buf->head.next);
+#ifdef DEBUG
+ LIST_DEL(&fenced_buf->head);
+ assert(fenced_list->numUnfenced);
+ --fenced_list->numUnfenced;
+#endif
LIST_ADDTAIL(&fenced_buf->head, &fenced_list->delayed);
++fenced_list->numDelayed;
}
@@ -128,8 +135,19 @@ _fenced_buffer_add(struct fenced_buffer *fenced_buf)
static INLINE void
_fenced_buffer_destroy(struct fenced_buffer *fenced_buf)
{
- assert(!fenced_buf->base.base.refcount);
+ struct fenced_buffer_list *fenced_list = fenced_buf->list;
+
+ assert(p_atomic_read(&fenced_buf->base.base.reference.count) == 0);
assert(!fenced_buf->fence);
+#ifdef DEBUG
+ assert(fenced_buf->head.prev);
+ assert(fenced_buf->head.next);
+ LIST_DEL(&fenced_buf->head);
+ assert(fenced_list->numUnfenced);
+ --fenced_list->numUnfenced;
+#else
+ (void)fenced_list;
+#endif
pb_reference(&fenced_buf->buffer, NULL);
FREE(fenced_buf);
}
@@ -149,16 +167,21 @@ _fenced_buffer_remove(struct fenced_buffer_list *fenced_list,
assert(fenced_buf->head.prev);
assert(fenced_buf->head.next);
- LIST_DEL(&fenced_buf->head);
-#ifdef DEBUG
- fenced_buf->head.prev = NULL;
- fenced_buf->head.next = NULL;
-#endif
+ LIST_DEL(&fenced_buf->head);
assert(fenced_list->numDelayed);
--fenced_list->numDelayed;
- if(!fenced_buf->base.base.refcount)
+#ifdef DEBUG
+ LIST_ADDTAIL(&fenced_buf->head, &fenced_list->unfenced);
+ ++fenced_list->numUnfenced;
+#endif
+
+ /**
+ * FIXME!!!
+ */
+
+ if(!p_atomic_read(&fenced_buf->base.base.reference.count))
_fenced_buffer_destroy(fenced_buf);
}
@@ -234,7 +257,7 @@ fenced_buffer_destroy(struct pb_buffer *buf)
struct fenced_buffer_list *fenced_list = fenced_buf->list;
pipe_mutex_lock(fenced_list->mutex);
- assert(fenced_buf->base.base.refcount == 0);
+ assert(p_atomic_read(&fenced_buf->base.base.reference.count) == 0);
if (fenced_buf->fence) {
struct pb_fence_ops *ops = fenced_list->ops;
if(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0) {
@@ -265,16 +288,26 @@ fenced_buffer_map(struct pb_buffer *buf,
unsigned flags)
{
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+ struct fenced_buffer_list *fenced_list = fenced_buf->list;
+ struct pb_fence_ops *ops = fenced_list->ops;
void *map;
- assert(flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE);
- assert(!(flags & ~PIPE_BUFFER_USAGE_CPU_READ_WRITE));
- flags &= PIPE_BUFFER_USAGE_CPU_READ_WRITE;
+ assert(!(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE));
- /* Check for GPU read/write access */
- if(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) {
- /* Wait for the GPU to finish writing */
- _fenced_buffer_finish(fenced_buf);
+ /* Serialize writes */
+ if((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) ||
+ ((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ) && (flags & PIPE_BUFFER_USAGE_CPU_WRITE))) {
+ if(flags & PIPE_BUFFER_USAGE_DONTBLOCK) {
+ /* Don't wait for the GPU to finish writing */
+ if(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0)
+ _fenced_buffer_remove(fenced_list, fenced_buf);
+ else
+ return NULL;
+ }
+ else {
+ /* Wait for the GPU to finish writing */
+ _fenced_buffer_finish(fenced_buf);
+ }
}
#if 0
@@ -288,7 +321,7 @@ fenced_buffer_map(struct pb_buffer *buf,
map = pb_map(fenced_buf->buffer, flags);
if(map) {
++fenced_buf->mapcount;
- fenced_buf->flags |= flags;
+ fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE;
}
return map;
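
The map path above now honours PIPE_BUFFER_USAGE_DONTBLOCK: when the fence guarding a pending GPU access has not signalled, the call returns NULL immediately instead of stalling. A sketch of a caller using the new non-blocking path (the fallback-to-blocking pattern is illustrative, not part of the patch):

/* Illustrative caller: try a non-blocking CPU write first and fall
 * back to a blocking map if the GPU is still using the buffer. */
static void
write_when_possible(struct pb_buffer *buf)
{
   void *map;

   map = pb_map(buf, PIPE_BUFFER_USAGE_CPU_WRITE |
                     PIPE_BUFFER_USAGE_DONTBLOCK);
   if(!map) {
      /* Fence not signalled yet -- this second call waits. */
      map = pb_map(buf, PIPE_BUFFER_USAGE_CPU_WRITE);
   }
   if(map) {
      /* ... write the data ... */
      pb_unmap(buf);
   }
}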
@@ -432,7 +465,7 @@ fenced_buffer_create(struct fenced_buffer_list *fenced_list,
return NULL;
}
- buf->base.base.refcount = 1;
+ pipe_reference_init(&buf->base.base.reference, 1);
buf->base.base.alignment = buffer->base.alignment;
buf->base.base.usage = buffer->base.usage;
buf->base.base.size = buffer->base.size;
@@ -441,6 +474,13 @@ fenced_buffer_create(struct fenced_buffer_list *fenced_list,
buf->buffer = buffer;
buf->list = fenced_list;
+#ifdef DEBUG
+ pipe_mutex_lock(fenced_list->mutex);
+ LIST_ADDTAIL(&buf->head, &fenced_list->unfenced);
+ ++fenced_list->numUnfenced;
+ pipe_mutex_unlock(fenced_list->mutex);
+#endif
+
return &buf->base;
}
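
Buffer creation now goes through pipe_reference_init() instead of assigning the old refcount field, matching the p_atomic_read() asserts earlier in the patch. A hedged sketch of the resulting lifetime pattern (the provider, size and descriptor values are placeholders):

/* Sketch of the reference-counting pattern this patch migrates to:
 * the count starts at 1 on creation, pb_reference() adjusts it, and
 * the buffer is destroyed when the count drops to zero. */
static void
reference_example(struct pb_manager *provider)
{
   struct pb_desc desc;
   struct pb_buffer *buf;
   struct pb_buffer *extra = NULL;

   desc.alignment = 64;
   desc.usage = PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   buf = provider->create_buffer(provider, 4096, &desc);
   if(!buf)
      return;

   pb_reference(&extra, buf);   /* count: 1 -> 2 */
   pb_reference(&extra, NULL);  /* count: 2 -> 1 */
   pb_reference(&buf, NULL);    /* count: 1 -> 0, buffer destroyed */
}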
@@ -457,9 +497,13 @@ fenced_buffer_list_create(struct pb_fence_ops *ops)
fenced_list->ops = ops;
LIST_INITHEAD(&fenced_list->delayed);
-
fenced_list->numDelayed = 0;
+#ifdef DEBUG
+ LIST_INITHEAD(&fenced_list->unfenced);
+ fenced_list->numUnfenced = 0;
+#endif
+
pipe_mutex_init(fenced_list->mutex);
return fenced_list;
@@ -476,6 +520,51 @@ fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
}
+#ifdef DEBUG
+void
+fenced_buffer_list_dump(struct fenced_buffer_list *fenced_list)
+{
+ struct pb_fence_ops *ops = fenced_list->ops;
+ struct list_head *curr, *next;
+ struct fenced_buffer *fenced_buf;
+
+ pipe_mutex_lock(fenced_list->mutex);
+
+ debug_printf("%10s %7s %10s %s\n",
+ "buffer", "reference.count", "fence", "signalled");
+
+ curr = fenced_list->unfenced.next;
+ next = curr->next;
+ while(curr != &fenced_list->unfenced) {
+ fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
+ assert(!fenced_buf->fence);
+ debug_printf("%10p %7u\n",
+ fenced_buf,
+ fenced_buf->base.base.reference.count);
+ curr = next;
+ next = curr->next;
+ }
+
+ curr = fenced_list->delayed.next;
+ next = curr->next;
+ while(curr != &fenced_list->delayed) {
+ int signaled;
+ fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
+ signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
+ debug_printf("%10p %7u %10p %s\n",
+ fenced_buf,
+ fenced_buf->base.base.reference.count,
+ fenced_buf->fence,
+ signaled == 0 ? "y" : "n");
+ curr = next;
+ next = curr->next;
+ }
+
+ pipe_mutex_unlock(fenced_list->mutex);
+}
+#endif
+
+
void
fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list)
{
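
The new fenced_buffer_list_dump() above walks both lists under the mutex and prints one row per buffer, including whether its fence has signalled. A typical, illustrative call site is just before teardown, to spot leaked references in debug builds:

/* Illustrative call site (not part of the patch): dump outstanding
 * buffers before destroying the list to spot leaked references. */
#ifdef DEBUG
fenced_buffer_list_dump(fenced_list);
#endif
fenced_buffer_list_destroy(fenced_list);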
@@ -484,13 +573,17 @@ fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list)
/* Wait on outstanding fences */
while (fenced_list->numDelayed) {
pipe_mutex_unlock(fenced_list->mutex);
-#if defined(PIPE_OS_LINUX)
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD)
sched_yield();
#endif
_fenced_buffer_list_check_free(fenced_list, 1);
pipe_mutex_lock(fenced_list->mutex);
}
+#ifdef DEBUG
+ //assert(!fenced_list->numUnfenced);
+#endif
+
pipe_mutex_unlock(fenced_list->mutex);
fenced_list->ops->destroy(fenced_list->ops);