Diffstat (limited to 'src/gallium/drivers/nouveau')
-rw-r--r--  src/gallium/drivers/nouveau/Makefile           |   5
-rw-r--r--  src/gallium/drivers/nouveau/nouveau_buffer.c   | 488
-rw-r--r--  src/gallium/drivers/nouveau/nouveau_buffer.h   | 139
-rw-r--r--  src/gallium/drivers/nouveau/nouveau_context.h  |  26
-rw-r--r--  src/gallium/drivers/nouveau/nouveau_fence.c    | 220
-rw-r--r--  src/gallium/drivers/nouveau/nouveau_fence.h    |  59
-rw-r--r--  src/gallium/drivers/nouveau/nouveau_mm.c       | 288
-rw-r--r--  src/gallium/drivers/nouveau/nouveau_mm.h       |  32
-rw-r--r--  src/gallium/drivers/nouveau/nouveau_screen.c   |  16
-rw-r--r--  src/gallium/drivers/nouveau/nouveau_screen.h   |  18
-rw-r--r--  src/gallium/drivers/nouveau/nouveau_stateobj.h | 316
-rw-r--r--  src/gallium/drivers/nouveau/nouveau_winsys.h   |   1
12 files changed, 1287 insertions(+), 321 deletions(-)
diff --git a/src/gallium/drivers/nouveau/Makefile b/src/gallium/drivers/nouveau/Makefile
index a33bf5ebc2..3210d1ff77 100644
--- a/src/gallium/drivers/nouveau/Makefile
+++ b/src/gallium/drivers/nouveau/Makefile
@@ -7,6 +7,9 @@ LIBRARY_INCLUDES = \
$(LIBDRM_CFLAGS) \
-I$(TOP)/src/gallium/drivers/nouveau/include
-C_SOURCES = nouveau_screen.c
+C_SOURCES = nouveau_screen.c \
+ nouveau_fence.c \
+ nouveau_mm.c \
+ nouveau_buffer.c
include ../../Makefile.template
diff --git a/src/gallium/drivers/nouveau/nouveau_buffer.c b/src/gallium/drivers/nouveau/nouveau_buffer.c
new file mode 100644
index 0000000000..d0cc29104b
--- /dev/null
+++ b/src/gallium/drivers/nouveau/nouveau_buffer.c
@@ -0,0 +1,488 @@
+
+#include "util/u_inlines.h"
+#include "util/u_memory.h"
+#include "util/u_math.h"
+
+#include "nouveau_screen.h"
+#include "nouveau_context.h"
+#include "nouveau_winsys.h"
+#include "nouveau_fence.h"
+#include "nouveau_buffer.h"
+#include "nouveau_mm.h"
+
+struct nouveau_transfer {
+ struct pipe_transfer base;
+};
+
+static INLINE struct nouveau_transfer *
+nouveau_transfer(struct pipe_transfer *transfer)
+{
+ return (struct nouveau_transfer *)transfer;
+}
+
+static INLINE boolean
+nouveau_buffer_allocate(struct nouveau_screen *screen,
+ struct nv04_resource *buf, unsigned domain)
+{
+ if (domain == NOUVEAU_BO_VRAM) {
+ buf->mm = nouveau_mm_allocate(screen->mm_VRAM, buf->base.width0,
+ &buf->bo, &buf->offset);
+ if (!buf->bo)
+ return nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_GART);
+ } else
+ if (domain == NOUVEAU_BO_GART) {
+ buf->mm = nouveau_mm_allocate(screen->mm_GART, buf->base.width0,
+ &buf->bo, &buf->offset);
+ if (!buf->bo)
+ return FALSE;
+ }
+ if (domain != NOUVEAU_BO_GART) {
+ if (!buf->data) {
+ buf->data = MALLOC(buf->base.width0);
+ if (!buf->data)
+ return FALSE;
+ }
+ }
+ buf->domain = domain;
+ return TRUE;
+}
+
+static INLINE void
+release_allocation(struct nouveau_mm_allocation **mm,
+ struct nouveau_fence *fence)
+{
+ nouveau_fence_work(fence, nouveau_mm_free_work, *mm);
+ (*mm) = NULL;
+}
+
+INLINE void
+nouveau_buffer_release_gpu_storage(struct nv04_resource *buf)
+{
+ nouveau_bo_ref(NULL, &buf->bo);
+
+ if (buf->mm)
+ release_allocation(&buf->mm, buf->fence);
+
+ buf->domain = 0;
+}
+
+static INLINE boolean
+nouveau_buffer_reallocate(struct nouveau_screen *screen,
+ struct nv04_resource *buf, unsigned domain)
+{
+ nouveau_buffer_release_gpu_storage(buf);
+
+ return nouveau_buffer_allocate(screen, buf, domain);
+}
+
+static void
+nouveau_buffer_destroy(struct pipe_screen *pscreen,
+ struct pipe_resource *presource)
+{
+ struct nv04_resource *res = nv04_resource(presource);
+
+ nouveau_buffer_release_gpu_storage(res);
+
+ if (res->data && !(res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
+ FREE(res->data);
+
+ FREE(res);
+}
+
+/* Maybe just migrate to GART right away if we actually need to do this. */
+boolean
+nouveau_buffer_download(struct nouveau_context *nv, struct nv04_resource *buf,
+ unsigned start, unsigned size)
+{
+ struct nouveau_mm_allocation *mm;
+ struct nouveau_bo *bounce = NULL;
+ uint32_t offset;
+
+ assert(buf->domain == NOUVEAU_BO_VRAM);
+
+ mm = nouveau_mm_allocate(nv->screen->mm_GART, size, &bounce, &offset);
+ if (!bounce)
+ return FALSE;
+
+ nv->copy_data(nv, bounce, offset, NOUVEAU_BO_GART,
+ buf->bo, buf->offset + start, NOUVEAU_BO_VRAM, size);
+
+ if (nouveau_bo_map_range(bounce, offset, size, NOUVEAU_BO_RD))
+ return FALSE;
+ memcpy(buf->data + start, bounce->map, size);
+ nouveau_bo_unmap(bounce);
+
+ buf->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
+
+ nouveau_bo_ref(NULL, &bounce);
+ if (mm)
+ nouveau_mm_free(mm);
+ return TRUE;
+}
+
+static boolean
+nouveau_buffer_upload(struct nouveau_context *nv, struct nv04_resource *buf,
+ unsigned start, unsigned size)
+{
+ struct nouveau_mm_allocation *mm;
+ struct nouveau_bo *bounce = NULL;
+ uint32_t offset;
+
+ if (size <= 192) {
+ nv->push_data(nv, buf->bo, buf->offset + start, buf->domain,
+ size, buf->data + start);
+ return TRUE;
+ }
+
+ mm = nouveau_mm_allocate(nv->screen->mm_GART, size, &bounce, &offset);
+ if (!bounce)
+ return FALSE;
+
+ nouveau_bo_map_range(bounce, offset, size,
+ NOUVEAU_BO_WR | NOUVEAU_BO_NOSYNC);
+ memcpy(bounce->map, buf->data + start, size);
+ nouveau_bo_unmap(bounce);
+
+ nv->copy_data(nv, buf->bo, buf->offset + start, NOUVEAU_BO_VRAM,
+ bounce, offset, NOUVEAU_BO_GART, size);
+
+ nouveau_bo_ref(NULL, &bounce);
+ if (mm)
+ release_allocation(&mm, nv->screen->fence.current);
+
+ if (start == 0 && size == buf->base.width0)
+ buf->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
+ return TRUE;
+}
+
+static struct pipe_transfer *
+nouveau_buffer_transfer_get(struct pipe_context *pipe,
+ struct pipe_resource *resource,
+ unsigned level, unsigned usage,
+ const struct pipe_box *box)
+{
+ struct nv04_resource *buf = nv04_resource(resource);
+ struct nouveau_context *nv = nouveau_context(pipe);
+ struct nouveau_transfer *xfr = CALLOC_STRUCT(nouveau_transfer);
+ if (!xfr)
+ return NULL;
+
+ xfr->base.resource = resource;
+ xfr->base.box.x = box->x;
+ xfr->base.box.width = box->width;
+ xfr->base.usage = usage;
+
+ if (buf->domain == NOUVEAU_BO_VRAM) {
+ if (usage & PIPE_TRANSFER_READ) {
+ if (buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING)
+ nouveau_buffer_download(nv, buf, 0, buf->base.width0);
+ }
+ }
+
+ return &xfr->base;
+}
+
+static void
+nouveau_buffer_transfer_destroy(struct pipe_context *pipe,
+ struct pipe_transfer *transfer)
+{
+ struct nv04_resource *buf = nv04_resource(transfer->resource);
+ struct nouveau_transfer *xfr = nouveau_transfer(transfer);
+ struct nouveau_context *nv = nouveau_context(pipe);
+
+ if (xfr->base.usage & PIPE_TRANSFER_WRITE) {
+ /* writing is worse than reading for the score, so penalize it harder */
+ nouveau_buffer_adjust_score(nv, buf, -5000);
+
+ if (buf->domain == NOUVEAU_BO_VRAM) {
+ nouveau_buffer_upload(nv, buf, transfer->box.x, transfer->box.width);
+ }
+
+ if (buf->domain != 0 && (buf->base.bind & (PIPE_BIND_VERTEX_BUFFER |
+ PIPE_BIND_INDEX_BUFFER)))
+ nv->vbo_dirty = TRUE;
+ }
+
+ FREE(xfr);
+}
+
+static INLINE boolean
+nouveau_buffer_sync(struct nv04_resource *buf, unsigned rw)
+{
+ if (rw == PIPE_TRANSFER_READ) {
+ if (!buf->fence_wr)
+ return TRUE;
+ if (!nouveau_fence_wait(buf->fence_wr))
+ return FALSE;
+ } else {
+ if (!buf->fence)
+ return TRUE;
+ if (!nouveau_fence_wait(buf->fence))
+ return FALSE;
+
+ nouveau_fence_ref(NULL, &buf->fence);
+ }
+ nouveau_fence_ref(NULL, &buf->fence_wr);
+
+ return TRUE;
+}
+
+static INLINE boolean
+nouveau_buffer_busy(struct nv04_resource *buf, unsigned rw)
+{
+ if (rw == PIPE_TRANSFER_READ)
+ return (buf->fence_wr && !nouveau_fence_signalled(buf->fence_wr));
+ else
+ return (buf->fence && !nouveau_fence_signalled(buf->fence));
+}
+
+static void *
+nouveau_buffer_transfer_map(struct pipe_context *pipe,
+ struct pipe_transfer *transfer)
+{
+ struct nouveau_transfer *xfr = nouveau_transfer(transfer);
+ struct nv04_resource *buf = nv04_resource(transfer->resource);
+ struct nouveau_bo *bo = buf->bo;
+ uint8_t *map;
+ int ret;
+ uint32_t offset = xfr->base.box.x;
+ uint32_t flags;
+
+ nouveau_buffer_adjust_score(nouveau_context(pipe), buf, -250);
+
+ if (buf->domain != NOUVEAU_BO_GART)
+ return buf->data + offset;
+
+ if (buf->mm)
+ flags = NOUVEAU_BO_NOSYNC | NOUVEAU_BO_RDWR;
+ else
+ flags = nouveau_screen_transfer_flags(xfr->base.usage);
+
+ offset += buf->offset;
+
+ ret = nouveau_bo_map_range(buf->bo, offset, xfr->base.box.width, flags);
+ if (ret)
+ return NULL;
+ map = bo->map;
+
+ /* Unmap right away: multiple buffers can share a single nouveau_bo, and
+ * leaving it mapped might make future maps fail or trigger "reloc while
+ * mapped" errors. The mapping persists, so the pointer remains valid.
+ */
+ nouveau_bo_unmap(bo);
+
+ if (buf->mm) {
+ if (xfr->base.usage & PIPE_TRANSFER_DONTBLOCK) {
+ if (nouveau_buffer_busy(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE))
+ return NULL;
+ } else
+ if (!(xfr->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
+ nouveau_buffer_sync(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE);
+ }
+ }
+ return map;
+}
+
+
+
+static void
+nouveau_buffer_transfer_flush_region(struct pipe_context *pipe,
+ struct pipe_transfer *transfer,
+ const struct pipe_box *box)
+{
+ struct nv04_resource *res = nv04_resource(transfer->resource);
+ struct nouveau_bo *bo = res->bo;
+ unsigned offset = res->offset + transfer->box.x + box->x;
+
+ /* not using non-snooped system memory yet, so no cache flush is needed */
+ if (1)
+ return;
+
+ /* XXX: maybe need to upload for VRAM buffers here */
+
+ nouveau_screen_bo_map_flush_range(pipe->screen, bo, offset, box->width);
+}
+
+static void
+nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
+ struct pipe_transfer *transfer)
+{
+ /* we've called nouveau_bo_unmap right after map */
+}
+
+const struct u_resource_vtbl nouveau_buffer_vtbl =
+{
+ u_default_resource_get_handle, /* get_handle */
+ nouveau_buffer_destroy, /* resource_destroy */
+ NULL, /* is_resource_referenced */
+ nouveau_buffer_transfer_get, /* get_transfer */
+ nouveau_buffer_transfer_destroy, /* transfer_destroy */
+ nouveau_buffer_transfer_map, /* transfer_map */
+ nouveau_buffer_transfer_flush_region, /* transfer_flush_region */
+ nouveau_buffer_transfer_unmap, /* transfer_unmap */
+ u_default_transfer_inline_write /* transfer_inline_write */
+};
+
+struct pipe_resource *
+nouveau_buffer_create(struct pipe_screen *pscreen,
+ const struct pipe_resource *templ)
+{
+ struct nouveau_screen *screen = nouveau_screen(pscreen);
+ struct nv04_resource *buffer;
+ boolean ret;
+
+ buffer = CALLOC_STRUCT(nv04_resource);
+ if (!buffer)
+ return NULL;
+
+ buffer->base = *templ;
+ buffer->vtbl = &nouveau_buffer_vtbl;
+ pipe_reference_init(&buffer->base.reference, 1);
+ buffer->base.screen = pscreen;
+
+ if ((buffer->base.bind & screen->sysmem_bindings) == screen->sysmem_bindings)
+ ret = nouveau_buffer_allocate(screen, buffer, 0);
+ else
+ ret = nouveau_buffer_allocate(screen, buffer, NOUVEAU_BO_GART);
+
+ if (ret == FALSE)
+ goto fail;
+
+ return &buffer->base;
+
+fail:
+ FREE(buffer);
+ return NULL;
+}
+
+
+struct pipe_resource *
+nouveau_user_buffer_create(struct pipe_screen *pscreen, void *ptr,
+ unsigned bytes, unsigned bind)
+{
+ struct nv04_resource *buffer;
+
+ buffer = CALLOC_STRUCT(nv04_resource);
+ if (!buffer)
+ return NULL;
+
+ pipe_reference_init(&buffer->base.reference, 1);
+ buffer->vtbl = &nouveau_buffer_vtbl;
+ buffer->base.screen = pscreen;
+ buffer->base.format = PIPE_FORMAT_R8_UNORM;
+ buffer->base.usage = PIPE_USAGE_IMMUTABLE;
+ buffer->base.bind = bind;
+ buffer->base.width0 = bytes;
+ buffer->base.height0 = 1;
+ buffer->base.depth0 = 1;
+
+ buffer->data = ptr;
+ buffer->status = NOUVEAU_BUFFER_STATUS_USER_MEMORY;
+
+ return &buffer->base;
+}
+
+/* Like nouveau_buffer_download, but for GART buffers. Merge the two? */
+static INLINE boolean
+nouveau_buffer_data_fetch(struct nv04_resource *buf, struct nouveau_bo *bo,
+ unsigned offset, unsigned size)
+{
+ if (!buf->data) {
+ buf->data = MALLOC(size);
+ if (!buf->data)
+ return FALSE;
+ }
+ if (nouveau_bo_map_range(bo, offset, size, NOUVEAU_BO_RD))
+ return FALSE;
+ memcpy(buf->data, bo->map, size);
+ nouveau_bo_unmap(bo);
+
+ return TRUE;
+}
+
+/* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */
+boolean
+nouveau_buffer_migrate(struct nouveau_context *nv,
+ struct nv04_resource *buf, const unsigned new_domain)
+{
+ struct nouveau_screen *screen = nv->screen;
+ struct nouveau_bo *bo;
+ const unsigned old_domain = buf->domain;
+ unsigned size = buf->base.width0;
+ unsigned offset;
+ int ret;
+
+ assert(new_domain != old_domain);
+
+ if (new_domain == NOUVEAU_BO_GART && old_domain == 0) {
+ if (!nouveau_buffer_allocate(screen, buf, new_domain))
+ return FALSE;
+ ret = nouveau_bo_map_range(buf->bo, buf->offset, size, NOUVEAU_BO_WR |
+ NOUVEAU_BO_NOSYNC);
+ if (ret)
+ return FALSE;
+ memcpy(buf->bo->map, buf->data, size);
+ nouveau_bo_unmap(buf->bo);
+ FREE(buf->data);
+ } else
+ if (old_domain != 0 && new_domain != 0) {
+ struct nouveau_mm_allocation *mm = buf->mm;
+
+ if (new_domain == NOUVEAU_BO_VRAM) {
+ /* keep a system memory copy of our data in case we hit a fallback */
+ if (!nouveau_buffer_data_fetch(buf, buf->bo, buf->offset, size))
+ return FALSE;
+ debug_printf("migrating %u KiB to VRAM\n", size / 1024);
+ }
+
+ offset = buf->offset;
+ bo = buf->bo;
+ buf->bo = NULL;
+ buf->mm = NULL;
+ nouveau_buffer_allocate(screen, buf, new_domain);
+
+ nv->copy_data(nv, buf->bo, buf->offset, new_domain,
+ bo, offset, old_domain, buf->base.width0);
+
+ nouveau_bo_ref(NULL, &bo);
+ if (mm)
+ release_allocation(&mm, screen->fence.current);
+ } else
+ if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) {
+ if (!nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM))
+ return FALSE;
+ if (!nouveau_buffer_upload(nv, buf, 0, buf->base.width0))
+ return FALSE;
+ } else
+ return FALSE;
+
+ assert(buf->domain == new_domain);
+ return TRUE;
+}
+
+/* Migrate data from glVertexAttribPointer(non-VBO) user buffers to GART.
+ * We'd like to only allocate @size bytes here, but then we'd have to rebase
+ * the vertex indices ...
+ */
+boolean
+nouveau_user_buffer_upload(struct nv04_resource *buf,
+ unsigned base, unsigned size)
+{
+ struct nouveau_screen *screen = nouveau_screen(buf->base.screen);
+ int ret;
+
+ assert(buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY);
+
+ buf->base.width0 = base + size;
+ if (!nouveau_buffer_reallocate(screen, buf, NOUVEAU_BO_GART))
+ return FALSE;
+
+ ret = nouveau_bo_map_range(buf->bo, buf->offset + base, size,
+ NOUVEAU_BO_WR | NOUVEAU_BO_NOSYNC);
+ if (ret)
+ return FALSE;
+ memcpy(buf->bo->map, buf->data + base, size);
+ nouveau_bo_unmap(buf->bo);
+
+ return TRUE;
+}
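
How the transfer path above is driven in practice: the sketch below is not
part of the patch, but shows a state-tracker-style update of bytes [64, 192)
against the vtbl that nouveau_buffer.c installs, using the standard Gallium
transfer interface of this era (u_box_1d() comes from util/u_box.h; `data'
is assumed to be the caller's source bytes).

    static void
    example_buffer_update(struct pipe_context *pipe,
                          struct pipe_resource *resource,
                          const uint8_t *data)
    {
       struct pipe_box box;
       struct pipe_transfer *xfr;
       uint8_t *map;

       u_box_1d(64, 128, &box);
       xfr = pipe->get_transfer(pipe, resource, 0, PIPE_TRANSFER_WRITE, &box);
       if (!xfr)
          return;
       /* for a VRAM buffer this returns the res->data shadow copy */
       map = pipe->transfer_map(pipe, xfr);
       if (map) {
          memcpy(map, data, 128);
          pipe->transfer_unmap(pipe, xfr); /* no-op; bo already unmapped */
       }
       /* for a VRAM buffer this is where nouveau_buffer_upload() runs:
        * <= 192 bytes go inline via push_data, larger updates bounce
        * through a GART bo and a GPU copy */
       pipe->transfer_destroy(pipe, xfr);
    }
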
diff --git a/src/gallium/drivers/nouveau/nouveau_buffer.h b/src/gallium/drivers/nouveau/nouveau_buffer.h
new file mode 100644
index 0000000000..46e3554bdf
--- /dev/null
+++ b/src/gallium/drivers/nouveau/nouveau_buffer.h
@@ -0,0 +1,139 @@
+#ifndef __NOUVEAU_BUFFER_H__
+#define __NOUVEAU_BUFFER_H__
+
+#include "util/u_transfer.h"
+#include "util/u_double_list.h"
+
+struct pipe_resource;
+struct nouveau_context;
+struct nouveau_bo;
+
+#define NOUVEAU_BUFFER_SCORE_MIN -25000
+#define NOUVEAU_BUFFER_SCORE_MAX 25000
+#define NOUVEAU_BUFFER_SCORE_VRAM_THRESHOLD 20000
+
+/* GPU_WRITING: the buffer was (or will be, after the next flush) written by
+ * the GPU, and resource->data does not yet reflect the modified VRAM contents
+ *
+ * USER_MEMORY: resource->data is a pointer to client memory and may change
+ * between GL calls
+ */
+#define NOUVEAU_BUFFER_STATUS_GPU_READING (1 << 0)
+#define NOUVEAU_BUFFER_STATUS_GPU_WRITING (1 << 1)
+#define NOUVEAU_BUFFER_STATUS_USER_MEMORY (1 << 7)
+
+/* Resources, if mapped into the GPU's address space, are guaranteed to
+ * have constant virtual addresses (nv50+).
+ *
+ * The address of a resource will lie within the nouveau_bo referenced,
+ * and this bo should be added to the memory manager's validation list.
+ */
+struct nv04_resource {
+ struct pipe_resource base;
+ const struct u_resource_vtbl *vtbl;
+
+ uint8_t *data;
+ struct nouveau_bo *bo;
+ uint32_t offset;
+
+ uint8_t status;
+ uint8_t domain;
+
+ int16_t score; /* lowered by CPU maps, raised by GPU use; high scores trigger migration to VRAM */
+
+ struct nouveau_fence *fence;
+ struct nouveau_fence *fence_wr;
+
+ struct nouveau_mm_allocation *mm;
+};
+
+void
+nouveau_buffer_release_gpu_storage(struct nv04_resource *);
+
+boolean
+nouveau_buffer_download(struct nouveau_context *, struct nv04_resource *,
+ unsigned start, unsigned size);
+
+boolean
+nouveau_buffer_migrate(struct nouveau_context *,
+ struct nv04_resource *, unsigned domain);
+
+static INLINE void
+nouveau_buffer_adjust_score(struct nouveau_context *pipe,
+ struct nv04_resource *res, int16_t score)
+{
+ if (score < 0) {
+ if (res->score > NOUVEAU_BUFFER_SCORE_MIN)
+ res->score += score;
+ } else
+ if (score > 0) {
+ if (res->score < NOUVEAU_BUFFER_SCORE_MAX)
+ res->score += score;
+ if (res->domain == NOUVEAU_BO_GART &&
+ res->score > NOUVEAU_BUFFER_SCORE_VRAM_THRESHOLD)
+ nouveau_buffer_migrate(pipe, res, NOUVEAU_BO_VRAM);
+ }
+}
+
+/* XXX: should wait for the fence (currently only used for vertex push) */
+static INLINE void *
+nouveau_resource_map_offset(struct nouveau_context *pipe,
+ struct nv04_resource *res, uint32_t offset,
+ uint32_t flags)
+{
+ void *map;
+
+ nouveau_buffer_adjust_score(pipe, res, -250);
+
+ if ((res->domain == NOUVEAU_BO_VRAM) &&
+ (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING))
+ nouveau_buffer_download(pipe, res, 0, res->base.width0);
+
+ if ((res->domain != NOUVEAU_BO_GART) ||
+ (res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
+ return res->data + offset;
+
+ if (res->mm)
+ flags |= NOUVEAU_BO_NOSYNC;
+
+ if (nouveau_bo_map_range(res->bo, res->offset + offset,
+ res->base.width0, flags))
+ return NULL;
+
+ map = res->bo->map;
+ nouveau_bo_unmap(res->bo);
+ return map;
+}
+
+static INLINE void
+nouveau_resource_unmap(struct nv04_resource *res)
+{
+ /* no-op */
+}
+
+static INLINE struct nv04_resource *
+nv04_resource(struct pipe_resource *resource)
+{
+ return (struct nv04_resource *)resource;
+}
+
+/* is the resource mapped into the GPU's address space (i.e. VRAM or GART)? */
+static INLINE boolean
+nouveau_resource_mapped_by_gpu(struct pipe_resource *resource)
+{
+ return nv04_resource(resource)->domain != 0;
+}
+
+struct pipe_resource *
+nouveau_buffer_create(struct pipe_screen *pscreen,
+ const struct pipe_resource *templ);
+
+struct pipe_resource *
+nouveau_user_buffer_create(struct pipe_screen *screen, void *ptr,
+ unsigned bytes, unsigned usage);
+
+boolean
+nouveau_user_buffer_upload(struct nv04_resource *, unsigned base,
+ unsigned size);
+
+#endif
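
The scoring heuristic in this header is driven from both sides:
nouveau_buffer.c charges -250 per CPU map and -5000 per CPU write, and a
driver is expected to add positive score whenever the GPU uses a buffer. A
sketch of that GPU side follows; the +100 per draw-time validation is an
illustrative value, not something this patch fixes.

    static void
    example_validate_vtxbuf(struct nouveau_context *nv,
                            struct nv04_resource *res)
    {
       /* a GART buffer that keeps getting drawn from, with no CPU maps
        * in between, eventually exceeds the threshold (20000) and
        * nouveau_buffer_adjust_score() migrates it to VRAM */
       nouveau_buffer_adjust_score(nv, res, 100);
    }
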
diff --git a/src/gallium/drivers/nouveau/nouveau_context.h b/src/gallium/drivers/nouveau/nouveau_context.h
new file mode 100644
index 0000000000..696e0d3f24
--- /dev/null
+++ b/src/gallium/drivers/nouveau/nouveau_context.h
@@ -0,0 +1,26 @@
+#ifndef __NOUVEAU_CONTEXT_H__
+#define __NOUVEAU_CONTEXT_H__
+
+#include "pipe/p_context.h"
+
+struct nouveau_context {
+ struct pipe_context pipe;
+ struct nouveau_screen *screen;
+
+ boolean vbo_dirty;
+
+ void (*copy_data)(struct nouveau_context *,
+ struct nouveau_bo *dst, unsigned, unsigned,
+ struct nouveau_bo *src, unsigned, unsigned, unsigned);
+ void (*push_data)(struct nouveau_context *,
+ struct nouveau_bo *dst, unsigned, unsigned,
+ unsigned, void *);
+};
+
+static INLINE struct nouveau_context *
+nouveau_context(struct pipe_context *pipe)
+{
+ return (struct nouveau_context *)pipe;
+}
+
+#endif
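
copy_data and push_data are the two hardware-specific hooks the common
buffer code depends on: a bo-to-bo copy executed by the GPU and an inline
upload through the command stream. Each driver assigns them at
context-creation time; the hook names below are hypothetical nv50-style
placeholders, since the real implementations live in the per-driver code,
not in this patch.

    static void
    example_init_transfer_hooks(struct nouveau_context *nv)
    {
       nv->copy_data = nv50_m2mf_copy_linear; /* M2MF bo-to-bo GPU copy */
       nv->push_data = nv50_sifc_linear_u8;   /* SIFC inline data upload */
    }
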
diff --git a/src/gallium/drivers/nouveau/nouveau_fence.c b/src/gallium/drivers/nouveau/nouveau_fence.c
new file mode 100644
index 0000000000..18bdb18ad4
--- /dev/null
+++ b/src/gallium/drivers/nouveau/nouveau_fence.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2010 Christoph Bumiller
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "util/u_double_list.h"
+
+#include "nouveau_screen.h"
+#include "nouveau_fence.h"
+
+#include "nouveau/nouveau_pushbuf.h"
+
+#ifdef PIPE_OS_UNIX
+#include <sched.h>
+#endif
+
+boolean
+nouveau_fence_new(struct nouveau_screen *screen, struct nouveau_fence **fence,
+ boolean emit)
+{
+ *fence = CALLOC_STRUCT(nouveau_fence);
+ if (!*fence)
+ return FALSE;
+
+ (*fence)->screen = screen;
+ (*fence)->ref = 1;
+ LIST_INITHEAD(&(*fence)->work);
+
+ if (emit)
+ nouveau_fence_emit(*fence);
+
+ return TRUE;
+}
+
+static void
+nouveau_fence_trigger_work(struct nouveau_fence *fence)
+{
+ struct nouveau_fence_work *work, *tmp;
+
+ LIST_FOR_EACH_ENTRY_SAFE(work, tmp, &fence->work, list) {
+ work->func(work->data);
+ LIST_DEL(&work->list);
+ FREE(work);
+ }
+}
+
+boolean
+nouveau_fence_work(struct nouveau_fence *fence,
+ void (*func)(void *), void *data)
+{
+ struct nouveau_fence_work *work;
+
+ if (!fence || fence->state == NOUVEAU_FENCE_STATE_SIGNALLED) {
+ func(data);
+ return TRUE;
+ }
+
+ work = CALLOC_STRUCT(nouveau_fence_work);
+ if (!work)
+ return FALSE;
+ work->func = func;
+ work->data = data;
+ LIST_ADD(&work->list, &fence->work);
+ return TRUE;
+}
+
+void
+nouveau_fence_emit(struct nouveau_fence *fence)
+{
+ struct nouveau_screen *screen = fence->screen;
+
+ fence->sequence = ++screen->fence.sequence;
+
+ assert(fence->state == NOUVEAU_FENCE_STATE_AVAILABLE);
+
+ screen->fence.emit(&screen->base, fence->sequence);
+
+ ++fence->ref;
+
+ if (screen->fence.tail)
+ screen->fence.tail->next = fence;
+ else
+ screen->fence.head = fence;
+
+ screen->fence.tail = fence;
+
+ fence->state = NOUVEAU_FENCE_STATE_EMITTED;
+}
+
+void
+nouveau_fence_del(struct nouveau_fence *fence)
+{
+ struct nouveau_fence *it;
+ struct nouveau_screen *screen = fence->screen;
+
+ if (fence->state == NOUVEAU_FENCE_STATE_EMITTED ||
+ fence->state == NOUVEAU_FENCE_STATE_FLUSHED) {
+ if (fence == screen->fence.head) {
+ screen->fence.head = fence->next;
+ if (!screen->fence.head)
+ screen->fence.tail = NULL;
+ } else {
+ for (it = screen->fence.head; it && it->next != fence; it = it->next);
+ if (it) it->next = fence->next; /* it == NULL would mean a corrupted list */
+ if (screen->fence.tail == fence)
+ screen->fence.tail = it;
+ }
+ }
+
+ if (!LIST_IS_EMPTY(&fence->work)) {
+ debug_printf("WARNING: deleting fence with work still pending !\n");
+ nouveau_fence_trigger_work(fence);
+ }
+
+ FREE(fence);
+}
+
+void
+nouveau_fence_update(struct nouveau_screen *screen, boolean flushed)
+{
+ struct nouveau_fence *fence;
+ struct nouveau_fence *next = NULL;
+ u32 sequence = screen->fence.update(&screen->base);
+
+ if (screen->fence.sequence_ack == sequence)
+ return;
+ screen->fence.sequence_ack = sequence;
+
+ for (fence = screen->fence.head; fence; fence = next) {
+ next = fence->next;
+ sequence = fence->sequence;
+
+ fence->state = NOUVEAU_FENCE_STATE_SIGNALLED;
+
+ nouveau_fence_trigger_work(fence);
+ nouveau_fence_ref(NULL, &fence);
+
+ if (sequence == screen->fence.sequence_ack)
+ break;
+ }
+ screen->fence.head = next;
+ if (!next)
+ screen->fence.tail = NULL;
+
+ if (flushed) {
+ for (fence = next; fence; fence = fence->next)
+ fence->state = NOUVEAU_FENCE_STATE_FLUSHED;
+ }
+}
+
+#define NOUVEAU_FENCE_MAX_SPINS (1U << 31)
+
+boolean
+nouveau_fence_signalled(struct nouveau_fence *fence)
+{
+ struct nouveau_screen *screen = fence->screen;
+
+ if (fence->state >= NOUVEAU_FENCE_STATE_EMITTED)
+ nouveau_fence_update(screen, FALSE);
+
+ return fence->state == NOUVEAU_FENCE_STATE_SIGNALLED;
+}
+
+boolean
+nouveau_fence_wait(struct nouveau_fence *fence)
+{
+ struct nouveau_screen *screen = fence->screen;
+ uint32_t spins = 0;
+
+ if (fence->state < NOUVEAU_FENCE_STATE_EMITTED) {
+ nouveau_fence_emit(fence);
+
+ if (fence == screen->fence.current)
+ nouveau_fence_new(screen, &screen->fence.current, FALSE);
+ }
+ if (fence->state < NOUVEAU_FENCE_STATE_FLUSHED)
+ FIRE_RING(screen->channel);
+
+ do {
+ nouveau_fence_update(screen, FALSE);
+
+ if (fence->state == NOUVEAU_FENCE_STATE_SIGNALLED)
+ return TRUE;
+ spins++;
+#ifdef PIPE_OS_UNIX
+ if (!(spins % 8)) /* donate a few cycles */
+ sched_yield();
+#endif
+ } while (spins < NOUVEAU_FENCE_MAX_SPINS);
+
+ debug_printf("Wait on fence %u (ack = %u, next = %u) timed out !\n",
+ fence->sequence,
+ screen->fence.sequence_ack, screen->fence.sequence);
+
+ return FALSE;
+}
+
+void
+nouveau_fence_next(struct nouveau_screen *screen)
+{
+ nouveau_fence_emit(screen->fence.current);
+ nouveau_fence_new(screen, &screen->fence.current, FALSE);
+}
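
A fence walks AVAILABLE -> EMITTED -> FLUSHED -> SIGNALLED, and emitted
fences form a singly linked list on the screen, ordered by sequence number,
which nouveau_fence_update() retires up to the sequence the GPU has
acknowledged. The sketch below shows the per-flush pattern a driver's flush
callback would follow; the wrapper function itself is illustrative.

    static void
    example_flush(struct nouveau_screen *screen)
    {
       /* emit screen->fence.current and start a fresh current fence */
       nouveau_fence_next(screen);

       FIRE_RING(screen->channel);

       /* retire signalled fences; TRUE marks remaining ones FLUSHED */
       nouveau_fence_update(screen, TRUE);
    }
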
diff --git a/src/gallium/drivers/nouveau/nouveau_fence.h b/src/gallium/drivers/nouveau/nouveau_fence.h
new file mode 100644
index 0000000000..680c75e99f
--- /dev/null
+++ b/src/gallium/drivers/nouveau/nouveau_fence.h
@@ -0,0 +1,59 @@
+
+#ifndef __NOUVEAU_FENCE_H__
+#define __NOUVEAU_FENCE_H__
+
+#include "util/u_inlines.h"
+#include "util/u_double_list.h"
+
+#define NOUVEAU_FENCE_STATE_AVAILABLE 0
+#define NOUVEAU_FENCE_STATE_EMITTED 1
+#define NOUVEAU_FENCE_STATE_FLUSHED 2
+#define NOUVEAU_FENCE_STATE_SIGNALLED 3
+
+struct nouveau_fence_work {
+ struct list_head list;
+ void (*func)(void *);
+ void *data;
+};
+
+struct nouveau_fence {
+ struct nouveau_fence *next;
+ struct nouveau_screen *screen;
+ int state;
+ int ref;
+ uint32_t sequence;
+ struct list_head work;
+};
+
+void nouveau_fence_emit(struct nouveau_fence *);
+void nouveau_fence_del(struct nouveau_fence *);
+
+boolean nouveau_fence_new(struct nouveau_screen *, struct nouveau_fence **,
+ boolean emit);
+boolean nouveau_fence_work(struct nouveau_fence *, void (*)(void *), void *);
+void nouveau_fence_update(struct nouveau_screen *, boolean flushed);
+void nouveau_fence_next(struct nouveau_screen *);
+boolean nouveau_fence_wait(struct nouveau_fence *);
+boolean nouveau_fence_signalled(struct nouveau_fence *);
+
+static INLINE void
+nouveau_fence_ref(struct nouveau_fence *fence, struct nouveau_fence **ref)
+{
+ if (fence)
+ ++fence->ref;
+
+ if (*ref) {
+ if (--(*ref)->ref == 0)
+ nouveau_fence_del(*ref);
+ }
+
+ *ref = fence;
+}
+
+static INLINE struct nouveau_fence *
+nouveau_fence(struct pipe_fence_handle *fence)
+{
+ return (struct nouveau_fence *)fence;
+}
+
+#endif // __NOUVEAU_FENCE_H__
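
nouveau_fence_work() is what makes delayed destruction cheap: with a NULL
or already-signalled fence the callback runs immediately, otherwise it is
queued on the fence and triggered when the fence is retired. This is
exactly how release_allocation() in nouveau_buffer.c returns suballocations
to the cache; condensed into a standalone sketch:

    static void
    example_deferred_free(struct nouveau_mm_allocation *mm,
                          struct nouveau_fence *fence)
    {
       if (!nouveau_fence_work(fence, nouveau_mm_free_work, mm))
          nouveau_mm_free(mm); /* work-node allocation failed; free now */
    }
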
diff --git a/src/gallium/drivers/nouveau/nouveau_mm.c b/src/gallium/drivers/nouveau/nouveau_mm.c
new file mode 100644
index 0000000000..7edeb4d21d
--- /dev/null
+++ b/src/gallium/drivers/nouveau/nouveau_mm.c
@@ -0,0 +1,288 @@
+
+#include "util/u_inlines.h"
+#include "util/u_memory.h"
+#include "util/u_double_list.h"
+
+#include "nouveau_screen.h"
+#include "nouveau_mm.h"
+
+#include "nouveau/nouveau_bo.h"
+
+#define MM_MIN_ORDER 7
+#define MM_MAX_ORDER 20
+
+#define MM_NUM_BUCKETS (MM_MAX_ORDER - MM_MIN_ORDER + 1)
+
+#define MM_MIN_SIZE (1 << MM_MIN_ORDER)
+#define MM_MAX_SIZE (1 << MM_MAX_ORDER)
+
+struct mm_bucket {
+ struct list_head free;
+ struct list_head used;
+ struct list_head full;
+ int num_free;
+};
+
+struct nouveau_mman {
+ struct nouveau_device *dev;
+ struct mm_bucket bucket[MM_NUM_BUCKETS];
+ uint32_t storage_type;
+ uint32_t domain;
+ uint64_t allocated;
+};
+
+struct mm_slab {
+ struct list_head head;
+ struct nouveau_bo *bo;
+ struct nouveau_mman *cache;
+ int order;
+ int count;
+ int free;
+ uint32_t bits[0];
+};
+
+static int
+mm_slab_alloc(struct mm_slab *slab)
+{
+ int i, n, b;
+
+ if (slab->free == 0)
+ return -1;
+
+ for (i = 0; i < (slab->count + 31) / 32; ++i) {
+ b = ffs(slab->bits[i]) - 1;
+ if (b >= 0) {
+ n = i * 32 + b;
+ assert(n < slab->count);
+ slab->free--;
+ slab->bits[i] &= ~(1 << b);
+ return n;
+ }
+ }
+ return -1;
+}
+
+static INLINE void
+mm_slab_free(struct mm_slab *slab, int i)
+{
+ assert(i < slab->count);
+ slab->bits[i / 32] |= 1 << (i % 32);
+ slab->free++;
+ assert(slab->free <= slab->count);
+}
+
+static INLINE int
+mm_get_order(uint32_t size)
+{
+ int s = __builtin_clz(size) ^ 31;
+
+ if (size > (1 << s))
+ s += 1;
+ return s;
+}
+
+static struct mm_bucket *
+mm_bucket_by_order(struct nouveau_mman *cache, int order)
+{
+ if (order > MM_MAX_ORDER)
+ return NULL;
+ return &cache->bucket[MAX2(order, MM_MIN_ORDER) - MM_MIN_ORDER];
+}
+
+static struct mm_bucket *
+mm_bucket_by_size(struct nouveau_mman *cache, unsigned size)
+{
+ return mm_bucket_by_order(cache, mm_get_order(size));
+}
+
+/* size of bo allocation for slab with chunks of (1 << chunk_order) bytes */
+static INLINE uint32_t
+mm_default_slab_size(unsigned chunk_order)
+{
+ static const int8_t slab_order[MM_MAX_ORDER - MM_MIN_ORDER + 1] =
+ {
+ 12, 12, 13, 14, 14, 17, 17, 17, 17, 19, 19, 20, 21, 22
+ };
+
+ assert(chunk_order <= MM_MAX_ORDER && chunk_order >= MM_MIN_ORDER);
+
+ return 1 << slab_order[chunk_order - MM_MIN_ORDER];
+}
+
+static int
+mm_slab_new(struct nouveau_mman *cache, int chunk_order)
+{
+ struct mm_slab *slab;
+ int words, ret;
+ const uint32_t size = mm_default_slab_size(chunk_order);
+
+ words = ((size >> chunk_order) + 31) / 32;
+ assert(words);
+
+ slab = MALLOC(sizeof(struct mm_slab) + words * 4);
+ if (!slab)
+ return PIPE_ERROR_OUT_OF_MEMORY;
+
+ memset(&slab->bits[0], ~0, words * 4);
+
+ slab->bo = NULL;
+ ret = nouveau_bo_new_tile(cache->dev, cache->domain, 0, size,
+ 0, cache->storage_type, &slab->bo);
+ if (ret) {
+ FREE(slab);
+ return PIPE_ERROR_OUT_OF_MEMORY;
+ }
+
+ LIST_INITHEAD(&slab->head);
+
+ slab->cache = cache;
+ slab->order = chunk_order;
+ slab->count = slab->free = size >> chunk_order;
+
+ LIST_ADD(&slab->head, &mm_bucket_by_order(cache, chunk_order)->free);
+
+ cache->allocated += size;
+
+ debug_printf("MM: new slab, total memory = %lu KiB\n",
+ cache->allocated / 1024);
+
+ return PIPE_OK;
+}
+
+/* @return token to identify slab or NULL if we just allocated a new bo */
+struct nouveau_mm_allocation *
+nouveau_mm_allocate(struct nouveau_mman *cache,
+ uint32_t size, struct nouveau_bo **bo, uint32_t *offset)
+{
+ struct mm_bucket *bucket;
+ struct mm_slab *slab;
+ struct nouveau_mm_allocation *alloc;
+ int ret;
+
+ bucket = mm_bucket_by_size(cache, size);
+ if (!bucket) {
+ ret = nouveau_bo_new_tile(cache->dev, cache->domain, 0, size,
+ 0, cache->storage_type, bo);
+ if (ret)
+ debug_printf("bo_new(%x, %x): %i\n", size, cache->storage_type, ret);
+
+ *offset = 0;
+ return NULL;
+ }
+
+ if (!LIST_IS_EMPTY(&bucket->used)) {
+ slab = LIST_ENTRY(struct mm_slab, bucket->used.next, head);
+ } else {
+ if (LIST_IS_EMPTY(&bucket->free)) {
+ mm_slab_new(cache, MAX2(mm_get_order(size), MM_MIN_ORDER));
+ }
+ slab = LIST_ENTRY(struct mm_slab, bucket->free.next, head);
+
+ LIST_DEL(&slab->head);
+ LIST_ADD(&slab->head, &bucket->used);
+ }
+
+ *offset = mm_slab_alloc(slab) << slab->order;
+
+ alloc = MALLOC_STRUCT(nouveau_mm_allocation);
+ if (!alloc)
+ return NULL;
+
+ nouveau_bo_ref(slab->bo, bo);
+
+ if (slab->free == 0) {
+ LIST_DEL(&slab->head);
+ LIST_ADD(&slab->head, &bucket->full);
+ }
+
+ alloc->next = NULL;
+ alloc->offset = *offset;
+ alloc->priv = (void *)slab;
+
+ return alloc;
+}
+
+void
+nouveau_mm_free(struct nouveau_mm_allocation *alloc)
+{
+ struct mm_slab *slab = (struct mm_slab *)alloc->priv;
+ struct mm_bucket *bucket = mm_bucket_by_order(slab->cache, slab->order);
+
+ mm_slab_free(slab, alloc->offset >> slab->order);
+
+ if (slab->free == 1) {
+ LIST_DEL(&slab->head);
+
+ if (slab->count > 1)
+ LIST_ADDTAIL(&slab->head, &bucket->used);
+ else
+ LIST_ADDTAIL(&slab->head, &bucket->free);
+ }
+
+ FREE(alloc);
+}
+
+void
+nouveau_mm_free_work(void *data)
+{
+ nouveau_mm_free(data);
+}
+
+struct nouveau_mman *
+nouveau_mm_create(struct nouveau_device *dev, uint32_t domain,
+ uint32_t storage_type)
+{
+ struct nouveau_mman *cache = MALLOC_STRUCT(nouveau_mman);
+ int i;
+
+ if (!cache)
+ return NULL;
+
+ cache->dev = dev;
+ cache->domain = domain;
+ cache->storage_type = storage_type;
+ cache->allocated = 0;
+
+ for (i = 0; i < MM_NUM_BUCKETS; ++i) {
+ LIST_INITHEAD(&cache->bucket[i].free);
+ LIST_INITHEAD(&cache->bucket[i].used);
+ LIST_INITHEAD(&cache->bucket[i].full);
+ }
+
+ return cache;
+}
+
+static INLINE void
+nouveau_mm_free_slabs(struct list_head *head)
+{
+ struct mm_slab *slab, *next;
+
+ LIST_FOR_EACH_ENTRY_SAFE(slab, next, head, head) {
+ LIST_DEL(&slab->head);
+ nouveau_bo_ref(NULL, &slab->bo);
+ FREE(slab);
+ }
+}
+
+void
+nouveau_mm_destroy(struct nouveau_mman *cache)
+{
+ int i;
+
+ if (!cache)
+ return;
+
+ for (i = 0; i < MM_NUM_BUCKETS; ++i) {
+ if (!LIST_IS_EMPTY(&cache->bucket[i].used) ||
+ !LIST_IS_EMPTY(&cache->bucket[i].full))
+ debug_printf("WARNING: destroying GPU memory cache "
+ "with some buffers still in use\n");
+
+ nouveau_mm_free_slabs(&cache->bucket[i].free);
+ nouveau_mm_free_slabs(&cache->bucket[i].used);
+ nouveau_mm_free_slabs(&cache->bucket[i].full);
+ }
+
+ FREE(cache);
+}
+
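
The bucket arithmetic above, traced for a concrete request; every number
follows from mm_get_order(), MM_MIN_ORDER and the slab_order table
(screen->mm_GART comes from the nouveau_screen.h hunk below).

    /* A 300-byte request:
     *   mm_get_order(300) -> 9          (300 > 256, round up to 512)
     *   bucket index      -> 9 - MM_MIN_ORDER = 2
     *   slab bo size      -> 1 << slab_order[2] = 1 << 13 = 8 KiB
     *   chunks per slab   -> 8192 >> 9 = 16 chunks of 512 bytes
     * Requests above MM_MAX_SIZE (1 MiB) bypass the cache: they get a
     * dedicated bo and nouveau_mm_allocate() returns NULL.
     */
    uint32_t offset;
    struct nouveau_bo *bo = NULL;
    struct nouveau_mm_allocation *a =
       nouveau_mm_allocate(screen->mm_GART, 300, &bo, &offset);
    /* ... offset is 512-byte aligned within the shared slab bo ... */
    nouveau_mm_free(a); /* or defer it via nouveau_fence_work() */
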
diff --git a/src/gallium/drivers/nouveau/nouveau_mm.h b/src/gallium/drivers/nouveau/nouveau_mm.h
new file mode 100644
index 0000000000..5b57c8ba4f
--- /dev/null
+++ b/src/gallium/drivers/nouveau/nouveau_mm.h
@@ -0,0 +1,32 @@
+#ifndef __NOUVEAU_MM_H__
+#define __NOUVEAU_MM_H__
+
+struct nouveau_mman;
+
+/* Since a resource can be migrated, allocations are decoupled from it.
+ * This struct is linked to fences to allow delayed freeing of allocations.
+ */
+struct nouveau_mm_allocation {
+ struct nouveau_mm_allocation *next;
+ void *priv;
+ uint32_t offset;
+};
+
+extern struct nouveau_mman *
+nouveau_mm_create(struct nouveau_device *, uint32_t domain,
+ uint32_t storage_type);
+
+extern void
+nouveau_mm_destroy(struct nouveau_mman *);
+
+extern struct nouveau_mm_allocation *
+nouveau_mm_allocate(struct nouveau_mman *, uint32_t size,
+ struct nouveau_bo **, uint32_t *offset);
+
+extern void
+nouveau_mm_free(struct nouveau_mm_allocation *);
+
+extern void
+nouveau_mm_free_work(void *);
+
+#endif // __NOUVEAU_MM_H__
diff --git a/src/gallium/drivers/nouveau/nouveau_screen.c b/src/gallium/drivers/nouveau/nouveau_screen.c
index a9426df686..e6cd3064c9 100644
--- a/src/gallium/drivers/nouveau/nouveau_screen.c
+++ b/src/gallium/drivers/nouveau/nouveau_screen.c
@@ -14,6 +14,7 @@
#include "nouveau/nouveau_bo.h"
#include "nouveau_winsys.h"
#include "nouveau_screen.h"
+#include "nouveau_fence.h"
/* XXX this should go away */
#include "state_tracker/drm_driver.h"
@@ -150,7 +151,7 @@ nouveau_screen_fence_ref(struct pipe_screen *pscreen,
struct pipe_fence_handle **ptr,
struct pipe_fence_handle *pfence)
{
- *ptr = pfence;
+ nouveau_fence_ref(nouveau_fence(pfence), (struct nouveau_fence **)ptr);
}
static int
@@ -158,7 +159,7 @@ nouveau_screen_fence_signalled(struct pipe_screen *screen,
struct pipe_fence_handle *pfence,
unsigned flags)
{
- return 0;
+ return !nouveau_fence_signalled(nouveau_fence(pfence));
}
static int
@@ -166,7 +167,7 @@ nouveau_screen_fence_finish(struct pipe_screen *screen,
struct pipe_fence_handle *pfence,
unsigned flags)
{
- return 0;
+ return !nouveau_fence_wait(nouveau_fence(pfence));
}
@@ -250,6 +251,10 @@ nouveau_screen_init(struct nouveau_screen *screen, struct nouveau_device *dev)
util_format_s3tc_init();
+ screen->mm_GART = nouveau_mm_create(dev,
+ NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
+ 0x000);
+ screen->mm_VRAM = nouveau_mm_create(dev, NOUVEAU_BO_VRAM, 0x000);
return 0;
}
@@ -257,7 +262,12 @@ void
nouveau_screen_fini(struct nouveau_screen *screen)
{
struct pipe_winsys *ws = screen->base.winsys;
+
+ nouveau_mm_destroy(screen->mm_GART);
+ nouveau_mm_destroy(screen->mm_VRAM);
+
nouveau_channel_free(&screen->channel);
+
if (ws)
ws->destroy(ws);
}
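
The screen now owns the two suballocator caches and the fence list, but the
hardware-specific pieces still come from each driver: fence.emit and
fence.update must be assigned and fence.current primed before any of the
fence code runs. A sketch of that driver-side setup (hypothetical names;
the hooks are modeled after the nouveau_screen.h hunk below):

    static void
    example_screen_setup_fences(struct nouveau_screen *screen)
    {
       screen->fence.emit   = example_fence_emit;   /* see sketch below */
       screen->fence.update = example_fence_update;
       nouveau_fence_new(screen, &screen->fence.current, FALSE);
    }
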
diff --git a/src/gallium/drivers/nouveau/nouveau_screen.h b/src/gallium/drivers/nouveau/nouveau_screen.h
index 1f4e5171c0..c091abf278 100644
--- a/src/gallium/drivers/nouveau/nouveau_screen.h
+++ b/src/gallium/drivers/nouveau/nouveau_screen.h
@@ -2,6 +2,10 @@
#define __NOUVEAU_SCREEN_H__
#include "pipe/p_screen.h"
+#include "util/u_memory.h"
+typedef uint32_t u32;
+
+struct nouveau_bo;
struct nouveau_screen {
struct pipe_screen base;
@@ -12,6 +16,20 @@ struct nouveau_screen {
* these almost always should be set to the same value */
unsigned vertex_buffer_flags;
unsigned index_buffer_flags;
+ unsigned sysmem_bindings;
+
+ struct {
+ struct nouveau_fence *head;
+ struct nouveau_fence *tail;
+ struct nouveau_fence *current;
+ u32 sequence;
+ u32 sequence_ack;
+ void (*emit)(struct pipe_screen *, u32 sequence);
+ u32 (*update)(struct pipe_screen *);
+ } fence;
+
+ struct nouveau_mman *mm_VRAM;
+ struct nouveau_mman *mm_GART;
};
static INLINE struct nouveau_screen *
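
The contract of the two new hooks, as a purely illustrative software model:
emit() must make the GPU write `sequence' somewhere CPU-readable once all
prior commands complete, and update() reads the most recently completed
value back. Real drivers emit a semaphore or notifier write into the
pushbuf instead of the plain variable used here.

    static u32 example_seq_backing; /* stand-in for GPU-visible memory */

    static void
    example_fence_emit(struct pipe_screen *pscreen, u32 sequence)
    {
       example_seq_backing = sequence; /* hw: deferred until cmds finish */
    }

    static u32
    example_fence_update(struct pipe_screen *pscreen)
    {
       return example_seq_backing;
    }
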
diff --git a/src/gallium/drivers/nouveau/nouveau_stateobj.h b/src/gallium/drivers/nouveau/nouveau_stateobj.h
deleted file mode 100644
index e920cf9f3b..0000000000
--- a/src/gallium/drivers/nouveau/nouveau_stateobj.h
+++ /dev/null
@@ -1,316 +0,0 @@
-#ifndef __NOUVEAU_STATEOBJ_H__
-#define __NOUVEAU_STATEOBJ_H__
-
-#include "util/u_debug.h"
-
-#ifdef DEBUG
-#define DEBUG_NOUVEAU_STATEOBJ
-#endif /* DEBUG */
-
-struct nouveau_stateobj_reloc {
- struct nouveau_bo *bo;
-
- struct nouveau_grobj *gr;
- uint32_t push_offset;
- uint32_t mthd;
-
- uint32_t data;
- unsigned flags;
- unsigned vor;
- unsigned tor;
-};
-
-struct nouveau_stateobj_start {
- struct nouveau_grobj *gr;
- uint32_t mthd;
- uint32_t size;
- unsigned offset;
-};
-
-struct nouveau_stateobj {
- struct pipe_reference reference;
-
- struct nouveau_stateobj_start *start;
- struct nouveau_stateobj_reloc *reloc;
-
- /* Common memory pool for data. */
- uint32_t *pool;
- unsigned pool_cur;
-
-#ifdef DEBUG_NOUVEAU_STATEOBJ
- unsigned start_alloc;
- unsigned reloc_alloc;
- unsigned pool_alloc;
-#endif /* DEBUG_NOUVEAU_STATEOBJ */
-
- unsigned total; /* includes begin_ring */
- unsigned cur; /* excludes begin_ring, offset from "cur_start" */
- unsigned cur_start;
- unsigned cur_reloc;
-};
-
-static INLINE void
-so_dump(struct nouveau_stateobj *so)
-{
- unsigned i, nr, total = 0;
-
- for (i = 0; i < so->cur_start; i++) {
- if (so->start[i].gr->subc > -1)
- debug_printf("+0x%04x: 0x%08x\n", total++,
- (so->start[i].size << 18) | (so->start[i].gr->subc << 13)
- | so->start[i].mthd);
- else
- debug_printf("+0x%04x: 0x%08x\n", total++,
- (so->start[i].size << 18) | so->start[i].mthd);
- for (nr = 0; nr < so->start[i].size; nr++, total++)
- debug_printf("+0x%04x: 0x%08x\n", total,
- so->pool[so->start[i].offset + nr]);
- }
-}
-
-static INLINE struct nouveau_stateobj *
-so_new(unsigned start, unsigned push, unsigned reloc)
-{
- struct nouveau_stateobj *so;
-
- so = MALLOC(sizeof(struct nouveau_stateobj));
- pipe_reference_init(&so->reference, 1);
- so->total = so->cur = so->cur_start = so->cur_reloc = 0;
-
-#ifdef DEBUG_NOUVEAU_STATEOBJ
- so->start_alloc = start;
- so->reloc_alloc = reloc;
- so->pool_alloc = push;
-#endif /* DEBUG_NOUVEAU_STATEOBJ */
-
- so->start = MALLOC(start * sizeof(struct nouveau_stateobj_start));
- so->reloc = MALLOC(reloc * sizeof(struct nouveau_stateobj_reloc));
- so->pool = MALLOC(push * sizeof(uint32_t));
- so->pool_cur = 0;
-
- if (!so->start || !so->reloc || !so->pool) {
- debug_printf("malloc failed\n");
- assert(0);
- }
-
- return so;
-}
-
-static INLINE void
-so_ref(struct nouveau_stateobj *ref, struct nouveau_stateobj **pso)
-{
- struct nouveau_stateobj *so = *pso;
- int i;
-
- if (pipe_reference(&(*pso)->reference, &ref->reference)) {
- FREE(so->start);
- for (i = 0; i < so->cur_reloc; i++)
- nouveau_bo_ref(NULL, &so->reloc[i].bo);
- FREE(so->reloc);
- FREE(so->pool);
- FREE(so);
- }
- *pso = ref;
-}
-
-static INLINE void
-so_data(struct nouveau_stateobj *so, uint32_t data)
-{
-#ifdef DEBUG_NOUVEAU_STATEOBJ
- if (so->cur >= so->start[so->cur_start - 1].size) {
- debug_printf("exceeding specified size\n");
- assert(0);
- }
-#endif /* DEBUG_NOUVEAU_STATEOBJ */
-
- so->pool[so->start[so->cur_start - 1].offset + so->cur++] = data;
-}
-
-static INLINE void
-so_datap(struct nouveau_stateobj *so, uint32_t *data, unsigned size)
-{
-#ifdef DEBUG_NOUVEAU_STATEOBJ
- if ((so->cur + size) > so->start[so->cur_start - 1].size) {
- debug_printf("exceeding specified size\n");
- assert(0);
- }
-#endif /* DEBUG_NOUVEAU_STATEOBJ */
-
- while (size--)
- so->pool[so->start[so->cur_start - 1].offset + so->cur++] =
- *data++;
-}
-
-static INLINE void
-so_method(struct nouveau_stateobj *so, struct nouveau_grobj *gr,
- unsigned mthd, unsigned size)
-{
- struct nouveau_stateobj_start *start;
-
-#ifdef DEBUG_NOUVEAU_STATEOBJ
- if (so->start_alloc <= so->cur_start) {
- debug_printf("exceeding num_start size\n");
- assert(0);
- }
-#endif /* DEBUG_NOUVEAU_STATEOBJ */
- start = so->start;
-
-#ifdef DEBUG_NOUVEAU_STATEOBJ
- if (so->cur_start > 0 && start[so->cur_start - 1].size > so->cur) {
- debug_printf("previous so_method was not filled\n");
- assert(0);
- }
-#endif /* DEBUG_NOUVEAU_STATEOBJ */
-
- start[so->cur_start].gr = gr;
- start[so->cur_start].mthd = mthd;
- start[so->cur_start].size = size;
-
-#ifdef DEBUG_NOUVEAU_STATEOBJ
- if (so->pool_alloc < (size + so->pool_cur)) {
- debug_printf("exceeding num_pool size\n");
- assert(0);
- }
-#endif /* DEBUG_NOUVEAU_STATEOBJ */
-
- start[so->cur_start].offset = so->pool_cur;
- so->pool_cur += size;
-
- so->cur_start++;
- /* The 1 is for *this* begin_ring. */
- so->total += so->cur + 1;
- so->cur = 0;
-}
-
-static INLINE void
-so_reloc(struct nouveau_stateobj *so, struct nouveau_bo *bo,
- unsigned data, unsigned flags, unsigned vor, unsigned tor)
-{
- struct nouveau_stateobj_reloc *r;
-
-#ifdef DEBUG_NOUVEAU_STATEOBJ
- if (so->reloc_alloc <= so->cur_reloc) {
- debug_printf("exceeding num_reloc size\n");
- assert(0);
- }
-#endif /* DEBUG_NOUVEAU_STATEOBJ */
- r = so->reloc;
-
- r[so->cur_reloc].bo = NULL;
- nouveau_bo_ref(bo, &(r[so->cur_reloc].bo));
- r[so->cur_reloc].gr = so->start[so->cur_start-1].gr;
- r[so->cur_reloc].push_offset = so->total + so->cur;
- r[so->cur_reloc].data = data;
- r[so->cur_reloc].flags = flags;
- r[so->cur_reloc].mthd = so->start[so->cur_start-1].mthd +
- (so->cur << 2);
- r[so->cur_reloc].vor = vor;
- r[so->cur_reloc].tor = tor;
-
- so_data(so, data);
- so->cur_reloc++;
-}
-
-/* Determine if this buffer object is referenced by this state object. */
-static INLINE boolean
-so_bo_is_reloc(struct nouveau_stateobj *so, struct nouveau_bo *bo)
-{
- int i;
-
- for (i = 0; i < so->cur_reloc; i++)
- if (so->reloc[i].bo == bo)
- return true;
-
- return false;
-}
-
-static INLINE void
-so_emit(struct nouveau_channel *chan, struct nouveau_stateobj *so)
-{
- unsigned nr, i;
- int ret = 0;
-
-#ifdef DEBUG_NOUVEAU_STATEOBJ
- if (so->start[so->cur_start - 1].size > so->cur) {
- debug_printf("emit: previous so_method was not filled\n");
- assert(0);
- }
-#endif /* DEBUG_NOUVEAU_STATEOBJ */
-
- /* We cannot update total in case we so_emit again. */
- nr = so->total + so->cur;
-
- /* This will flush if we need space.
- * We don't actually need the marker.
- */
- if ((ret = nouveau_pushbuf_marker_emit(chan, nr, so->cur_reloc))) {
- debug_printf("so_emit failed marker emit with error %d\n", ret);
- assert(0);
- }
-
- /* Submit data. This will ensure proper binding of objects. */
- for (i = 0; i < so->cur_start; i++) {
- BEGIN_RING(chan, so->start[i].gr, so->start[i].mthd, so->start[i].size);
- OUT_RINGp(chan, &(so->pool[so->start[i].offset]), so->start[i].size);
- }
-
- for (i = 0; i < so->cur_reloc; i++) {
- struct nouveau_stateobj_reloc *r = &so->reloc[i];
-
- if ((ret = nouveau_pushbuf_emit_reloc(chan, chan->cur - nr +
- r->push_offset, r->bo, r->data,
- 0, r->flags, r->vor, r->tor))) {
- debug_printf("so_emit failed reloc with error %d\n", ret);
- assert(0);
- }
- }
-}
-
-static INLINE void
-so_emit_reloc_markers(struct nouveau_channel *chan, struct nouveau_stateobj *so)
-{
- unsigned i;
- int ret = 0;
-
- if (!so)
- return;
-
- /* If we need to flush in flush notify, then we have a problem anyway. */
- for (i = 0; i < so->cur_reloc; i++) {
- struct nouveau_stateobj_reloc *r = &so->reloc[i];
-
-#ifdef DEBUG_NOUVEAU_STATEOBJ
- if (r->mthd & 0x40000000) {
- debug_printf("error: NI mthd 0x%08X\n", r->mthd);
- continue;
- }
-#endif /* DEBUG_NOUVEAU_STATEOBJ */
-
- /* We don't need to autobind, since there are enough subchannels
- * for all objects we use. If this is changed, account for the extra
- * space in callers of this function.
- */
- assert(r->gr->bound != NOUVEAU_GROBJ_UNBOUND);
-
- /* Some relocs really don't like to be hammered,
- * NOUVEAU_BO_DUMMY makes sure it only
- * happens when needed.
- */
- ret = OUT_RELOC(chan, r->bo, (r->gr->subc << 13) | (1<< 18) |
- r->mthd, (r->flags & (NOUVEAU_BO_VRAM | NOUVEAU_BO_GART
- | NOUVEAU_BO_RDWR)) | NOUVEAU_BO_DUMMY, 0, 0);
- if (ret) {
- debug_printf("OUT_RELOC failed %d\n", ret);
- assert(0);
- }
-
- ret = OUT_RELOC(chan, r->bo, r->data, r->flags |
- NOUVEAU_BO_DUMMY, r->vor, r->tor);
- if (ret) {
- debug_printf("OUT_RELOC failed %d\n", ret);
- assert(0);
- }
- }
-}
-
-#endif
diff --git a/src/gallium/drivers/nouveau/nouveau_winsys.h b/src/gallium/drivers/nouveau/nouveau_winsys.h
index 8dfb84a596..484f870bd8 100644
--- a/src/gallium/drivers/nouveau/nouveau_winsys.h
+++ b/src/gallium/drivers/nouveau/nouveau_winsys.h
@@ -9,7 +9,6 @@
#include "nouveau/nouveau_device.h"
#include "nouveau/nouveau_grobj.h"
#include "nouveau/nouveau_notifier.h"
-#include "nouveau/nouveau_resource.h"
#ifndef NOUVEAU_NVC0
#include "nouveau/nv04_pushbuf.h"
#endif