Diffstat (limited to 'src/gallium/drivers/nvc0')
-rw-r--r--   src/gallium/drivers/nvc0/nvc0_buffer.c     30
-rw-r--r--   src/gallium/drivers/nvc0/nvc0_context.h     3
-rw-r--r--   src/gallium/drivers/nvc0/nvc0_resource.h   11
-rw-r--r--   src/gallium/drivers/nvc0/nvc0_vbo.c       112
4 files changed, 119 insertions, 37 deletions
diff --git a/src/gallium/drivers/nvc0/nvc0_buffer.c b/src/gallium/drivers/nvc0/nvc0_buffer.c
index dad69e17ef7..ea3e642a448 100644
--- a/src/gallium/drivers/nvc0/nvc0_buffer.c
+++ b/src/gallium/drivers/nvc0/nvc0_buffer.c
@@ -59,6 +59,18 @@ release_allocation(struct nvc0_mm_allocation **mm, struct nvc0_fence *fence)
(*mm) = NULL;
}
+static INLINE boolean
+nvc0_buffer_reallocate(struct nvc0_screen *screen, struct nvc0_resource *buf,
+ unsigned domain)
+{
+ nouveau_bo_ref(NULL, &buf->bo);
+
+ if (buf->mm)
+ release_allocation(&buf->mm, buf->fence);
+
+ return nvc0_buffer_allocate(screen, buf, domain);
+}
+
static void
nvc0_buffer_destroy(struct pipe_screen *pscreen,
struct pipe_resource *presource)
@@ -372,8 +384,9 @@ nvc0_user_buffer_create(struct pipe_screen *pscreen,
return &buffer->base;
}
+/* Like download, but for GART buffers. Merge ? */
static INLINE boolean
-nvc0_buffer_fetch_data(struct nvc0_resource *buf,
+nvc0_buffer_data_fetch(struct nvc0_resource *buf,
struct nouveau_bo *bo, unsigned offset, unsigned size)
{
if (!buf->data) {
@@ -419,7 +432,7 @@ nvc0_buffer_migrate(struct nvc0_context *nvc0,
if (new_domain == NOUVEAU_BO_VRAM) {
/* keep a system memory copy of our data in case we hit a fallback */
- if (!nvc0_buffer_fetch_data(buf, buf->bo, buf->offset, size))
+ if (!nvc0_buffer_data_fetch(buf, buf->bo, buf->offset, size))
return FALSE;
debug_printf("migrating %u KiB to VRAM\n", size / 1024);
}
@@ -450,19 +463,22 @@ nvc0_buffer_migrate(struct nvc0_context *nvc0,
}
/* Migrate data from glVertexAttribPointer(non-VBO) user buffers to GART.
- * MUST NOT FLUSH THE PUSH BUFFER, we could be in the middle of a method.
+ * We'd like to only allocate @size bytes here, but then we'd have to rebase
+ * the vertex indices ...
*/
boolean
-nvc0_migrate_vertices(struct nvc0_resource *buf, unsigned base, unsigned size)
+nvc0_user_buffer_upload(struct nvc0_resource *buf, unsigned base, unsigned size)
{
struct nvc0_screen *screen = nvc0_screen(buf->base.screen);
int ret;
- assert(buf->data && !buf->domain);
+ assert(buf->status & NVC0_BUFFER_STATUS_USER_MEMORY);
- if (!nvc0_buffer_allocate(screen, buf, NOUVEAU_BO_GART))
+ buf->base.width0 = base + size;
+ if (!nvc0_buffer_reallocate(screen, buf, NOUVEAU_BO_GART))
return FALSE;
- ret = nouveau_bo_map_range(buf->bo, base + buf->offset, size,
+
+ ret = nouveau_bo_map_range(buf->bo, buf->offset + base, size,
NOUVEAU_BO_WR | NOUVEAU_BO_NOSYNC);
if (ret)
return FALSE;
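
The upload path above throws the old backing store away and allocates a fresh one before copying in the client data, since the application may have rewritten its array between draws; it deliberately allocates base + size bytes so the draw's vertex indices keep working without being rebased. A minimal standalone C sketch of that pattern follows; the struct and function names here are hypothetical stand-ins, not the driver's own:

#include <stdlib.h>
#include <string.h>

/* hypothetical stand-in for an nvc0_resource backed by user memory */
struct user_buf {
   const void *data;     /* client pointer, may change between GL calls */
   void *backing;        /* driver-owned copy (stands in for the GART BO) */
   size_t backing_size;
};

/* re-create the backing store and copy in the range a draw will read */
static int
user_buf_upload(struct user_buf *buf, size_t base, size_t size)
{
   free(buf->backing);                 /* old contents may be stale */
   buf->backing = malloc(base + size); /* base + size: keep original offsets */
   if (!buf->backing)
      return -1;
   buf->backing_size = base + size;

   memcpy((char *)buf->backing + base,
          (const char *)buf->data + base, size);
   return 0;
}
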
diff --git a/src/gallium/drivers/nvc0/nvc0_context.h b/src/gallium/drivers/nvc0/nvc0_context.h
index 0f340beb35a..eeb5beff7a7 100644
--- a/src/gallium/drivers/nvc0/nvc0_context.h
+++ b/src/gallium/drivers/nvc0/nvc0_context.h
@@ -100,7 +100,8 @@ struct nvc0_context {
struct pipe_vertex_buffer vtxbuf[PIPE_MAX_ATTRIBS];
unsigned num_vtxbufs;
struct pipe_index_buffer idxbuf;
- uint32_t vbo_fifo;
+ uint32_t vbo_fifo; /* bitmask of vertex elements to be pushed to FIFO */
+ uint32_t vbo_user; /* bitmask of vertex buffers pointing to user memory */
unsigned vbo_min_index; /* from pipe_draw_info, for vertex upload */
unsigned vbo_max_index;
diff --git a/src/gallium/drivers/nvc0/nvc0_resource.h b/src/gallium/drivers/nvc0/nvc0_resource.h
index d33e2f0ed0a..17e79642a6d 100644
--- a/src/gallium/drivers/nvc0/nvc0_resource.h
+++ b/src/gallium/drivers/nvc0/nvc0_resource.h
@@ -18,6 +18,12 @@ struct nvc0_context;
#define NVC0_BUFFER_SCORE_MAX 25000
#define NVC0_BUFFER_SCORE_VRAM_THRESHOLD 20000
+/* DIRTY: buffer was (or will be after the next flush) written to by GPU and
+ * resource->data has not been updated to reflect modified VRAM contents
+ *
+ * USER_MEMORY: resource->data is a pointer to client memory and may change
+ * between GL calls
+ */
#define NVC0_BUFFER_STATUS_DIRTY (1 << 0)
#define NVC0_BUFFER_STATUS_USER_MEMORY (1 << 7)
@@ -84,7 +90,8 @@ nvc0_resource_map_offset(struct nvc0_context *nvc0,
(res->status & NVC0_BUFFER_STATUS_DIRTY))
nvc0_buffer_download(nvc0, res, 0, res->base.width0);
- if (res->domain != NOUVEAU_BO_GART)
+ if ((res->domain != NOUVEAU_BO_GART) ||
+ (res->status & NVC0_BUFFER_STATUS_USER_MEMORY))
return res->data + offset;
if (res->mm)
@@ -189,6 +196,6 @@ void
nvc0_miptree_surface_del(struct pipe_context *, struct pipe_surface *);
boolean
-nvc0_migrate_vertices(struct nvc0_resource *buf, unsigned base, unsigned size);
+nvc0_user_buffer_upload(struct nvc0_resource *, unsigned base, unsigned size);
#endif
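
The two status bits documented above drive the CPU mapping path changed in this hunk: a DIRTY buffer has to be read back from VRAM before its shadow copy can be trusted, while a USER_MEMORY buffer can always be read through the client pointer. A compilable sketch of that decision, with hypothetical names standing in for the nvc0 types and helpers:

#include <stddef.h>

#define BUF_STATUS_DIRTY       (1 << 0)  /* GPU wrote it; shadow copy stale */
#define BUF_STATUS_USER_MEMORY (1 << 7)  /* data points at client memory */

enum buf_domain { DOMAIN_NONE = 0, DOMAIN_GART, DOMAIN_VRAM };

struct buf {                       /* stand-in for struct nvc0_resource */
   unsigned status;
   enum buf_domain domain;
   unsigned char *data;            /* shadow copy or client pointer */
};

static void
download_from_vram(struct buf *b)  /* stands in for nvc0_buffer_download() */
{
   b->status &= ~BUF_STATUS_DIRTY;
}

static unsigned char *
map_offset(struct buf *b, unsigned offset)
{
   if (b->domain == DOMAIN_VRAM && (b->status & BUF_STATUS_DIRTY))
      download_from_vram(b);

   /* not resident in GART, or plain user memory: the pointer is usable */
   if (b->domain != DOMAIN_GART || (b->status & BUF_STATUS_USER_MEMORY))
      return b->data + offset;

   return NULL;  /* GART case: a real driver would map the BO here instead */
}
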
diff --git a/src/gallium/drivers/nvc0/nvc0_vbo.c b/src/gallium/drivers/nvc0/nvc0_vbo.c
index fd7a7942cb8..a14e9557382 100644
--- a/src/gallium/drivers/nvc0/nvc0_vbo.c
+++ b/src/gallium/drivers/nvc0/nvc0_vbo.c
@@ -141,52 +141,107 @@ nvc0_emit_vtxattr(struct nvc0_context *nvc0, struct pipe_vertex_buffer *vb,
OUT_RINGf(chan, v[i]);
}
-void
-nvc0_vertex_arrays_validate(struct nvc0_context *nvc0)
+static void
+nvc0_prevalidate_vbufs(struct nvc0_context *nvc0)
{
- struct nouveau_channel *chan = nvc0->screen->base.channel;
- struct nvc0_vertex_stateobj *vertex = nvc0->vertex;
struct pipe_vertex_buffer *vb;
- struct nvc0_vertex_element *ve;
- unsigned i;
- boolean push = FALSE;
+ struct nvc0_resource *buf;
+ int i;
+ uint32_t base, size;
- nvc0->vbo_fifo = 0;
+ nvc0->vbo_fifo = nvc0->vbo_user = 0;
for (i = 0; i < nvc0->num_vtxbufs; ++i) {
vb = &nvc0->vtxbuf[i];
+ if (!vb->stride)
+ continue;
+ buf = nvc0_resource(vb->buffer);
if (!nvc0_resource_mapped_by_gpu(vb->buffer)) {
- if (vb->stride == 0)
+ if (nvc0->vbo_push_hint) {
+ nvc0->vbo_fifo = ~0;
continue;
- push = nvc0->vbo_push_hint;
- if (!push) {
- unsigned base, size;
- base = vb->buffer_offset + nvc0->vbo_min_index * vb->stride;
- size = (nvc0->vbo_max_index - nvc0->vbo_min_index + 1) * vb->stride;
- nvc0_migrate_vertices(nvc0_resource(vb->buffer), base, size);
+ } else {
+ if (buf->status & NVC0_BUFFER_STATUS_USER_MEMORY) {
+ nvc0->vbo_user |= 1 << i;
+ assert(vb->stride > vb->buffer_offset);
+ size = vb->stride * (nvc0->vbo_max_index -
+ nvc0->vbo_min_index + 1);
+ base = vb->stride * nvc0->vbo_min_index;
+ nvc0_user_buffer_upload(buf, base, size);
+ } else {
+ nvc0_buffer_migrate(nvc0, buf, NOUVEAU_BO_GART);
+ }
nvc0->vbo_dirty = TRUE;
- } else
- continue;
+ }
}
- nvc0_buffer_adjust_score(nvc0, nvc0_resource(vb->buffer), 1);
+ nvc0_bufctx_add_resident(nvc0, NVC0_BUFCTX_VERTEX, buf, NOUVEAU_BO_RD);
+ nvc0_buffer_adjust_score(nvc0, buf, 1);
+ }
+}
- nvc0_bufctx_add_resident(nvc0, NVC0_BUFCTX_VERTEX,
- nvc0_resource(vb->buffer), NOUVEAU_BO_RD);
+static void
+nvc0_update_user_vbufs(struct nvc0_context *nvc0)
+{
+ struct nouveau_channel *chan = nvc0->screen->base.channel;
+ const uint32_t vertex_count = nvc0->vbo_max_index - nvc0->vbo_min_index + 1;
+ uint32_t base, offset, size;
+ int i;
+ uint32_t written = 0;
+
+ for (i = 0; i < nvc0->vertex->num_elements; ++i) {
+ struct pipe_vertex_element *ve = &nvc0->vertex->element[i].pipe;
+ const int b = ve->vertex_buffer_index;
+ struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[b];
+ struct nvc0_resource *buf = nvc0_resource(vb->buffer);
+
+ if (!(nvc0->vbo_user & (1 << b)))
+ continue;
+
+ if (!vb->stride) {
+ nvc0_emit_vtxattr(nvc0, vb, ve, i);
+ continue;
+ }
+ size = vb->stride * vertex_count;
+ base = vb->stride * nvc0->vbo_min_index;
+
+ if (!(written & (1 << b))) {
+ written |= 1 << b;
+ nvc0_user_buffer_upload(buf, base, size);
+ }
+ offset = vb->buffer_offset + ve->src_offset;
+
+ BEGIN_RING_1I(chan, RING_3D(VERTEX_ARRAY_SELECT), 5);
+ OUT_RING (chan, i);
+ OUT_RESRCh(chan, buf, size - 1, NOUVEAU_BO_RD);
+ OUT_RESRCl(chan, buf, size - 1, NOUVEAU_BO_RD);
+ OUT_RESRCh(chan, buf, offset, NOUVEAU_BO_RD);
+ OUT_RESRCl(chan, buf, offset, NOUVEAU_BO_RD);
}
+ nvc0->vbo_dirty = TRUE;
+}
+
+void
+nvc0_vertex_arrays_validate(struct nvc0_context *nvc0)
+{
+ struct nouveau_channel *chan = nvc0->screen->base.channel;
+ struct nvc0_vertex_stateobj *vertex = nvc0->vertex;
+ struct pipe_vertex_buffer *vb;
+ struct nvc0_vertex_element *ve;
+ unsigned i;
+
+ nvc0_prevalidate_vbufs(nvc0);
BEGIN_RING(chan, RING_3D(VERTEX_ATTRIB_FORMAT(0)), vertex->num_elements);
for (i = 0; i < vertex->num_elements; ++i) {
ve = &vertex->element[i];
vb = &nvc0->vtxbuf[ve->pipe.vertex_buffer_index];
- if (push)
- nvc0->vbo_fifo |= 1 << i;
-
- if (likely(vb->stride) || push) {
+ if (likely(vb->stride) || nvc0->vbo_fifo) {
OUT_RING(chan, ve->state);
} else {
OUT_RING(chan, ve->state | NVC0_3D_VERTEX_ATTRIB_FORMAT_CONST);
+ nvc0->vbo_fifo &= ~(1 << i);
}
}
@@ -210,8 +265,8 @@ nvc0_vertex_arrays_validate(struct nvc0_context *nvc0)
res = nvc0_resource(vb->buffer);
- if (push || unlikely(vb->stride == 0)) {
- if (!push)
+ if (nvc0->vbo_fifo || unlikely(vb->stride == 0)) {
+ if (!nvc0->vbo_fifo)
nvc0_emit_vtxattr(nvc0, vb, &ve->pipe, i);
BEGIN_RING(chan, RING_3D(VERTEX_ARRAY_FETCH(i)), 1);
OUT_RING (chan, 0);
@@ -540,6 +595,9 @@ nvc0_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
nvc0->vbo_min_index = info->min_index;
nvc0->vbo_max_index = info->max_index;
+ if (nvc0->vbo_user && !(nvc0->dirty & (NVC0_NEW_VERTEX | NVC0_NEW_ARRAYS)))
+ nvc0_update_user_vbufs(nvc0);
+
nvc0_state_validate(nvc0);
if (nvc0->state.instance_base != info->start_instance) {
@@ -554,7 +612,7 @@ nvc0_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
}
if (nvc0->vbo_dirty) {
- BEGIN_RING(chan, RING_3D_(0x142c), 1);
+ BEGIN_RING(chan, RING_3D(VERTEX_ARRAY_FLUSH), 1);
OUT_RING (chan, 0);
nvc0->vbo_dirty = FALSE;
}
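
For reference, the per-draw flow introduced in nvc0_vbo.c above: a bitmask records which vertex buffers point at client memory, and only those are re-uploaded, over the [min_index, max_index] range of the draw, before the draw is emitted. A rough standalone model with hypothetical names; upload_range() merely stands in for nvc0_user_buffer_upload():

#include <stddef.h>
#include <stdint.h>

#define MAX_VTXBUFS 32

struct draw_ctx {                     /* stand-in for struct nvc0_context */
   uint32_t vbo_user;                 /* bit i set: vertex buffer i is user memory */
   unsigned num_vtxbufs;
   unsigned stride[MAX_VTXBUFS];
   unsigned min_index, max_index;     /* from pipe_draw_info */
};

static void
upload_range(unsigned i, size_t base, size_t size)
{
   (void)i; (void)base; (void)size;   /* would copy client data into the BO */
}

/* re-upload every user vertex buffer touched by the current draw */
static void
update_user_vbufs(struct draw_ctx *c)
{
   const unsigned count = c->max_index - c->min_index + 1;
   unsigned i;

   for (i = 0; i < c->num_vtxbufs; ++i) {
      if (!(c->vbo_user & (1u << i)) || !c->stride[i])
         continue;
      upload_range(i, (size_t)c->stride[i] * c->min_index,
                      (size_t)c->stride[i] * count);
   }
}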