path: root/src/gallium/drivers/nouveau/nouveau_buffer.c
author     Samuel Pitoiset <[email protected]>   2015-07-20 21:32:43 +0200
committer  Samuel Pitoiset <[email protected]>   2015-07-21 00:42:53 +0200
commit     cd0dec0d9dfab642c51774c3f5788cbdf00b8c9b (patch)
tree       d5dfc6bd848e0b7c926500f37e29d6641dcd621f /src/gallium/drivers/nouveau/nouveau_buffer.c
parent     4be30fcd058209966fc72fbfa51bbe881c307ed5 (diff)
nouveau: use bool instead of boolean
Signed-off-by: Samuel Pitoiset <[email protected]>
Acked-by: Ilia Mirkin <[email protected]>
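
For context, a minimal sketch of the pattern this commit applies throughout the file (illustrative only; the example_buf struct and example_buf_valid() helper below are hypothetical and not taken from the driver): gallium's legacy boolean typedef and its TRUE/FALSE macros are replaced by C99 bool/true/false from <stdbool.h>.

/* Illustrative sketch, not part of this commit: demonstrates the
 * boolean -> bool conversion pattern on a hypothetical helper.
 */
#include <stdbool.h>
#include <stddef.h>

struct example_buf {
   void *data;
   unsigned size;
};

/* Before the conversion, such a predicate would have been written as:
 *
 *   static INLINE boolean
 *   example_buf_valid(const struct example_buf *buf)
 *   {
 *      return buf->data ? TRUE : FALSE;
 *   }
 */
static inline bool
example_buf_valid(const struct example_buf *buf)
{
   /* With C99 bool the predicate can return the comparison directly. */
   return buf->data != NULL && buf->size > 0;
}

The swap is mechanically safe: gallium's boolean was an integer typedef with TRUE/FALSE defined as 1/0, so the commit changes types and spelling only, not behavior.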
Diffstat (limited to 'src/gallium/drivers/nouveau/nouveau_buffer.c')
-rw-r--r--   src/gallium/drivers/nouveau/nouveau_buffer.c   118
1 file changed, 59 insertions(+), 59 deletions(-)
diff --git a/src/gallium/drivers/nouveau/nouveau_buffer.c b/src/gallium/drivers/nouveau/nouveau_buffer.c
index 83d528846fd..ad05fdc3e76 100644
--- a/src/gallium/drivers/nouveau/nouveau_buffer.c
+++ b/src/gallium/drivers/nouveau/nouveau_buffer.c
@@ -28,7 +28,7 @@ nouveau_transfer(struct pipe_transfer *transfer)
return (struct nouveau_transfer *)transfer;
}
-static INLINE boolean
+static INLINE bool
nouveau_buffer_malloc(struct nv04_resource *buf)
{
if (!buf->data)
@@ -36,7 +36,7 @@ nouveau_buffer_malloc(struct nv04_resource *buf)
return !!buf->data;
}
-static INLINE boolean
+static INLINE bool
nouveau_buffer_allocate(struct nouveau_screen *screen,
struct nv04_resource *buf, unsigned domain)
{
@@ -53,12 +53,12 @@ nouveau_buffer_allocate(struct nouveau_screen *screen,
buf->mm = nouveau_mm_allocate(screen->mm_GART, size,
&buf->bo, &buf->offset);
if (!buf->bo)
- return FALSE;
+ return false;
NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_sys, buf->base.width0);
} else {
assert(domain == 0);
if (!nouveau_buffer_malloc(buf))
- return FALSE;
+ return false;
}
buf->domain = domain;
if (buf->bo)
@@ -66,7 +66,7 @@ nouveau_buffer_allocate(struct nouveau_screen *screen,
util_range_set_empty(&buf->valid_buffer_range);
- return TRUE;
+ return true;
}
static INLINE void
@@ -93,7 +93,7 @@ nouveau_buffer_release_gpu_storage(struct nv04_resource *buf)
buf->domain = 0;
}
-static INLINE boolean
+static INLINE bool
nouveau_buffer_reallocate(struct nouveau_screen *screen,
struct nv04_resource *buf, unsigned domain)
{
@@ -134,13 +134,13 @@ nouveau_buffer_destroy(struct pipe_screen *pscreen,
*/
static uint8_t *
nouveau_transfer_staging(struct nouveau_context *nv,
- struct nouveau_transfer *tx, boolean permit_pb)
+ struct nouveau_transfer *tx, bool permit_pb)
{
const unsigned adj = tx->base.box.x & NOUVEAU_MIN_BUFFER_MAP_ALIGN_MASK;
const unsigned size = align(tx->base.box.width, 4) + adj;
if (!nv->push_data)
- permit_pb = FALSE;
+ permit_pb = false;
if ((size <= NOUVEAU_TRANSFER_PUSHBUF_THRESHOLD) && permit_pb) {
tx->map = align_malloc(size, NOUVEAU_MIN_BUFFER_MAP_ALIGN);
@@ -162,7 +162,7 @@ nouveau_transfer_staging(struct nouveau_context *nv,
* buffer. Also updates buf->data if present.
*
* Maybe just migrate to GART right away if we actually need to do this. */
-static boolean
+static bool
nouveau_transfer_read(struct nouveau_context *nv, struct nouveau_transfer *tx)
{
struct nv04_resource *buf = nv04_resource(tx->base.resource);
@@ -175,12 +175,12 @@ nouveau_transfer_read(struct nouveau_context *nv, struct nouveau_transfer *tx)
buf->bo, buf->offset + base, buf->domain, size);
if (nouveau_bo_wait(tx->bo, NOUVEAU_BO_RD, nv->client))
- return FALSE;
+ return false;
if (buf->data)
memcpy(buf->data + base, tx->map, size);
- return TRUE;
+ return true;
}
static void
@@ -190,7 +190,7 @@ nouveau_transfer_write(struct nouveau_context *nv, struct nouveau_transfer *tx,
struct nv04_resource *buf = nv04_resource(tx->base.resource);
uint8_t *data = tx->map + offset;
const unsigned base = tx->base.box.x + offset;
- const boolean can_cb = !((base | size) & 3);
+ const bool can_cb = !((base | size) & 3);
if (buf->data)
memcpy(data, buf->data + base, size);
@@ -219,32 +219,32 @@ nouveau_transfer_write(struct nouveau_context *nv, struct nouveau_transfer *tx,
/* Does a CPU wait for the buffer's backing data to become reliably accessible
* for write/read by waiting on the buffer's relevant fences.
*/
-static INLINE boolean
+static INLINE bool
nouveau_buffer_sync(struct nv04_resource *buf, unsigned rw)
{
if (rw == PIPE_TRANSFER_READ) {
if (!buf->fence_wr)
- return TRUE;
+ return true;
NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
!nouveau_fence_signalled(buf->fence_wr));
if (!nouveau_fence_wait(buf->fence_wr))
- return FALSE;
+ return false;
} else {
if (!buf->fence)
- return TRUE;
+ return true;
NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
!nouveau_fence_signalled(buf->fence));
if (!nouveau_fence_wait(buf->fence))
- return FALSE;
+ return false;
nouveau_fence_ref(NULL, &buf->fence);
}
nouveau_fence_ref(NULL, &buf->fence_wr);
- return TRUE;
+ return true;
}
-static INLINE boolean
+static INLINE bool
nouveau_buffer_busy(struct nv04_resource *buf, unsigned rw)
{
if (rw == PIPE_TRANSFER_READ)
@@ -292,11 +292,11 @@ nouveau_buffer_transfer_del(struct nouveau_context *nv,
}
/* Creates a cache in system memory of the buffer data. */
-static boolean
+static bool
nouveau_buffer_cache(struct nouveau_context *nv, struct nv04_resource *buf)
{
struct nouveau_transfer tx;
- boolean ret;
+ bool ret;
tx.base.resource = &buf->base;
tx.base.box.x = 0;
tx.base.box.width = buf->base.width0;
@@ -305,13 +305,13 @@ nouveau_buffer_cache(struct nouveau_context *nv, struct nv04_resource *buf)
if (!buf->data)
if (!nouveau_buffer_malloc(buf))
- return FALSE;
+ return false;
if (!(buf->status & NOUVEAU_BUFFER_STATUS_DIRTY))
- return TRUE;
+ return true;
nv->stats.buf_cache_count++;
- if (!nouveau_transfer_staging(nv, &tx, FALSE))
- return FALSE;
+ if (!nouveau_transfer_staging(nv, &tx, false))
+ return false;
ret = nouveau_transfer_read(nv, &tx);
if (ret) {
@@ -330,15 +330,15 @@ nouveau_buffer_cache(struct nouveau_context *nv, struct nv04_resource *buf)
* resource. This can be useful if we would otherwise have to wait for a read
* operation to complete on this data.
*/
-static INLINE boolean
+static INLINE bool
nouveau_buffer_should_discard(struct nv04_resource *buf, unsigned usage)
{
if (!(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
- return FALSE;
+ return false;
if (unlikely(buf->base.bind & PIPE_BIND_SHARED))
- return FALSE;
+ return false;
if (unlikely(usage & PIPE_TRANSFER_PERSISTENT))
- return FALSE;
+ return false;
return buf->mm && nouveau_buffer_busy(buf, PIPE_TRANSFER_WRITE);
}
@@ -408,7 +408,7 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe,
* back into VRAM on unmap. */
if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK;
- nouveau_transfer_staging(nv, tx, TRUE);
+ nouveau_transfer_staging(nv, tx, true);
} else {
if (buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
/* The GPU is currently writing to this buffer. Copy its current
@@ -419,13 +419,13 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe,
align_free(buf->data);
buf->data = NULL;
}
- nouveau_transfer_staging(nv, tx, FALSE);
+ nouveau_transfer_staging(nv, tx, false);
nouveau_transfer_read(nv, tx);
} else {
/* The buffer is currently idle. Create a staging area for writes,
* and make sure that the cached data is up-to-date. */
if (usage & PIPE_TRANSFER_WRITE)
- nouveau_transfer_staging(nv, tx, TRUE);
+ nouveau_transfer_staging(nv, tx, true);
if (!buf->data)
nouveau_buffer_cache(nv, buf);
}
@@ -477,7 +477,7 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe,
if (usage & PIPE_TRANSFER_DISCARD_RANGE) {
/* The whole range is being discarded, so it doesn't matter what was
* there before. No need to copy anything over. */
- nouveau_transfer_staging(nv, tx, TRUE);
+ nouveau_transfer_staging(nv, tx, true);
map = tx->map;
} else
if (nouveau_buffer_busy(buf, PIPE_TRANSFER_READ)) {
@@ -488,7 +488,7 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe,
} else {
/* It is expected that the returned buffer be a representation of the
* data in question, so we must copy it over from the buffer. */
- nouveau_transfer_staging(nv, tx, TRUE);
+ nouveau_transfer_staging(nv, tx, true);
if (tx->map)
memcpy(tx->map, map, box->width);
map = tx->map;
@@ -539,7 +539,7 @@ nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
const uint8_t bind = buf->base.bind;
/* make sure we invalidate dedicated caches */
if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
- nv->vbo_dirty = TRUE;
+ nv->vbo_dirty = true;
}
util_range_add(&buf->valid_buffer_range,
@@ -634,7 +634,7 @@ nouveau_buffer_create(struct pipe_screen *pscreen,
{
struct nouveau_screen *screen = nouveau_screen(pscreen);
struct nv04_resource *buffer;
- boolean ret;
+ bool ret;
buffer = CALLOC_STRUCT(nv04_resource);
if (!buffer)
@@ -678,7 +678,7 @@ nouveau_buffer_create(struct pipe_screen *pscreen,
}
ret = nouveau_buffer_allocate(screen, buffer, buffer->domain);
- if (ret == FALSE)
+ if (ret == false)
goto fail;
if (buffer->domain == NOUVEAU_BO_VRAM && screen->hint_buf_keep_sysmem_copy)
@@ -725,20 +725,20 @@ nouveau_user_buffer_create(struct pipe_screen *pscreen, void *ptr,
return &buffer->base;
}
-static INLINE boolean
+static INLINE bool
nouveau_buffer_data_fetch(struct nouveau_context *nv, struct nv04_resource *buf,
struct nouveau_bo *bo, unsigned offset, unsigned size)
{
if (!nouveau_buffer_malloc(buf))
- return FALSE;
+ return false;
if (nouveau_bo_map(bo, NOUVEAU_BO_RD, nv->client))
- return FALSE;
+ return false;
memcpy(buf->data, (uint8_t *)bo->map + offset, size);
- return TRUE;
+ return true;
}
/* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */
-boolean
+bool
nouveau_buffer_migrate(struct nouveau_context *nv,
struct nv04_resource *buf, const unsigned new_domain)
{
@@ -753,7 +753,7 @@ nouveau_buffer_migrate(struct nouveau_context *nv,
if (new_domain == NOUVEAU_BO_GART && old_domain == 0) {
if (!nouveau_buffer_allocate(screen, buf, new_domain))
- return FALSE;
+ return false;
ret = nouveau_bo_map(buf->bo, 0, nv->client);
if (ret)
return ret;
@@ -766,7 +766,7 @@ nouveau_buffer_migrate(struct nouveau_context *nv,
if (new_domain == NOUVEAU_BO_VRAM) {
/* keep a system memory copy of our data in case we hit a fallback */
if (!nouveau_buffer_data_fetch(nv, buf, buf->bo, buf->offset, size))
- return FALSE;
+ return false;
if (nouveau_mesa_debug)
debug_printf("migrating %u KiB to VRAM\n", size / 1024);
}
@@ -787,28 +787,28 @@ nouveau_buffer_migrate(struct nouveau_context *nv,
if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) {
struct nouveau_transfer tx;
if (!nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM))
- return FALSE;
+ return false;
tx.base.resource = &buf->base;
tx.base.box.x = 0;
tx.base.box.width = buf->base.width0;
tx.bo = NULL;
tx.map = NULL;
- if (!nouveau_transfer_staging(nv, &tx, FALSE))
- return FALSE;
+ if (!nouveau_transfer_staging(nv, &tx, false))
+ return false;
nouveau_transfer_write(nv, &tx, 0, tx.base.box.width);
nouveau_buffer_transfer_del(nv, &tx);
} else
- return FALSE;
+ return false;
assert(buf->domain == new_domain);
- return TRUE;
+ return true;
}
/* Migrate data from glVertexAttribPointer(non-VBO) user buffers to GART.
* We'd like to only allocate @size bytes here, but then we'd have to rebase
* the vertex indices ...
*/
-boolean
+bool
nouveau_user_buffer_upload(struct nouveau_context *nv,
struct nv04_resource *buf,
unsigned base, unsigned size)
@@ -820,14 +820,14 @@ nouveau_user_buffer_upload(struct nouveau_context *nv,
buf->base.width0 = base + size;
if (!nouveau_buffer_reallocate(screen, buf, NOUVEAU_BO_GART))
- return FALSE;
+ return false;
ret = nouveau_bo_map(buf->bo, 0, nv->client);
if (ret)
- return FALSE;
+ return false;
memcpy((uint8_t *)buf->bo->map + buf->offset + base, buf->data + base, size);
- return TRUE;
+ return true;
}
@@ -870,7 +870,7 @@ nouveau_scratch_runout_release(struct nouveau_context *nv)
/* Allocate an extra bo if we can't fit everything we need simultaneously.
* (Could happen for very large user arrays.)
*/
-static INLINE boolean
+static INLINE bool
nouveau_scratch_runout(struct nouveau_context *nv, unsigned size)
{
int ret;
@@ -904,7 +904,7 @@ nouveau_scratch_runout(struct nouveau_context *nv, unsigned size)
/* Continue to next scratch buffer, if available (no wrapping, large enough).
* Allocate it if it has not yet been created.
*/
-static INLINE boolean
+static INLINE bool
nouveau_scratch_next(struct nouveau_context *nv, unsigned size)
{
struct nouveau_bo *bo;
@@ -912,14 +912,14 @@ nouveau_scratch_next(struct nouveau_context *nv, unsigned size)
const unsigned i = (nv->scratch.id + 1) % NOUVEAU_MAX_SCRATCH_BUFS;
if ((size > nv->scratch.bo_size) || (i == nv->scratch.wrap))
- return FALSE;
+ return false;
nv->scratch.id = i;
bo = nv->scratch.bo[i];
if (!bo) {
ret = nouveau_scratch_bo_alloc(nv, &bo, nv->scratch.bo_size);
if (ret)
- return FALSE;
+ return false;
nv->scratch.bo[i] = bo;
}
nv->scratch.current = bo;
@@ -932,10 +932,10 @@ nouveau_scratch_next(struct nouveau_context *nv, unsigned size)
return !ret;
}
-static boolean
+static bool
nouveau_scratch_more(struct nouveau_context *nv, unsigned min_size)
{
- boolean ret;
+ bool ret;
ret = nouveau_scratch_next(nv, min_size);
if (!ret)