author    | Marek Olšák <[email protected]> | 2019-01-18 19:35:04 -0500
committer | Marek Olšák <[email protected]> | 2019-01-22 13:34:03 -0500
commit    | d85917deafb2fb2f880398de358b4b27fb32bcd0 (patch)
tree      | d54901e8704a839aff00eb15c7b70fc5f51b06e5 /src/gallium/drivers/radeonsi
parent    | 260ff576470f45991ce2bc8934494c82e98c061b (diff)
radeonsi: rename rfence -> sfence
Reviewed-by: Bas Nieuwenhuizen <[email protected]>
Diffstat (limited to 'src/gallium/drivers/radeonsi')
-rw-r--r-- | src/gallium/drivers/radeonsi/si_fence.c | 98
1 file changed, 49 insertions, 49 deletions
diff --git a/src/gallium/drivers/radeonsi/si_fence.c b/src/gallium/drivers/radeonsi/si_fence.c
index bb53ccba947..78da742b5da 100644
--- a/src/gallium/drivers/radeonsi/si_fence.c
+++ b/src/gallium/drivers/radeonsi/si_fence.c
@@ -286,15 +286,15 @@ static boolean si_fence_finish(struct pipe_screen *screen,
                               uint64_t timeout)
 {
         struct radeon_winsys *rws = ((struct si_screen*)screen)->ws;
-        struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
+        struct si_multi_fence *sfence = (struct si_multi_fence *)fence;
         struct si_context *sctx;
         int64_t abs_timeout = os_time_get_absolute_timeout(timeout);
 
         ctx = threaded_context_unwrap_sync(ctx);
         sctx = (struct si_context*)(ctx ? ctx : NULL);
 
-        if (!util_queue_fence_is_signalled(&rfence->ready)) {
-                if (rfence->tc_token) {
+        if (!util_queue_fence_is_signalled(&sfence->ready)) {
+                if (sfence->tc_token) {
                         /* Ensure that si_flush_from_st will be called for
                          * this fence, but only if we're in the API thread
                          * where the context is current.
@@ -303,7 +303,7 @@ static boolean si_fence_finish(struct pipe_screen *screen,
                          * be in flight in the driver thread, so the fence
                          * may not be ready yet when this call returns.
                          */
-                        threaded_context_flush(ctx, rfence->tc_token,
+                        threaded_context_flush(ctx, sfence->tc_token,
                                                timeout == 0);
                 }
 
@@ -311,9 +311,9 @@ static boolean si_fence_finish(struct pipe_screen *screen,
                         return false;
 
                 if (timeout == PIPE_TIMEOUT_INFINITE) {
-                        util_queue_fence_wait(&rfence->ready);
+                        util_queue_fence_wait(&sfence->ready);
                 } else {
-                        if (!util_queue_fence_wait_timeout(&rfence->ready, abs_timeout))
+                        if (!util_queue_fence_wait_timeout(&sfence->ready, abs_timeout))
                                 return false;
                 }
 
@@ -323,8 +323,8 @@ static boolean si_fence_finish(struct pipe_screen *screen,
                 }
         }
 
-        if (rfence->sdma) {
-                if (!rws->fence_wait(rws, rfence->sdma, timeout))
+        if (sfence->sdma) {
+                if (!rws->fence_wait(rws, sfence->sdma, timeout))
                         return false;
 
                 /* Recompute the timeout after waiting. */
@@ -334,19 +334,19 @@ static boolean si_fence_finish(struct pipe_screen *screen,
                 }
         }
 
-        if (!rfence->gfx)
+        if (!sfence->gfx)
                 return true;
 
-        if (rfence->fine.buf &&
-            si_fine_fence_signaled(rws, &rfence->fine)) {
-                rws->fence_reference(&rfence->gfx, NULL);
-                si_resource_reference(&rfence->fine.buf, NULL);
+        if (sfence->fine.buf &&
+            si_fine_fence_signaled(rws, &sfence->fine)) {
+                rws->fence_reference(&sfence->gfx, NULL);
+                si_resource_reference(&sfence->fine.buf, NULL);
                 return true;
         }
 
         /* Flush the gfx IB if it hasn't been flushed yet. */
-        if (sctx && rfence->gfx_unflushed.ctx == sctx &&
-            rfence->gfx_unflushed.ib_index == sctx->num_gfx_cs_flushes) {
+        if (sctx && sfence->gfx_unflushed.ctx == sctx &&
+            sfence->gfx_unflushed.ib_index == sctx->num_gfx_cs_flushes) {
                 /* Section 4.1.2 (Signaling) of the OpenGL 4.6 (Core profile)
                  * spec says:
                  *
@@ -373,7 +373,7 @@ static boolean si_fence_finish(struct pipe_screen *screen,
                                   (timeout ? 0 : PIPE_FLUSH_ASYNC) |
                                    RADEON_FLUSH_START_NEXT_GFX_IB_NOW,
                                   NULL);
-                rfence->gfx_unflushed.ctx = NULL;
+                sfence->gfx_unflushed.ctx = NULL;
 
                 if (!timeout)
                         return false;
@@ -385,13 +385,13 @@ static boolean si_fence_finish(struct pipe_screen *screen,
                 }
         }
 
-        if (rws->fence_wait(rws, rfence->gfx, timeout))
+        if (rws->fence_wait(rws, sfence->gfx, timeout))
                 return true;
 
         /* Re-check in case the GPU is slow or hangs, but the commands before
          * the fine-grained fence have completed. */
-        if (rfence->fine.buf &&
-            si_fine_fence_signaled(rws, &rfence->fine))
+        if (sfence->fine.buf &&
+            si_fine_fence_signaled(rws, &sfence->fine))
                 return true;
 
         return false;
@@ -403,12 +403,12 @@ static void si_create_fence_fd(struct pipe_context *ctx,
 {
         struct si_screen *sscreen = (struct si_screen*)ctx->screen;
         struct radeon_winsys *ws = sscreen->ws;
-        struct si_multi_fence *rfence;
+        struct si_multi_fence *sfence;
 
         *pfence = NULL;
 
-        rfence = si_create_multi_fence();
-        if (!rfence)
+        sfence = si_create_multi_fence();
+        if (!sfence)
                 return;
 
         switch (type) {
@@ -416,14 +416,14 @@ static void si_create_fence_fd(struct pipe_context *ctx,
                 if (!sscreen->info.has_fence_to_handle)
                         goto finish;
 
-                rfence->gfx = ws->fence_import_sync_file(ws, fd);
+                sfence->gfx = ws->fence_import_sync_file(ws, fd);
                 break;
 
         case PIPE_FD_TYPE_SYNCOBJ:
                 if (!sscreen->info.has_syncobj)
                         goto finish;
 
-                rfence->gfx = ws->fence_import_syncobj(ws, fd);
+                sfence->gfx = ws->fence_import_syncobj(ws, fd);
                 break;
 
         default:
@@ -431,12 +431,12 @@ static void si_create_fence_fd(struct pipe_context *ctx,
         }
 
 finish:
-        if (!rfence->gfx) {
-                FREE(rfence);
+        if (!sfence->gfx) {
+                FREE(sfence);
                 return;
         }
 
-        *pfence = (struct pipe_fence_handle*)rfence;
+        *pfence = (struct pipe_fence_handle*)sfence;
 }
 
 static int si_fence_get_fd(struct pipe_screen *screen,
@@ -444,26 +444,26 @@ static int si_fence_get_fd(struct pipe_screen *screen,
 {
         struct si_screen *sscreen = (struct si_screen*)screen;
         struct radeon_winsys *ws = sscreen->ws;
-        struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
+        struct si_multi_fence *sfence = (struct si_multi_fence *)fence;
         int gfx_fd = -1, sdma_fd = -1;
 
         if (!sscreen->info.has_fence_to_handle)
                 return -1;
 
-        util_queue_fence_wait(&rfence->ready);
+        util_queue_fence_wait(&sfence->ready);
 
         /* Deferred fences aren't supported. */
-        assert(!rfence->gfx_unflushed.ctx);
-        if (rfence->gfx_unflushed.ctx)
+        assert(!sfence->gfx_unflushed.ctx);
+        if (sfence->gfx_unflushed.ctx)
                 return -1;
 
-        if (rfence->sdma) {
-                sdma_fd = ws->fence_export_sync_file(ws, rfence->sdma);
+        if (sfence->sdma) {
+                sdma_fd = ws->fence_export_sync_file(ws, sfence->sdma);
                 if (sdma_fd == -1)
                         return -1;
         }
-        if (rfence->gfx) {
-                gfx_fd = ws->fence_export_sync_file(ws, rfence->gfx);
+        if (sfence->gfx) {
+                gfx_fd = ws->fence_export_sync_file(ws, sfence->gfx);
                 if (gfx_fd == -1) {
                         if (sdma_fd != -1)
                                 close(sdma_fd);
@@ -584,15 +584,15 @@ static void si_fence_server_signal(struct pipe_context *ctx,
                                    struct pipe_fence_handle *fence)
 {
         struct si_context *sctx = (struct si_context *)ctx;
-        struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
+        struct si_multi_fence *sfence = (struct si_multi_fence *)fence;
 
         /* We should have at least one syncobj to signal */
-        assert(rfence->sdma || rfence->gfx);
+        assert(sfence->sdma || sfence->gfx);
 
-        if (rfence->sdma)
-                si_add_syncobj_signal(sctx, rfence->sdma);
-        if (rfence->gfx)
-                si_add_syncobj_signal(sctx, rfence->gfx);
+        if (sfence->sdma)
+                si_add_syncobj_signal(sctx, sfence->sdma);
+        if (sfence->gfx)
+                si_add_syncobj_signal(sctx, sfence->gfx);
 
         /**
          * The spec does not require a flush here. We insert a flush
@@ -611,13 +611,13 @@ static void si_fence_server_sync(struct pipe_context *ctx,
                                  struct pipe_fence_handle *fence)
 {
         struct si_context *sctx = (struct si_context *)ctx;
-        struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
+        struct si_multi_fence *sfence = (struct si_multi_fence *)fence;
 
-        util_queue_fence_wait(&rfence->ready);
+        util_queue_fence_wait(&sfence->ready);
 
         /* Unflushed fences from the same context are no-ops. */
-        if (rfence->gfx_unflushed.ctx &&
-            rfence->gfx_unflushed.ctx == sctx)
+        if (sfence->gfx_unflushed.ctx &&
+            sfence->gfx_unflushed.ctx == sctx)
                 return;
 
         /* All unflushed commands will not start execution before
@@ -627,10 +627,10 @@ static void si_fence_server_sync(struct pipe_context *ctx,
          */
         si_flush_from_st(ctx, NULL, PIPE_FLUSH_ASYNC);
 
-        if (rfence->sdma)
-                si_add_fence_dependency(sctx, rfence->sdma);
-        if (rfence->gfx)
-                si_add_fence_dependency(sctx, rfence->gfx);
+        if (sfence->sdma)
+                si_add_fence_dependency(sctx, sfence->sdma);
+        if (sfence->gfx)
+                si_add_fence_dependency(sctx, sfence->gfx);
 }
 
 void si_init_fence_functions(struct si_context *ctx)
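For readers skimming the diff: every hunk touches the same pattern. The opaque gallium pipe_fence_handle is cast to radeonsi's si_multi_fence wrapper, and its sdma/gfx winsys fences (now reached through the renamed local sfence) are handled one by one. The sketch below is a minimal, hypothetical illustration of that pattern only; the stand-in names (winsys_sketch, multi_fence_sketch, multi_fence_wait_sketch) are not the driver's real types, and the real si_fence_finish additionally handles deferred flushes, timeouts, and fine-grained fences as shown in the hunks above.

```c
#include <stdbool.h>
#include <stdint.h>

struct pipe_fence_handle;           /* opaque gallium fence handle */
struct winsys_fence_sketch;         /* stand-in for a winsys-level fence */

/* Stand-in for the winsys vtable; only the wait hook used below. */
struct winsys_sketch {
        bool (*fence_wait)(struct winsys_sketch *ws,
                           struct winsys_fence_sketch *fence,
                           uint64_t timeout);
};

/* Reduced stand-in for si_multi_fence: just the two backing fences. */
struct multi_fence_sketch {
        struct winsys_fence_sketch *sdma;  /* DMA queue fence, may be NULL */
        struct winsys_fence_sketch *gfx;   /* GFX queue fence, may be NULL */
};

/* Wait on both parts, SDMA first and then GFX, mirroring the order used by
 * si_fence_finish above (minus its timeout recomputation and fine-grained
 * fence shortcuts). */
static bool multi_fence_wait_sketch(struct winsys_sketch *ws,
                                    struct pipe_fence_handle *fence,
                                    uint64_t timeout)
{
        struct multi_fence_sketch *sfence = (struct multi_fence_sketch *)fence;

        if (sfence->sdma && !ws->fence_wait(ws, sfence->sdma, timeout))
                return false;
        if (sfence->gfx && !ws->fence_wait(ws, sfence->gfx, timeout))
                return false;
        return true;
}
```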