author     Marek Olšák <[email protected]>   2013-10-08 21:50:43 +0200
committer  Marek Olšák <[email protected]>   2013-10-25 11:55:55 +0200
commit     9807556e863ca4f49905598a18912852e96fae76 (patch)
tree       fe365ecd0dc1d3ad147be0d7e922764b5d0c42e8 /src
parent     6067a30838535c838262a9229b400afe4d92c184 (diff)
r600g,radeonsi: use fences provided by the winsys
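This commit replaces the hand-rolled fence machinery in r600g and radeonsi (a shared fence buffer object, manually emitted EVENT_WRITE_EOP packets, and a busy-wait on a dummy BO) with the fence interface exposed by the radeon winsys. The new pipe_screen hooks live in the shared r600_pipe_common.c and simply forward to the winsys. The sketch below is condensed from the r600_pipe_common.c hunk further down; it is an excerpt, not a standalone program, and relies on the driver's own headers and structs.

/* Condensed from the r600_pipe_common.c hunk in this commit: the screen's
 * fence callbacks just forward to the winsys fence implementation. */
static void r600_fence_reference(struct pipe_screen *screen,
                                 struct pipe_fence_handle **ptr,
                                 struct pipe_fence_handle *fence)
{
        struct radeon_winsys *rws = ((struct r600_common_screen*)screen)->ws;

        rws->fence_reference(ptr, fence);
}

static boolean r600_fence_signalled(struct pipe_screen *screen,
                                    struct pipe_fence_handle *fence)
{
        struct radeon_winsys *rws = ((struct r600_common_screen*)screen)->ws;

        /* A zero timeout turns the wait into a non-blocking "is it done?" query. */
        return rws->fence_wait(rws, fence, 0);
}

static boolean r600_fence_finish(struct pipe_screen *screen,
                                 struct pipe_fence_handle *fence,
                                 uint64_t timeout)
{
        struct radeon_winsys *rws = ((struct r600_common_screen*)screen)->ws;

        return rws->fence_wait(rws, fence, timeout);
}

r600_common_screen_init() then installs these as rscreen->b.fence_finish, fence_reference and fence_signalled, so both drivers share one implementation instead of duplicating it.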
Diffstat (limited to 'src')
-rw-r--r--   src/gallium/drivers/r600/r600_hw_context.c        33
-rw-r--r--   src/gallium/drivers/r600/r600_pipe.c              173
-rw-r--r--   src/gallium/drivers/r600/r600_pipe.h               28
-rw-r--r--   src/gallium/drivers/radeon/r600_pipe_common.c      30
-rw-r--r--   src/gallium/drivers/radeonsi/r600.h                 2
-rw-r--r--   src/gallium/drivers/radeonsi/r600_hw_context.c     28
-rw-r--r--   src/gallium/drivers/radeonsi/radeonsi_pipe.c      179
-rw-r--r--   src/gallium/drivers/radeonsi/radeonsi_pipe.h       26
8 files changed, 37 insertions, 462 deletions
diff --git a/src/gallium/drivers/r600/r600_hw_context.c b/src/gallium/drivers/r600/r600_hw_context.c index b127c7da192..5f3a9bd5d7b 100644 --- a/src/gallium/drivers/r600/r600_hw_context.c +++ b/src/gallium/drivers/r600/r600_hw_context.c @@ -331,9 +331,7 @@ void r600_context_flush(struct r600_context *ctx, unsigned flags) ctx->b.streamout.suspended = true; } - /* flush is needed to avoid lockups on some chips with user fences - * this will also flush the framebuffer cache - */ + /* flush the framebuffer cache */ ctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV | R600_CONTEXT_FLUSH_AND_INV_CB | R600_CONTEXT_FLUSH_AND_INV_DB | @@ -434,35 +432,6 @@ void r600_begin_new_cs(struct r600_context *ctx) ctx->initial_gfx_cs_size = ctx->b.rings.gfx.cs->cdw; } -void r600_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence_bo, unsigned offset, unsigned value) -{ - struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs; - uint64_t va; - - r600_need_cs_space(ctx, 10, FALSE); - - va = r600_resource_va(&ctx->screen->b.b, (void*)fence_bo); - va = va + (offset << 2); - - /* Use of WAIT_UNTIL is deprecated on Cayman+ */ - if (ctx->b.family >= CHIP_CAYMAN) { - cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0); - cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4); - } else { - r600_write_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1)); - } - - cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0); - cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5); - cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL; /* ADDRESS_LO */ - /* DATA_SEL | INT_EN | ADDRESS_HI */ - cs->buf[cs->cdw++] = (1 << 29) | (0 << 24) | ((va >> 32UL) & 0xFF); - cs->buf[cs->cdw++] = value; /* DATA_LO */ - cs->buf[cs->cdw++] = 0; /* DATA_HI */ - cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0); - cs->buf[cs->cdw++] = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx, fence_bo, RADEON_USAGE_WRITE); -} - /* The max number of bytes to copy per packet. 
*/ #define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - 8) diff --git a/src/gallium/drivers/r600/r600_pipe.c b/src/gallium/drivers/r600/r600_pipe.c index e091b084a15..9da42d17d22 100644 --- a/src/gallium/drivers/r600/r600_pipe.c +++ b/src/gallium/drivers/r600/r600_pipe.c @@ -68,81 +68,6 @@ static const struct debug_named_value r600_debug_options[] = { /* * pipe_context */ -static struct r600_fence *r600_create_fence(struct r600_context *rctx) -{ - struct r600_screen *rscreen = rctx->screen; - struct r600_fence *fence = NULL; - - pipe_mutex_lock(rscreen->fences.mutex); - - if (!rscreen->fences.bo) { - /* Create the shared buffer object */ - rscreen->fences.bo = (struct r600_resource*) - pipe_buffer_create(&rscreen->b.b, PIPE_BIND_CUSTOM, - PIPE_USAGE_STAGING, 4096); - if (!rscreen->fences.bo) { - R600_ERR("r600: failed to create bo for fence objects\n"); - goto out; - } - rscreen->fences.data = r600_buffer_map_sync_with_rings(&rctx->b, rscreen->fences.bo, PIPE_TRANSFER_READ_WRITE); - } - - if (!LIST_IS_EMPTY(&rscreen->fences.pool)) { - struct r600_fence *entry; - - /* Try to find a freed fence that has been signalled */ - LIST_FOR_EACH_ENTRY(entry, &rscreen->fences.pool, head) { - if (rscreen->fences.data[entry->index] != 0) { - LIST_DELINIT(&entry->head); - fence = entry; - break; - } - } - } - - if (!fence) { - /* Allocate a new fence */ - struct r600_fence_block *block; - unsigned index; - - if ((rscreen->fences.next_index + 1) >= 1024) { - R600_ERR("r600: too many concurrent fences\n"); - goto out; - } - - index = rscreen->fences.next_index++; - - if (!(index % FENCE_BLOCK_SIZE)) { - /* Allocate a new block */ - block = CALLOC_STRUCT(r600_fence_block); - if (block == NULL) - goto out; - - LIST_ADD(&block->head, &rscreen->fences.blocks); - } else { - block = LIST_ENTRY(struct r600_fence_block, rscreen->fences.blocks.next, head); - } - - fence = &block->fences[index % FENCE_BLOCK_SIZE]; - fence->index = index; - } - - pipe_reference_init(&fence->reference, 1); - - rscreen->fences.data[fence->index] = 0; - r600_context_emit_fence(rctx, rscreen->fences.bo, fence->index, 1); - - /* Create a dummy BO so that fence_finish without a timeout can sleep waiting for completion */ - fence->sleep_bo = (struct r600_resource*) - pipe_buffer_create(&rctx->screen->b.b, PIPE_BIND_CUSTOM, - PIPE_USAGE_STAGING, 1); - /* Add the fence as a dummy relocation. */ - r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, fence->sleep_bo, RADEON_USAGE_READWRITE); - -out: - pipe_mutex_unlock(rscreen->fences.mutex); - return fence; -} static void r600_flush(struct pipe_context *ctx, unsigned flags) { @@ -180,12 +105,11 @@ static void r600_flush_from_st(struct pipe_context *ctx, unsigned flags) { struct r600_context *rctx = (struct r600_context *)ctx; - struct r600_fence **rfence = (struct r600_fence**)fence; unsigned fflags; fflags = flags & PIPE_FLUSH_END_OF_FRAME ? 
RADEON_FLUSH_END_OF_FRAME : 0; - if (rfence) { - *rfence = r600_create_fence(rctx); + if (fence) { + *fence = rctx->b.ws->cs_create_fence(rctx->b.rings.gfx.cs); } /* flush gfx & dma ring, order does not matter as only one can be live */ if (rctx->b.rings.dma.cs) { @@ -888,98 +812,15 @@ static void r600_destroy_screen(struct pipe_screen* pscreen) compute_memory_pool_delete(rscreen->global_pool); } - if (rscreen->fences.bo) { - struct r600_fence_block *entry, *tmp; - - LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, &rscreen->fences.blocks, head) { - LIST_DEL(&entry->head); - FREE(entry); - } - - rscreen->b.ws->buffer_unmap(rscreen->fences.bo->cs_buf); - pipe_resource_reference((struct pipe_resource**)&rscreen->fences.bo, NULL); - } if (rscreen->trace_bo) { rscreen->b.ws->buffer_unmap(rscreen->trace_bo->cs_buf); pipe_resource_reference((struct pipe_resource**)&rscreen->trace_bo, NULL); } - pipe_mutex_destroy(rscreen->fences.mutex); rscreen->b.ws->destroy(rscreen->b.ws); FREE(rscreen); } -static void r600_fence_reference(struct pipe_screen *pscreen, - struct pipe_fence_handle **ptr, - struct pipe_fence_handle *fence) -{ - struct r600_fence **oldf = (struct r600_fence**)ptr; - struct r600_fence *newf = (struct r600_fence*)fence; - - if (pipe_reference(&(*oldf)->reference, &newf->reference)) { - struct r600_screen *rscreen = (struct r600_screen *)pscreen; - pipe_mutex_lock(rscreen->fences.mutex); - pipe_resource_reference((struct pipe_resource**)&(*oldf)->sleep_bo, NULL); - LIST_ADDTAIL(&(*oldf)->head, &rscreen->fences.pool); - pipe_mutex_unlock(rscreen->fences.mutex); - } - - *ptr = fence; -} - -static boolean r600_fence_signalled(struct pipe_screen *pscreen, - struct pipe_fence_handle *fence) -{ - struct r600_screen *rscreen = (struct r600_screen *)pscreen; - struct r600_fence *rfence = (struct r600_fence*)fence; - - return rscreen->fences.data[rfence->index] != 0; -} - -static boolean r600_fence_finish(struct pipe_screen *pscreen, - struct pipe_fence_handle *fence, - uint64_t timeout) -{ - struct r600_screen *rscreen = (struct r600_screen *)pscreen; - struct r600_fence *rfence = (struct r600_fence*)fence; - int64_t start_time = 0; - unsigned spins = 0; - - if (timeout != PIPE_TIMEOUT_INFINITE) { - start_time = os_time_get(); - - /* Convert to microseconds. */ - timeout /= 1000; - } - - while (rscreen->fences.data[rfence->index] == 0) { - /* Special-case infinite timeout - wait for the dummy BO to become idle */ - if (timeout == PIPE_TIMEOUT_INFINITE) { - rscreen->b.ws->buffer_wait(rfence->sleep_bo->buf, RADEON_USAGE_READWRITE); - break; - } - - /* The dummy BO will be busy until the CS including the fence has completed, or - * the GPU is reset. Don't bother continuing to spin when the BO is idle. 
*/ - if (!rscreen->b.ws->buffer_is_busy(rfence->sleep_bo->buf, RADEON_USAGE_READWRITE)) - break; - - if (++spins % 256) - continue; -#ifdef PIPE_OS_UNIX - sched_yield(); -#else - os_time_sleep(10); -#endif - if (timeout != PIPE_TIMEOUT_INFINITE && - os_time_get() - start_time >= timeout) { - break; - } - } - - return rscreen->fences.data[rfence->index] != 0; -} - static uint64_t r600_get_timestamp(struct pipe_screen *screen) { struct r600_screen *rscreen = (struct r600_screen*)screen; @@ -1035,9 +876,6 @@ struct pipe_screen *r600_screen_create(struct radeon_winsys *ws) } else { rscreen->b.b.is_format_supported = r600_is_format_supported; } - rscreen->b.b.fence_reference = r600_fence_reference; - rscreen->b.b.fence_signalled = r600_fence_signalled; - rscreen->b.b.fence_finish = r600_fence_finish; rscreen->b.b.get_driver_query_info = r600_get_driver_query_info; if (rscreen->b.info.has_uvd) { rscreen->b.b.get_video_param = ruvd_get_video_param; @@ -1113,13 +951,6 @@ struct pipe_screen *r600_screen_create(struct radeon_winsys *ws) rscreen->has_cp_dma = rscreen->b.info.drm_minor >= 27 && !(rscreen->b.debug_flags & DBG_NO_CP_DMA); - rscreen->fences.bo = NULL; - rscreen->fences.data = NULL; - rscreen->fences.next_index = 0; - LIST_INITHEAD(&rscreen->fences.pool); - LIST_INITHEAD(&rscreen->fences.blocks); - pipe_mutex_init(rscreen->fences.mutex); - rscreen->global_pool = compute_memory_pool_new(rscreen); rscreen->cs_count = 0; diff --git a/src/gallium/drivers/r600/r600_pipe.h b/src/gallium/drivers/r600/r600_pipe.h index 75a721cb99b..6aa944c54a0 100644 --- a/src/gallium/drivers/r600/r600_pipe.h +++ b/src/gallium/drivers/r600/r600_pipe.h @@ -187,17 +187,6 @@ struct r600_viewport_state { struct pipe_viewport_state state; }; -struct r600_pipe_fences { - struct r600_resource *bo; - unsigned *data; - unsigned next_index; - /* linked list of preallocated blocks */ - struct list_head blocks; - /* linked list of freed fences */ - struct list_head pool; - pipe_mutex mutex; -}; - /* This must start from 16. */ /* features */ #define DBG_NO_LLVM (1 << 17) @@ -220,7 +209,6 @@ struct r600_screen { bool has_msaa; bool has_cp_dma; bool has_compressed_msaa_texturing; - struct r600_pipe_fences fences; /*for compute global memory binding, we allocate stuff here, instead of * buffers. 
@@ -341,20 +329,6 @@ struct r600_textures_info { uint32_t *buffer_constants; }; -struct r600_fence { - struct pipe_reference reference; - unsigned index; /* in the shared bo */ - struct r600_resource *sleep_bo; - struct list_head head; -}; - -#define FENCE_BLOCK_SIZE 16 - -struct r600_fence_block { - struct r600_fence fences[FENCE_BLOCK_SIZE]; - struct list_head head; -}; - struct r600_constbuf_state { struct r600_atom atom; @@ -672,8 +646,6 @@ void r600_update_db_shader_control(struct r600_context * rctx); void r600_get_backend_mask(struct r600_context *ctx); void r600_context_flush(struct r600_context *ctx, unsigned flags); void r600_begin_new_cs(struct r600_context *ctx); -void r600_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence, - unsigned offset, unsigned value); void r600_flush_emit(struct r600_context *ctx); void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw, boolean count_draw_in); void r600_need_dma_space(struct r600_context *ctx, unsigned num_dw); diff --git a/src/gallium/drivers/radeon/r600_pipe_common.c b/src/gallium/drivers/radeon/r600_pipe_common.c index f83c7e42a9e..5674f819466 100644 --- a/src/gallium/drivers/radeon/r600_pipe_common.c +++ b/src/gallium/drivers/radeon/r600_pipe_common.c @@ -47,6 +47,32 @@ static const struct debug_named_value common_debug_options[] = { DEBUG_NAMED_VALUE_END /* must be last */ }; +static void r600_fence_reference(struct pipe_screen *screen, + struct pipe_fence_handle **ptr, + struct pipe_fence_handle *fence) +{ + struct radeon_winsys *rws = ((struct r600_common_screen*)screen)->ws; + + rws->fence_reference(ptr, fence); +} + +static boolean r600_fence_signalled(struct pipe_screen *screen, + struct pipe_fence_handle *fence) +{ + struct radeon_winsys *rws = ((struct r600_common_screen*)screen)->ws; + + return rws->fence_wait(rws, fence, 0); +} + +static boolean r600_fence_finish(struct pipe_screen *screen, + struct pipe_fence_handle *fence, + uint64_t timeout) +{ + struct radeon_winsys *rws = ((struct r600_common_screen*)screen)->ws; + + return rws->fence_wait(rws, fence, timeout); +} + static bool r600_interpret_tiling(struct r600_common_screen *rscreen, uint32_t tiling_config) { @@ -164,6 +190,10 @@ bool r600_common_screen_init(struct r600_common_screen *rscreen, { ws->query_info(ws, &rscreen->info); + rscreen->b.fence_finish = r600_fence_finish; + rscreen->b.fence_reference = r600_fence_reference; + rscreen->b.fence_signalled = r600_fence_signalled; + rscreen->ws = ws; rscreen->family = rscreen->info.family; rscreen->chip_class = rscreen->info.chip_class; diff --git a/src/gallium/drivers/radeonsi/r600.h b/src/gallium/drivers/radeonsi/r600.h index 0a9380bf03f..13bbad4b369 100644 --- a/src/gallium/drivers/radeonsi/r600.h +++ b/src/gallium/drivers/radeonsi/r600.h @@ -78,8 +78,6 @@ void r600_context_queries_suspend(struct r600_context *ctx); void r600_context_queries_resume(struct r600_context *ctx); void r600_query_predication(struct r600_context *ctx, struct r600_query *query, int operation, int flag_wait); -void si_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence, - unsigned offset, unsigned value); bool si_is_timer_query(unsigned type); bool si_query_needs_begin(unsigned type); diff --git a/src/gallium/drivers/radeonsi/r600_hw_context.c b/src/gallium/drivers/radeonsi/r600_hw_context.c index b6e7a0f45be..3003dad5e8a 100644 --- a/src/gallium/drivers/radeonsi/r600_hw_context.c +++ b/src/gallium/drivers/radeonsi/r600_hw_context.c @@ -161,9 +161,6 @@ void si_need_cs_space(struct 
r600_context *ctx, unsigned num_dw, /* Count in framebuffer cache flushes at the end of CS. */ num_dw += ctx->atoms.cache_flush->num_dw; - /* Save 16 dwords for the fence mechanism. */ - num_dw += 16; - #if R600_TRACE_CS if (ctx->screen->trace_bo) { num_dw += R600_TRACE_CS_DWORDS; @@ -203,7 +200,7 @@ void si_context_flush(struct r600_context *ctx, unsigned flags) R600_CONTEXT_INV_TEX_CACHE; si_emit_cache_flush(&ctx->b, NULL); - /* partial flush is needed to avoid lockups on some chips with user fences */ + /* this is probably not needed anymore */ cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0); cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4); @@ -279,29 +276,6 @@ void si_begin_new_cs(struct r600_context *ctx) si_all_descriptors_begin_new_cs(ctx); } -void si_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence_bo, unsigned offset, unsigned value) -{ - struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs; - uint64_t va; - - si_need_cs_space(ctx, 10, FALSE); - - va = r600_resource_va(&ctx->screen->b.b, (void*)fence_bo); - va = va + (offset << 2); - - cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0); - cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4); - cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0); - cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5); - cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL; /* ADDRESS_LO */ - /* DATA_SEL | INT_EN | ADDRESS_HI */ - cs->buf[cs->cdw++] = (1 << 29) | (0 << 24) | ((va >> 32UL) & 0xFF); - cs->buf[cs->cdw++] = value; /* DATA_LO */ - cs->buf[cs->cdw++] = 0; /* DATA_HI */ - cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0); - cs->buf[cs->cdw++] = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx, fence_bo, RADEON_USAGE_WRITE); -} - static unsigned r600_query_read_result(char *map, unsigned start_index, unsigned end_index, bool test_status_bit) { diff --git a/src/gallium/drivers/radeonsi/radeonsi_pipe.c b/src/gallium/drivers/radeonsi/radeonsi_pipe.c index 121fd186fe7..b2f0866900b 100644 --- a/src/gallium/drivers/radeonsi/radeonsi_pipe.c +++ b/src/gallium/drivers/radeonsi/radeonsi_pipe.c @@ -54,95 +54,17 @@ /* * pipe_context */ -static struct r600_fence *r600_create_fence(struct r600_context *rctx) -{ - struct r600_screen *rscreen = rctx->screen; - struct r600_fence *fence = NULL; - - pipe_mutex_lock(rscreen->fences.mutex); - - if (!rscreen->fences.bo) { - /* Create the shared buffer object */ - rscreen->fences.bo = r600_resource_create_custom(&rscreen->b.b, - PIPE_USAGE_STAGING, - 4096); - if (!rscreen->fences.bo) { - R600_ERR("r600: failed to create bo for fence objects\n"); - goto out; - } - rscreen->fences.data = rctx->b.ws->buffer_map(rscreen->fences.bo->cs_buf, - rctx->b.rings.gfx.cs, - PIPE_TRANSFER_READ_WRITE); - } - - if (!LIST_IS_EMPTY(&rscreen->fences.pool)) { - struct r600_fence *entry; - - /* Try to find a freed fence that has been signalled */ - LIST_FOR_EACH_ENTRY(entry, &rscreen->fences.pool, head) { - if (rscreen->fences.data[entry->index] != 0) { - LIST_DELINIT(&entry->head); - fence = entry; - break; - } - } - } - - if (!fence) { - /* Allocate a new fence */ - struct r600_fence_block *block; - unsigned index; - - if ((rscreen->fences.next_index + 1) >= 1024) { - R600_ERR("r600: too many concurrent fences\n"); - goto out; - } - - index = rscreen->fences.next_index++; - - if (!(index % FENCE_BLOCK_SIZE)) { - /* Allocate a new block */ - block = CALLOC_STRUCT(r600_fence_block); - if (block == NULL) - goto out; - - LIST_ADD(&block->head, 
&rscreen->fences.blocks); - } else { - block = LIST_ENTRY(struct r600_fence_block, rscreen->fences.blocks.next, head); - } - - fence = &block->fences[index % FENCE_BLOCK_SIZE]; - fence->index = index; - } - - pipe_reference_init(&fence->reference, 1); - - rscreen->fences.data[fence->index] = 0; - si_context_emit_fence(rctx, rscreen->fences.bo, fence->index, 1); - - /* Create a dummy BO so that fence_finish without a timeout can sleep waiting for completion */ - fence->sleep_bo = r600_resource_create_custom(&rctx->screen->b.b, PIPE_USAGE_STAGING, 1); - - /* Add the fence as a dummy relocation. */ - r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, fence->sleep_bo, RADEON_USAGE_READWRITE); - -out: - pipe_mutex_unlock(rscreen->fences.mutex); - return fence; -} - - void radeonsi_flush(struct pipe_context *ctx, struct pipe_fence_handle **fence, unsigned flags) { struct r600_context *rctx = (struct r600_context *)ctx; - struct r600_fence **rfence = (struct r600_fence**)fence; struct pipe_query *render_cond = NULL; boolean render_cond_cond = FALSE; unsigned render_cond_mode = 0; - if (rfence) - *rfence = r600_create_fence(rctx); + if (fence) { + *fence = rctx->b.ws->cs_create_fence(rctx->b.rings.gfx.cs); + } /* Disable render condition. */ if (rctx->current_render_cond) { @@ -658,18 +580,6 @@ static void r600_destroy_screen(struct pipe_screen* pscreen) r600_common_screen_cleanup(&rscreen->b); - if (rscreen->fences.bo) { - struct r600_fence_block *entry, *tmp; - - LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, &rscreen->fences.blocks, head) { - LIST_DEL(&entry->head); - FREE(entry); - } - - rscreen->b.ws->buffer_unmap(rscreen->fences.bo->cs_buf); - r600_resource_reference(&rscreen->fences.bo, NULL); - } - #if R600_TRACE_CS if (rscreen->trace_bo) { rscreen->ws->buffer_unmap(rscreen->trace_bo->cs_buf); @@ -677,83 +587,10 @@ static void r600_destroy_screen(struct pipe_screen* pscreen) } #endif - pipe_mutex_destroy(rscreen->fences.mutex); - rscreen->b.ws->destroy(rscreen->b.ws); FREE(rscreen); } -static void r600_fence_reference(struct pipe_screen *pscreen, - struct pipe_fence_handle **ptr, - struct pipe_fence_handle *fence) -{ - struct r600_fence **oldf = (struct r600_fence**)ptr; - struct r600_fence *newf = (struct r600_fence*)fence; - - if (pipe_reference(&(*oldf)->reference, &newf->reference)) { - struct r600_screen *rscreen = (struct r600_screen *)pscreen; - pipe_mutex_lock(rscreen->fences.mutex); - r600_resource_reference(&(*oldf)->sleep_bo, NULL); - LIST_ADDTAIL(&(*oldf)->head, &rscreen->fences.pool); - pipe_mutex_unlock(rscreen->fences.mutex); - } - - *ptr = fence; -} - -static boolean r600_fence_signalled(struct pipe_screen *pscreen, - struct pipe_fence_handle *fence) -{ - struct r600_screen *rscreen = (struct r600_screen *)pscreen; - struct r600_fence *rfence = (struct r600_fence*)fence; - - return rscreen->fences.data[rfence->index] != 0; -} - -static boolean r600_fence_finish(struct pipe_screen *pscreen, - struct pipe_fence_handle *fence, - uint64_t timeout) -{ - struct r600_screen *rscreen = (struct r600_screen *)pscreen; - struct r600_fence *rfence = (struct r600_fence*)fence; - int64_t start_time = 0; - unsigned spins = 0; - - if (timeout != PIPE_TIMEOUT_INFINITE) { - start_time = os_time_get(); - - /* Convert to microseconds. 
*/ - timeout /= 1000; - } - - while (rscreen->fences.data[rfence->index] == 0) { - /* Special-case infinite timeout - wait for the dummy BO to become idle */ - if (timeout == PIPE_TIMEOUT_INFINITE) { - rscreen->b.ws->buffer_wait(rfence->sleep_bo->buf, RADEON_USAGE_READWRITE); - break; - } - - /* The dummy BO will be busy until the CS including the fence has completed, or - * the GPU is reset. Don't bother continuing to spin when the BO is idle. */ - if (!rscreen->b.ws->buffer_is_busy(rfence->sleep_bo->buf, RADEON_USAGE_READWRITE)) - break; - - if (++spins % 256) - continue; -#ifdef PIPE_OS_UNIX - sched_yield(); -#else - os_time_sleep(10); -#endif - if (timeout != PIPE_TIMEOUT_INFINITE && - os_time_get() - start_time >= timeout) { - break; - } - } - - return rscreen->fences.data[rfence->index] != 0; -} - static uint64_t r600_get_timestamp(struct pipe_screen *screen) { struct r600_screen *rscreen = (struct r600_screen*)screen; @@ -782,9 +619,6 @@ struct pipe_screen *radeonsi_screen_create(struct radeon_winsys *ws) rscreen->b.b.get_compute_param = r600_get_compute_param; rscreen->b.b.get_timestamp = r600_get_timestamp; rscreen->b.b.is_format_supported = si_is_format_supported; - rscreen->b.b.fence_reference = r600_fence_reference; - rscreen->b.b.fence_signalled = r600_fence_signalled; - rscreen->b.b.fence_finish = r600_fence_finish; if (rscreen->b.info.has_uvd) { rscreen->b.b.get_video_param = ruvd_get_video_param; rscreen->b.b.is_video_format_supported = ruvd_is_format_supported; @@ -802,13 +636,6 @@ struct pipe_screen *radeonsi_screen_create(struct radeon_winsys *ws) if (debug_get_bool_option("RADEON_DUMP_SHADERS", FALSE)) rscreen->b.debug_flags |= DBG_FS | DBG_VS | DBG_GS | DBG_PS | DBG_CS; - rscreen->fences.bo = NULL; - rscreen->fences.data = NULL; - rscreen->fences.next_index = 0; - LIST_INITHEAD(&rscreen->fences.pool); - LIST_INITHEAD(&rscreen->fences.blocks); - pipe_mutex_init(rscreen->fences.mutex); - #if R600_TRACE_CS rscreen->cs_count = 0; if (rscreen->info.drm_minor >= 28) { diff --git a/src/gallium/drivers/radeonsi/radeonsi_pipe.h b/src/gallium/drivers/radeonsi/radeonsi_pipe.h index 26f7e09aebc..1d4a91bf8ad 100644 --- a/src/gallium/drivers/radeonsi/radeonsi_pipe.h +++ b/src/gallium/drivers/radeonsi/radeonsi_pipe.h @@ -53,20 +53,8 @@ struct si_pipe_compute; -struct r600_pipe_fences { - struct r600_resource *bo; - unsigned *data; - unsigned next_index; - /* linked list of preallocated blocks */ - struct list_head blocks; - /* linked list of freed fences */ - struct list_head pool; - pipe_mutex mutex; -}; - struct r600_screen { struct r600_common_screen b; - struct r600_pipe_fences fences; #if R600_TRACE_CS struct r600_resource *trace_bo; uint32_t *trace_ptr; @@ -99,20 +87,6 @@ struct r600_textures_info { unsigned n_samplers; }; -struct r600_fence { - struct pipe_reference reference; - unsigned index; /* in the shared bo */ - struct r600_resource *sleep_bo; - struct list_head head; -}; - -#define FENCE_BLOCK_SIZE 16 - -struct r600_fence_block { - struct r600_fence fences[FENCE_BLOCK_SIZE]; - struct list_head head; -}; - #define SI_NUM_ATOMS(rctx) (sizeof((rctx)->atoms)/sizeof((rctx)->atoms.array[0])) #define SI_NUM_SHADERS (PIPE_SHADER_FRAGMENT+1) |
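On the submission side, both r600_flush_from_st() and radeonsi_flush() now obtain the fence from the winsys when the state tracker asks for one, instead of calling the removed r600_create_fence(). A minimal sketch of that path, condensed from the hunks above (the surrounding ring-flush code is elided):

        if (fence) {
                /* The fence is tied to the gfx command stream about to be flushed. */
                *fence = rctx->b.ws->cs_create_fence(rctx->b.rings.gfx.cs);
        }
        /* ... the gfx (and, if present, DMA) rings are then flushed as before ... */

Waiting on such a fence goes through the winsys fence_wait() shown in the r600_pipe_common.c hunk, which replaces the old spin loop over the shared fence buffer and the dummy sleep BO.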