summary refs log tree commit diff stats
path: root/src/amd/vulkan/winsys/amdgpu
diff options
context:
space:
mode:
authorDave Airlie <[email protected]>2017-07-19 04:02:39 +0100
committerDave Airlie <[email protected]>2017-07-20 01:56:04 +0100
commit9ac1432a5714f2c946d005dcdaa90dc5f738a6d8 (patch)
treeaafcd87fb09a318392e5948a14a6b9499b177af8 /src/amd/vulkan/winsys/amdgpu
parentaee382510edafe6dda4e1890b5f0c1458fc0f6ff (diff)
radv: port to new libdrm API.
This bumps the libdrm requirement for amdgpu to 2.4.82. Reviewed-by: Bas Nieuwenhuizen <[email protected]> Signed-off-by: Dave Airlie <[email protected]>
Diffstat (limited to 'src/amd/vulkan/winsys/amdgpu')
-rw-r--r--src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c121
 1 file changed, 92 insertions(+), 29 deletions(-)
diff --git a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
index 91212d2c755..93243dfd135 100644
--- a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
+++ b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
@@ -96,10 +96,6 @@ static int ring_to_hw_ip(enum ring_type ring)
}
}
-static void radv_amdgpu_wait_sems(struct radv_amdgpu_ctx *ctx,
- uint32_t ip_type,
- uint32_t ring,
- struct radv_amdgpu_sem_info *sem_info);
static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
uint32_t ip_type,
uint32_t ring,
@@ -950,8 +946,6 @@ static int radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
sem_info.signal_sems = signal_sem;
sem_info.signal_sem_count = signal_sem_count;
- radv_amdgpu_wait_sems(ctx, cs->hw_ip, queue_idx, &sem_info);
-
if (!cs->ws->use_ib_bos) {
ret = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, &sem_info, cs_array,
cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
@@ -1062,31 +1056,17 @@ static bool radv_amdgpu_ctx_wait_idle(struct radeon_winsys_ctx *rwctx,
static struct radeon_winsys_sem *radv_amdgpu_create_sem(struct radeon_winsys *_ws)
{
- int ret;
- amdgpu_semaphore_handle sem;
-
- ret = amdgpu_cs_create_semaphore(&sem);
- if (ret)
+ struct amdgpu_cs_fence *sem = CALLOC_STRUCT(amdgpu_cs_fence);
+ if (!sem)
return NULL;
+
return (struct radeon_winsys_sem *)sem;
}
static void radv_amdgpu_destroy_sem(struct radeon_winsys_sem *_sem)
{
- amdgpu_semaphore_handle sem = (amdgpu_semaphore_handle)_sem;
- amdgpu_cs_destroy_semaphore(sem);
-}
-
-static void radv_amdgpu_wait_sems(struct radv_amdgpu_ctx *ctx,
- uint32_t ip_type,
- uint32_t ring,
- struct radv_amdgpu_sem_info *sem_info)
-{
- for (unsigned i = 0; i < sem_info->wait_sem_count; i++) {
- amdgpu_semaphore_handle sem = (amdgpu_semaphore_handle)sem_info->wait_sems[i];
- amdgpu_cs_wait_semaphore(ctx->ctx, ip_type, 0, ring,
- sem);
- }
+ struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)_sem;
+ FREE(sem);
}
static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
@@ -1095,9 +1075,12 @@ static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
struct radv_amdgpu_sem_info *sem_info)
{
for (unsigned i = 0; i < sem_info->signal_sem_count; i++) {
- amdgpu_semaphore_handle sem = (amdgpu_semaphore_handle)sem_info->signal_sems[i];
- amdgpu_cs_signal_semaphore(ctx->ctx, ip_type, 0, ring,
- sem);
+ struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)sem_info->signal_sems[i];
+
+ if (sem->context)
+ return -EINVAL;
+
+ *sem = ctx->last_submission[ip_type][ring].fence;
}
return 0;
}
@@ -1106,7 +1089,87 @@ static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
struct amdgpu_cs_request *request,
struct radv_amdgpu_sem_info *sem_info)
{
- return amdgpu_cs_submit(ctx->ctx, 0, request, 1);
+ int r;
+ int num_chunks;
+ int size;
+ bool user_fence;
+ struct drm_amdgpu_cs_chunk *chunks;
+ struct drm_amdgpu_cs_chunk_data *chunk_data;
+ struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
+ int i;
+ struct amdgpu_cs_fence *sem;
+ user_fence = (request->fence_info.handle != NULL);
+ size = request->number_of_ibs + (user_fence ? 2 : 1) + 1;
+
+ chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);
+
+ size = request->number_of_ibs + (user_fence ? 1 : 0);
+
+ chunk_data = alloca(sizeof(struct drm_amdgpu_cs_chunk_data) * size);
+
+ num_chunks = request->number_of_ibs;
+ for (i = 0; i < request->number_of_ibs; i++) {
+ struct amdgpu_cs_ib_info *ib;
+ chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
+ chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
+ chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
+
+ ib = &request->ibs[i];
+
+ chunk_data[i].ib_data._pad = 0;
+ chunk_data[i].ib_data.va_start = ib->ib_mc_address;
+ chunk_data[i].ib_data.ib_bytes = ib->size * 4;
+ chunk_data[i].ib_data.ip_type = request->ip_type;
+ chunk_data[i].ib_data.ip_instance = request->ip_instance;
+ chunk_data[i].ib_data.ring = request->ring;
+ chunk_data[i].ib_data.flags = ib->flags;
+ }
+
+ if (user_fence) {
+ i = num_chunks++;
+
+ chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
+ chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
+ chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
+
+ amdgpu_cs_chunk_fence_info_to_data(&request->fence_info,
+ &chunk_data[i]);
+ }
+
+ if (sem_info->wait_sem_count) {
+ sem_dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_dep) * sem_info->wait_sem_count);
+ if (!sem_dependencies) {
+ r = -ENOMEM;
+ goto error_out;
+ }
+ int sem_count = 0;
+ for (unsigned j = 0; j < sem_info->wait_sem_count; j++) {
+ sem = (struct amdgpu_cs_fence *)sem_info->wait_sems[j];
+ if (!sem->context)
+ continue;
+ struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];
+
+ amdgpu_cs_chunk_fence_to_dep(sem, dep);
+ }
+ i = num_chunks++;
+
+ /* dependencies chunk */
+ chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
+ chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
+ chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;
+
+ sem_info->wait_sem_count = 0;
+ }
+
+ r = amdgpu_cs_submit_raw(ctx->ws->dev,
+ ctx->ctx,
+ request->resources,
+ num_chunks,
+ chunks,
+ &request->seq_no);
+error_out:
+ free(sem_dependencies);
+ return r;
}
void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)