author     Dave Airlie <[email protected]>    2017-07-19 03:56:04 +0100
committer  Dave Airlie <[email protected]>    2017-07-20 01:55:36 +0100
commit     aee382510edafe6dda4e1890b5f0c1458fc0f6ff (patch)
tree       a1a73c8854dadacccb02c3915c478eeb463dd1fa /src/amd
parent     1cb5a6061ce4b7f0cf5a9e5938bc1a11fe24b9ca (diff)
radv: introduce some wrappers in cs code to make porting off libdrm_amdgpu easier.

This just introduces a central semaphore info struct, passes it around, and adds some wrappers that will make porting off libdrm_amdgpu easier.

Reviewed-by: Bas Nieuwenhuizen <[email protected]>
Signed-off-by: Dave Airlie <[email protected]>
Diffstat (limited to 'src/amd')
-rw-r--r--   src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c   88
1 file changed, 70 insertions, 18 deletions
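Before the diff itself, here is a condensed sketch of the pattern this patch introduces: the semaphore handles for a submission are gathered into a single radv_amdgpu_sem_info struct, and the direct libdrm_amdgpu calls are funnelled through thin static wrappers, so a later port off libdrm_amdgpu only has to replace the wrapper bodies rather than touch every submit path. The names below are taken from the diff; the opaque type declarations are simplified stand-ins added here for illustration, not the real radv or libdrm definitions, and only the declarations are shown.

/*
 * Illustrative sketch only.  radeon_winsys_sem, radv_amdgpu_ctx and
 * amdgpu_cs_request are treated as opaque here; the real definitions
 * live in the radv winsys and libdrm_amdgpu headers.
 */
#include <stdint.h>

struct radeon_winsys_sem;   /* opaque semaphore handle          */
struct radv_amdgpu_ctx;     /* opaque winsys context            */
struct amdgpu_cs_request;   /* libdrm_amdgpu submission request */

/* Central carrier for the wait/signal semaphores of one submission. */
struct radv_amdgpu_sem_info {
	int wait_sem_count;
	struct radeon_winsys_sem **wait_sems;
	int signal_sem_count;
	struct radeon_winsys_sem **signal_sems;
};

/*
 * Thin wrappers: in this patch they still forward to
 * amdgpu_cs_wait_semaphore(), amdgpu_cs_signal_semaphore() and
 * amdgpu_cs_submit(); a non-libdrm backend would only need to supply
 * new bodies for these three functions.
 */
static void radv_amdgpu_wait_sems(struct radv_amdgpu_ctx *ctx,
                                  uint32_t ip_type, uint32_t ring,
                                  struct radv_amdgpu_sem_info *sem_info);
static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
                                   uint32_t ip_type, uint32_t ring,
                                   struct radv_amdgpu_sem_info *sem_info);
static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
                                 struct amdgpu_cs_request *request,
                                 struct radv_amdgpu_sem_info *sem_info);

Each submit path then fills in a radv_amdgpu_sem_info, calls radv_amdgpu_wait_sems() before submission, passes the struct down to radv_amdgpu_cs_submit(), and calls radv_amdgpu_signal_sems() afterwards, as can be seen in radv_amdgpu_winsys_cs_submit() in the diff below.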
diff --git a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
index ffc756617c0..91212d2c755 100644
--- a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
+++ b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
@@ -75,6 +75,13 @@ radv_amdgpu_cs(struct radeon_winsys_cs *base)
return (struct radv_amdgpu_cs*)base;
}
+struct radv_amdgpu_sem_info {
+	int wait_sem_count;
+	struct radeon_winsys_sem **wait_sems;
+	int signal_sem_count;
+	struct radeon_winsys_sem **signal_sems;
+};
+
static int ring_to_hw_ip(enum ring_type ring)
{
switch (ring) {
@@ -89,6 +96,18 @@ static int ring_to_hw_ip(enum ring_type ring)
}
}
+static void radv_amdgpu_wait_sems(struct radv_amdgpu_ctx *ctx,
+                                  uint32_t ip_type,
+                                  uint32_t ring,
+                                  struct radv_amdgpu_sem_info *sem_info);
+static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
+                                   uint32_t ip_type,
+                                   uint32_t ring,
+                                   struct radv_amdgpu_sem_info *sem_info);
+static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
+                                 struct amdgpu_cs_request *request,
+                                 struct radv_amdgpu_sem_info *sem_info);
+
static void radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx,
struct radv_amdgpu_fence *fence,
struct amdgpu_cs_request *req)
@@ -647,6 +666,7 @@ static void radv_assign_last_submit(struct radv_amdgpu_ctx *ctx,
static int radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
int queue_idx,
+ struct radv_amdgpu_sem_info *sem_info,
struct radeon_winsys_cs **cs_array,
unsigned cs_count,
struct radeon_winsys_cs *initial_preamble_cs,
@@ -703,7 +723,7 @@ static int radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
ibs[0] = ((struct radv_amdgpu_cs*)initial_preamble_cs)->ib;
}
- r = amdgpu_cs_submit(ctx->ctx, 0, &request, 1);
+ r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
if (r) {
if (r == -ENOMEM)
fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
@@ -724,6 +744,7 @@ static int radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
static int radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
int queue_idx,
+ struct radv_amdgpu_sem_info *sem_info,
struct radeon_winsys_cs **cs_array,
unsigned cs_count,
struct radeon_winsys_cs *initial_preamble_cs,
@@ -775,7 +796,7 @@ static int radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
}
}
- r = amdgpu_cs_submit(ctx->ctx, 0, &request, 1);
+ r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
if (r) {
if (r == -ENOMEM)
fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
@@ -801,6 +822,7 @@ static int radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
static int radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
int queue_idx,
+ struct radv_amdgpu_sem_info *sem_info,
struct radeon_winsys_cs **cs_array,
unsigned cs_count,
struct radeon_winsys_cs *initial_preamble_cs,
@@ -880,7 +902,7 @@ static int radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
request.ibs = &ib;
request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
- r = amdgpu_cs_submit(ctx->ctx, 0, &request, 1);
+ r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
if (r) {
if (r == -ENOMEM)
fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
@@ -921,29 +943,27 @@ static int radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[0]);
struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
int ret;
- int i;
-
- for (i = 0; i < wait_sem_count; i++) {
- amdgpu_semaphore_handle sem = (amdgpu_semaphore_handle)wait_sem[i];
- amdgpu_cs_wait_semaphore(ctx->ctx, cs->hw_ip, 0, queue_idx,
- sem);
- }
+ struct radv_amdgpu_sem_info sem_info = {0};
+
+ sem_info.wait_sems = wait_sem;
+ sem_info.wait_sem_count = wait_sem_count;
+ sem_info.signal_sems = signal_sem;
+ sem_info.signal_sem_count = signal_sem_count;
+
+ radv_amdgpu_wait_sems(ctx, cs->hw_ip, queue_idx, &sem_info);
+
if (!cs->ws->use_ib_bos) {
- ret = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, cs_array,
+ ret = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, &sem_info, cs_array,
cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
} else if (can_patch && cs_count > AMDGPU_CS_MAX_IBS_PER_SUBMIT && cs->ws->batchchain) {
- ret = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, cs_array,
+ ret = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, &sem_info, cs_array,
cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
} else {
- ret = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, cs_array,
+ ret = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, &sem_info, cs_array,
cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
}
- for (i = 0; i < signal_sem_count; i++) {
- amdgpu_semaphore_handle sem = (amdgpu_semaphore_handle)signal_sem[i];
- amdgpu_cs_signal_semaphore(ctx->ctx, cs->hw_ip, 0, queue_idx,
- sem);
- }
+ radv_amdgpu_signal_sems(ctx, cs->hw_ip, queue_idx, &sem_info);
return ret;
}
@@ -1057,6 +1077,38 @@ static void radv_amdgpu_destroy_sem(struct radeon_winsys_sem *_sem)
amdgpu_cs_destroy_semaphore(sem);
}
+static void radv_amdgpu_wait_sems(struct radv_amdgpu_ctx *ctx,
+                                  uint32_t ip_type,
+                                  uint32_t ring,
+                                  struct radv_amdgpu_sem_info *sem_info)
+{
+	for (unsigned i = 0; i < sem_info->wait_sem_count; i++) {
+		amdgpu_semaphore_handle sem = (amdgpu_semaphore_handle)sem_info->wait_sems[i];
+		amdgpu_cs_wait_semaphore(ctx->ctx, ip_type, 0, ring,
+					 sem);
+	}
+}
+
+static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
+                                   uint32_t ip_type,
+                                   uint32_t ring,
+                                   struct radv_amdgpu_sem_info *sem_info)
+{
+	for (unsigned i = 0; i < sem_info->signal_sem_count; i++) {
+		amdgpu_semaphore_handle sem = (amdgpu_semaphore_handle)sem_info->signal_sems[i];
+		amdgpu_cs_signal_semaphore(ctx->ctx, ip_type, 0, ring,
+					   sem);
+	}
+	return 0;
+}
+
+static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
+                                 struct amdgpu_cs_request *request,
+                                 struct radv_amdgpu_sem_info *sem_info)
+{
+	return amdgpu_cs_submit(ctx->ctx, 0, request, 1);
+}
+
void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
{
ws->base.ctx_create = radv_amdgpu_ctx_create;