Diffstat (limited to 'src')
-rw-r--r--  src/mesa/drivers/dri/i965/brw_sync.c | 162
1 file changed, 159 insertions(+), 3 deletions(-)
diff --git a/src/mesa/drivers/dri/i965/brw_sync.c b/src/mesa/drivers/dri/i965/brw_sync.c
index 77d382cab6f..1c5d5a50b6a 100644
--- a/src/mesa/drivers/dri/i965/brw_sync.c
+++ b/src/mesa/drivers/dri/i965/brw_sync.c
@@ -38,6 +38,8 @@
* performance bottleneck, though.
*/
+#include <libsync.h> /* Requires Android or libdrm-2.4.72 */
+
#include "main/imports.h"
#include "brw_context.h"
@@ -47,11 +49,19 @@ struct brw_fence {
struct brw_context *brw;
enum brw_fence_type {
+ /** The fence waits for completion of brw_fence::batch_bo. */
BRW_FENCE_TYPE_BO_WAIT,
+
+ /** The fence waits for brw_fence::sync_fd to signal. */
+ BRW_FENCE_TYPE_SYNC_FD,
} type;
- /** The fence waits for completion of this batch. */
- drm_intel_bo *batch_bo;
+ union {
+ drm_intel_bo *batch_bo;
+
+ /* This struct owns the fd. */
+ int sync_fd;
+ };
mtx_t mutex;
bool signalled;
@@ -74,6 +84,9 @@ brw_fence_init(struct brw_context *brw, struct brw_fence *fence,
case BRW_FENCE_TYPE_BO_WAIT:
fence->batch_bo = NULL;
break;
+ case BRW_FENCE_TYPE_SYNC_FD:
+ fence->sync_fd = -1;
+ break;
}
}
@@ -85,6 +98,10 @@ brw_fence_finish(struct brw_fence *fence)
if (fence->batch_bo)
drm_intel_bo_unreference(fence->batch_bo);
break;
+ case BRW_FENCE_TYPE_SYNC_FD:
+ if (fence->sync_fd != -1)
+ close(fence->sync_fd);
+ break;
}
mtx_destroy(&fence->mutex);
@@ -109,11 +126,46 @@ brw_fence_insert_locked(struct brw_context *brw, struct brw_fence *fence)
return false;
}
break;
+ case BRW_FENCE_TYPE_SYNC_FD:
+ assert(!fence->signalled);
+
+ if (fence->sync_fd == -1) {
+ /* Create an out-fence that signals after all pending commands
+ * complete.
+ */
+ if (intel_batchbuffer_flush_fence(brw, -1, &fence->sync_fd) < 0)
+ return false;
+ assert(fence->sync_fd != -1);
+ } else {
+ /* Wait on the in-fence before executing any subsequently submitted
+ * commands.
+ */
+ if (intel_batchbuffer_flush(brw) < 0)
+ return false;
+
+ /* Emit a dummy batch just for the fence. */
+ brw_emit_mi_flush(brw);
+ if (intel_batchbuffer_flush_fence(brw, fence->sync_fd, NULL) < 0)
+ return false;
+ }
+ break;
}
return true;
}
+static bool MUST_CHECK
+brw_fence_insert(struct brw_context *brw, struct brw_fence *fence)
+{
+ bool ret;
+
+ mtx_lock(&fence->mutex);
+ ret = brw_fence_insert_locked(brw, fence);
+ mtx_unlock(&fence->mutex);
+
+ return ret;
+}
+
static bool
brw_fence_has_completed_locked(struct brw_fence *fence)
{
@@ -135,6 +187,16 @@ brw_fence_has_completed_locked(struct brw_fence *fence)
fence->signalled = true;
return true;
+
+ case BRW_FENCE_TYPE_SYNC_FD:
+ assert(fence->sync_fd != -1);
+
+ if (sync_wait(fence->sync_fd, 0) == -1)
+ return false;
+
+ fence->signalled = true;
+
+ return true;
}
return false;
@@ -156,6 +218,8 @@ static bool
brw_fence_client_wait_locked(struct brw_context *brw, struct brw_fence *fence,
uint64_t timeout)
{
+ int32_t timeout_i32;
+
if (fence->signalled)
return true;
@@ -182,6 +246,20 @@ brw_fence_client_wait_locked(struct brw_context *brw, struct brw_fence *fence,
fence->batch_bo = NULL;
return true;
+ case BRW_FENCE_TYPE_SYNC_FD:
+ if (fence->sync_fd == -1)
+ return false;
+
+ if (timeout > INT32_MAX)
+ timeout_i32 = -1;
+ else
+ timeout_i32 = timeout;
+
+ if (sync_wait(fence->sync_fd, timeout_i32) == -1)
+ return false;
+
+ fence->signalled = true;
+ return true;
}
assert(!"bad enum brw_fence_type");
@@ -216,6 +294,16 @@ brw_fence_server_wait(struct brw_context *brw, struct brw_fence *fence)
* the previous one is done.
*/
break;
+ case BRW_FENCE_TYPE_SYNC_FD:
+ assert(fence->sync_fd != -1);
+
+ /* The user wants explicit synchronization, so give them what they want. */
+ if (!brw_fence_insert(brw, fence)) {
+ /* FIXME: There exists no way yet to report an error here. If an error
+ * occurs, continue silently and hope for the best.
+ */
+ }
+ break;
}
}
@@ -350,12 +438,80 @@ brw_dri_server_wait_sync(__DRIcontext *ctx, void *_fence, unsigned flags)
brw_fence_server_wait(fence->brw, fence);
}
+static unsigned
+brw_dri_get_capabilities(__DRIscreen *dri_screen)
+{
+ struct intel_screen *screen = dri_screen->driverPrivate;
+ unsigned caps = 0;
+
+ if (screen->has_exec_fence)
+ caps |= __DRI_FENCE_CAP_NATIVE_FD;
+
+ return caps;
+}
+
+static void *
+brw_dri_create_fence_fd(__DRIcontext *dri_ctx, int fd)
+{
+ struct brw_context *brw = dri_ctx->driverPrivate;
+ struct brw_fence *fence;
+
+ assert(brw->screen->has_exec_fence);
+
+ fence = calloc(1, sizeof(*fence));
+ if (!fence)
+ return NULL;
+
+ brw_fence_init(brw, fence, BRW_FENCE_TYPE_SYNC_FD);
+
+ if (fd == -1) {
+ /* Create an out-fence fd */
+ if (!brw_fence_insert_locked(brw, fence))
+ goto fail;
+ } else {
+ /* Import the sync fd as an in-fence. */
+ fence->sync_fd = fd;
+ }
+
+ assert(fence->sync_fd != -1);
+
+ return fence;
+
+fail:
+ brw_fence_finish(fence);
+ free(fence);
+ return NULL;
+}
+
+static int
+brw_dri_get_fence_fd_locked(struct brw_fence *fence)
+{
+ assert(fence->type == BRW_FENCE_TYPE_SYNC_FD);
+ return dup(fence->sync_fd);
+}
+
+static int
+brw_dri_get_fence_fd(__DRIscreen *dri_screen, void *_fence)
+{
+ struct brw_fence *fence = _fence;
+ int fd;
+
+ mtx_lock(&fence->mutex);
+ fd = brw_dri_get_fence_fd_locked(fence);
+ mtx_unlock(&fence->mutex);
+
+ return fd;
+}
+
const __DRI2fenceExtension intelFenceExtension = {
- .base = { __DRI2_FENCE, 1 },
+ .base = { __DRI2_FENCE, 2 },
.create_fence = brw_dri_create_fence,
.destroy_fence = brw_dri_destroy_fence,
.client_wait_sync = brw_dri_client_wait_sync,
.server_wait_sync = brw_dri_server_wait_sync,
.get_fence_from_cl_event = NULL,
+ .get_capabilities = brw_dri_get_capabilities,
+ .create_fence_fd = brw_dri_create_fence_fd,
+ .get_fence_fd = brw_dri_get_fence_fd,
};
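
For reference only, a minimal caller-side sketch (not part of the patch) of how a loader, for example the EGL_ANDROID_native_fence_sync path, might drive the version-2 entrypoints this diff adds. It assumes the __DRI2fenceExtension prototypes from GL/internal/dri_interface.h; export_native_fence() is a hypothetical helper name, not Mesa code.

/* Illustrative sketch, assuming the __DRI2fenceExtension layout in
 * GL/internal/dri_interface.h; export_native_fence() is a made-up helper.
 */
#include <GL/internal/dri_interface.h>

static int
export_native_fence(const __DRI2fenceExtension *ext,
                    __DRIscreen *screen, __DRIcontext *ctx)
{
   /* The driver only advertises native-fd fences when the kernel supports
    * execbuffer fences, so check the capability bit first.
    */
   if (!(ext->get_capabilities(screen) & __DRI_FENCE_CAP_NATIVE_FD))
      return -1;

   /* Passing fd == -1 asks the driver to flush and create an out-fence
    * (the first branch of brw_fence_insert_locked above).
    */
   void *fence = ext->create_fence_fd(ctx, -1);
   if (!fence)
      return -1;

   /* get_fence_fd returns a dup()ed fd, so it remains valid after the
    * fence object (which owns its own copy) is destroyed.
    */
   int fd = ext->get_fence_fd(screen, fence);

   ext->destroy_fence(screen, fence);
   return fd;
}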