author     Marek Olšák <[email protected]>    2017-05-31 16:44:12 +0200
committer  Marek Olšák <[email protected]>    2017-06-07 18:43:42 +0200
commit     33e507ec23db3778294b75a1485021d2a35b0a22 (patch)
tree       83894a8f32a0c39ff5631801a8ab8b1635636434 /src/util/u_queue.c
parent     812fd1aaa8451938dc411b4cdb58acf2d358372c (diff)
util/u_queue: add a way to remove a job when we just want to destroy it
Reviewed-by: Nicolai Hähnle <[email protected]>
Diffstat (limited to 'src/util/u_queue.c')
-rw-r--r--   src/util/u_queue.c   53
1 file changed, 47 insertions(+), 6 deletions(-)
diff --git a/src/util/u_queue.c b/src/util/u_queue.c
index 8db09b027c2..01c3a96d5f3 100644
--- a/src/util/u_queue.c
+++ b/src/util/u_queue.c
@@ -180,13 +180,15 @@ util_queue_thread_func(void *input)
 
    /* signal remaining jobs before terminating */
    mtx_lock(&queue->lock);
-   while (queue->jobs[queue->read_idx].job) {
-      util_queue_fence_signal(queue->jobs[queue->read_idx].fence);
-
-      queue->jobs[queue->read_idx].job = NULL;
-      queue->read_idx = (queue->read_idx + 1) % queue->max_jobs;
+   for (unsigned i = queue->read_idx; i != queue->write_idx;
+        i = (i + 1) % queue->max_jobs) {
+      if (queue->jobs[i].job) {
+         util_queue_fence_signal(queue->jobs[i].fence);
+         queue->jobs[i].job = NULL;
+      }
    }
-   queue->num_queued = 0; /* reset this when exiting the thread */
+   queue->read_idx = queue->write_idx;
+   queue->num_queued = 0;
    mtx_unlock(&queue->lock);
    return 0;
 }
@@ -329,6 +331,45 @@ util_queue_add_job(struct util_queue *queue,
    mtx_unlock(&queue->lock);
 }
 
+/**
+ * Remove a queued job. If the job hasn't started execution, it's removed from
+ * the queue. If the job has started execution, the function waits for it to
+ * complete.
+ *
+ * In all cases, the fence is signalled when the function returns.
+ *
+ * The function can be used when destroying an object associated with the job
+ * when you don't care about the job completion state.
+ */
+void
+util_queue_drop_job(struct util_queue *queue, struct util_queue_fence *fence)
+{
+   bool removed = false;
+
+   if (util_queue_fence_is_signalled(fence))
+      return;
+
+   mtx_lock(&queue->lock);
+   for (unsigned i = queue->read_idx; i != queue->write_idx;
+        i = (i + 1) % queue->max_jobs) {
+      if (queue->jobs[i].fence == fence) {
+         if (queue->jobs[i].cleanup)
+            queue->jobs[i].cleanup(queue->jobs[i].job, -1);
+
+         /* Just clear it. The threads will treat it as a no-op job. */
+         memset(&queue->jobs[i], 0, sizeof(queue->jobs[i]));
+         removed = true;
+         break;
+      }
+   }
+   mtx_unlock(&queue->lock);
+
+   if (removed)
+      util_queue_fence_signal(fence);
+   else
+      util_queue_fence_wait(fence);
+}
+
 int64_t
 util_queue_get_thread_time_nano(struct util_queue *queue, unsigned thread_index)
 {
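
Below is a minimal, hypothetical usage sketch of the new util_queue_drop_job() in an object's destroy path; it is not part of this commit. The object type my_object, its fields, the my_object_destroy() helper, and the include path are assumptions made only for illustration; util_queue_drop_job(), struct util_queue_fence, and the fence/cleanup semantics come from the patch above.

#include <stdlib.h>
#include "u_queue.h"   /* assumed include path for the util_queue API */

/* Hypothetical object that owns a queued job tracked by a fence.
 * The fence is assumed to have been initialized with util_queue_fence_init()
 * and passed to util_queue_add_job() when the job was queued.
 */
struct my_object {
   struct util_queue_fence ready;   /* signalled when the job completes or is dropped */
   void *job_data;                  /* payload passed to the queued job */
};

/* Destroy the object without caring whether its job ever ran.
 * util_queue_drop_job() either removes the still-queued job (invoking its
 * cleanup callback with thread_index = -1) or waits for an already-started
 * job to finish; in both cases the fence is signalled before it returns,
 * so it is safe to free the job's data afterwards.
 */
static void
my_object_destroy(struct util_queue *queue, struct my_object *obj)
{
   util_queue_drop_job(queue, &obj->ready);
   free(obj->job_data);
   free(obj);
}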