Diffstat (limited to 'src/util')
-rw-r--r--   src/util/sparse_array.c   85
-rw-r--r--   src/util/sparse_array.h   52
2 files changed, 137 insertions, 0 deletions
diff --git a/src/util/sparse_array.c b/src/util/sparse_array.c
index d73b129ea26..06816401309 100644
--- a/src/util/sparse_array.c
+++ b/src/util/sparse_array.c
@@ -192,3 +192,88 @@ util_sparse_array_validate(struct util_sparse_array *arr)
 {
    validate_node_level(arr, arr->root, arr->root->level);
 }
+
+void
+util_sparse_array_free_list_init(struct util_sparse_array_free_list *fl,
+                                 struct util_sparse_array *arr,
+                                 uint32_t sentinel,
+                                 uint32_t next_offset)
+{
+   fl->head = sentinel;
+   fl->arr = arr;
+   fl->sentinel = sentinel;
+   fl->next_offset = next_offset;
+}
+
+static uint64_t
+free_list_head(uint64_t old, uint32_t next)
+{
+   return ((old & 0xffffffff00000000ull) + 0x100000000ull) | next;
+}
+
+void
+util_sparse_array_free_list_push(struct util_sparse_array_free_list *fl,
+                                 uint32_t *items, unsigned num_items)
+{
+   assert(num_items > 0);
+   assert(items[0] != fl->sentinel);
+   void *last_elem = util_sparse_array_get(fl->arr, items[0]);
+   uint32_t *last_next = (uint32_t *)((char *)last_elem + fl->next_offset);
+   for (unsigned i = 1; i < num_items; i++) {
+      *last_next = items[i];
+      assert(items[i] != fl->sentinel);
+      last_elem = util_sparse_array_get(fl->arr, items[i]);
+      last_next = (uint32_t *)((char *)last_elem + fl->next_offset);
+   }
+
+   uint64_t current_head, old_head;
+   old_head = p_atomic_read(&fl->head);
+   do {
+      current_head = old_head;
+      *last_next = current_head; /* Index is the bottom 32 bits */
+      uint64_t new_head = free_list_head(current_head, items[0]);
+      old_head = p_atomic_cmpxchg(&fl->head, current_head, new_head);
+   } while (old_head != current_head);
+}
+
+uint32_t
+util_sparse_array_free_list_pop_idx(struct util_sparse_array_free_list *fl)
+{
+   uint64_t current_head;
+
+   current_head = p_atomic_read(&fl->head);
+   while (1) {
+      if ((uint32_t)current_head == fl->sentinel)
+         return fl->sentinel;
+
+      uint32_t head_idx = current_head; /* Index is the bottom 32 bits */
+      void *head_elem = util_sparse_array_get(fl->arr, head_idx);
+      uint32_t *head_next = (uint32_t *)((char *)head_elem + fl->next_offset);
+      uint64_t new_head = free_list_head(current_head, *head_next);
+      uint64_t old_head = p_atomic_cmpxchg(&fl->head, current_head, new_head);
+      if (old_head == current_head)
+         return head_idx;
+      current_head = old_head;
+   }
+}
+
+void *
+util_sparse_array_free_list_pop_elem(struct util_sparse_array_free_list *fl)
+{
+   uint64_t current_head;
+
+   current_head = p_atomic_read(&fl->head);
+   while (1) {
+      if ((uint32_t)current_head == fl->sentinel)
+         return NULL;
+
+      uint32_t head_idx = current_head; /* Index is the bottom 32 bits */
+      void *head_elem = util_sparse_array_get(fl->arr, head_idx);
+      uint32_t *head_next = (uint32_t *)((char *)head_elem + fl->next_offset);
+      uint64_t new_head = free_list_head(current_head, *head_next);
+      uint64_t old_head = p_atomic_cmpxchg(&fl->head, current_head, new_head);
+      if (old_head == current_head)
+         return head_elem;
+      current_head = old_head;
+   }
+}
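
The free_list_head() helper above is what keeps the cmpxchg loops safe: the low 32 bits of the head hold the index of the first free element, while the high 32 bits hold a counter that is bumped on every update, so a popped-and-repushed index never compares equal to a stale head value (the classic ABA hazard for lock-free stacks). A minimal standalone sketch of that encoding, with pack_head() as a hypothetical stand-in for the helper:

#include <assert.h>
#include <stdint.h>

/* Mirrors free_list_head(): low 32 bits = element index, high 32 bits = an
 * update counter, so a recycled index never matches a stale head value in
 * the cmpxchg loops. */
static uint64_t
pack_head(uint64_t old_head, uint32_t new_index)
{
   return ((old_head & 0xffffffff00000000ull) + 0x100000000ull) | new_index;
}

int
main(void)
{
   uint64_t head = 7;            /* counter 0, index 7 */
   head = pack_head(head, 3);    /* counter 1, index 3 */
   assert((uint32_t)head == 3 && (head >> 32) == 1);
   head = pack_head(head, 7);    /* index 7 comes back again... */
   assert((uint32_t)head == 7 && (head >> 32) == 2);  /* ...but the 64-bit head differs */
   return 0;
}
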
diff --git a/src/util/sparse_array.h b/src/util/sparse_array.h
index 5736613e33b..3947a2fa81b 100644
--- a/src/util/sparse_array.h
+++ b/src/util/sparse_array.h
@@ -81,6 +81,58 @@ void *util_sparse_array_get(struct util_sparse_array *arr, uint64_t idx);
void util_sparse_array_validate(struct util_sparse_array *arr);
+/** A thread-safe free list for use with struct util_sparse_array
+ *
+ * This data structure provides an easy way to manage a singly linked list of
+ * "free" elements backed by a util_sparse_array.  The list supports only two
+ * operations: push and pop, both of which are thread-safe and lock-free.
+ */
+struct
+#ifdef _MSC_VER
+   __declspec(align(8))
+#else
+   __attribute__((aligned(8)))
+#endif
+util_sparse_array_free_list
+{
+   /** Head of the list
+    *
+    * The bottom 32 bits of this value are the index of the next free element
+    * or the sentinel value if the list is empty.
+    *
+    * We want this element to be 8-byte aligned.  Otherwise, the performance
+    * of atomic operations on it will be awful on 32-bit platforms.
+    */
+   uint64_t head;
+
+   /** The array backing this free list */
+   struct util_sparse_array *arr;
+
+   /** Sentinel value to indicate the end of the list
+    *
+    * This value must never be passed into util_sparse_array_free_list_push.
+    */
+   uint32_t sentinel;
+
+   /** Offset into the array element at which to find the "next" value
+    *
+    * The assumption is that there is some uint32_t "next" value embedded in
+    * the array element for use in the free list.  This is its offset.
+    */
+   uint32_t next_offset;
+};
+
+void util_sparse_array_free_list_init(struct util_sparse_array_free_list *fl,
+                                      struct util_sparse_array *arr,
+                                      uint32_t sentinel,
+                                      uint32_t next_offset);
+
+void util_sparse_array_free_list_push(struct util_sparse_array_free_list *fl,
+                                      uint32_t *items, unsigned num_items);
+
+uint32_t util_sparse_array_free_list_pop_idx(struct util_sparse_array_free_list *fl);
+void *util_sparse_array_free_list_pop_elem(struct util_sparse_array_free_list *fl);
+
#ifdef __cplusplus
} /* extern C */
#endif
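
For context, a minimal usage sketch of the new API. The element type struct foo, its next_free field, and the pool helpers below are illustrative names only, not part of this patch; the sketch also assumes the existing util_sparse_array_init(arr, elem_size, node_size) entry point from sparse_array.h:

#include <stddef.h>
#include <stdint.h>

#include "util/sparse_array.h"

/* Hypothetical element type: the free list only needs a uint32_t field it
 * can use as the "next" link, located via offsetof(). */
struct foo {
   uint32_t next_free;
   uint32_t payload;
};

#define FOO_FREE_LIST_SENTINEL UINT32_MAX

static struct util_sparse_array foo_arr;
static struct util_sparse_array_free_list foo_free_list;

static void
foo_pool_init(void)
{
   /* Assumed signature: util_sparse_array_init(arr, elem_size, node_size). */
   util_sparse_array_init(&foo_arr, sizeof(struct foo), 256);
   util_sparse_array_free_list_init(&foo_free_list, &foo_arr,
                                    FOO_FREE_LIST_SENTINEL,
                                    offsetof(struct foo, next_free));
}

static void
foo_pool_recycle(uint32_t idx)
{
   /* Push takes an array of indices; a single index is the common case. */
   util_sparse_array_free_list_push(&foo_free_list, &idx, 1);
}

static struct foo *
foo_pool_reuse(void)
{
   /* Returns NULL when the free list is empty. */
   return util_sparse_array_free_list_pop_elem(&foo_free_list);
}

When the pop returns NULL, a caller would typically fall back to allocating a fresh index and fetching its storage with util_sparse_array_get().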