author     Jason Ekstrand <[email protected]>    2016-03-03 08:17:36 -0800
committer  Jason Ekstrand <[email protected]>    2016-03-03 08:17:36 -0800
commit     206414f92edb4a2149b504f9c296f687a9572ffe (patch)
tree       a39ef5086b70bc0b45941047d128578d38e0f2c1    /src/intel/vulkan/anv_util.c
parent     98cdce1ce4737cf09c5d9613a85bb118f0f1757b (diff)
anv/util: Fix vector resizing
It wasn't properly handling the fact that wrap-around in the source may not translate to wrap-around in the destination. This really needs unit tests.
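As a rough, standalone illustration (not part of the commit): head and tail are monotonically increasing byte offsets into a power-of-two ring, and masking by (size - 1) gives the physical position, so once the buffer size changes the same logical offsets can land in different places. The sizes and offsets below are made-up example values chosen to show the case the message describes, where the source wraps around but the destination does not.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
   /* Made-up numbers: a full 16-byte ring about to be copied into a
    * 32-byte one (the resize itself happens outside the hunks shown
    * below).  head and tail grow monotonically; masking by (size - 1)
    * gives the physical position in a power-of-two-sized ring.
    */
   const uint32_t old_size = 16, new_size = 32;
   const uint32_t tail = 8, head = 24;   /* full: head - tail == old_size */

   printf("src_tail = %u, dst_tail = %u\n",
          (unsigned)(tail & (old_size - 1)),
          (unsigned)(tail & (new_size - 1)));

   /* The source buffer wraps back to physical offset 0 partway through,
    * while the destination is big enough to hold the same bytes
    * contiguously: wrap-around in the source does not imply wrap-around
    * in the destination.
    */
   for (uint32_t off = tail; off < head; off++)
      printf("logical %2u: old buffer byte %2u -> new buffer byte %2u\n",
             (unsigned)off, (unsigned)(off & (old_size - 1)),
             (unsigned)(off & (new_size - 1)));

   return 0;
}

That mismatch is why the fix below computes src_tail and dst_tail separately and re-derives the second copy's destination from the new size instead of reusing the source offsets.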
Diffstat (limited to 'src/intel/vulkan/anv_util.c')
-rw-r--r--  src/intel/vulkan/anv_util.c  | 31
1 file changed, 19 insertions(+), 12 deletions(-)
diff --git a/src/intel/vulkan/anv_util.c b/src/intel/vulkan/anv_util.c
index 22fd01c9495..62f47051ec7 100644
--- a/src/intel/vulkan/anv_util.c
+++ b/src/intel/vulkan/anv_util.c
@@ -144,7 +144,7 @@ anv_vector_init(struct anv_vector *vector, uint32_t element_size, uint32_t size)
 void *
 anv_vector_add(struct anv_vector *vector)
 {
-   uint32_t offset, size, split, tail;
+   uint32_t offset, size, split, src_tail, dst_tail;
    void *data;
 
    if (vector->head - vector->tail == vector->size) {
@@ -152,18 +152,25 @@ anv_vector_add(struct anv_vector *vector)
       data = malloc(size);
       if (data == NULL)
          return NULL;
-      split = align_u32(vector->tail, vector->size);
-      tail = vector->tail & (vector->size - 1);
-      if (vector->head - split < vector->size) {
-         memcpy(data + tail,
-                vector->data + tail,
-                split - vector->tail);
-         memcpy(data + vector->size,
-                vector->data, vector->head - split);
+      src_tail = vector->tail & (vector->size - 1);
+      dst_tail = vector->tail & (size - 1);
+      if (src_tail == 0) {
+         /* Since we know that the vector is full, this means that it's
+          * linear from start to end so we can do one copy.
+          */
+         memcpy(data + dst_tail, vector->data, vector->size);
       } else {
-         memcpy(data + tail,
-                vector->data + tail,
-                vector->head - vector->tail);
+         /* In this case, the vector is split into two pieces and we have
+          * to do two copies.  We have to be careful to make sure each
+          * piece goes to the right locations.  Thanks to the change in
+          * size, it may or may not still wrap around.
+          */
+         split = align_u32(vector->tail, vector->size);
+         assert(vector->tail <= split && split < vector->head);
+         memcpy(data + dst_tail, vector->data + src_tail,
+                split - vector->tail);
+         memcpy(data + (split & (size - 1)), vector->data,
+                vector->head - split);
       }
       free(vector->data);
       vector->data = data;
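
The commit message points out that this really needs unit tests. Below is a minimal, self-contained sketch of what such a test could look like; it re-implements the grow-and-copy step on a toy byte ring instead of calling the real anv_vector API, so the struct ring type, ring_grow(), check_resize(), and the concrete sizes are illustrative assumptions rather than driver code. It also assumes the resize doubles the buffer, which the visible hunks don't show.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy ring mirroring the fields anv_vector_add() works with: head and tail
 * are monotonically increasing byte offsets and size is a power of two, so
 * "offset & (size - 1)" is the physical position of a logical offset.
 */
struct ring {
   uint32_t head;
   uint32_t tail;
   uint32_t size;
   void *data;
};

static uint32_t
align_u32(uint32_t v, uint32_t a)
{
   return (v + a - 1) & ~(a - 1);
}

/* Grow a full ring, re-basing the live bytes the same way the fixed copy in
 * the diff does (assuming the resize doubles the buffer). */
static void
ring_grow(struct ring *r)
{
   uint32_t size = r->size * 2;
   char *data = malloc(size);
   assert(data != NULL);

   uint32_t src_tail = r->tail & (r->size - 1);
   uint32_t dst_tail = r->tail & (size - 1);

   if (src_tail == 0) {
      /* Full and linear from start to end: one copy. */
      memcpy(data + dst_tail, r->data, r->size);
   } else {
      /* Two pieces; each one gets its own destination offset because the
       * new buffer may wrap at a different point (or not at all). */
      uint32_t split = align_u32(r->tail, r->size);
      assert(r->tail <= split && split < r->head);
      memcpy(data + dst_tail, (char *)r->data + src_tail, split - r->tail);
      memcpy(data + (split & (size - 1)), r->data, r->head - split);
   }

   free(r->data);
   r->data = data;
   r->size = size;
}

/* Fill a full ring with a recognizable pattern, grow it, and check that
 * every live byte is still reachable at its logical offset. */
static void
check_resize(uint32_t tail, uint32_t head, uint32_t size)
{
   struct ring r = { .head = head, .tail = tail, .size = size };
   r.data = malloc(size);
   assert(r.data != NULL);

   for (uint32_t off = tail; off < head; off++)
      ((uint8_t *)r.data)[off & (r.size - 1)] = (uint8_t)off;

   ring_grow(&r);

   for (uint32_t off = tail; off < head; off++)
      assert(((uint8_t *)r.data)[off & (r.size - 1)] == (uint8_t)off);

   free(r.data);
}

int
main(void)
{
   check_resize(32, 48, 16);   /* src_tail == 0: single-copy path */
   check_resize(24, 40, 16);   /* source wraps: two-copy path     */
   printf("resize preserved all live bytes\n");
   return 0;
}

Running both cases exercises the two branches the diff adds: the linear copy when src_tail is zero and the split copy when the live data wraps around.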