author    Timothy Arceri <[email protected]>    2017-07-31 13:00:35 +1000
committer Timothy Arceri <[email protected]>    2017-08-25 13:20:29 +1000
commit    b86ecea3446e3eb461f6fbdf245a640a8a5d4739 (patch)
tree      a625359a02eb32eccb0b3f93ddfb66c3f1e79735 /src
parent    ea2515d780a40599036ff50b572d309e69635d20 (diff)
util/disk_cache: write cache item metadata to disk
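The metadata itself is supplied by the caller through the struct
cache_item_metadata argument that create_put_job() copies below. A minimal
caller-side sketch, assuming the struct cache_item_metadata and
CACHE_ITEM_TYPE_GLSL definitions from src/util/disk_cache.h and a
disk_cache_put() that forwards the metadata to create_put_job(); the helper
and parameter names here are illustrative, not part of this patch:

#include "util/disk_cache.h"

/* Illustration only: store a linked GLSL program's binary together with the
 * sha1 keys of the shaders it was built from.
 */
static void
example_put_with_metadata(struct disk_cache *cache,
                          const cache_key program_key,
                          const void *blob, size_t blob_size,
                          cache_key *stage_keys, uint32_t num_stages)
{
   struct cache_item_metadata md = {
      .type = CACHE_ITEM_TYPE_GLSL,
      .keys = stage_keys,      /* sha1 keys of the program's source shaders */
      .num_keys = num_stages,
   };

   /* create_put_job() deep-copies md.keys, so the caller keeps ownership of
    * stage_keys and may free it once disk_cache_put() returns.
    */
   disk_cache_put(cache, program_key, blob, blob_size, &md);
}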
Reviewed-by: Nicolai Hähnle <[email protected]>
Diffstat (limited to 'src')
-rw-r--r--  src/util/disk_cache.c | 88
1 file changed, 87 insertions(+), 1 deletion(-)
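On disk the new metadata sits between the existing driver keys blob and the
CRC header, which is why the disk_cache_get() hunk below subtracts
cache_item_md_size when computing cache_data_size. The following helper
mirrors that accounting as a sketch only; CACHE_KEY_SIZE == 20 and
CACHE_ITEM_TYPE_GLSL == 0x1 are assumed to match disk_cache.h:

#include <stddef.h>
#include <stdint.h>

#define CACHE_KEY_SIZE 20          /* assumed sha1-sized cache keys */
#define CACHE_ITEM_TYPE_GLSL 0x1   /* assumed value of the GLSL type tag */

/* Bytes written between the driver keys blob and the CRC header. */
static size_t
cache_item_metadata_disk_size(uint32_t type, uint32_t num_keys)
{
   size_t size = sizeof(uint32_t);                /* type tag, always written */

   if (type == CACHE_ITEM_TYPE_GLSL)
      size += sizeof(uint32_t) +                  /* num_keys */
              (size_t)num_keys * CACHE_KEY_SIZE;  /* one key per source shader */

   return size;
}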
diff --git a/src/util/disk_cache.c b/src/util/disk_cache.c
index 6e85a5e2bea..36c1e8e72c6 100644
--- a/src/util/disk_cache.c
+++ b/src/util/disk_cache.c
@@ -112,6 +112,8 @@ struct disk_cache_put_job {
/* Size of data to be compressed and written. */
size_t size;
+
+ struct cache_item_metadata cache_item_metadata;
};
/* Create a directory named 'path' if it does not already exist.
@@ -780,15 +782,45 @@ create_put_job(struct disk_cache *cache, const cache_key key,
dc_job->data = dc_job + 1;
memcpy(dc_job->data, data, size);
dc_job->size = size;
+
+ /* Copy the cache item metadata */
+ if (cache_item_metadata) {
+ dc_job->cache_item_metadata.type = cache_item_metadata->type;
+ if (cache_item_metadata->type == CACHE_ITEM_TYPE_GLSL) {
+ dc_job->cache_item_metadata.num_keys =
+ cache_item_metadata->num_keys;
+ dc_job->cache_item_metadata.keys = (cache_key *)
+ malloc(cache_item_metadata->num_keys * sizeof(cache_key));
+
+ if (!dc_job->cache_item_metadata.keys)
+ goto fail;
+
+ memcpy(dc_job->cache_item_metadata.keys,
+ cache_item_metadata->keys,
+ sizeof(cache_key) * cache_item_metadata->num_keys);
+ }
+ } else {
+ dc_job->cache_item_metadata.type = CACHE_ITEM_TYPE_UNKNOWN;
+ dc_job->cache_item_metadata.keys = NULL;
+ }
}
return dc_job;
+
+fail:
+ free(dc_job->cache_item_metadata.keys);
+ free(dc_job);
+
+ return NULL;
}
static void
destroy_put_job(void *job, int thread_index)
{
if (job) {
+ struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *) job;
+ free(dc_job->cache_item_metadata.keys);
+
free(job);
}
}
@@ -877,6 +909,34 @@ cache_put(void *job, int thread_index)
goto done;
}
+ /* Write the cache item metadata. This data can be used to deal with
+ * hash collisions, as well as providing useful information to 3rd party
+ * tools reading the cache files.
+ */
+ ret = write_all(fd, &dc_job->cache_item_metadata.type,
+ sizeof(uint32_t));
+ if (ret == -1) {
+ unlink(filename_tmp);
+ goto done;
+ }
+
+ if (dc_job->cache_item_metadata.type == CACHE_ITEM_TYPE_GLSL) {
+ ret = write_all(fd, &dc_job->cache_item_metadata.num_keys,
+ sizeof(uint32_t));
+ if (ret == -1) {
+ unlink(filename_tmp);
+ goto done;
+ }
+
+ ret = write_all(fd, dc_job->cache_item_metadata.keys[0],
+ dc_job->cache_item_metadata.num_keys *
+ sizeof(cache_key));
+ if (ret == -1) {
+ unlink(filename_tmp);
+ goto done;
+ }
+ }
+
/* Create CRC of the data. We will read this when restoring the cache and
* use it to check for corruption.
*/
@@ -1028,6 +1088,31 @@ disk_cache_get(struct disk_cache *cache, const cache_key key, size_t *size)
if (memcmp(cache->driver_keys_blob, file_header, ck_size) != 0)
goto fail;
+ size_t cache_item_md_size = sizeof(uint32_t);
+ uint32_t md_type;
+ ret = read_all(fd, &md_type, cache_item_md_size);
+ if (ret == -1)
+ goto fail;
+
+ if (md_type == CACHE_ITEM_TYPE_GLSL) {
+ uint32_t num_keys;
+ cache_item_md_size += sizeof(uint32_t);
+ ret = read_all(fd, &num_keys, sizeof(uint32_t));
+ if (ret == -1)
+ goto fail;
+
+ /* The cache item metadata is currently only used for distributing
+ * precompiled shaders; it is not used by Mesa itself, so just skip
+ * over it for now.
+ * TODO: pass the metadata back to the caller and do some basic
+ * validation.
+ */
+ cache_item_md_size += num_keys * sizeof(cache_key);
+ ret = lseek(fd, num_keys * sizeof(cache_key), SEEK_CUR);
+ if (ret == -1)
+ goto fail;
+ }
+
/* Load the CRC that was created when the file was written. */
struct cache_entry_file_data cf_data;
size_t cf_data_size = sizeof(cf_data);
@@ -1036,7 +1121,8 @@ disk_cache_get(struct disk_cache *cache, const cache_key key, size_t *size)
goto fail;
/* Load the actual cache data. */
- size_t cache_data_size = sb.st_size - cf_data_size - ck_size;
+ size_t cache_data_size =
+ sb.st_size - cf_data_size - ck_size - cache_item_md_size;
ret = read_all(fd, data, cache_data_size);
if (ret == -1)
goto fail;
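For the third-party tools mentioned in the cache_put() comment above, a rough
reader sketch; it assumes the file offset is already positioned just past the
driver keys blob, and read_full() plus the two constants are assumptions made
for the example rather than Mesa API (Mesa's own reader is the
disk_cache_get() hunk above):

#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

#define CACHE_KEY_SIZE 20          /* assumed sha1-sized cache keys */
#define CACHE_ITEM_TYPE_GLSL 0x1   /* assumed value of the GLSL type tag */

/* Read exactly 'count' bytes, retrying on short reads; -1 on error or EOF. */
static int
read_full(int fd, void *buf, size_t count)
{
   size_t done = 0;
   while (done < count) {
      ssize_t n = read(fd, (char *)buf + done, count - done);
      if (n <= 0)
         return -1;
      done += n;
   }
   return 0;
}

/* Read the metadata header and, for GLSL items, the list of source-shader
 * keys. On success *keys is a malloc'ed array of *num_keys keys (NULL for
 * non-GLSL items); returns -1 on error. A real tool should also sanity-check
 * num_keys against the file size before allocating.
 */
static int
read_cache_item_metadata(int fd, uint8_t **keys, uint32_t *num_keys)
{
   uint32_t type;

   *keys = NULL;
   *num_keys = 0;

   if (read_full(fd, &type, sizeof(type)) == -1)
      return -1;

   if (type != CACHE_ITEM_TYPE_GLSL)
      return 0;

   if (read_full(fd, num_keys, sizeof(*num_keys)) == -1)
      return -1;

   *keys = malloc((size_t)*num_keys * CACHE_KEY_SIZE);
   if (!*keys)
      return -1;

   if (read_full(fd, *keys, (size_t)*num_keys * CACHE_KEY_SIZE) == -1) {
      free(*keys);
      *keys = NULL;
      return -1;
   }

   return 0;
}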