diff options
author | Bas Nieuwenhuizen <[email protected]> | 2018-08-14 00:07:57 +0200 |
---|---|---|
committer | Bas Nieuwenhuizen <[email protected]> | 2018-08-14 10:26:24 +0200 |
commit | fbcd1673144facf0f4037330ba3d6b176dad955b (patch) | |
tree | a7ad865c25efd51f3b261d096ec27103b0d6d461 /src/amd/vulkan/radv_meta_decompress.c | |
parent | 24a9033d6f7eb88a760d382ace64bffa65d14cdc (diff) |
radv: Add on-demand compilation of built-in shaders.
In environments where we cannot cache, e.g. Android (no homedir),
ChromeOS (readonly rootfs) or sandboxes (cannot open cache), the
startup cost of creating a device in radv is rather high, due
to compiling all possible built-in pipelines up front. This meant,
depending on the CPU, a 1-4 second cost of creating a Device.
For CTS this cost is unacceptable, and likely for starting random
apps too.
So if there is no cache, with this patch radv will compile shaders
on demand. Once there is a cache from the first run, even if
incomplete, the driver knows that it can likely write the cache
and precompiles everything.
Note that I did not switch the buffer and itob/btoi compute pipelines
to on-demand, since you cannot really do anything in Vulkan without
them and there are only a few.
This reduces the CTS runtime for the no caches scenario on my
threadripper from 32 minutes to 8 minutes.
Reviewed-by: Dave Airlie <[email protected]>
Diffstat (limited to 'src/amd/vulkan/radv_meta_decompress.c')
-rw-r--r-- | src/amd/vulkan/radv_meta_decompress.c | 32 |
1 file changed, 31 insertions(+), 1 deletion(-)
diff --git a/src/amd/vulkan/radv_meta_decompress.c b/src/amd/vulkan/radv_meta_decompress.c
index 1a8058c7cc5..41ed7b6d043 100644
--- a/src/amd/vulkan/radv_meta_decompress.c
+++ b/src/amd/vulkan/radv_meta_decompress.c
@@ -103,6 +103,18 @@ create_pipeline(struct radv_device *device,
 {
 	VkResult result;
 	VkDevice device_h = radv_device_to_handle(device);
+	struct radv_shader_module vs_module = {0};
+
+	mtx_lock(&device->meta_state.mtx);
+	if (*decompress_pipeline) {
+		mtx_unlock(&device->meta_state.mtx);
+		return VK_SUCCESS;
+	}
+
+	if (!vs_module_h) {
+		vs_module.nir = radv_meta_build_nir_vs_generate_vertices();
+		vs_module_h = radv_shader_module_to_handle(&vs_module);
+	}
 
 	struct radv_shader_module fs_module = {
 		.nir = radv_meta_build_nir_fs_noop(),
@@ -219,6 +231,9 @@ create_pipeline(struct radv_device *device,
 
 cleanup:
 	ralloc_free(fs_module.nir);
+	if (vs_module.nir)
+		ralloc_free(vs_module.nir);
+	mtx_unlock(&device->meta_state.mtx);
 
 	return result;
 }
@@ -244,7 +259,7 @@ radv_device_finish_meta_depth_decomp_state(struct radv_device *device)
 }
 
 VkResult
-radv_device_init_meta_depth_decomp_state(struct radv_device *device)
+radv_device_init_meta_depth_decomp_state(struct radv_device *device, bool on_demand)
 {
 	struct radv_meta_state *state = &device->meta_state;
 	VkResult res = VK_SUCCESS;
@@ -270,6 +285,9 @@ radv_device_init_meta_depth_decomp_state(struct radv_device *device)
 		if (res != VK_SUCCESS)
 			goto fail;
 
+		if (on_demand)
+			continue;
+
 		res = create_pipeline(device, vs_module_h, samples,
 				      state->depth_decomp[i].pass,
 				      state->depth_decomp[i].p_layout,
@@ -343,6 +361,18 @@ static void radv_process_depth_image_inplace(struct radv_cmd_buffer *cmd_buffer,
 	if (!radv_image_has_htile(image))
 		return;
 
+	if (!meta_state->depth_decomp[samples_log2].decompress_pipeline) {
+		VkResult ret = create_pipeline(cmd_buffer->device, NULL, samples,
+					       meta_state->depth_decomp[samples_log2].pass,
+					       meta_state->depth_decomp[samples_log2].p_layout,
+					       &meta_state->depth_decomp[samples_log2].decompress_pipeline,
+					       &meta_state->depth_decomp[samples_log2].resummarize_pipeline);
+		if (ret != VK_SUCCESS) {
+			cmd_buffer->record_result = ret;
+			return;
+		}
+	}
+
 	radv_meta_save(&saved_state, cmd_buffer,
 		       RADV_META_SAVE_GRAPHICS_PIPELINE |
 		       RADV_META_SAVE_PASS);