about summary refs log tree commit diff stats
path: root/src/amd/vulkan/radv_meta_resolve_cs.c
diff options
context:
space:
mode:
authorBas Nieuwenhuizen <[email protected]>2018-08-14 00:07:57 +0200
committerBas Nieuwenhuizen <[email protected]>2018-08-14 10:26:24 +0200
commitfbcd1673144facf0f4037330ba3d6b176dad955b (patch)
treea7ad865c25efd51f3b261d096ec27103b0d6d461 /src/amd/vulkan/radv_meta_resolve_cs.c
parent24a9033d6f7eb88a760d382ace64bffa65d14cdc (diff)
radv: Add on-demand compilation of built-in shaders.
In environments where we cannot cache, e.g. Android (no homedir), ChromeOS (readonly rootfs) or sandboxes (cannot open cache), the startup cost of creating a device in radv is rather high, due to compiling all possible built-in pipelines up front. This meant depending on the CPU a 1-4 sec cost of creating a Device. For CTS this cost is unacceptable, and likely for starting random apps too. So if there is no cache, with this patch radv will compile shaders on demand. Once there is a cache from the first run, even if incomplete, the driver knows that it can likely write the cache and precompiles everything. Note that I did not switch the buffer and itob/btoi compute pipelines to on-demand, since you cannot really do anything in Vulkan without them and there are only a few. This reduces the CTS runtime for the no caches scenario on my threadripper from 32 minutes to 8 minutes. Reviewed-by: Dave Airlie <[email protected]>
Diffstat (limited to 'src/amd/vulkan/radv_meta_resolve_cs.c')
-rw-r--r--  src/amd/vulkan/radv_meta_resolve_cs.c | 34
1 file changed, 28 insertions(+), 6 deletions(-)
diff --git a/src/amd/vulkan/radv_meta_resolve_cs.c b/src/amd/vulkan/radv_meta_resolve_cs.c
index 2d79cb09fec..fca49a01bb0 100644
--- a/src/amd/vulkan/radv_meta_resolve_cs.c
+++ b/src/amd/vulkan/radv_meta_resolve_cs.c
@@ -212,6 +212,12 @@ create_resolve_pipeline(struct radv_device *device,
VkResult result;
struct radv_shader_module cs = { .nir = NULL };
+ mtx_lock(&device->meta_state.mtx);
+ if (*pipeline) {
+ mtx_unlock(&device->meta_state.mtx);
+ return VK_SUCCESS;
+ }
+
cs.nir = build_resolve_compute_shader(device, is_integer, is_srgb, samples);
/* compute shader */
@@ -239,14 +245,16 @@ create_resolve_pipeline(struct radv_device *device,
goto fail;
ralloc_free(cs.nir);
+ mtx_unlock(&device->meta_state.mtx);
return VK_SUCCESS;
fail:
ralloc_free(cs.nir);
+ mtx_unlock(&device->meta_state.mtx);
return result;
}
VkResult
-radv_device_init_meta_resolve_compute_state(struct radv_device *device)
+radv_device_init_meta_resolve_compute_state(struct radv_device *device, bool on_demand)
{
struct radv_meta_state *state = &device->meta_state;
VkResult res;
@@ -255,6 +263,9 @@ radv_device_init_meta_resolve_compute_state(struct radv_device *device)
if (res != VK_SUCCESS)
goto fail;
+ if (on_demand)
+ return VK_SUCCESS;
+
for (uint32_t i = 0; i < MAX_SAMPLES_LOG2; ++i) {
uint32_t samples = 1 << i;
@@ -353,16 +364,27 @@ emit_resolve(struct radv_cmd_buffer *cmd_buffer,
}
});
- VkPipeline pipeline;
+ VkPipeline *pipeline;
if (vk_format_is_int(src_iview->image->vk_format))
- pipeline = device->meta_state.resolve_compute.rc[samples_log2].i_pipeline;
+ pipeline = &device->meta_state.resolve_compute.rc[samples_log2].i_pipeline;
else if (vk_format_is_srgb(src_iview->image->vk_format))
- pipeline = device->meta_state.resolve_compute.rc[samples_log2].srgb_pipeline;
+ pipeline = &device->meta_state.resolve_compute.rc[samples_log2].srgb_pipeline;
else
- pipeline = device->meta_state.resolve_compute.rc[samples_log2].pipeline;
+ pipeline = &device->meta_state.resolve_compute.rc[samples_log2].pipeline;
+
+ if (!*pipeline) {
+ VkResult ret = create_resolve_pipeline(device, samples,
+ vk_format_is_int(src_iview->image->vk_format),
+ vk_format_is_srgb(src_iview->image->vk_format),
+ pipeline);
+ if (ret != VK_SUCCESS) {
+ cmd_buffer->record_result = ret;
+ return;
+ }
+ }
radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
- VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
+ VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
unsigned push_constants[4] = {
src_offset->x,