| author | Jason Ekstrand <[email protected]> | 2019-01-09 16:04:22 -0600 |
|---|---|---|
| committer | Jason Ekstrand <[email protected]> | 2019-04-19 19:56:42 +0000 |
| commit | 79fb0d27f3ab41fec88acbe24bc3163a42c0715c (patch) | |
| tree | 053d3bba13cc24b8711c43470b25546b9508eec1 /src/intel/vulkan/anv_pipeline.c | |
| parent | 3cf78ec2bdc22833082d026d59ccb28d79b07f6f (diff) | |
anv: Implement SSBO bindings with GPU addresses in the descriptor BO
This commit adds a new way for ANV to do SSBO bindings by just passing a
GPU address in through the descriptor buffer and using the A64 messages
to access the GPU address directly. This means that our variable
pointers are now "real" pointers instead of a vec2(BTI, offset) pair.
This carries a few advantages:
1. It lets us support a virtually unbounded number of SSBO bindings.
2. It lets us implement VK_KHR_shader_atomic_int64, which we couldn't
implement before because those atomic messages are only available
in the bindless A64 form.
3. It's way better than messing around with bindless handles for SSBOs,
which is the only other option for VK_EXT_descriptor_indexing.
4. It's more future-looking, maybe? At the least, this is what NVIDIA
does (they don't have binding-based SSBOs at all). This doesn't a
priori mean it's better; it just means it's probably not terrible.
The big disadvantage, of course, is that we have to start doing our own
bounds checking for robustBufferAccess again, and we have to push in
dynamic offsets.
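As a concrete illustration of that trade-off, here is a minimal C sketch of the two A64 SSBO pointer shapes the patch below selects between. The type and field names are hypothetical; the driver expresses these as GLSL/NIR vector types (uint64 vs. uvec4), not C structs:

```c
#include <stdint.h>

/* Without robustBufferAccess, an SSBO binding is just a bare 64-bit
 * GPU address (a uint64 in the shader). */
typedef uint64_t ssbo_ptr_a64;

/* With robustBufferAccess, the pointer grows to four 32-bit words (a
 * uvec4) that also carry the buffer size, so every access can be
 * bounds-checked in the shader. */
struct ssbo_ptr_a64_bounded {
   uint32_t addr_lo; /* low 32 bits of the GPU address */
   uint32_t addr_hi; /* high 32 bits of the GPU address */
   uint32_t size;    /* buffer size in bytes, checked on each access */
   uint32_t offset;  /* offset of the access within the buffer */
};
```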
Reviewed-by: Lionel Landwerlin <[email protected]>
Reviewed-by: Caio Marcelo de Oliveira Filho <[email protected]>
Diffstat (limited to 'src/intel/vulkan/anv_pipeline.c')
| -rw-r--r-- | src/intel/vulkan/anv_pipeline.c | 32 |
1 file changed, 26 insertions(+), 6 deletions(-)
```diff
diff --git a/src/intel/vulkan/anv_pipeline.c b/src/intel/vulkan/anv_pipeline.c
index 2dd60f2dd2c..b0ed2187376 100644
--- a/src/intel/vulkan/anv_pipeline.c
+++ b/src/intel/vulkan/anv_pipeline.c
@@ -166,12 +166,20 @@ anv_shader_compile_to_nir(struct anv_device *device,
          .variable_pointers = true,
       },
       .ubo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2),
-      .ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2),
       .phys_ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT64, 1),
       .push_const_ptr_type = glsl_uint_type(),
       .shared_ptr_type = glsl_uint_type(),
    };
 
+   if (pdevice->has_a64_buffer_access) {
+      if (device->robust_buffer_access)
+         spirv_options.ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 4);
+      else
+         spirv_options.ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT64, 1);
+   } else {
+      spirv_options.ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2);
+   }
+
    nir_function *entry_point =
       spirv_to_nir(spirv, module->size / 4,
                    spec_entries, num_spec_entries,
@@ -553,8 +561,9 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
                        struct anv_pipeline_stage *stage,
                        struct anv_pipeline_layout *layout)
 {
-   const struct brw_compiler *compiler =
-      pipeline->device->instance->physicalDevice.compiler;
+   const struct anv_physical_device *pdevice =
+      &pipeline->device->instance->physicalDevice;
+   const struct brw_compiler *compiler = pdevice->compiler;
    struct brw_stage_prog_data *prog_data = &stage->prog_data.base;
    nir_shader *nir = stage->nir;
 
@@ -607,15 +616,26 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
 
    /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
    if (layout) {
-      anv_nir_apply_pipeline_layout(&pipeline->device->instance->physicalDevice,
+      anv_nir_apply_pipeline_layout(pdevice,
                                     pipeline->device->robust_buffer_access,
                                     layout, nir, prog_data,
                                     &stage->bind_map);
 
-      NIR_PASS_V(nir, nir_lower_explicit_io,
-                 nir_var_mem_ubo | nir_var_mem_ssbo,
+      NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ubo,
                  nir_address_format_32bit_index_offset);
+      nir_address_format ssbo_address_format;
+      if (pdevice->has_a64_buffer_access) {
+         if (pipeline->device->robust_buffer_access)
+            ssbo_address_format = nir_address_format_64bit_bounded_global;
+         else
+            ssbo_address_format = nir_address_format_64bit_global;
+      } else {
+         ssbo_address_format = nir_address_format_32bit_index_offset;
+      }
+      NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ssbo,
+                 ssbo_address_format);
+      NIR_PASS_V(nir, nir_opt_constant_folding);
    }
```
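On the robust path, lowering SSBO access with nir_address_format_64bit_bounded_global means each load or store carries its own bounds check. The following is a hedged C sketch of that per-access logic, not the actual lowering (nir_lower_explicit_io emits equivalent NIR instructions, and read_a64_dword is a made-up stand-in for the hardware A64 read message):

```c
#include <stdint.h>
#include <string.h>

/* CPU stand-in for the hardware A64 untyped-read message; for
 * illustration only. */
static uint32_t
read_a64_dword(uint64_t addr)
{
   uint32_t v;
   memcpy(&v, (const void *)(uintptr_t)addr, sizeof(v));
   return v;
}

/* Roughly the logic a bounds-checked SSBO dword load reduces to when
 * the pointer is a uvec4 of {addr_lo, addr_hi, size, offset}: an
 * out-of-bounds read returns zero, which robustBufferAccess permits,
 * instead of touching memory past the end of the buffer. */
static uint32_t
load_ssbo_dword_bounded(const uint32_t ptr[4])
{
   uint64_t addr   = (uint64_t)ptr[0] | ((uint64_t)ptr[1] << 32);
   uint32_t size   = ptr[2];
   uint32_t offset = ptr[3];

   /* Phrased to avoid unsigned wrap-around in the comparison. */
   if (size < 4 || offset > size - 4)
      return 0;

   return read_a64_dword(addr + offset);
}
```

The non-robust nir_address_format_64bit_global path skips the comparison entirely and dereferences the raw 64-bit address, which is why it gets by with the plain uint64 pointer type.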