author     Jason Ekstrand <[email protected]>   2019-02-25 13:59:07 -0600
committer  Jason Ekstrand <[email protected]>   2019-04-19 19:56:42 +0000
commit     146deec9ef5f73794daba4ad7cd95016fd07266a (patch)
tree       21c9e6933bcdc5accc3eb0d8c7169dbfb3046e81 /src
parent     a7d48718466978bd5a6ae73d9cf1c2ea566d772c (diff)
anv/pipeline: Add skeleton support for spilling to bindless
If the number of surfaces or samplers exceeds what we can put in a
table, we will want to spill out to bindless. There is no bindless
support yet but this gets us the basic framework that will be used by
later commits.
Reviewed-by: Lionel Landwerlin <[email protected]>
Reviewed-by: Caio Marcelo de Oliveira Filho <[email protected]>
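The decision this commit wires up is simple to state: when laying out a pipeline's binding tables, a binding whose descriptors still fit gets the next free table slot, while one that would overflow the table (or is forced bindless via ANV_ALWAYS_BINDLESS) is instead tagged with the magic BINDLESS_OFFSET so later commits can emit bindless access for it. Below is a minimal standalone sketch of that decision; the surface-table limit and the helper name are illustrative stand-ins, not the driver's actual values or API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative limits only: MAX_SAMPLER_TABLE_SIZE and BINDLESS_OFFSET come
 * from the diff below, while EXAMPLE_BINDING_TABLE_SIZE is a stand-in for the
 * driver's MAX_BINDING_TABLE_SIZE, whose real value is not shown here. */
#define EXAMPLE_BINDING_TABLE_SIZE 240
#define MAX_SAMPLER_TABLE_SIZE     128
#define BINDLESS_OFFSET            255

/* Hypothetical helper: give a binding of `array_size` descriptors the next
 * free slot in a table that already holds `used` entries, or spill it to
 * bindless when it no longer fits (or bindless is forced). */
static uint32_t
assign_offset(uint32_t used, uint32_t array_size, uint32_t table_size,
              bool force_bindless)
{
   if (used + array_size > table_size || force_bindless)
      return BINDLESS_OFFSET;
   return used;
}

int
main(void)
{
   /* Fits: gets a real table offset. */
   printf("%u\n", assign_offset(10, 4, EXAMPLE_BINDING_TABLE_SIZE, false));
   /* Would overflow the table: spills to bindless. */
   printf("%u\n", assign_offset(238, 4, EXAMPLE_BINDING_TABLE_SIZE, false));
   /* ANV_ALWAYS_BINDLESS-style forcing spills regardless of space. */
   printf("%u\n", assign_offset(3, 2, MAX_SAMPLER_TABLE_SIZE, true));
   return 0;
}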
Diffstat (limited to 'src')
-rw-r--r--   src/intel/vulkan/anv_descriptor_set.c              |  43
-rw-r--r--   src/intel/vulkan/anv_device.c                       |   6
-rw-r--r--   src/intel/vulkan/anv_nir_apply_pipeline_layout.c    |  91
-rw-r--r--   src/intel/vulkan/anv_private.h                      |   9
4 files changed, 122 insertions, 27 deletions
diff --git a/src/intel/vulkan/anv_descriptor_set.c b/src/intel/vulkan/anv_descriptor_set.c
index 004d3f2f77f..90a02997a8d 100644
--- a/src/intel/vulkan/anv_descriptor_set.c
+++ b/src/intel/vulkan/anv_descriptor_set.c
@@ -125,22 +125,60 @@ anv_descriptor_type_size(const struct anv_physical_device *pdevice,
    return anv_descriptor_data_size(anv_descriptor_data_for_type(pdevice, type));
 }
 
+static bool
+anv_descriptor_data_supports_bindless(const struct anv_physical_device *pdevice,
+                                      enum anv_descriptor_data data,
+                                      bool sampler)
+{
+   return false;
+}
+
+bool
+anv_descriptor_supports_bindless(const struct anv_physical_device *pdevice,
+                                 const struct anv_descriptor_set_binding_layout *binding,
+                                 bool sampler)
+{
+   return anv_descriptor_data_supports_bindless(pdevice, binding->data,
+                                                sampler);
+}
+
+bool
+anv_descriptor_requires_bindless(const struct anv_physical_device *pdevice,
+                                 const struct anv_descriptor_set_binding_layout *binding,
+                                 bool sampler)
+{
+   if (pdevice->always_use_bindless)
+      return anv_descriptor_supports_bindless(pdevice, binding, sampler);
+
+   return false;
+}
+
 void anv_GetDescriptorSetLayoutSupport(
-    VkDevice                                    device,
+    VkDevice                                    _device,
     const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
     VkDescriptorSetLayoutSupport*               pSupport)
 {
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   const struct anv_physical_device *pdevice =
+      &device->instance->physicalDevice;
+
    uint32_t surface_count[MESA_SHADER_STAGES] = { 0, };
 
    for (uint32_t b = 0; b < pCreateInfo->bindingCount; b++) {
       const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[b];
 
+      enum anv_descriptor_data desc_data =
+         anv_descriptor_data_for_type(pdevice, binding->descriptorType);
+
       switch (binding->descriptorType) {
       case VK_DESCRIPTOR_TYPE_SAMPLER:
          /* There is no real limit on samplers */
         break;
 
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+         if (anv_descriptor_data_supports_bindless(pdevice, desc_data, false))
+            break;
+
         if (binding->pImmutableSamplers) {
            for (uint32_t i = 0; i < binding->descriptorCount; i++) {
               ANV_FROM_HANDLE(anv_sampler, sampler,
@@ -155,6 +193,9 @@ void anv_GetDescriptorSetLayoutSupport(
         break;
 
      default:
+         if (anv_descriptor_data_supports_bindless(pdevice, desc_data, false))
+            break;
+
         anv_foreach_stage(s, binding->stageFlags)
            surface_count[s] += binding->descriptorCount;
         break;
diff --git a/src/intel/vulkan/anv_device.c b/src/intel/vulkan/anv_device.c
index 71de8865986..2f1260dae2f 100644
--- a/src/intel/vulkan/anv_device.c
+++ b/src/intel/vulkan/anv_device.c
@@ -276,6 +276,8 @@ anv_physical_device_init_uuids(struct anv_physical_device *device)
    _mesa_sha1_update(&sha1_ctx, build_id_data(note), build_id_len);
    _mesa_sha1_update(&sha1_ctx, &device->chipset_id,
                      sizeof(device->chipset_id));
+   _mesa_sha1_update(&sha1_ctx, &device->always_use_bindless,
+                     sizeof(device->always_use_bindless));
    _mesa_sha1_final(&sha1_ctx, sha1);
    memcpy(device->pipeline_cache_uuid, sha1, VK_UUID_SIZE);
 
@@ -451,6 +453,10 @@ anv_physical_device_init(struct anv_physical_device *device,
    device->has_context_isolation =
       anv_gem_get_param(fd, I915_PARAM_HAS_CONTEXT_ISOLATION);
 
+   device->always_use_bindless =
+      env_var_as_boolean("ANV_ALWAYS_BINDLESS", false);
+
+
    /* Starting with Gen10, the timestamp frequency of the command streamer may
     * vary from one part to another. We can query the value from the kernel.
     */
diff --git a/src/intel/vulkan/anv_nir_apply_pipeline_layout.c b/src/intel/vulkan/anv_nir_apply_pipeline_layout.c
index 74f70806586..ea02ed1be78 100644
--- a/src/intel/vulkan/anv_nir_apply_pipeline_layout.c
+++ b/src/intel/vulkan/anv_nir_apply_pipeline_layout.c
@@ -26,6 +26,12 @@
 #include "nir/nir_builder.h"
 #include "compiler/brw_nir.h"
 
+/* Sampler tables don't actually have a maximum size but we pick one just so
+ * that we don't end up emitting too much state on-the-fly.
+ */
+#define MAX_SAMPLER_TABLE_SIZE 128
+#define BINDLESS_OFFSET 255
+
 struct apply_pipeline_layout_state {
    const struct anv_physical_device *pdevice;
 
@@ -600,11 +606,21 @@ anv_nir_apply_pipeline_layout(const struct anv_physical_device *pdevice,
          &layout->set[set].layout->binding[b];
 
       /* Do a fixed-point calculation to generate a score based on the
-       * number of uses and the binding array size.
+       * number of uses and the binding array size. We shift by 7 instead
+       * of 8 because we're going to use the top bit below to make
+       * everything which does not support bindless super higher priority
+       * than things which do.
        */
       uint16_t score = ((uint16_t)state.set[set].use_count[b] << 7) /
                        binding->array_size;
+      /* If the descriptor type doesn't support bindless then put it at the
+       * beginning so we guarantee it gets a slot.
+       */
+      if (!anv_descriptor_supports_bindless(pdevice, binding, true) ||
+          !anv_descriptor_supports_bindless(pdevice, binding, false))
+         score |= 1 << 15;
+
       infos[used_binding_count++] = (struct binding_info) {
          .set = set,
         .binding = b,
@@ -624,37 +640,57 @@ anv_nir_apply_pipeline_layout(const struct anv_physical_device *pdevice,
      struct anv_descriptor_set_binding_layout *binding =
         &layout->set[set].layout->binding[b];
 
+      const uint32_t array_size = binding->array_size;
+
      if (binding->data & ANV_DESCRIPTOR_SURFACE_STATE) {
-         state.set[set].surface_offsets[b] = map->surface_count;
-         struct anv_sampler **samplers = binding->immutable_samplers;
-         for (unsigned i = 0; i < binding->array_size; i++) {
-            uint8_t planes = samplers ? samplers[i]->n_planes : 1;
-            for (uint8_t p = 0; p < planes; p++) {
-               map->surface_to_descriptor[map->surface_count++] =
-                  (struct anv_pipeline_binding) {
-                     .set = set,
-                     .binding = b,
-                     .index = i,
-                     .plane = p,
-                  };
+         if (map->surface_count + array_size > MAX_BINDING_TABLE_SIZE ||
+             anv_descriptor_requires_bindless(pdevice, binding, false)) {
+            /* If this descriptor doesn't fit in the binding table or if it
+             * requires bindless for some reason, flag it as bindless.
+             */
+            assert(anv_descriptor_supports_bindless(pdevice, binding, false));
+            state.set[set].surface_offsets[b] = BINDLESS_OFFSET;
+         } else {
+            state.set[set].surface_offsets[b] = map->surface_count;
+            struct anv_sampler **samplers = binding->immutable_samplers;
+            for (unsigned i = 0; i < binding->array_size; i++) {
+               uint8_t planes = samplers ? samplers[i]->n_planes : 1;
+               for (uint8_t p = 0; p < planes; p++) {
+                  map->surface_to_descriptor[map->surface_count++] =
+                     (struct anv_pipeline_binding) {
+                        .set = set,
+                        .binding = b,
+                        .index = i,
+                        .plane = p,
+                     };
+               }
            }
         }
+         assert(map->surface_count <= MAX_BINDING_TABLE_SIZE);
      }
-      assert(map->surface_count <= MAX_BINDING_TABLE_SIZE);
 
      if (binding->data & ANV_DESCRIPTOR_SAMPLER_STATE) {
-         state.set[set].sampler_offsets[b] = map->sampler_count;
-         struct anv_sampler **samplers = binding->immutable_samplers;
-         for (unsigned i = 0; i < binding->array_size; i++) {
-            uint8_t planes = samplers ? samplers[i]->n_planes : 1;
-            for (uint8_t p = 0; p < planes; p++) {
-               map->sampler_to_descriptor[map->sampler_count++] =
-                  (struct anv_pipeline_binding) {
-                     .set = set,
-                     .binding = b,
-                     .index = i,
-                     .plane = p,
-                  };
+         if (map->sampler_count + array_size > MAX_SAMPLER_TABLE_SIZE ||
+             anv_descriptor_requires_bindless(pdevice, binding, true)) {
+            /* If this descriptor doesn't fit in the binding table or if it
+             * requires bindless for some reason, flag it as bindless.
+             */
+            assert(anv_descriptor_supports_bindless(pdevice, binding, true));
+            state.set[set].sampler_offsets[b] = BINDLESS_OFFSET;
+         } else {
+            state.set[set].sampler_offsets[b] = map->sampler_count;
+            struct anv_sampler **samplers = binding->immutable_samplers;
+            for (unsigned i = 0; i < binding->array_size; i++) {
+               uint8_t planes = samplers ? samplers[i]->n_planes : 1;
+               for (uint8_t p = 0; p < planes; p++) {
+                  map->sampler_to_descriptor[map->sampler_count++] =
+                     (struct anv_pipeline_binding) {
+                        .set = set,
+                        .binding = b,
+                        .index = i,
+                        .plane = p,
+                     };
+               }
            }
         }
      }
@@ -676,6 +712,9 @@ anv_nir_apply_pipeline_layout(const struct anv_physical_device *pdevice,
         if (state.set[set].use_count[binding] == 0)
            continue;
 
+        if (state.set[set].surface_offsets[binding] >= MAX_BINDING_TABLE_SIZE)
+           continue;
+
         struct anv_pipeline_binding *pipe_binding =
            &map->surface_to_descriptor[state.set[set].surface_offsets[binding]];
         for (unsigned i = 0; i < array_size; i++) {
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index 5ae71b4e8c4..4f22a405d0d 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -949,6 +949,7 @@ struct anv_physical_device {
     bool                                        has_context_priority;
     bool                                        use_softpin;
     bool                                        has_context_isolation;
+    bool                                        always_use_bindless;
 
     struct anv_device_extension_table           supported_extensions;
 
@@ -1565,6 +1566,14 @@ unsigned anv_descriptor_size(const struct anv_descriptor_set_binding_layout *lay
 unsigned anv_descriptor_type_size(const struct anv_physical_device *pdevice,
                                   VkDescriptorType type);
 
+bool anv_descriptor_supports_bindless(const struct anv_physical_device *pdevice,
+                                      const struct anv_descriptor_set_binding_layout *binding,
+                                      bool sampler);
+
+bool anv_descriptor_requires_bindless(const struct anv_physical_device *pdevice,
+                                      const struct anv_descriptor_set_binding_layout *binding,
+                                      bool sampler);
+
 struct anv_descriptor_set_layout {
    /* Descriptor set layouts can be destroyed at almost any time */
    uint32_t ref_cnt;
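For context on the sorting change in anv_nir_apply_pipeline_layout.c above: bindings are ranked by a fixed-point use-count score, and the shift is 7 rather than 8 so the top bit of the 16-bit score stays free to push bindings that cannot spill to bindless ahead of everything that can, guaranteeing they receive one of the limited table slots. A small sketch of that heuristic, with made-up use counts and array sizes:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of the binding-priority score described in the
 * anv_nir_apply_pipeline_layout.c hunk: a 7-bit fixed-point ratio of uses to
 * array size, with the spare top bit reserved for bindings that cannot go
 * bindless so they always sort first. Inputs are sample values, not driver
 * data. */
static uint16_t
binding_score(uint16_t use_count, uint32_t array_size, bool supports_bindless)
{
   uint16_t score = ((uint16_t)(use_count << 7)) / array_size;
   if (!supports_bindless)
      score |= 1 << 15;
   return score;
}

int
main(void)
{
   printf("%u\n", (unsigned)binding_score(12, 4, true));   /* can spill to bindless */
   printf("%u\n", (unsigned)binding_score(12, 4, false));  /* must keep a table slot */
   return 0;
}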