From a5f19f64c3317c98b984010f144416ce768a3c0b Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Wed, 2 Dec 2015 16:08:13 -0800 Subject: vk/0.210.0: Remove the VkShaderStage enum This made for an unfortunately large amount of work since we were using it fairly heavily internally. However, gl_shader_stage does basically the same things, so it's not too bad. --- include/vulkan/vulkan.h | 15 +------ src/vulkan/anv_cmd_buffer.c | 27 ++++++------ src/vulkan/anv_descriptor_set.c | 10 ++--- src/vulkan/anv_meta.c | 4 +- src/vulkan/anv_meta_clear.c | 4 +- src/vulkan/anv_nir.h | 7 --- src/vulkan/anv_nir_apply_dynamic_offsets.c | 4 +- src/vulkan/anv_nir_apply_pipeline_layout.c | 22 +++++----- src/vulkan/anv_pipeline.c | 70 ++++++++++++------------------ src/vulkan/anv_private.h | 35 ++++++++++----- src/vulkan/gen7_cmd_buffer.c | 54 +++++++++++------------ src/vulkan/gen7_pipeline.c | 6 +-- src/vulkan/gen8_cmd_buffer.c | 23 +++++----- src/vulkan/gen8_pipeline.c | 10 ++--- 14 files changed, 133 insertions(+), 158 deletions(-) diff --git a/include/vulkan/vulkan.h b/include/vulkan/vulkan.h index f7bde4cc7c7..7affe394992 100644 --- a/include/vulkan/vulkan.h +++ b/include/vulkan/vulkan.h @@ -477,19 +477,6 @@ typedef enum VkComponentSwizzle { VK_COMPONENT_SWIZZLE_MAX_ENUM = 0x7FFFFFFF } VkComponentSwizzle; -typedef enum { - VK_SHADER_STAGE_VERTEX = 0, - VK_SHADER_STAGE_TESS_CONTROL = 1, - VK_SHADER_STAGE_TESS_EVALUATION = 2, - VK_SHADER_STAGE_GEOMETRY = 3, - VK_SHADER_STAGE_FRAGMENT = 4, - VK_SHADER_STAGE_COMPUTE = 5, - VK_SHADER_STAGE_BEGIN_RANGE = VK_SHADER_STAGE_VERTEX, - VK_SHADER_STAGE_END_RANGE = VK_SHADER_STAGE_COMPUTE, - VK_SHADER_STAGE_NUM = (VK_SHADER_STAGE_COMPUTE - VK_SHADER_STAGE_VERTEX + 1), - VK_SHADER_STAGE_MAX_ENUM = 0x7FFFFFFF -} VkShaderStage; - typedef enum VkVertexInputRate { VK_VERTEX_INPUT_RATE_VERTEX = 0, VK_VERTEX_INPUT_RATE_INSTANCE = 1, @@ -1604,7 +1591,7 @@ typedef struct VkPipelineShaderStageCreateInfo { VkStructureType sType; const void* pNext; VkPipelineShaderStageCreateFlags flags; - VkShaderStage stage; + VkShaderStageFlagBits stage; VkShaderModule module; const char* pName; const VkSpecializationInfo* pSpecializationInfo; diff --git a/src/vulkan/anv_cmd_buffer.c b/src/vulkan/anv_cmd_buffer.c index 125b078413b..19d4be90274 100644 --- a/src/vulkan/anv_cmd_buffer.c +++ b/src/vulkan/anv_cmd_buffer.c @@ -129,7 +129,7 @@ anv_cmd_state_init(struct anv_cmd_state *state) static VkResult anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer, - VkShaderStage stage, uint32_t size) + gl_shader_stage stage, uint32_t size) { struct anv_push_constants **ptr = &cmd_buffer->state.push_constants[stage]; @@ -520,8 +520,7 @@ void anv_CmdBindDescriptorSets( } if (set_layout->dynamic_offset_count > 0) { - VkShaderStage s; - for_each_bit(s, set_layout->shader_stages) { + anv_foreach_stage(s, set_layout->shader_stages) { anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, s, dynamic); struct anv_push_constants *push = @@ -585,7 +584,8 @@ add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer, static void fill_descriptor_buffer_surface_state(struct anv_device *device, void *state, - VkShaderStage stage, VkDescriptorType type, + gl_shader_stage stage, + VkDescriptorType type, uint32_t offset, uint32_t range) { VkFormat format; @@ -594,8 +594,7 @@ fill_descriptor_buffer_surface_state(struct anv_device *device, void *state, switch (type) { case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: - if 
(anv_is_scalar_shader_stage(device->instance->physicalDevice.compiler, - stage)) { + if (device->instance->physicalDevice.compiler->scalar_stage[stage]) { stride = 4; } else { stride = 16; @@ -620,19 +619,20 @@ fill_descriptor_buffer_surface_state(struct anv_device *device, void *state, VkResult anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer, - VkShaderStage stage, struct anv_state *bt_state) + gl_shader_stage stage, + struct anv_state *bt_state) { struct anv_framebuffer *fb = cmd_buffer->state.framebuffer; struct anv_subpass *subpass = cmd_buffer->state.subpass; struct anv_pipeline_layout *layout; uint32_t color_count, bias, state_offset; - if (stage == VK_SHADER_STAGE_COMPUTE) + if (stage == MESA_SHADER_COMPUTE) layout = cmd_buffer->state.compute_pipeline->layout; else layout = cmd_buffer->state.pipeline->layout; - if (stage == VK_SHADER_STAGE_FRAGMENT) { + if (stage == MESA_SHADER_FRAGMENT) { bias = MAX_RTS; color_count = subpass->color_count; } else { @@ -729,12 +729,12 @@ anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer, VkResult anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer, - VkShaderStage stage, struct anv_state *state) + gl_shader_stage stage, struct anv_state *state) { struct anv_pipeline_layout *layout; uint32_t sampler_count; - if (stage == VK_SHADER_STAGE_COMPUTE) + if (stage == MESA_SHADER_COMPUTE) layout = cmd_buffer->state.compute_pipeline->layout; else layout = cmd_buffer->state.pipeline->layout; @@ -858,7 +858,7 @@ void anv_CmdWaitEvents( struct anv_state anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer, - VkShaderStage stage) + gl_shader_stage stage) { struct anv_push_constants *data = cmd_buffer->state.push_constants[stage]; @@ -893,9 +893,8 @@ void anv_CmdPushConstants( const void* pValues) { ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer); - VkShaderStage stage; - for_each_bit(stage, stageFlags) { + anv_foreach_stage(stage, stageFlags) { anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, client_data); memcpy(cmd_buffer->state.push_constants[stage]->client_data + offset, diff --git a/src/vulkan/anv_descriptor_set.c b/src/vulkan/anv_descriptor_set.c index 081d1e85ae1..e1cfd788b73 100644 --- a/src/vulkan/anv_descriptor_set.c +++ b/src/vulkan/anv_descriptor_set.c @@ -77,8 +77,8 @@ VkResult anv_CreateDescriptorSetLayout( /* Initialize all samplers to 0 */ memset(samplers, 0, immutable_sampler_count * sizeof(*samplers)); - uint32_t sampler_count[VK_SHADER_STAGE_NUM] = { 0, }; - uint32_t surface_count[VK_SHADER_STAGE_NUM] = { 0, }; + uint32_t sampler_count[MESA_SHADER_STAGES] = { 0, }; + uint32_t surface_count[MESA_SHADER_STAGES] = { 0, }; uint32_t dynamic_offset_count = 0; for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) { @@ -196,7 +196,7 @@ VkResult anv_CreatePipelineLayout( dynamic_offset_count += set_layout->binding[b].array_size; } - for (VkShaderStage s = 0; s < VK_SHADER_STAGE_NUM; s++) { + for (gl_shader_stage s = 0; s < MESA_SHADER_STAGES; s++) { l.set[set].stage[s].surface_start = l.stage[s].surface_count; l.set[set].stage[s].sampler_start = l.stage[s].sampler_count; @@ -217,7 +217,7 @@ VkResult anv_CreatePipelineLayout( } unsigned num_bindings = 0; - for (VkShaderStage s = 0; s < VK_SHADER_STAGE_NUM; s++) + for (gl_shader_stage s = 0; s < MESA_SHADER_STAGES; s++) num_bindings += l.stage[s].surface_count + l.stage[s].sampler_count; size_t size = sizeof(*layout) + num_bindings * sizeof(layout->entries[0]); @@ -229,7 +229,7 @@ VkResult anv_CreatePipelineLayout( /* 
Now we can actually build our surface and sampler maps */ struct anv_pipeline_binding *entry = layout->entries; - for (VkShaderStage s = 0; s < VK_SHADER_STAGE_NUM; s++) { + for (gl_shader_stage s = 0; s < MESA_SHADER_STAGES; s++) { l.stage[s].surface_to_descriptor = entry; entry += l.stage[s].surface_count; l.stage[s].sampler_to_descriptor = entry; diff --git a/src/vulkan/anv_meta.c b/src/vulkan/anv_meta.c index 84f828eb4cf..67651c542b7 100644 --- a/src/vulkan/anv_meta.c +++ b/src/vulkan/anv_meta.c @@ -304,13 +304,13 @@ anv_device_init_meta_blit_state(struct anv_device *device) VkPipelineShaderStageCreateInfo pipeline_shader_stages[] = { { .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, - .stage = VK_SHADER_STAGE_VERTEX, + .stage = VK_SHADER_STAGE_VERTEX_BIT, .module = anv_shader_module_to_handle(&vs), .pName = "main", .pSpecializationInfo = NULL }, { .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, - .stage = VK_SHADER_STAGE_FRAGMENT, + .stage = VK_SHADER_STAGE_FRAGMENT_BIT, .module = VK_NULL_HANDLE, /* TEMPLATE VALUE! FILL ME IN! */ .pName = "main", .pSpecializationInfo = NULL diff --git a/src/vulkan/anv_meta_clear.c b/src/vulkan/anv_meta_clear.c index cac34b3d7d3..a2667c7bb6e 100644 --- a/src/vulkan/anv_meta_clear.c +++ b/src/vulkan/anv_meta_clear.c @@ -134,13 +134,13 @@ create_pipeline(struct anv_device *device, .pStages = (VkPipelineShaderStageCreateInfo[]) { { .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, - .stage = VK_SHADER_STAGE_VERTEX, + .stage = VK_SHADER_STAGE_VERTEX_BIT, .module = anv_shader_module_to_handle(&vs_m), .pName = "main", }, { .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, - .stage = VK_SHADER_STAGE_FRAGMENT, + .stage = VK_SHADER_STAGE_FRAGMENT_BIT, .module = anv_shader_module_to_handle(&fs_m), .pName = "main", }, diff --git a/src/vulkan/anv_nir.h b/src/vulkan/anv_nir.h index b164ae581e1..666b127451a 100644 --- a/src/vulkan/anv_nir.h +++ b/src/vulkan/anv_nir.h @@ -30,13 +30,6 @@ extern "C" { #endif -static inline VkShaderStage -anv_vk_shader_stage_for_mesa_stage(gl_shader_stage stage) -{ - /* The two enums happen to line up. 
*/ - return (VkShaderStage)(int)stage; -} - void anv_nir_lower_push_constants(nir_shader *shader, bool is_scalar); void anv_nir_apply_dynamic_offsets(struct anv_pipeline *pipeline, diff --git a/src/vulkan/anv_nir_apply_dynamic_offsets.c b/src/vulkan/anv_nir_apply_dynamic_offsets.c index dd4f5dfe545..c500ab3e03c 100644 --- a/src/vulkan/anv_nir_apply_dynamic_offsets.c +++ b/src/vulkan/anv_nir_apply_dynamic_offsets.c @@ -28,7 +28,6 @@ struct apply_dynamic_offsets_state { nir_shader *shader; nir_builder builder; - VkShaderStage stage; struct anv_pipeline_layout *layout; uint32_t indices_start; @@ -216,12 +215,11 @@ anv_nir_apply_dynamic_offsets(struct anv_pipeline *pipeline, { struct apply_dynamic_offsets_state state = { .shader = shader, - .stage = anv_vk_shader_stage_for_mesa_stage(shader->stage), .layout = pipeline->layout, .indices_start = shader->num_uniforms, }; - if (!state.layout || !state.layout->stage[state.stage].has_dynamic_offsets) + if (!state.layout || !state.layout->stage[shader->stage].has_dynamic_offsets) return; nir_foreach_overload(shader, overload) { diff --git a/src/vulkan/anv_nir_apply_pipeline_layout.c b/src/vulkan/anv_nir_apply_pipeline_layout.c index fe1702dfda7..1b196cd62b7 100644 --- a/src/vulkan/anv_nir_apply_pipeline_layout.c +++ b/src/vulkan/anv_nir_apply_pipeline_layout.c @@ -28,7 +28,6 @@ struct apply_pipeline_layout_state { nir_shader *shader; nir_builder builder; - VkShaderStage stage; const struct anv_pipeline_layout *layout; bool progress; @@ -42,15 +41,17 @@ get_surface_index(unsigned set, unsigned binding, struct anv_descriptor_set_layout *set_layout = state->layout->set[set].layout; + gl_shader_stage stage = state->shader->stage; + assert(binding < set_layout->binding_count); - assert(set_layout->binding[binding].stage[state->stage].surface_index >= 0); + assert(set_layout->binding[binding].stage[stage].surface_index >= 0); uint32_t surface_index = - state->layout->set[set].stage[state->stage].surface_start + - set_layout->binding[binding].stage[state->stage].surface_index; + state->layout->set[set].stage[stage].surface_start + + set_layout->binding[binding].stage[stage].surface_index; - assert(surface_index < state->layout->stage[state->stage].surface_count); + assert(surface_index < state->layout->stage[stage].surface_count); return surface_index; } @@ -65,16 +66,18 @@ get_sampler_index(unsigned set, unsigned binding, nir_texop tex_op, assert(binding < set_layout->binding_count); - if (set_layout->binding[binding].stage[state->stage].sampler_index < 0) { + gl_shader_stage stage = state->shader->stage; + + if (set_layout->binding[binding].stage[stage].sampler_index < 0) { assert(tex_op == nir_texop_txf); return 0; } uint32_t sampler_index = - state->layout->set[set].stage[state->stage].sampler_start + - set_layout->binding[binding].stage[state->stage].sampler_index; + state->layout->set[set].stage[stage].sampler_start + + set_layout->binding[binding].stage[stage].sampler_index; - assert(sampler_index < state->layout->stage[state->stage].sampler_count); + assert(sampler_index < state->layout->stage[stage].sampler_count); return sampler_index; } @@ -217,7 +220,6 @@ anv_nir_apply_pipeline_layout(nir_shader *shader, { struct apply_pipeline_layout_state state = { .shader = shader, - .stage = anv_vk_shader_stage_for_mesa_stage(shader->stage), .layout = layout, }; diff --git a/src/vulkan/anv_pipeline.c b/src/vulkan/anv_pipeline.c index 8b95bce8823..e910298921e 100644 --- a/src/vulkan/anv_pipeline.c +++ b/src/vulkan/anv_pipeline.c @@ -77,35 +77,19 @@ void 
anv_DestroyShaderModule( #define SPIR_V_MAGIC_NUMBER 0x07230203 -static const gl_shader_stage vk_shader_stage_to_mesa_stage[] = { - [VK_SHADER_STAGE_VERTEX] = MESA_SHADER_VERTEX, - [VK_SHADER_STAGE_TESS_CONTROL] = -1, - [VK_SHADER_STAGE_TESS_EVALUATION] = -1, - [VK_SHADER_STAGE_GEOMETRY] = MESA_SHADER_GEOMETRY, - [VK_SHADER_STAGE_FRAGMENT] = MESA_SHADER_FRAGMENT, - [VK_SHADER_STAGE_COMPUTE] = MESA_SHADER_COMPUTE, -}; - -bool -anv_is_scalar_shader_stage(const struct brw_compiler *compiler, - VkShaderStage stage) -{ - return compiler->scalar_stage[vk_shader_stage_to_mesa_stage[stage]]; -} - /* Eventually, this will become part of anv_CreateShader. Unfortunately, * we can't do that yet because we don't have the ability to copy nir. */ static nir_shader * anv_shader_compile_to_nir(struct anv_device *device, struct anv_shader_module *module, - const char *entrypoint_name, VkShaderStage vk_stage) + const char *entrypoint_name, + gl_shader_stage stage) { if (strcmp(entrypoint_name, "main") != 0) { anv_finishme("Multiple shaders per module not really supported"); } - gl_shader_stage stage = vk_shader_stage_to_mesa_stage[vk_stage]; const struct brw_compiler *compiler = device->instance->physicalDevice.compiler; const nir_shader_compiler_options *nir_options = @@ -304,7 +288,7 @@ static nir_shader * anv_pipeline_compile(struct anv_pipeline *pipeline, struct anv_shader_module *module, const char *entrypoint, - VkShaderStage stage, + gl_shader_stage stage, struct brw_stage_prog_data *prog_data) { const struct brw_compiler *compiler = @@ -315,7 +299,7 @@ anv_pipeline_compile(struct anv_pipeline *pipeline, if (nir == NULL) return NULL; - anv_nir_lower_push_constants(nir, anv_is_scalar_shader_stage(compiler, stage)); + anv_nir_lower_push_constants(nir, compiler->scalar_stage[stage]); /* Figure out the number of parameters */ prog_data->nr_params = 0; @@ -358,7 +342,7 @@ anv_pipeline_compile(struct anv_pipeline *pipeline, /* All binding table offsets provided by apply_pipeline_layout() are * relative to the start of the bindint table (plus MAX_RTS for VS). */ - unsigned bias = stage == VK_SHADER_STAGE_FRAGMENT ? MAX_RTS : 0; + unsigned bias = stage == MESA_SHADER_FRAGMENT ? MAX_RTS : 0; prog_data->binding_table.size_bytes = 0; prog_data->binding_table.texture_start = bias; prog_data->binding_table.ubo_start = bias; @@ -367,7 +351,7 @@ anv_pipeline_compile(struct anv_pipeline *pipeline, /* Finish the optimization and compilation process */ nir = brw_lower_nir(nir, &pipeline->device->info, NULL, - anv_is_scalar_shader_stage(compiler, stage)); + compiler->scalar_stage[stage]); /* nir_lower_io will only handle the push constants; we need to set this * to the full number of possible uniforms. 
@@ -392,21 +376,21 @@ anv_pipeline_upload_kernel(struct anv_pipeline *pipeline, } static void anv_pipeline_add_compiled_stage(struct anv_pipeline *pipeline, - VkShaderStage stage, + gl_shader_stage stage, struct brw_stage_prog_data *prog_data) { struct brw_device_info *devinfo = &pipeline->device->info; uint32_t max_threads[] = { - [VK_SHADER_STAGE_VERTEX] = devinfo->max_vs_threads, - [VK_SHADER_STAGE_TESS_CONTROL] = 0, - [VK_SHADER_STAGE_TESS_EVALUATION] = 0, - [VK_SHADER_STAGE_GEOMETRY] = devinfo->max_gs_threads, - [VK_SHADER_STAGE_FRAGMENT] = devinfo->max_wm_threads, - [VK_SHADER_STAGE_COMPUTE] = devinfo->max_cs_threads, + [MESA_SHADER_VERTEX] = devinfo->max_vs_threads, + [MESA_SHADER_TESS_CTRL] = 0, + [MESA_SHADER_TESS_EVAL] = 0, + [MESA_SHADER_GEOMETRY] = devinfo->max_gs_threads, + [MESA_SHADER_FRAGMENT] = devinfo->max_wm_threads, + [MESA_SHADER_COMPUTE] = devinfo->max_cs_threads, }; pipeline->prog_data[stage] = prog_data; - pipeline->active_stages |= 1 << stage; + pipeline->active_stages |= mesa_to_vk_shader_stage(stage); pipeline->scratch_start[stage] = pipeline->total_scratch; pipeline->total_scratch = align_u32(pipeline->total_scratch, 1024) + @@ -431,7 +415,7 @@ anv_pipeline_compile_vs(struct anv_pipeline *pipeline, memset(prog_data, 0, sizeof(*prog_data)); nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint, - VK_SHADER_STAGE_VERTEX, + MESA_SHADER_VERTEX, &prog_data->base.base); if (nir == NULL) return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); @@ -470,7 +454,7 @@ anv_pipeline_compile_vs(struct anv_pipeline *pipeline, ralloc_free(mem_ctx); - anv_pipeline_add_compiled_stage(pipeline, VK_SHADER_STAGE_VERTEX, + anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_VERTEX, &prog_data->base.base); return VK_SUCCESS; @@ -494,7 +478,7 @@ anv_pipeline_compile_gs(struct anv_pipeline *pipeline, memset(prog_data, 0, sizeof(*prog_data)); nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint, - VK_SHADER_STAGE_GEOMETRY, + MESA_SHADER_GEOMETRY, &prog_data->base.base); if (nir == NULL) return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); @@ -525,7 +509,7 @@ anv_pipeline_compile_gs(struct anv_pipeline *pipeline, ralloc_free(mem_ctx); - anv_pipeline_add_compiled_stage(pipeline, VK_SHADER_STAGE_GEOMETRY, + anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_GEOMETRY, &prog_data->base.base); return VK_SUCCESS; @@ -554,7 +538,7 @@ anv_pipeline_compile_fs(struct anv_pipeline *pipeline, prog_data->binding_table.render_target_start = 0; nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint, - VK_SHADER_STAGE_FRAGMENT, + MESA_SHADER_FRAGMENT, &prog_data->base); if (nir == NULL) return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); @@ -602,7 +586,7 @@ anv_pipeline_compile_fs(struct anv_pipeline *pipeline, ralloc_free(mem_ctx); - anv_pipeline_add_compiled_stage(pipeline, VK_SHADER_STAGE_FRAGMENT, + anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_FRAGMENT, &prog_data->base); return VK_SUCCESS; @@ -626,7 +610,7 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline, memset(prog_data, 0, sizeof(*prog_data)); nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint, - VK_SHADER_STAGE_COMPUTE, + MESA_SHADER_COMPUTE, &prog_data->base); if (nir == NULL) return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); @@ -649,7 +633,7 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline, shader_code, code_size); ralloc_free(mem_ctx); - anv_pipeline_add_compiled_stage(pipeline, VK_SHADER_STAGE_COMPUTE, + anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_COMPUTE, 
&prog_data->base); return VK_SUCCESS; @@ -916,8 +900,8 @@ anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info) for (uint32_t i = 0; i < info->stageCount; ++i) { switch (info->pStages[i].stage) { - case VK_SHADER_STAGE_TESS_CONTROL: - case VK_SHADER_STAGE_TESS_EVALUATION: + case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT: + case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT: assert(info->pTessellationState); break; default: @@ -983,13 +967,13 @@ anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device, const char *entrypoint = pCreateInfo->pStages[i].pName; switch (pCreateInfo->pStages[i].stage) { - case VK_SHADER_STAGE_VERTEX: + case VK_SHADER_STAGE_VERTEX_BIT: anv_pipeline_compile_vs(pipeline, pCreateInfo, module, entrypoint); break; - case VK_SHADER_STAGE_GEOMETRY: + case VK_SHADER_STAGE_GEOMETRY_BIT: anv_pipeline_compile_gs(pipeline, pCreateInfo, module, entrypoint); break; - case VK_SHADER_STAGE_FRAGMENT: + case VK_SHADER_STAGE_FRAGMENT_BIT: anv_pipeline_compile_fs(pipeline, pCreateInfo, module, entrypoint); break; default: diff --git a/src/vulkan/anv_private.h b/src/vulkan/anv_private.h index 940d1662c1f..9c1e6b2f955 100644 --- a/src/vulkan/anv_private.h +++ b/src/vulkan/anv_private.h @@ -489,9 +489,6 @@ struct anv_physical_device { struct isl_device isl_dev; }; -bool anv_is_scalar_shader_stage(const struct brw_compiler *compiler, - VkShaderStage stage); - struct anv_instance { VK_LOADER_DATA _loader_data; @@ -772,7 +769,7 @@ struct anv_descriptor_set_binding_layout { /* Index into the sampler table for the associated sampler */ int16_t sampler_index; - } stage[VK_SHADER_STAGE_NUM]; + } stage[MESA_SHADER_STAGES]; /* Immutable samplers (or NULL if no immutable samplers) */ struct anv_sampler **immutable_samplers; @@ -852,7 +849,7 @@ struct anv_pipeline_layout { struct { uint32_t surface_start; uint32_t sampler_start; - } stage[VK_SHADER_STAGE_NUM]; + } stage[MESA_SHADER_STAGES]; } set[MAX_SETS]; uint32_t num_sets; @@ -863,7 +860,7 @@ struct anv_pipeline_layout { struct anv_pipeline_binding *surface_to_descriptor; uint32_t sampler_count; struct anv_pipeline_binding *sampler_to_descriptor; - } stage[VK_SHADER_STAGE_NUM]; + } stage[MESA_SHADER_STAGES]; struct anv_pipeline_binding entries[0]; }; @@ -991,7 +988,7 @@ struct anv_cmd_state { uint32_t restart_index; struct anv_vertex_binding vertex_bindings[MAX_VBS]; struct anv_descriptor_set * descriptors[MAX_SETS]; - struct anv_push_constants * push_constants[VK_SHADER_STAGE_NUM]; + struct anv_push_constants * push_constants[MESA_SHADER_STAGES]; struct anv_dynamic_state dynamic; struct { @@ -1137,7 +1134,7 @@ void anv_cmd_buffer_begin_subpass(struct anv_cmd_buffer *cmd_buffer, struct anv_state anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer, - VkShaderStage stage); + gl_shader_stage stage); void anv_cmd_buffer_clear_attachments(struct anv_cmd_buffer *cmd_buffer, struct anv_render_pass *pass, @@ -1163,6 +1160,24 @@ struct anv_shader_module { char data[0]; }; +static inline gl_shader_stage +vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage) +{ + assert(__builtin_popcount(vk_stage) == 1); + return ffs(vk_stage) - 1; +} + +static inline VkShaderStageFlagBits +mesa_to_vk_shader_stage(gl_shader_stage mesa_stage) +{ + return (1 << mesa_stage); +} + +#define anv_foreach_stage(stage, stage_bits) \ + for (gl_shader_stage stage, __tmp = (gl_shader_stage)(stage_bits);\ + stage = __builtin_ffs(__tmp) - 1, __tmp; \ + __tmp &= ~(1 << (stage))) + struct anv_pipeline { struct anv_device * device; 
struct anv_batch batch; @@ -1179,8 +1194,8 @@ struct anv_pipeline { struct brw_gs_prog_data gs_prog_data; struct brw_cs_prog_data cs_prog_data; bool writes_point_size; - struct brw_stage_prog_data * prog_data[VK_SHADER_STAGE_NUM]; - uint32_t scratch_start[VK_SHADER_STAGE_NUM]; + struct brw_stage_prog_data * prog_data[MESA_SHADER_STAGES]; + uint32_t scratch_start[MESA_SHADER_STAGES]; uint32_t total_scratch; struct { uint32_t vs_start; diff --git a/src/vulkan/gen7_cmd_buffer.c b/src/vulkan/gen7_cmd_buffer.c index 5f215c6c312..dd80144270b 100644 --- a/src/vulkan/gen7_cmd_buffer.c +++ b/src/vulkan/gen7_cmd_buffer.c @@ -36,18 +36,17 @@ static void cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer) { static const uint32_t push_constant_opcodes[] = { - [VK_SHADER_STAGE_VERTEX] = 21, - [VK_SHADER_STAGE_TESS_CONTROL] = 25, /* HS */ - [VK_SHADER_STAGE_TESS_EVALUATION] = 26, /* DS */ - [VK_SHADER_STAGE_GEOMETRY] = 22, - [VK_SHADER_STAGE_FRAGMENT] = 23, - [VK_SHADER_STAGE_COMPUTE] = 0, + [MESA_SHADER_VERTEX] = 21, + [MESA_SHADER_TESS_CTRL] = 25, /* HS */ + [MESA_SHADER_TESS_EVAL] = 26, /* DS */ + [MESA_SHADER_GEOMETRY] = 22, + [MESA_SHADER_FRAGMENT] = 23, + [MESA_SHADER_COMPUTE] = 0, }; - VkShaderStage stage; VkShaderStageFlags flushed = 0; - for_each_bit(stage, cmd_buffer->state.push_constants_dirty) { + anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) { struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage); if (state.offset == 0) @@ -60,14 +59,14 @@ cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer) .ConstantBuffer0ReadLength = DIV_ROUND_UP(state.alloc_size, 32), }); - flushed |= 1 << stage; + flushed |= mesa_to_vk_shader_stage(stage); } cmd_buffer->state.push_constants_dirty &= ~flushed; } static VkResult -flush_descriptor_set(struct anv_cmd_buffer *cmd_buffer, VkShaderStage stage) +flush_descriptor_set(struct anv_cmd_buffer *cmd_buffer, gl_shader_stage stage) { struct anv_state surfaces = { 0, }, samplers = { 0, }; VkResult result; @@ -80,21 +79,21 @@ flush_descriptor_set(struct anv_cmd_buffer *cmd_buffer, VkShaderStage stage) return result; static const uint32_t sampler_state_opcodes[] = { - [VK_SHADER_STAGE_VERTEX] = 43, - [VK_SHADER_STAGE_TESS_CONTROL] = 44, /* HS */ - [VK_SHADER_STAGE_TESS_EVALUATION] = 45, /* DS */ - [VK_SHADER_STAGE_GEOMETRY] = 46, - [VK_SHADER_STAGE_FRAGMENT] = 47, - [VK_SHADER_STAGE_COMPUTE] = 0, + [MESA_SHADER_VERTEX] = 43, + [MESA_SHADER_TESS_CTRL] = 44, /* HS */ + [MESA_SHADER_TESS_EVAL] = 45, /* DS */ + [MESA_SHADER_GEOMETRY] = 46, + [MESA_SHADER_FRAGMENT] = 47, + [MESA_SHADER_COMPUTE] = 0, }; static const uint32_t binding_table_opcodes[] = { - [VK_SHADER_STAGE_VERTEX] = 38, - [VK_SHADER_STAGE_TESS_CONTROL] = 39, - [VK_SHADER_STAGE_TESS_EVALUATION] = 40, - [VK_SHADER_STAGE_GEOMETRY] = 41, - [VK_SHADER_STAGE_FRAGMENT] = 42, - [VK_SHADER_STAGE_COMPUTE] = 0, + [MESA_SHADER_VERTEX] = 38, + [MESA_SHADER_TESS_CTRL] = 39, + [MESA_SHADER_TESS_EVAL] = 40, + [MESA_SHADER_GEOMETRY] = 41, + [MESA_SHADER_FRAGMENT] = 42, + [MESA_SHADER_COMPUTE] = 0, }; if (samplers.alloc_size > 0) { @@ -117,12 +116,11 @@ flush_descriptor_set(struct anv_cmd_buffer *cmd_buffer, VkShaderStage stage) GENX_FUNC(GEN7, GEN7) void genX(cmd_buffer_flush_descriptor_sets)(struct anv_cmd_buffer *cmd_buffer) { - VkShaderStage s; VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty & cmd_buffer->state.pipeline->active_stages; VkResult result = VK_SUCCESS; - for_each_bit(s, dirty) { + anv_foreach_stage(s, dirty) { result = 
flush_descriptor_set(cmd_buffer, s); if (result != VK_SUCCESS) break; @@ -140,7 +138,7 @@ genX(cmd_buffer_flush_descriptor_sets)(struct anv_cmd_buffer *cmd_buffer) anv_cmd_buffer_emit_state_base_address(cmd_buffer); /* Re-emit all active binding tables */ - for_each_bit(s, cmd_buffer->state.pipeline->active_stages) { + anv_foreach_stage(s, cmd_buffer->state.pipeline->active_stages) { result = flush_descriptor_set(cmd_buffer, s); /* It had better succeed this time */ @@ -260,11 +258,11 @@ flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer) VkResult result; result = anv_cmd_buffer_emit_samplers(cmd_buffer, - VK_SHADER_STAGE_COMPUTE, &samplers); + MESA_SHADER_COMPUTE, &samplers); if (result != VK_SUCCESS) return result; result = anv_cmd_buffer_emit_binding_table(cmd_buffer, - VK_SHADER_STAGE_COMPUTE, &surfaces); + MESA_SHADER_COMPUTE, &surfaces); if (result != VK_SUCCESS) return result; @@ -310,7 +308,7 @@ cmd_buffer_flush_compute_state(struct anv_cmd_buffer *cmd_buffer) /* FIXME: figure out descriptors for gen7 */ result = flush_compute_descriptor_set(cmd_buffer); assert(result == VK_SUCCESS); - cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE; + cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT; } cmd_buffer->state.compute_dirty = 0; diff --git a/src/vulkan/gen7_pipeline.c b/src/vulkan/gen7_pipeline.c index 95c7bd53591..400b9ae997d 100644 --- a/src/vulkan/gen7_pipeline.c +++ b/src/vulkan/gen7_pipeline.c @@ -468,7 +468,7 @@ genX(graphics_pipeline_create)( else anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS), .KernelStartPointer = pipeline->vs_vec4, - .ScratchSpaceBaseOffset = pipeline->scratch_start[VK_SHADER_STAGE_VERTEX], + .ScratchSpaceBaseOffset = pipeline->scratch_start[MESA_SHADER_VERTEX], .PerThreadScratchSpace = scratch_space(&vue_prog_data->base), .DispatchGRFStartRegisterforURBData = @@ -490,7 +490,7 @@ genX(graphics_pipeline_create)( anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), .KernelStartPointer = pipeline->gs_vec4, - .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_GEOMETRY], + .ScratchSpaceBasePointer = pipeline->scratch_start[MESA_SHADER_GEOMETRY], .PerThreadScratchSpace = scratch_space(&gs_prog_data->base.base), .OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1, @@ -531,7 +531,7 @@ genX(graphics_pipeline_create)( anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS), .KernelStartPointer0 = pipeline->ps_ksp0, - .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_FRAGMENT], + .ScratchSpaceBasePointer = pipeline->scratch_start[MESA_SHADER_FRAGMENT], .PerThreadScratchSpace = scratch_space(&wm_prog_data->base), .MaximumNumberofThreads = device->info.max_wm_threads - 1, diff --git a/src/vulkan/gen8_cmd_buffer.c b/src/vulkan/gen8_cmd_buffer.c index bdccee8a7b7..23dc9ad5748 100644 --- a/src/vulkan/gen8_cmd_buffer.c +++ b/src/vulkan/gen8_cmd_buffer.c @@ -36,18 +36,17 @@ static void cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer) { static const uint32_t push_constant_opcodes[] = { - [VK_SHADER_STAGE_VERTEX] = 21, - [VK_SHADER_STAGE_TESS_CONTROL] = 25, /* HS */ - [VK_SHADER_STAGE_TESS_EVALUATION] = 26, /* DS */ - [VK_SHADER_STAGE_GEOMETRY] = 22, - [VK_SHADER_STAGE_FRAGMENT] = 23, - [VK_SHADER_STAGE_COMPUTE] = 0, + [MESA_SHADER_VERTEX] = 21, + [MESA_SHADER_TESS_CTRL] = 25, /* HS */ + [MESA_SHADER_TESS_EVAL] = 26, /* DS */ + [MESA_SHADER_GEOMETRY] = 22, + [MESA_SHADER_FRAGMENT] = 23, + [MESA_SHADER_COMPUTE] = 0, }; - VkShaderStage stage; VkShaderStageFlags flushed = 0; - 
for_each_bit(stage, cmd_buffer->state.push_constants_dirty) { + anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) { struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage); if (state.offset == 0) @@ -60,7 +59,7 @@ cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer) .ConstantBuffer0ReadLength = DIV_ROUND_UP(state.alloc_size, 32), }); - flushed |= 1 << stage; + flushed |= mesa_to_vk_shader_stage(stage); } cmd_buffer->state.push_constants_dirty &= ~flushed; @@ -493,11 +492,11 @@ flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer) VkResult result; result = anv_cmd_buffer_emit_samplers(cmd_buffer, - VK_SHADER_STAGE_COMPUTE, &samplers); + MESA_SHADER_COMPUTE, &samplers); if (result != VK_SUCCESS) return result; result = anv_cmd_buffer_emit_binding_table(cmd_buffer, - VK_SHADER_STAGE_COMPUTE, &surfaces); + MESA_SHADER_COMPUTE, &surfaces); if (result != VK_SUCCESS) return result; @@ -548,7 +547,7 @@ cmd_buffer_flush_compute_state(struct anv_cmd_buffer *cmd_buffer) (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) { result = flush_compute_descriptor_set(cmd_buffer); assert(result == VK_SUCCESS); - cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE; + cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT; } cmd_buffer->state.compute_dirty = 0; diff --git a/src/vulkan/gen8_pipeline.c b/src/vulkan/gen8_pipeline.c index 106f82d36fa..82a63d3bfb3 100644 --- a/src/vulkan/gen8_pipeline.c +++ b/src/vulkan/gen8_pipeline.c @@ -454,7 +454,7 @@ genX(graphics_pipeline_create)( .BindingTableEntryCount = 0, .ExpectedVertexCount = pipeline->gs_vertex_count, - .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_GEOMETRY], + .ScratchSpaceBasePointer = pipeline->scratch_start[MESA_SHADER_GEOMETRY], .PerThreadScratchSpace = ffs(gs_prog_data->base.base.total_scratch / 2048), .OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1, @@ -512,7 +512,7 @@ genX(graphics_pipeline_create)( .AccessesUAV = false, .SoftwareExceptionEnable = false, - .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_VERTEX], + .ScratchSpaceBasePointer = pipeline->scratch_start[MESA_SHADER_VERTEX], .PerThreadScratchSpace = ffs(vue_prog_data->base.total_scratch / 2048), .DispatchGRFStartRegisterForURBData = @@ -624,7 +624,7 @@ genX(graphics_pipeline_create)( .VectorMaskEnable = true, .SamplerCount = 1, - .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_FRAGMENT], + .ScratchSpaceBasePointer = pipeline->scratch_start[MESA_SHADER_FRAGMENT], .PerThreadScratchSpace = ffs(wm_prog_data->base.total_scratch / 2048), .MaximumNumberofThreadsPerPSD = 64 - num_thread_bias, @@ -709,7 +709,7 @@ VkResult genX(compute_pipeline_create)( pipeline->active_stages = 0; pipeline->total_scratch = 0; - assert(pCreateInfo->stage.stage == VK_SHADER_STAGE_COMPUTE); + assert(pCreateInfo->stage.stage == VK_SHADER_STAGE_COMPUTE_BIT); ANV_FROM_HANDLE(anv_shader_module, module, pCreateInfo->stage.module); anv_pipeline_compile_cs(pipeline, pCreateInfo, module, pCreateInfo->stage.pName); @@ -719,7 +719,7 @@ VkResult genX(compute_pipeline_create)( const struct brw_cs_prog_data *cs_prog_data = &pipeline->cs_prog_data; anv_batch_emit(&pipeline->batch, GENX(MEDIA_VFE_STATE), - .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_COMPUTE], + .ScratchSpaceBasePointer = pipeline->scratch_start[MESA_SHADER_COMPUTE], .PerThreadScratchSpace = ffs(cs_prog_data->base.total_scratch / 2048), .ScratchSpaceBasePointerHigh = 0, 
.StackSize = 0, -- cgit v1.2.3
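
For readers who want to see the new stage-conversion helpers in isolation, below is a minimal standalone sketch. It is not part of the patch as applied: the stub `gl_shader_stage` and `VkShaderStageFlagBits` enum definitions are stand-ins for the real Mesa and Vulkan headers (their values do match the upstream definitions, which is what makes the ffs()-based conversion work), and the `main()` driver is purely illustrative. The bodies of `vk_to_mesa_shader_stage`, `mesa_to_vk_shader_stage`, and `anv_foreach_stage` mirror the anv_private.h hunk above.

```c
/* Standalone sketch: stub enums stand in for the real gl_shader_stage
 * (mesa shader_enums.h) and VkShaderStageFlagBits (vulkan.h) headers so the
 * helpers can be exercised on their own. Helper bodies follow the
 * anv_private.h hunk in this patch.
 */
#include <assert.h>
#include <stdio.h>
#include <strings.h>

typedef enum {
   MESA_SHADER_VERTEX    = 0,
   MESA_SHADER_TESS_CTRL = 1,
   MESA_SHADER_TESS_EVAL = 2,
   MESA_SHADER_GEOMETRY  = 3,
   MESA_SHADER_FRAGMENT  = 4,
   MESA_SHADER_COMPUTE   = 5,
   MESA_SHADER_STAGES    = 6,
} gl_shader_stage;

typedef enum {
   VK_SHADER_STAGE_VERTEX_BIT                  = 0x01,
   VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT    = 0x02,
   VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT = 0x04,
   VK_SHADER_STAGE_GEOMETRY_BIT                = 0x08,
   VK_SHADER_STAGE_FRAGMENT_BIT                = 0x10,
   VK_SHADER_STAGE_COMPUTE_BIT                 = 0x20,
} VkShaderStageFlagBits;
typedef unsigned VkShaderStageFlags;

/* Each VkShaderStageFlagBits bit position equals the corresponding
 * gl_shader_stage index, so conversion is just ffs()/shift. */
static inline gl_shader_stage
vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
{
   assert(__builtin_popcount(vk_stage) == 1);
   return ffs(vk_stage) - 1;
}

static inline VkShaderStageFlagBits
mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
{
   return (1 << mesa_stage);
}

/* Iterate the set bits of a VkShaderStageFlags mask as gl_shader_stage
 * values; this is what replaces the old for_each_bit() loops over
 * VkShaderStage. */
#define anv_foreach_stage(stage, stage_bits)                             \
   for (gl_shader_stage stage, __tmp = (gl_shader_stage)(stage_bits);    \
        stage = __builtin_ffs(__tmp) - 1, __tmp;                         \
        __tmp &= ~(1 << (stage)))

int
main(void)
{
   VkShaderStageFlags stages =
      VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;

   /* Walk a stage mask the way anv_CmdPushConstants now does. */
   anv_foreach_stage(s, stages)
      printf("mesa stage %d active (vk bit 0x%x)\n",
             s, mesa_to_vk_shader_stage(s));

   assert(vk_to_mesa_shader_stage(VK_SHADER_STAGE_COMPUTE_BIT) ==
          MESA_SHADER_COMPUTE);
   return 0;
}
```

Usage-wise, this is why the patch can drop the old `VkShaderStage`-indexed arrays entirely: any `VkShaderStageFlags` bitmask from the API can be walked directly with `anv_foreach_stage`, and per-stage tables are simply sized and indexed by `MESA_SHADER_STAGES`/`gl_shader_stage` instead.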