Diffstat (limited to 'src/intel/vulkan')
 src/intel/vulkan/anv_cmd_buffer.c  |  29
 src/intel/vulkan/anv_pipeline.c    | 145
 src/intel/vulkan/anv_private.h     | 104
 src/intel/vulkan/gen7_cmd_buffer.c |   2
 src/intel/vulkan/gen8_cmd_buffer.c |   6
 src/intel/vulkan/genX_cmd_buffer.c |  42
 src/intel/vulkan/genX_pipeline.c   | 183
 7 files changed, 275 insertions(+), 236 deletions(-)
diff --git a/src/intel/vulkan/anv_cmd_buffer.c b/src/intel/vulkan/anv_cmd_buffer.c
index 8715ba3ef86..790f1433e0a 100644
--- a/src/intel/vulkan/anv_cmd_buffer.c
+++ b/src/intel/vulkan/anv_cmd_buffer.c
@@ -421,35 +421,40 @@ void anv_CmdBindPipeline(
    switch (pipelineBindPoint) {
    case VK_PIPELINE_BIND_POINT_COMPUTE: {
-      if (cmd_buffer->state.compute.base.pipeline == pipeline)
+      struct anv_compute_pipeline *compute_pipeline =
+         anv_pipeline_to_compute(pipeline);
+      if (cmd_buffer->state.compute.pipeline == compute_pipeline)
          return;

-      cmd_buffer->state.compute.base.pipeline = pipeline;
+      cmd_buffer->state.compute.pipeline = compute_pipeline;
       cmd_buffer->state.compute.pipeline_dirty = true;
       set_dirty_for_bind_map(cmd_buffer, MESA_SHADER_COMPUTE,
-                             &pipeline->cs->bind_map);
+                             &compute_pipeline->cs->bind_map);
       break;
    }

-   case VK_PIPELINE_BIND_POINT_GRAPHICS:
-      if (cmd_buffer->state.gfx.base.pipeline == pipeline)
+   case VK_PIPELINE_BIND_POINT_GRAPHICS: {
+      struct anv_graphics_pipeline *gfx_pipeline =
+         anv_pipeline_to_graphics(pipeline);
+      if (cmd_buffer->state.gfx.pipeline == gfx_pipeline)
          return;

-      cmd_buffer->state.gfx.base.pipeline = pipeline;
-      cmd_buffer->state.gfx.vb_dirty |= pipeline->vb_used;
+      cmd_buffer->state.gfx.pipeline = gfx_pipeline;
+      cmd_buffer->state.gfx.vb_dirty |= gfx_pipeline->vb_used;
       cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;

-      anv_foreach_stage(stage, pipeline->active_stages) {
+      anv_foreach_stage(stage, gfx_pipeline->active_stages) {
          set_dirty_for_bind_map(cmd_buffer, stage,
-                                &pipeline->shaders[stage]->bind_map);
+                                &gfx_pipeline->shaders[stage]->bind_map);
       }

       /* Apply the dynamic state from the pipeline */
       cmd_buffer->state.gfx.dirty |=
          anv_dynamic_state_copy(&cmd_buffer->state.gfx.dynamic,
-                                &pipeline->dynamic_state,
-                                pipeline->dynamic_state_mask);
+                                &gfx_pipeline->dynamic_state,
+                                gfx_pipeline->dynamic_state_mask);
       break;
+   }

    default:
       assert(!"invalid bind point");
@@ -819,7 +824,7 @@ anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
 {
    struct anv_push_constants *data =
       &cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
-   struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline;
+   struct anv_compute_pipeline *pipeline = cmd_buffer->state.compute.pipeline;
    const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
    const struct anv_push_range *range = &pipeline->cs->bind_map.push_ranges[0];
diff --git a/src/intel/vulkan/anv_pipeline.c b/src/intel/vulkan/anv_pipeline.c
index 58fcaed3b6e..7c324d54408 100644
--- a/src/intel/vulkan/anv_pipeline.c
+++ b/src/intel/vulkan/anv_pipeline.c
@@ -298,21 +298,30 @@ void anv_DestroyPipeline(
    ralloc_free(pipeline->mem_ctx);

-   if (pipeline->blend_state.map)
-      anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
-
    switch (pipeline->type) {
-   case ANV_PIPELINE_GRAPHICS:
+   case ANV_PIPELINE_GRAPHICS: {
+      struct anv_graphics_pipeline *gfx_pipeline =
+         anv_pipeline_to_graphics(pipeline);
+
+      if (gfx_pipeline->blend_state.map)
+         anv_state_pool_free(&device->dynamic_state_pool, gfx_pipeline->blend_state);
+
       for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
-         if (pipeline->shaders[s])
-            anv_shader_bin_unref(device, pipeline->shaders[s]);
+         if (gfx_pipeline->shaders[s])
+            anv_shader_bin_unref(device, gfx_pipeline->shaders[s]);
       }
       break;
+   }
+
+   case ANV_PIPELINE_COMPUTE: {
+      struct anv_compute_pipeline *compute_pipeline =
+         anv_pipeline_to_compute(pipeline);
+
+      if (compute_pipeline->cs)
+         anv_shader_bin_unref(device, compute_pipeline->cs);

-   case ANV_PIPELINE_COMPUTE:
-      if (pipeline->cs)
-         anv_shader_bin_unref(device, pipeline->cs);
       break;
+   }

    default:
       unreachable("invalid pipeline type");
@@ -571,7 +580,7 @@ anv_pipeline_hash_shader(const struct anv_shader_module *module,
 }

 static void
-anv_pipeline_hash_graphics(struct anv_pipeline *pipeline,
+anv_pipeline_hash_graphics(struct anv_graphics_pipeline *pipeline,
                            struct anv_pipeline_layout *layout,
                            struct anv_pipeline_stage *stages,
                            unsigned char *sha1_out)
@@ -585,7 +594,7 @@ anv_pipeline_hash_graphics(struct anv_pipeline *pipeline,
    if (layout)
       _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));

-   const bool rba = pipeline->device->robust_buffer_access;
+   const bool rba = pipeline->base.device->robust_buffer_access;
    _mesa_sha1_update(&ctx, &rba, sizeof(rba));

    for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
@@ -600,7 +609,7 @@ anv_pipeline_hash_graphics(struct anv_pipeline *pipeline,
 }

 static void
-anv_pipeline_hash_compute(struct anv_pipeline *pipeline,
+anv_pipeline_hash_compute(struct anv_compute_pipeline *pipeline,
                           struct anv_pipeline_layout *layout,
                           struct anv_pipeline_stage *stage,
                           unsigned char *sha1_out)
@@ -611,7 +620,7 @@ anv_pipeline_hash_compute(struct anv_pipeline *pipeline,
    if (layout)
       _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));

-   const bool rba = pipeline->device->robust_buffer_access;
+   const bool rba = pipeline->base.device->robust_buffer_access;
    _mesa_sha1_update(&ctx, &rba, sizeof(rba));

    _mesa_sha1_update(&ctx, stage->shader_sha1,
@@ -669,14 +678,17 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
    nir_shader *nir = stage->nir;

    if (nir->info.stage == MESA_SHADER_FRAGMENT) {
-      NIR_PASS_V(nir, nir_lower_wpos_center, pipeline->sample_shading_enable);
+      NIR_PASS_V(nir, nir_lower_wpos_center,
+                 anv_pipeline_to_graphics(pipeline)->sample_shading_enable);
       NIR_PASS_V(nir, nir_lower_input_attachments, true);
    }

    NIR_PASS_V(nir, anv_nir_lower_ycbcr_textures, layout);

-   if (pipeline->type == ANV_PIPELINE_GRAPHICS)
-      NIR_PASS_V(nir, anv_nir_lower_multiview, pipeline->subpass->view_mask);
+   if (pipeline->type == ANV_PIPELINE_GRAPHICS) {
+      NIR_PASS_V(nir, anv_nir_lower_multiview,
+                 anv_pipeline_to_graphics(pipeline)->subpass->view_mask);
+   }

    nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
@@ -1106,7 +1118,7 @@ anv_pipeline_add_executables(struct anv_pipeline *pipeline,
 }

 static VkResult
-anv_pipeline_compile_graphics(struct anv_pipeline *pipeline,
+anv_pipeline_compile_graphics(struct anv_graphics_pipeline *pipeline,
                               struct anv_pipeline_cache *cache,
                               const VkGraphicsPipelineCreateInfo *info)
 {
@@ -1115,7 +1127,7 @@ anv_pipeline_compile_graphics(struct anv_pipeline *pipeline,
    };
    int64_t pipeline_start = os_time_get_nano();

-   const struct brw_compiler *compiler = pipeline->device->physical->compiler;
+   const struct brw_compiler *compiler = pipeline->base.device->physical->compiler;
    struct anv_pipeline_stage stages[MESA_SHADER_STAGES] = {};

    pipeline->active_stages = 0;
@@ -1139,7 +1151,7 @@ anv_pipeline_compile_graphics(struct anv_pipeline *pipeline,
                                stages[stage].spec_info,
                                stages[stage].shader_sha1);

-      const struct gen_device_info *devinfo = &pipeline->device->info;
+      const struct gen_device_info *devinfo = &pipeline->base.device->info;
       switch (stage) {
       case MESA_SHADER_VERTEX:
          populate_vs_prog_key(devinfo, sinfo->flags, &stages[stage].key.vs);
@@ -1191,7 +1203,7 @@ anv_pipeline_compile_graphics(struct anv_pipeline *pipeline,
    }

    const bool skip_cache_lookup =
-      (pipeline->flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR);
+      (pipeline->base.flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR);

    if (!skip_cache_lookup) {
       unsigned found = 0;
@@ -1204,7 +1216,7 @@ anv_pipeline_compile_graphics(struct anv_pipeline *pipeline,

          bool cache_hit;
          struct anv_shader_bin *bin =
-            anv_device_search_for_kernel(pipeline->device, cache,
+            anv_device_search_for_kernel(pipeline->base.device, cache,
                                          &stages[s].cache_key,
                                          sizeof(stages[s].cache_key), &cache_hit);
          if (bin) {
@@ -1230,7 +1242,7 @@ anv_pipeline_compile_graphics(struct anv_pipeline *pipeline,
             if (!stages[s].entrypoint)
                continue;

-            anv_pipeline_add_executables(pipeline, &stages[s],
+            anv_pipeline_add_executables(&pipeline->base, &stages[s],
                                          pipeline->shaders[s]);
          }
          goto done;
@@ -1241,7 +1253,7 @@ anv_pipeline_compile_graphics(struct anv_pipeline *pipeline,
           */
          assert(found < __builtin_popcount(pipeline->active_stages));

-         vk_debug_report(&pipeline->device->physical->instance->debug_report_callbacks,
+         vk_debug_report(&pipeline->base.device->physical->instance->debug_report_callbacks,
                          VK_DEBUG_REPORT_WARNING_BIT_EXT |
                          VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                          VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT,
@@ -1258,7 +1270,7 @@ anv_pipeline_compile_graphics(struct anv_pipeline *pipeline,
          for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
             stages[s].feedback.flags = 0;
             if (pipeline->shaders[s]) {
-               anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
+               anv_shader_bin_unref(pipeline->base.device, pipeline->shaders[s]);
                pipeline->shaders[s] = NULL;
             }
          }
@@ -1281,7 +1293,7 @@ anv_pipeline_compile_graphics(struct anv_pipeline *pipeline,
          .sampler_to_descriptor = stages[s].sampler_to_descriptor
       };

-      stages[s].nir = anv_pipeline_stage_get_nir(pipeline, cache,
+      stages[s].nir = anv_pipeline_stage_get_nir(&pipeline->base, cache,
                                                  pipeline_ctx,
                                                  &stages[s]);
       if (stages[s].nir == NULL) {
@@ -1336,27 +1348,27 @@ anv_pipeline_compile_graphics(struct anv_pipeline *pipeline,
          s == MESA_SHADER_GEOMETRY)
          xfb_info = nir_gather_xfb_info(stages[s].nir, stage_ctx);

-      anv_pipeline_lower_nir(pipeline, stage_ctx, &stages[s], layout);
+      anv_pipeline_lower_nir(&pipeline->base, stage_ctx, &stages[s], layout);

       switch (s) {
       case MESA_SHADER_VERTEX:
-         anv_pipeline_compile_vs(compiler, stage_ctx, pipeline->device,
+         anv_pipeline_compile_vs(compiler, stage_ctx, pipeline->base.device,
                                  &stages[s]);
          break;
       case MESA_SHADER_TESS_CTRL:
-         anv_pipeline_compile_tcs(compiler, stage_ctx, pipeline->device,
+         anv_pipeline_compile_tcs(compiler, stage_ctx, pipeline->base.device,
                                   &stages[s], prev_stage);
          break;
       case MESA_SHADER_TESS_EVAL:
-         anv_pipeline_compile_tes(compiler, stage_ctx, pipeline->device,
+         anv_pipeline_compile_tes(compiler, stage_ctx, pipeline->base.device,
                                   &stages[s], prev_stage);
          break;
       case MESA_SHADER_GEOMETRY:
-         anv_pipeline_compile_gs(compiler, stage_ctx, pipeline->device,
+         anv_pipeline_compile_gs(compiler, stage_ctx, pipeline->base.device,
                                  &stages[s], prev_stage);
          break;
       case MESA_SHADER_FRAGMENT:
-         anv_pipeline_compile_fs(compiler, stage_ctx, pipeline->device,
+         anv_pipeline_compile_fs(compiler, stage_ctx, pipeline->base.device,
                                  &stages[s], prev_stage);
          break;
       default:
@@ -1372,7 +1384,7 @@ anv_pipeline_compile_graphics(struct anv_pipeline *pipeline,
                                            &stages[s].bind_map);

       struct anv_shader_bin *bin =
-         anv_device_upload_kernel(pipeline->device, cache, s,
+         anv_device_upload_kernel(pipeline->base.device, cache, s,
                                   &stages[s].cache_key,
                                   sizeof(stages[s].cache_key),
                                   stages[s].code,
@@ -1389,7 +1401,7 @@ anv_pipeline_compile_graphics(struct anv_pipeline *pipeline,
          goto fail;
       }

-      anv_pipeline_add_executables(pipeline, &stages[s], bin);
+      anv_pipeline_add_executables(&pipeline->base, &stages[s], bin);

       pipeline->shaders[s] = bin;
       ralloc_free(stage_ctx);
@@ -1408,7 +1420,7 @@ done:
       /* This can happen if we decided to implicitly disable the fragment
        * shader.  See anv_pipeline_compile_fs().
        */
-      anv_shader_bin_unref(pipeline->device,
+      anv_shader_bin_unref(pipeline->base.device,
                            pipeline->shaders[MESA_SHADER_FRAGMENT]);
       pipeline->shaders[MESA_SHADER_FRAGMENT] = NULL;
       pipeline->active_stages &= ~VK_SHADER_STAGE_FRAGMENT_BIT;
@@ -1435,7 +1447,7 @@ fail:

    for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
       if (pipeline->shaders[s])
-         anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
+         anv_shader_bin_unref(pipeline->base.device, pipeline->shaders[s]);
    }

    return result;
@@ -1454,7 +1466,7 @@ shared_type_info(const struct glsl_type *type, unsigned *size, unsigned *align)
 }

 VkResult
-anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
+anv_pipeline_compile_cs(struct anv_compute_pipeline *pipeline,
                         struct anv_pipeline_cache *cache,
                         const VkComputePipelineCreateInfo *info,
                         const struct anv_shader_module *module,
@@ -1466,7 +1478,7 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
    };
    int64_t pipeline_start = os_time_get_nano();

-   const struct brw_compiler *compiler = pipeline->device->physical->compiler;
+   const struct brw_compiler *compiler = pipeline->base.device->physical->compiler;

    struct anv_pipeline_stage stage = {
       .stage = MESA_SHADER_COMPUTE,
@@ -1492,19 +1504,19 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
       vk_find_struct_const(info->stage.pNext,
                            PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT);

-   populate_cs_prog_key(&pipeline->device->info, info->stage.flags,
+   populate_cs_prog_key(&pipeline->base.device->info, info->stage.flags,
                         rss_info, &stage.key.cs);

    ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);

    const bool skip_cache_lookup =
-      (pipeline->flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR);
+      (pipeline->base.flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR);

    anv_pipeline_hash_compute(pipeline, layout, &stage, stage.cache_key.sha1);

    bool cache_hit = false;
    if (!skip_cache_lookup) {
-      bin = anv_device_search_for_kernel(pipeline->device, cache,
+      bin = anv_device_search_for_kernel(pipeline->base.device, cache,
                                          &stage.cache_key,
                                          sizeof(stage.cache_key),
                                          &cache_hit);
@@ -1525,7 +1537,7 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
          .set = ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS,
       };

-      stage.nir = anv_pipeline_stage_get_nir(pipeline, cache, mem_ctx, &stage);
+      stage.nir = anv_pipeline_stage_get_nir(&pipeline->base, cache, mem_ctx, &stage);
       if (stage.nir == NULL) {
          ralloc_free(mem_ctx);
          return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
@@ -1533,7 +1545,7 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline,

       NIR_PASS_V(stage.nir, anv_nir_add_base_work_group_id);

-      anv_pipeline_lower_nir(pipeline, mem_ctx, &stage, layout);
+      anv_pipeline_lower_nir(&pipeline->base, mem_ctx, &stage, layout);

       NIR_PASS_V(stage.nir, nir_lower_vars_to_explicit_types,
                  nir_var_mem_shared, shared_type_info);
@@ -1541,7 +1553,7 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                  nir_var_mem_shared, nir_address_format_32bit_offset);

       stage.num_stats = 1;
-      stage.code = brw_compile_cs(compiler, pipeline->device, mem_ctx,
+      stage.code = brw_compile_cs(compiler, pipeline->base.device, mem_ctx,
                                   &stage.key.cs, &stage.prog_data.cs,
                                   stage.nir, -1, stage.stats, NULL);
       if (stage.code == NULL) {
@@ -1558,7 +1570,7 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
       }

       const unsigned code_size = stage.prog_data.base.program_size;
-      bin = anv_device_upload_kernel(pipeline->device, cache,
+      bin = anv_device_upload_kernel(pipeline->base.device, cache,
                                      MESA_SHADER_COMPUTE,
                                      &stage.cache_key, sizeof(stage.cache_key),
                                      stage.code, code_size,
@@ -1576,7 +1588,7 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
       stage.feedback.duration = os_time_get_nano() - stage_start;
    }

-   anv_pipeline_add_executables(pipeline, &stage, bin);
+   anv_pipeline_add_executables(&pipeline->base, &stage, bin);

    ralloc_free(mem_ctx);

@@ -1597,7 +1609,6 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
       create_feedback->pPipelineStageCreationFeedbacks[0] = stage.feedback;
    }

-   pipeline->active_stages = VK_SHADER_STAGE_COMPUTE_BIT;
    pipeline->cs = bin;

    return VK_SUCCESS;
@@ -1617,7 +1628,7 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
  * @param[in]  pCreateInfo  Source of non_dynamic state to be copied.
  */
 static void
-copy_non_dynamic_state(struct anv_pipeline *pipeline,
+copy_non_dynamic_state(struct anv_graphics_pipeline *pipeline,
                        const VkGraphicsPipelineCreateInfo *pCreateInfo)
 {
    anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
@@ -1830,7 +1841,7 @@ anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm)
 }

 VkResult
-anv_pipeline_init(struct anv_pipeline *pipeline,
+anv_pipeline_init(struct anv_graphics_pipeline *pipeline,
                   struct anv_device *device,
                   struct anv_pipeline_cache *cache,
                   const VkGraphicsPipelineCreateInfo *pCreateInfo,
@@ -1843,25 +1854,25 @@ anv_pipeline_init(struct anv_pipeline *pipeline,
    if (alloc == NULL)
       alloc = &device->alloc;

-   pipeline->device = device;
-   pipeline->type = ANV_PIPELINE_GRAPHICS;
+   pipeline->base.device = device;
+   pipeline->base.type = ANV_PIPELINE_GRAPHICS;

    ANV_FROM_HANDLE(anv_render_pass, render_pass, pCreateInfo->renderPass);
    assert(pCreateInfo->subpass < render_pass->subpass_count);
    pipeline->subpass = &render_pass->subpasses[pCreateInfo->subpass];

-   result = anv_reloc_list_init(&pipeline->batch_relocs, alloc);
+   result = anv_reloc_list_init(&pipeline->base.batch_relocs, alloc);
    if (result != VK_SUCCESS)
       return result;

-   pipeline->batch.alloc = alloc;
-   pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
-   pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
-   pipeline->batch.relocs = &pipeline->batch_relocs;
-   pipeline->batch.status = VK_SUCCESS;
+   pipeline->base.batch.alloc = alloc;
+   pipeline->base.batch.next = pipeline->base.batch.start = pipeline->base.batch_data;
+   pipeline->base.batch.end = pipeline->base.batch.start + sizeof(pipeline->base.batch_data);
+   pipeline->base.batch.relocs = &pipeline->base.batch_relocs;
+   pipeline->base.batch.status = VK_SUCCESS;

-   pipeline->mem_ctx = ralloc_context(NULL);
-   pipeline->flags = pCreateInfo->flags;
+   pipeline->base.mem_ctx = ralloc_context(NULL);
+   pipeline->base.flags = pCreateInfo->flags;

    assert(pCreateInfo->pRasterizationState);

@@ -1888,18 +1899,18 @@ anv_pipeline_init(struct anv_pipeline *pipeline,
     */
    memset(pipeline->shaders, 0, sizeof(pipeline->shaders));

-   util_dynarray_init(&pipeline->executables, pipeline->mem_ctx);
+   util_dynarray_init(&pipeline->base.executables, pipeline->base.mem_ctx);

    result = anv_pipeline_compile_graphics(pipeline, cache, pCreateInfo);
    if (result != VK_SUCCESS) {
-      ralloc_free(pipeline->mem_ctx);
-      anv_reloc_list_finish(&pipeline->batch_relocs, alloc);
+      ralloc_free(pipeline->base.mem_ctx);
+      anv_reloc_list_finish(&pipeline->base.batch_relocs, alloc);
       return result;
    }

    assert(pipeline->shaders[MESA_SHADER_VERTEX]);

-   anv_pipeline_setup_l3_config(pipeline, false);
+   anv_pipeline_setup_l3_config(&pipeline->base, false);

    const VkPipelineVertexInputStateCreateInfo *vi_info =
       pCreateInfo->pVertexInputState;
@@ -2043,12 +2054,14 @@ VkResult anv_GetPipelineExecutableStatisticsKHR(

    const struct brw_stage_prog_data *prog_data;
    switch (pipeline->type) {
-   case ANV_PIPELINE_GRAPHICS:
-      prog_data = pipeline->shaders[exe->stage]->prog_data;
+   case ANV_PIPELINE_GRAPHICS: {
+      prog_data = anv_pipeline_to_graphics(pipeline)->shaders[exe->stage]->prog_data;
       break;
-   case ANV_PIPELINE_COMPUTE:
-      prog_data = pipeline->cs->prog_data;
+   }
+   case ANV_PIPELINE_COMPUTE: {
+      prog_data = anv_pipeline_to_compute(pipeline)->cs->prog_data;
       break;
+   }
    default:
       unreachable("invalid pipeline type");
    }
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index f04495c7958..defe44a3383 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -2632,8 +2632,6 @@ struct anv_vb_cache_range {
  * per-stage array in anv_cmd_state.
  */
 struct anv_cmd_pipeline_state {
-   struct anv_pipeline *pipeline;
-
    struct anv_descriptor_set *descriptors[MAX_SETS];
    struct anv_push_descriptor_set *push_descriptors[MAX_SETS];
 };
@@ -2648,6 +2646,8 @@ struct anv_cmd_pipeline_state {
 struct anv_cmd_graphics_state {
    struct anv_cmd_pipeline_state base;

+   struct anv_graphics_pipeline *pipeline;
+
    anv_cmd_dirty_mask_t dirty;
    uint32_t vb_dirty;

@@ -2675,6 +2675,8 @@ struct anv_cmd_graphics_state {
 struct anv_cmd_compute_state {
    struct anv_cmd_pipeline_state base;

+   struct anv_compute_pipeline *pipeline;
+
    bool pipeline_dirty;

    struct anv_address num_workgroups;
@@ -3166,40 +3168,36 @@ enum anv_pipeline_type {

 struct anv_pipeline {
    struct anv_device *                          device;
+
    struct anv_batch                             batch;
-   uint32_t                                     batch_data[512];
    struct anv_reloc_list                        batch_relocs;
-   anv_cmd_dirty_mask_t                         dynamic_state_mask;
-   struct anv_dynamic_state                     dynamic_state;
+   uint32_t                                     batch_data[512];

    void *                                       mem_ctx;

    enum anv_pipeline_type                       type;
    VkPipelineCreateFlags                        flags;

-   struct anv_subpass *                         subpass;
-
-   struct anv_shader_bin *                      shaders[MESA_SHADER_STAGES];
-   struct anv_shader_bin *                      cs;

    struct util_dynarray                         executables;

    const struct gen_l3_config *                 l3_config;
+};

-   VkShaderStageFlags                           active_stages;
-   struct anv_state                             blend_state;
+struct anv_graphics_pipeline {
+   struct anv_pipeline                          base;

-   uint32_t                                     vb_used;
-   struct anv_pipeline_vertex_binding {
-      uint32_t                                  stride;
-      bool                                      instanced;
-      uint32_t                                  instance_divisor;
-   } vb[MAX_VBS];
+   anv_cmd_dirty_mask_t                         dynamic_state_mask;
+   struct anv_dynamic_state                     dynamic_state;

-   bool                                         primitive_restart;
    uint32_t                                     topology;

-   uint32_t                                     cs_right_mask;
+   struct anv_subpass *                         subpass;
+
+   struct anv_shader_bin *                      shaders[MESA_SHADER_STAGES];
+
+   VkShaderStageFlags                           active_stages;

+   bool                                         primitive_restart;
    bool                                         writes_depth;
    bool                                         depth_test_enable;
    bool                                         writes_stencil;
@@ -3210,6 +3208,15 @@
    bool                                         kill_pixel;
    bool                                         depth_bounds_test_enable;

+   struct anv_state                             blend_state;
+
+   uint32_t                                     vb_used;
+   struct anv_pipeline_vertex_binding {
+      uint32_t                                  stride;
+      bool                                      instanced;
+      uint32_t                                  instance_divisor;
+   } vb[MAX_VBS];
+
    struct {
       uint32_t                                  sf[7];
       uint32_t                                  depth_stencil_state[3];
@@ -3224,44 +3231,61 @@
    struct {
       uint32_t                                  wm_depth_stencil[4];
    } gen9;
+};
+
+struct anv_compute_pipeline {
+   struct anv_pipeline                          base;
+
+   struct anv_shader_bin *                      cs;
+   uint32_t                                     cs_right_mask;
    uint32_t                                     interface_descriptor_data[8];
 };

+#define ANV_DECL_PIPELINE_DOWNCAST(pipe_type, pipe_enum)             \
+   static inline struct anv_##pipe_type##_pipeline *                 \
+   anv_pipeline_to_##pipe_type(struct anv_pipeline *pipeline)        \
+   {                                                                 \
+      assert(pipeline->type == pipe_enum);                           \
+      return (struct anv_##pipe_type##_pipeline *) pipeline;         \
+   }
+
+ANV_DECL_PIPELINE_DOWNCAST(graphics, ANV_PIPELINE_GRAPHICS)
+ANV_DECL_PIPELINE_DOWNCAST(compute, ANV_PIPELINE_COMPUTE)
+
 static inline bool
-anv_pipeline_has_stage(const struct anv_pipeline *pipeline,
+anv_pipeline_has_stage(const struct anv_graphics_pipeline *pipeline,
                        gl_shader_stage stage)
 {
    return (pipeline->active_stages & mesa_to_vk_shader_stage(stage)) != 0;
 }

-#define ANV_DECL_GET_PROG_DATA_FUNC(prefix, stage)                   \
-static inline const struct brw_##prefix##_prog_data *                \
-get_##prefix##_prog_data(const struct anv_pipeline *pipeline)        \
-{                                                                    \
-   if (anv_pipeline_has_stage(pipeline, stage)) {                    \
-      return (const struct brw_##prefix##_prog_data *)               \
-             pipeline->shaders[stage]->prog_data;                    \
-   } else {                                                          \
-      return NULL;                                                   \
-   }                                                                 \
+#define ANV_DECL_GET_GRAPHICS_PROG_DATA_FUNC(prefix, stage)             \
+static inline const struct brw_##prefix##_prog_data *                   \
+get_##prefix##_prog_data(const struct anv_graphics_pipeline *pipeline)  \
+{                                                                       \
+   if (anv_pipeline_has_stage(pipeline, stage)) {                       \
+      return (const struct brw_##prefix##_prog_data *)                  \
+             pipeline->shaders[stage]->prog_data;                       \
+   } else {                                                             \
+      return NULL;                                                      \
+   }                                                                    \
 }

-ANV_DECL_GET_PROG_DATA_FUNC(vs, MESA_SHADER_VERTEX)
-ANV_DECL_GET_PROG_DATA_FUNC(tcs, MESA_SHADER_TESS_CTRL)
-ANV_DECL_GET_PROG_DATA_FUNC(tes, MESA_SHADER_TESS_EVAL)
-ANV_DECL_GET_PROG_DATA_FUNC(gs, MESA_SHADER_GEOMETRY)
-ANV_DECL_GET_PROG_DATA_FUNC(wm, MESA_SHADER_FRAGMENT)
+ANV_DECL_GET_GRAPHICS_PROG_DATA_FUNC(vs, MESA_SHADER_VERTEX)
+ANV_DECL_GET_GRAPHICS_PROG_DATA_FUNC(tcs, MESA_SHADER_TESS_CTRL)
+ANV_DECL_GET_GRAPHICS_PROG_DATA_FUNC(tes, MESA_SHADER_TESS_EVAL)
+ANV_DECL_GET_GRAPHICS_PROG_DATA_FUNC(gs, MESA_SHADER_GEOMETRY)
+ANV_DECL_GET_GRAPHICS_PROG_DATA_FUNC(wm, MESA_SHADER_FRAGMENT)

 static inline const struct brw_cs_prog_data *
-get_cs_prog_data(const struct anv_pipeline *pipeline)
+get_cs_prog_data(const struct anv_compute_pipeline *pipeline)
 {
-   assert(pipeline->type == ANV_PIPELINE_COMPUTE);
+   assert(pipeline->cs);
    return (const struct brw_cs_prog_data *) pipeline->cs->prog_data;
 }

 static inline const struct brw_vue_prog_data *
-anv_pipeline_get_last_vue_prog_data(const struct anv_pipeline *pipeline)
+anv_pipeline_get_last_vue_prog_data(const struct anv_graphics_pipeline *pipeline)
 {
    if (anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY))
       return &get_gs_prog_data(pipeline)->base;
@@ -3272,13 +3296,13 @@ anv_pipeline_get_last_vue_prog_data(const struct anv_pipeline *pipeline)
 }

 VkResult
-anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
+anv_pipeline_init(struct anv_graphics_pipeline *pipeline, struct anv_device *device,
                   struct anv_pipeline_cache *cache,
                   const VkGraphicsPipelineCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *alloc);

 VkResult
-anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
+anv_pipeline_compile_cs(struct anv_compute_pipeline *pipeline,
                         struct anv_pipeline_cache *cache,
                         const VkComputePipelineCreateInfo *info,
                         const struct anv_shader_module *module,
diff --git a/src/intel/vulkan/gen7_cmd_buffer.c b/src/intel/vulkan/gen7_cmd_buffer.c
index c5281e527ef..4977cc97aae 100644
--- a/src/intel/vulkan/gen7_cmd_buffer.c
+++ b/src/intel/vulkan/gen7_cmd_buffer.c
@@ -195,7 +195,7 @@ get_depth_format(struct anv_cmd_buffer *cmd_buffer)
 void
 genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
 {
-   struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
+   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
    struct anv_dynamic_state *d = &cmd_buffer->state.gfx.dynamic;

    if (cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_PIPELINE |
diff --git a/src/intel/vulkan/gen8_cmd_buffer.c b/src/intel/vulkan/gen8_cmd_buffer.c
index d7ee78ccb65..fe136ecd928 100644
--- a/src/intel/vulkan/gen8_cmd_buffer.c
+++ b/src/intel/vulkan/gen8_cmd_buffer.c
@@ -244,7 +244,7 @@ want_depth_pma_fix(struct anv_cmd_buffer *cmd_buffer)
       return false;

    /* 3DSTATE_PS_EXTRA::PixelShaderValid */
-   struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
+   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
    if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT))
       return false;

@@ -354,7 +354,7 @@ want_stencil_pma_fix(struct anv_cmd_buffer *cmd_buffer)
    assert(ds_iview && ds_iview->image->planes[0].aux_usage == ISL_AUX_USAGE_HIZ);

    /* 3DSTATE_PS_EXTRA::PixelShaderValid */
-   struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
+   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
    if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT))
       return false;

@@ -409,7 +409,7 @@ want_stencil_pma_fix(struct anv_cmd_buffer *cmd_buffer)
 void
 genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
 {
-   struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
+   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
    struct anv_dynamic_state *d = &cmd_buffer->state.gfx.dynamic;

    if (cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_PIPELINE |
diff --git a/src/intel/vulkan/genX_cmd_buffer.c b/src/intel/vulkan/genX_cmd_buffer.c
index b76dcd893f6..0e304694004 100644
--- a/src/intel/vulkan/genX_cmd_buffer.c
+++ b/src/intel/vulkan/genX_cmd_buffer.c
@@ -2379,7 +2379,7 @@ static void
 cmd_buffer_alloc_push_constants(struct anv_cmd_buffer *cmd_buffer)
 {
    VkShaderStageFlags stages =
-      cmd_buffer->state.gfx.base.pipeline->active_stages;
+      cmd_buffer->state.gfx.pipeline->active_stages;

    /* In order to avoid thrash, we assume that vertex and fragment stages
     * always exist.  In the rare case where one is missing *and* the other
@@ -2991,7 +2991,7 @@ cmd_buffer_emit_push_constant(struct anv_cmd_buffer *cmd_buffer,
                               unsigned buffer_count)
 {
    const struct anv_cmd_graphics_state *gfx_state = &cmd_buffer->state.gfx;
-   const struct anv_pipeline *pipeline = gfx_state->base.pipeline;
+   const struct anv_graphics_pipeline *pipeline = gfx_state->pipeline;

    static const uint32_t push_constant_opcodes[] = {
       [MESA_SHADER_VERTEX] = 21,
@@ -3083,7 +3083,7 @@ cmd_buffer_emit_push_constant_all(struct anv_cmd_buffer *cmd_buffer,
    }

    const struct anv_cmd_graphics_state *gfx_state = &cmd_buffer->state.gfx;
-   const struct anv_pipeline *pipeline = gfx_state->base.pipeline;
+   const struct anv_graphics_pipeline *pipeline = gfx_state->pipeline;

    static const uint32_t push_constant_opcodes[] = {
       [MESA_SHADER_VERTEX] = 21,
@@ -3130,7 +3130,7 @@ cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer,
 {
    VkShaderStageFlags flushed = 0;
    const struct anv_cmd_graphics_state *gfx_state = &cmd_buffer->state.gfx;
-   const struct anv_pipeline *pipeline = gfx_state->base.pipeline;
+   const struct anv_graphics_pipeline *pipeline = gfx_state->pipeline;

 #if GEN_GEN >= 12
    uint32_t nobuffer_stages = 0;
@@ -3218,7 +3218,7 @@ cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer,
 void
 genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
 {
-   struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
+   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
    uint32_t *p;

    uint32_t vb_emit = cmd_buffer->state.gfx.vb_dirty & pipeline->vb_used;
@@ -3227,7 +3227,7 @@ genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)

    assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);

-   genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->l3_config);
+   genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->base.l3_config);

    genX(cmd_buffer_emit_hashing_mode)(cmd_buffer, UINT_MAX, UINT_MAX, 1);
@@ -3311,7 +3311,7 @@ genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
 #endif

    if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_PIPELINE) {
-      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
+      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->base.batch);

       /* If the pipeline changed, we may need to re-allocate push constant
        * space in the URB.
@@ -3465,7 +3465,7 @@ static void
 update_dirty_vbs_for_gen8_vb_flush(struct anv_cmd_buffer *cmd_buffer,
                                    uint32_t access_type)
 {
-   struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
+   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);

    uint64_t vb_used = pipeline->vb_used;
@@ -3488,7 +3488,7 @@ void genX(CmdDraw)(
    uint32_t                                    firstInstance)
 {
    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
-   struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
+   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);

    if (anv_batch_has_error(&cmd_buffer->batch))
@@ -3538,7 +3538,7 @@ void genX(CmdDrawIndexed)(
    uint32_t                                    firstInstance)
 {
    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
-   struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
+   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);

    if (anv_batch_has_error(&cmd_buffer->batch))
@@ -3599,7 +3599,7 @@ void genX(CmdDrawIndirectByteCountEXT)(
 #if GEN_IS_HASWELL || GEN_GEN >= 8
    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
    ANV_FROM_HANDLE(anv_buffer, counter_buffer, counterBuffer);
-   struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
+   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);

    /* firstVertex is always zero for this draw function */
@@ -3701,7 +3701,7 @@ void genX(CmdDrawIndirect)(
 {
    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
-   struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
+   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);

    if (anv_batch_has_error(&cmd_buffer->batch))
@@ -3750,7 +3750,7 @@ void genX(CmdDrawIndexedIndirect)(
 {
    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
-   struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
+   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);

    if (anv_batch_has_error(&cmd_buffer->batch))
@@ -3894,7 +3894,7 @@ void genX(CmdDrawIndirectCount)(
    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
    ANV_FROM_HANDLE(anv_buffer, count_buffer, _countBuffer);
    struct anv_cmd_state *cmd_state = &cmd_buffer->state;
-   struct anv_pipeline *pipeline = cmd_state->gfx.base.pipeline;
+   struct anv_graphics_pipeline *pipeline = cmd_state->gfx.pipeline;
    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);

    if (anv_batch_has_error(&cmd_buffer->batch))
@@ -3960,7 +3960,7 @@ void genX(CmdDrawIndexedIndirectCount)(
    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
    ANV_FROM_HANDLE(anv_buffer, count_buffer, _countBuffer);
    struct anv_cmd_state *cmd_state = &cmd_buffer->state;
-   struct anv_pipeline *pipeline = cmd_state->gfx.base.pipeline;
+   struct anv_graphics_pipeline *pipeline = cmd_state->gfx.pipeline;
    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);

    if (anv_batch_has_error(&cmd_buffer->batch))
@@ -4118,11 +4118,11 @@ void genX(CmdEndTransformFeedbackEXT)(
 void
 genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
 {
-   struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline;
+   struct anv_compute_pipeline *pipeline = cmd_buffer->state.compute.pipeline;

-   assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);
+   assert(pipeline->cs);

-   genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->l3_config);
+   genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->base.l3_config);

    genX(flush_pipeline_select_gpgpu)(cmd_buffer);

@@ -4138,7 +4138,7 @@ genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
       cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_CS_STALL_BIT;
       genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);

-      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
+      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->base.batch);

       /* The workgroup size of the pipeline affects our push constant layout
        * so flag push constants as dirty if we change the pipeline.
@@ -4254,7 +4254,7 @@ void genX(CmdDispatchBase)(
    uint32_t                                    groupCountZ)
 {
    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
-   struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline;
+   struct anv_compute_pipeline *pipeline = cmd_buffer->state.compute.pipeline;
    const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);

    anv_cmd_buffer_push_base_group_id(cmd_buffer, baseGroupX,
@@ -4311,7 +4311,7 @@ void genX(CmdDispatchIndirect)(
 {
    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
-   struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline;
+   struct anv_compute_pipeline *pipeline = cmd_buffer->state.compute.pipeline;
    const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
    struct anv_address addr = anv_address_add(buffer->address, offset);
    struct anv_batch *batch = &cmd_buffer->batch;
diff --git a/src/intel/vulkan/genX_pipeline.c b/src/intel/vulkan/genX_pipeline.c
index 95fabdaf158..d65fd216636 100644
--- a/src/intel/vulkan/genX_pipeline.c
+++ b/src/intel/vulkan/genX_pipeline.c
@@ -85,7 +85,7 @@ vertex_element_comp_control(enum isl_format format, unsigned comp)
 }

 static void
-emit_vertex_input(struct anv_pipeline *pipeline,
+emit_vertex_input(struct anv_graphics_pipeline *pipeline,
                   const VkPipelineVertexInputStateCreateInfo *info)
 {
    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
@@ -111,7 +111,7 @@ emit_vertex_input(struct anv_pipeline *pipeline,
    uint32_t *p;

    const uint32_t num_dwords = 1 + total_elems * 2;
-   p = anv_batch_emitn(&pipeline->batch, num_dwords,
+   p = anv_batch_emitn(&pipeline->base.batch, num_dwords,
                        GENX(3DSTATE_VERTEX_ELEMENTS));
    if (!p)
       return;
@@ -147,7 +147,7 @@ emit_vertex_input(struct anv_pipeline *pipeline,
    for (uint32_t i = 0; i < info->vertexAttributeDescriptionCount; i++) {
       const VkVertexInputAttributeDescription *desc =
          &info->pVertexAttributeDescriptions[i];
-      enum isl_format format = anv_get_isl_format(&pipeline->device->info,
+      enum isl_format format = anv_get_isl_format(&pipeline->base.device->info,
                                                   desc->format,
                                                   VK_IMAGE_ASPECT_COLOR_BIT,
                                                   VK_IMAGE_TILING_LINEAR);
@@ -180,7 +180,7 @@ emit_vertex_input(struct anv_pipeline *pipeline,
     * that controls instancing.  On Haswell and prior, that's part of
     * VERTEX_BUFFER_STATE which we emit later.
     */
-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_INSTANCING), vfi) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_VF_INSTANCING), vfi) {
       vfi.InstancingEnable = pipeline->vb[desc->binding].instanced;
       vfi.VertexElementIndex = slot;
       vfi.InstanceDataStepRate =
@@ -222,7 +222,7 @@ emit_vertex_input(struct anv_pipeline *pipeline,
    }

 #if GEN_GEN >= 8
-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_SGVS), sgvs) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_VF_SGVS), sgvs) {
       sgvs.VertexIDEnable = vs_prog_data->uses_vertexid;
       sgvs.VertexIDComponentNumber = 2;
       sgvs.VertexIDElementOffset = id_slot;
@@ -248,7 +248,7 @@ emit_vertex_input(struct anv_pipeline *pipeline,
                                         &element);

 #if GEN_GEN >= 8
-      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_INSTANCING), vfi) {
+      anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_VF_INSTANCING), vfi) {
          vfi.VertexElementIndex = drawid_slot;
       }
 #endif
@@ -299,7 +299,7 @@ genX(emit_urb_setup)(struct anv_device *device, struct anv_batch *batch,
 }

 static void
-emit_urb_setup(struct anv_pipeline *pipeline,
+emit_urb_setup(struct anv_graphics_pipeline *pipeline,
                enum gen_urb_deref_block_size *deref_block_size)
 {
    unsigned entry_size[4];
@@ -311,21 +311,21 @@ emit_urb_setup(struct anv_pipeline *pipeline,
       entry_size[i] = prog_data ? prog_data->urb_entry_size : 1;
    }

-   genX(emit_urb_setup)(pipeline->device, &pipeline->batch,
-                        pipeline->l3_config,
+   genX(emit_urb_setup)(pipeline->base.device, &pipeline->base.batch,
+                        pipeline->base.l3_config,
                         pipeline->active_stages, entry_size,
                         deref_block_size);
 }

 static void
-emit_3dstate_sbe(struct anv_pipeline *pipeline)
+emit_3dstate_sbe(struct anv_graphics_pipeline *pipeline)
 {
    const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);

    if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
-      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SBE), sbe);
+      anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_SBE), sbe);
 #if GEN_GEN >= 8
-      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SBE_SWIZ), sbe);
+      anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_SBE_SWIZ), sbe);
 #endif
       return;
    }
@@ -411,17 +411,17 @@ emit_3dstate_sbe(struct anv_pipeline *pipeline)
       sbe.ForceVertexURBEntryReadLength = true;
 #endif

-   uint32_t *dw = anv_batch_emit_dwords(&pipeline->batch,
+   uint32_t *dw = anv_batch_emit_dwords(&pipeline->base.batch,
                                         GENX(3DSTATE_SBE_length));
    if (!dw)
       return;
-   GENX(3DSTATE_SBE_pack)(&pipeline->batch, dw, &sbe);
+   GENX(3DSTATE_SBE_pack)(&pipeline->base.batch, dw, &sbe);

 #if GEN_GEN >= 8
-   dw = anv_batch_emit_dwords(&pipeline->batch, GENX(3DSTATE_SBE_SWIZ_length));
+   dw = anv_batch_emit_dwords(&pipeline->base.batch, GENX(3DSTATE_SBE_SWIZ_length));
    if (!dw)
       return;
-   GENX(3DSTATE_SBE_SWIZ_pack)(&pipeline->batch, dw, &swiz);
+   GENX(3DSTATE_SBE_SWIZ_pack)(&pipeline->base.batch, dw, &swiz);
 #endif
 }
@@ -468,7 +468,7 @@ vk_line_rasterization_mode(const VkPipelineRasterizationLineStateCreateInfoEXT *
  * different shader stages which might generate their own type of primitives.
  */
 static VkPolygonMode
-anv_raster_polygon_mode(struct anv_pipeline *pipeline,
+anv_raster_polygon_mode(struct anv_graphics_pipeline *pipeline,
                         const VkPipelineInputAssemblyStateCreateInfo *ia_info,
                         const VkPipelineRasterizationStateCreateInfo *rs_info)
 {
@@ -531,7 +531,7 @@ anv_raster_polygon_mode(struct anv_pipeline *pipeline,

 #if GEN_GEN <= 7
 static uint32_t
-gen7_ms_rast_mode(struct anv_pipeline *pipeline,
+gen7_ms_rast_mode(struct anv_graphics_pipeline *pipeline,
                   const VkPipelineInputAssemblyStateCreateInfo *ia_info,
                   const VkPipelineRasterizationStateCreateInfo *rs_info,
                   const VkPipelineMultisampleStateCreateInfo *ms_info)
@@ -562,7 +562,7 @@ gen7_ms_rast_mode(struct anv_pipeline *pipeline,
 #endif

 static void
-emit_rs_state(struct anv_pipeline *pipeline,
+emit_rs_state(struct anv_graphics_pipeline *pipeline,
               const VkPipelineInputAssemblyStateCreateInfo *ia_info,
               const VkPipelineRasterizationStateCreateInfo *rs_info,
               const VkPipelineMultisampleStateCreateInfo *ms_info,
@@ -693,7 +693,7 @@ emit_rs_state(struct anv_pipeline *pipeline,
       assert(vk_format_is_depth_or_stencil(vk_format));
       if (vk_format_aspects(vk_format) & VK_IMAGE_ASPECT_DEPTH_BIT) {
          enum isl_format isl_format =
-            anv_get_isl_format(&pipeline->device->info, vk_format,
+            anv_get_isl_format(&pipeline->base.device->info, vk_format,
                                VK_IMAGE_ASPECT_DEPTH_BIT,
                                VK_IMAGE_TILING_OPTIMAL);
          sf.DepthBufferSurfaceFormat =
@@ -712,7 +712,7 @@ emit_rs_state(struct anv_pipeline *pipeline,
 }

 static void
-emit_ms_state(struct anv_pipeline *pipeline,
+emit_ms_state(struct anv_graphics_pipeline *pipeline,
               const VkPipelineMultisampleStateCreateInfo *info)
 {
    uint32_t samples = 1;
@@ -738,7 +738,7 @@ emit_ms_state(struct anv_pipeline *pipeline,
    if (info && info->pSampleMask)
       sample_mask &= info->pSampleMask[0];

-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_MULTISAMPLE), ms) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_MULTISAMPLE), ms) {
       ms.NumberofMultisamples = log2_samples;

       ms.PixelLocation = CENTER;
@@ -770,7 +770,7 @@ emit_ms_state(struct anv_pipeline *pipeline,
 #endif
    }

-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SAMPLE_MASK), sm) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_SAMPLE_MASK), sm) {
       sm.SampleMask = sample_mask;
    }
 }
@@ -978,7 +978,7 @@ sanitize_ds_state(VkPipelineDepthStencilStateCreateInfo *state,
 }

 static void
-emit_ds_state(struct anv_pipeline *pipeline,
+emit_ds_state(struct anv_graphics_pipeline *pipeline,
               const VkPipelineDepthStencilStateCreateInfo *pCreateInfo,
               const struct anv_render_pass *pass,
               const struct anv_subpass *subpass)
@@ -1056,11 +1056,11 @@ is_dual_src_blend_factor(VkBlendFactor factor)
 }

 static void
-emit_cb_state(struct anv_pipeline *pipeline,
+emit_cb_state(struct anv_graphics_pipeline *pipeline,
               const VkPipelineColorBlendStateCreateInfo *info,
               const VkPipelineMultisampleStateCreateInfo *ms_info)
 {
-   struct anv_device *device = pipeline->device;
+   struct anv_device *device = pipeline->base.device;
    const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);

    struct GENX(BLEND_STATE) blend_state = {
@@ -1201,7 +1201,7 @@ emit_cb_state(struct anv_pipeline *pipeline,
    }

 #if GEN_GEN >= 8
-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_BLEND), blend) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_PS_BLEND), blend) {
       blend.AlphaToCoverageEnable = blend_state.AlphaToCoverageEnable;
       blend.HasWriteableRT = has_writeable_rt;
       blend.ColorBufferBlendEnable = bs0.ColorBufferBlendEnable;
@@ -1219,7 +1219,7 @@
    GENX(BLEND_STATE_pack)(NULL, pipeline->blend_state.map, &blend_state);

-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_BLEND_STATE_POINTERS), bsp) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_BLEND_STATE_POINTERS), bsp) {
       bsp.BlendStatePointer = pipeline->blend_state.offset;
 #if GEN_GEN >= 8
       bsp.BlendStatePointerValid = true;
@@ -1228,14 +1228,14 @@ emit_cb_state(struct anv_pipeline *pipeline,
 }

 static void
-emit_3dstate_clip(struct anv_pipeline *pipeline,
+emit_3dstate_clip(struct anv_graphics_pipeline *pipeline,
                   const VkPipelineInputAssemblyStateCreateInfo *ia_info,
                   const VkPipelineViewportStateCreateInfo *vp_info,
                   const VkPipelineRasterizationStateCreateInfo *rs_info)
 {
    const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
    (void) wm_prog_data;
-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_CLIP), clip) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_CLIP), clip) {
       clip.ClipEnable = true;
       clip.StatisticsEnable = true;
       clip.EarlyCullEnable = true;
@@ -1302,7 +1302,7 @@ emit_3dstate_clip(struct anv_pipeline *pipeline,
 }

 static void
-emit_3dstate_streamout(struct anv_pipeline *pipeline,
+emit_3dstate_streamout(struct anv_graphics_pipeline *pipeline,
                        const VkPipelineRasterizationStateCreateInfo *rs_info)
 {
 #if GEN_GEN >= 8
@@ -1319,7 +1319,7 @@ emit_3dstate_streamout(struct anv_pipeline *pipeline,
       xfb_info = pipeline->shaders[MESA_SHADER_VERTEX]->xfb_info;
 #endif

-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_STREAMOUT), so) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_STREAMOUT), so) {
       so.RenderingDisable = rs_info->rasterizerDiscardEnable;

 #if GEN_GEN >= 8
@@ -1435,7 +1435,7 @@ emit_3dstate_streamout(struct anv_pipeline *pipeline,
          sbs[xfb_info->buffer_to_stream[b]] |= 1 << b;
       }

-      uint32_t *dw = anv_batch_emitn(&pipeline->batch, 3 + 2 * max_decls,
+      uint32_t *dw = anv_batch_emitn(&pipeline->base.batch, 3 + 2 * max_decls,
                                      GENX(3DSTATE_SO_DECL_LIST),
                                      .StreamtoBufferSelects0 = sbs[0],
                                      .StreamtoBufferSelects1 = sbs[1],
@@ -1497,16 +1497,16 @@ get_scratch_space(const struct anv_shader_bin *bin)
 }

 static void
-emit_3dstate_vs(struct anv_pipeline *pipeline)
+emit_3dstate_vs(struct anv_graphics_pipeline *pipeline)
 {
-   const struct gen_device_info *devinfo = &pipeline->device->info;
+   const struct gen_device_info *devinfo = &pipeline->base.device->info;
    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
    const struct anv_shader_bin *vs_bin =
       pipeline->shaders[MESA_SHADER_VERTEX];

    assert(anv_pipeline_has_stage(pipeline, MESA_SHADER_VERTEX));

-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS), vs) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_VS), vs) {
       vs.Enable = true;
       vs.StatisticsEnable = true;
       vs.KernelStartPointer = vs_bin->kernel.offset;
@@ -1568,22 +1568,22 @@ emit_3dstate_vs(struct anv_pipeline *pipeline)
       vs.PerThreadScratchSpace = get_scratch_space(vs_bin);
       vs.ScratchSpaceBasePointer =
-         get_scratch_address(pipeline, MESA_SHADER_VERTEX, vs_bin);
+         get_scratch_address(&pipeline->base, MESA_SHADER_VERTEX, vs_bin);
    }
 }

 static void
-emit_3dstate_hs_te_ds(struct anv_pipeline *pipeline,
+emit_3dstate_hs_te_ds(struct anv_graphics_pipeline *pipeline,
                       const VkPipelineTessellationStateCreateInfo *tess_info)
 {
    if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL)) {
-      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_HS), hs);
-      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_TE), te);
-      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_DS), ds);
+      anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_HS), hs);
+      anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_TE), te);
+      anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_DS), ds);
       return;
    }

-   const struct gen_device_info *devinfo = &pipeline->device->info;
+   const struct gen_device_info *devinfo = &pipeline->base.device->info;
    const struct anv_shader_bin *tcs_bin =
       pipeline->shaders[MESA_SHADER_TESS_CTRL];
    const struct anv_shader_bin *tes_bin =
@@ -1592,7 +1592,7 @@ emit_3dstate_hs_te_ds(struct anv_pipeline *pipeline,
    const struct brw_tcs_prog_data *tcs_prog_data = get_tcs_prog_data(pipeline);
    const struct brw_tes_prog_data *tes_prog_data = get_tes_prog_data(pipeline);

-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_HS), hs) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_HS), hs) {
       hs.Enable = true;
       hs.StatisticsEnable = true;
       hs.KernelStartPointer = tcs_bin->kernel.offset;
@@ -1621,7 +1621,7 @@ emit_3dstate_hs_te_ds(struct anv_pipeline *pipeline,
       hs.PerThreadScratchSpace = get_scratch_space(tcs_bin);
       hs.ScratchSpaceBasePointer =
-         get_scratch_address(pipeline, MESA_SHADER_TESS_CTRL, tcs_bin);
+         get_scratch_address(&pipeline->base, MESA_SHADER_TESS_CTRL, tcs_bin);

 #if GEN_GEN >= 9
       hs.DispatchMode = tcs_prog_data->base.dispatch_mode;
@@ -1636,7 +1636,7 @@ emit_3dstate_hs_te_ds(struct anv_pipeline *pipeline,
       domain_origin_state ? domain_origin_state->domainOrigin :
                             VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT;

-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_TE), te) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_TE), te) {
      te.Partitioning = tes_prog_data->partitioning;

      if (uv_origin == VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT) {
@@ -1658,7 +1658,7 @@ emit_3dstate_hs_te_ds(struct anv_pipeline *pipeline,
       te.MaximumTessellationFactorNotOdd = 64.0;
    }

-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_DS), ds) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_DS), ds) {
       ds.Enable = true;
       ds.StatisticsEnable = true;
       ds.KernelStartPointer = tes_bin->kernel.offset;
@@ -1694,25 +1694,25 @@ emit_3dstate_hs_te_ds(struct anv_pipeline *pipeline,
       ds.PerThreadScratchSpace = get_scratch_space(tes_bin);
       ds.ScratchSpaceBasePointer =
-         get_scratch_address(pipeline, MESA_SHADER_TESS_EVAL, tes_bin);
+         get_scratch_address(&pipeline->base, MESA_SHADER_TESS_EVAL, tes_bin);
    }
 }

 static void
-emit_3dstate_gs(struct anv_pipeline *pipeline)
+emit_3dstate_gs(struct anv_graphics_pipeline *pipeline)
 {
-   const struct gen_device_info *devinfo = &pipeline->device->info;
+   const struct gen_device_info *devinfo = &pipeline->base.device->info;
    const struct anv_shader_bin *gs_bin =
       pipeline->shaders[MESA_SHADER_GEOMETRY];

    if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY)) {
-      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), gs);
+      anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_GS), gs);
       return;
    }

    const struct brw_gs_prog_data *gs_prog_data = get_gs_prog_data(pipeline);

-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), gs) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_GS), gs) {
       gs.Enable = true;
       gs.StatisticsEnable = true;
       gs.KernelStartPointer = gs_bin->kernel.offset;
@@ -1762,12 +1762,12 @@ emit_3dstate_gs(struct anv_pipeline *pipeline)
       gs.PerThreadScratchSpace = get_scratch_space(gs_bin);
       gs.ScratchSpaceBasePointer =
-         get_scratch_address(pipeline, MESA_SHADER_GEOMETRY, gs_bin);
+         get_scratch_address(&pipeline->base, MESA_SHADER_GEOMETRY, gs_bin);
    }
 }

 static bool
-has_color_buffer_write_enabled(const struct anv_pipeline *pipeline,
+has_color_buffer_write_enabled(const struct anv_graphics_pipeline *pipeline,
                                const VkPipelineColorBlendStateCreateInfo *blend)
 {
    const struct anv_shader_bin *shader_bin =
@@ -1793,7 +1793,7 @@ has_color_buffer_write_enabled(const struct anv_pipeline *pipeline,
 }

 static void
-emit_3dstate_wm(struct anv_pipeline *pipeline, struct anv_subpass *subpass,
+emit_3dstate_wm(struct anv_graphics_pipeline *pipeline, struct anv_subpass *subpass,
                 const VkPipelineInputAssemblyStateCreateInfo *ia,
                 const VkPipelineRasterizationStateCreateInfo *raster,
                 const VkPipelineColorBlendStateCreateInfo *blend,
@@ -1802,7 +1802,7 @@ emit_3dstate_wm(struct anv_pipeline *pipeline, struct anv_subpass *subpass,
 {
    const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);

-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_WM), wm) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_WM), wm) {
       wm.StatisticsEnable = true;
       wm.LineEndCapAntialiasingRegionWidth = _05pixels;
       wm.LineAntialiasingRegionWidth = _10pixels;
@@ -1883,16 +1883,16 @@ emit_3dstate_wm(struct anv_pipeline *pipeline, struct anv_subpass *subpass,
 }

 static void
-emit_3dstate_ps(struct anv_pipeline *pipeline,
+emit_3dstate_ps(struct anv_graphics_pipeline *pipeline,
                 const VkPipelineColorBlendStateCreateInfo *blend,
                 const VkPipelineMultisampleStateCreateInfo *multisample)
 {
-   UNUSED const struct gen_device_info *devinfo = &pipeline->device->info;
+   UNUSED const struct gen_device_info *devinfo = &pipeline->base.device->info;
    const struct anv_shader_bin *fs_bin =
       pipeline->shaders[MESA_SHADER_FRAGMENT];

    if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
-      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS), ps) {
+      anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_PS), ps) {
 #if GEN_GEN == 7
          /* Even if no fragments are ever dispatched, gen7 hardware hangs if
          * we don't at least set the maximum number of threads.
@@ -1927,7 +1927,7 @@ emit_3dstate_ps(struct anv_pipeline *pipeline,
    }
 #endif

-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS), ps) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_PS), ps) {
       ps._8PixelDispatchEnable = wm_prog_data->dispatch_8;
       ps._16PixelDispatchEnable = wm_prog_data->dispatch_16;
       ps._32PixelDispatchEnable = wm_prog_data->dispatch_32;
@@ -1992,23 +1992,23 @@ emit_3dstate_ps(struct anv_pipeline *pipeline,
       ps.PerThreadScratchSpace = get_scratch_space(fs_bin);
       ps.ScratchSpaceBasePointer =
-         get_scratch_address(pipeline, MESA_SHADER_FRAGMENT, fs_bin);
+         get_scratch_address(&pipeline->base, MESA_SHADER_FRAGMENT, fs_bin);
    }
 }

 #if GEN_GEN >= 8
 static void
-emit_3dstate_ps_extra(struct anv_pipeline *pipeline,
+emit_3dstate_ps_extra(struct anv_graphics_pipeline *pipeline,
                       struct anv_subpass *subpass)
 {
    const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);

    if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
-      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_EXTRA), ps);
+      anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_PS_EXTRA), ps);
       return;
    }

-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_EXTRA), ps) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_PS_EXTRA), ps) {
       ps.PixelShaderValid = true;
       ps.AttributeEnable = wm_prog_data->num_varying_inputs > 0;
       ps.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
@@ -2044,24 +2044,24 @@ emit_3dstate_ps_extra(struct anv_pipeline *pipeline,
 }

 static void
-emit_3dstate_vf_topology(struct anv_pipeline *pipeline)
+emit_3dstate_vf_topology(struct anv_graphics_pipeline *pipeline)
 {
-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_TOPOLOGY), vft) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_VF_TOPOLOGY), vft) {
       vft.PrimitiveTopologyType = pipeline->topology;
    }
 }
 #endif

 static void
-emit_3dstate_vf_statistics(struct anv_pipeline *pipeline)
+emit_3dstate_vf_statistics(struct anv_graphics_pipeline *pipeline)
 {
-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_STATISTICS), vfs) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_VF_STATISTICS), vfs) {
       vfs.StatisticsEnable = true;
    }
 }

 static void
-compute_kill_pixel(struct anv_pipeline *pipeline,
+compute_kill_pixel(struct anv_graphics_pipeline *pipeline,
                    const VkPipelineMultisampleStateCreateInfo *ms_info,
                    const struct anv_subpass *subpass)
 {
@@ -2103,7 +2103,7 @@ genX(graphics_pipeline_create)(
    ANV_FROM_HANDLE(anv_device, device, _device);
    ANV_FROM_HANDLE(anv_render_pass, pass, pCreateInfo->renderPass);
    struct anv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];
-   struct anv_pipeline *pipeline;
+   struct anv_graphics_pipeline *pipeline;
    VkResult result;

    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
@@ -2201,9 +2201,9 @@ genX(graphics_pipeline_create)(
 #endif
    emit_3dstate_vf_statistics(pipeline);

-   *pPipeline = anv_pipeline_to_handle(pipeline);
+   *pPipeline = anv_pipeline_to_handle(&pipeline->base);

-   return pipeline->batch.status;
+   return pipeline->base.batch.status;
 }

 static VkResult
@@ -2216,7 +2216,7 @@ compute_pipeline_create(
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
    const struct gen_device_info *devinfo = &device->info;
-   struct anv_pipeline *pipeline;
+   struct anv_compute_pipeline *pipeline;
    VkResult result;

    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO);
@@ -2230,46 +2230,43 @@ compute_pipeline_create(
    if (pipeline == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

-   pipeline->device = device;
-   pipeline->type = ANV_PIPELINE_COMPUTE;
-
-   pipeline->blend_state.map = NULL;
+   pipeline->base.device = device;
+   pipeline->base.type = ANV_PIPELINE_COMPUTE;

    const VkAllocationCallbacks *alloc =
       pAllocator ? pAllocator : &device->alloc;

-   result = anv_reloc_list_init(&pipeline->batch_relocs, alloc);
+   result = anv_reloc_list_init(&pipeline->base.batch_relocs, alloc);
    if (result != VK_SUCCESS) {
       vk_free2(&device->alloc, pAllocator, pipeline);
       return result;
    }

-   pipeline->batch.alloc = alloc;
-   pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
-   pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
-   pipeline->batch.relocs = &pipeline->batch_relocs;
-   pipeline->batch.status = VK_SUCCESS;
-
-   pipeline->mem_ctx = ralloc_context(NULL);
-   pipeline->flags = pCreateInfo->flags;
+   pipeline->base.batch.alloc = alloc;
+   pipeline->base.batch.next = pipeline->base.batch.start = pipeline->base.batch_data;
+   pipeline->base.batch.end = pipeline->base.batch.start + sizeof(pipeline->base.batch_data);
+   pipeline->base.batch.relocs = &pipeline->base.batch_relocs;
+   pipeline->base.batch.status = VK_SUCCESS;
+
+   pipeline->base.mem_ctx = ralloc_context(NULL);
+   pipeline->base.flags = pCreateInfo->flags;
    pipeline->cs = NULL;

-   util_dynarray_init(&pipeline->executables, pipeline->mem_ctx);
+   util_dynarray_init(&pipeline->base.executables, pipeline->base.mem_ctx);

    assert(pCreateInfo->stage.stage == VK_SHADER_STAGE_COMPUTE_BIT);
-   pipeline->active_stages |= VK_SHADER_STAGE_COMPUTE_BIT;
    ANV_FROM_HANDLE(anv_shader_module, module, pCreateInfo->stage.module);
    result = anv_pipeline_compile_cs(pipeline, cache, pCreateInfo, module,
                                     pCreateInfo->stage.pName,
                                     pCreateInfo->stage.pSpecializationInfo);
    if (result != VK_SUCCESS) {
-      ralloc_free(pipeline->mem_ctx);
+      ralloc_free(pipeline->base.mem_ctx);
       vk_free2(&device->alloc, pAllocator, pipeline);
       return result;
    }

    const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);

-   anv_pipeline_setup_l3_config(pipeline, cs_prog_data->base.total_shared > 0);
+   anv_pipeline_setup_l3_config(&pipeline->base, cs_prog_data->base.total_shared > 0);

    uint32_t group_size = cs_prog_data->local_size[0] *
       cs_prog_data->local_size[1] * cs_prog_data->local_size[2];
@@ -2288,7 +2285,7 @@ compute_pipeline_create(

    const struct anv_shader_bin *cs_bin = pipeline->cs;

-   anv_batch_emit(&pipeline->batch, GENX(MEDIA_VFE_STATE), vfe) {
+   anv_batch_emit(&pipeline->base.batch, GENX(MEDIA_VFE_STATE), vfe) {
 #if GEN_GEN > 7
       vfe.StackSize = 0;
 #else
@@ -2327,7 +2324,7 @@ compute_pipeline_create(
          cs_bin->prog_data->total_scratch / 1024 - 1;
       }
       vfe.ScratchSpaceBasePointer =
-         get_scratch_address(pipeline, MESA_SHADER_COMPUTE, cs_bin);
+         get_scratch_address(&pipeline->base, MESA_SHADER_COMPUTE, cs_bin);
    }
 }
@@ -2370,9 +2367,9 @@ compute_pipeline_create(
                                 pipeline->interface_descriptor_data,
                                 &desc);

-   *pPipeline = anv_pipeline_to_handle(pipeline);
+   *pPipeline = anv_pipeline_to_handle(&pipeline->base);

-   return pipeline->batch.status;
+   return pipeline->base.batch.status;
 }

 VkResult genX(CreateGraphicsPipelines)(
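The heart of the change is the anv_private.h hunk: anv_pipeline keeps only the state common to every pipeline type, and the graphics- and compute-only fields move into derived structs that embed the base as their first member. Distilled into a self-contained C sketch (the ANV_DECL_PIPELINE_DOWNCAST macro is verbatim from the patch; the struct bodies here are illustrative stubs, not the real field lists):

#include <assert.h>

enum anv_pipeline_type {
   ANV_PIPELINE_GRAPHICS,
   ANV_PIPELINE_COMPUTE,
};

/* Base object: only the state every pipeline type shares. */
struct anv_pipeline {
   enum anv_pipeline_type type;
   /* ... batch, batch_relocs, mem_ctx, flags, executables, l3_config ... */
};

/* Derived objects embed the base as their first member, so a pointer to
 * the derived struct and a pointer to its base compare equal.
 */
struct anv_graphics_pipeline {
   struct anv_pipeline base;
   /* ... shaders[], active_stages, dynamic state, vertex bindings ... */
};

struct anv_compute_pipeline {
   struct anv_pipeline base;
   /* ... cs shader bin, cs_right_mask, interface descriptor data ... */
};

/* Checked downcast, verbatim from the patch: the assert catches a bind
 * point/pipeline type mismatch in debug builds, and the cast is legal
 * because `base` is the first member.
 */
#define ANV_DECL_PIPELINE_DOWNCAST(pipe_type, pipe_enum)        \
   static inline struct anv_##pipe_type##_pipeline *            \
   anv_pipeline_to_##pipe_type(struct anv_pipeline *pipeline)   \
   {                                                            \
      assert(pipeline->type == pipe_enum);                      \
      return (struct anv_##pipe_type##_pipeline *) pipeline;    \
   }

ANV_DECL_PIPELINE_DOWNCAST(graphics, ANV_PIPELINE_GRAPHICS)
ANV_DECL_PIPELINE_DOWNCAST(compute, ANV_PIPELINE_COMPUTE)

Upcasts go the other way through the embedded member, which is why call sites throughout the diff change from pipeline->device to pipeline->base.device, and why type-agnostic helpers such as anv_pipeline_setup_l3_config and anv_pipeline_to_handle now take &pipeline->base.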