author      Caio Marcelo de Oliveira Filho <[email protected]>   2020-03-03 15:31:50 -0800
committer   Caio Marcelo de Oliveira Filho <[email protected]>   2020-03-12 13:18:54 -0700
commit      925df46b7e00ee7ca4128ef2cc2f3fad68ce8d4e (patch)
tree        ad0d309323c449d669a5f46c34f774ccf89527aa /src/intel/vulkan/genX_pipeline.c
parent      af33f0d767a72dfd89246947d89d28d3157b4f59 (diff)
anv: Split graphics and compute bits from anv_pipeline
Add two new structs that use anv_pipeline as their base, and change
all functions that work on a specific pipeline type to take the
corresponding struct.
Reviewed-by: Jason Ekstrand <[email protected]>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4040>
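For context, here is a minimal, self-contained sketch of the base-struct pattern the commit message describes. It is illustrative only: the "_sketch" names are placeholders, not Mesa identifiers, and the real definitions in src/intel/vulkan/anv_private.h carry many more fields.

/* sketch.c -- illustrative only.  The real struct definitions live in
 * src/intel/vulkan/anv_private.h; the "_sketch" names below are
 * placeholders, not Mesa identifiers. */
#include <stdint.h>

struct anv_batch_sketch {            /* stand-in for struct anv_batch */
   uint32_t *next;
};

enum anv_pipeline_type_sketch {
   SKETCH_PIPELINE_GRAPHICS,
   SKETCH_PIPELINE_COMPUTE,
};

/* State shared by every pipeline type (device, batch, relocs, flags, ...). */
struct anv_pipeline_sketch {
   enum anv_pipeline_type_sketch type;
   struct anv_batch_sketch batch;
};

/* Graphics-only state (per-stage shaders, blend state, topology, ...). */
struct anv_graphics_pipeline_sketch {
   struct anv_pipeline_sketch base;  /* embedded base, reached via pipeline->base.* */
   uint32_t topology;
};

/* Compute-only state (the single compute shader, ...). */
struct anv_compute_pipeline_sketch {
   struct anv_pipeline_sketch base;
   const void *cs;
};

/* Helpers that work on any pipeline keep taking the base type... */
static void
emit_common(struct anv_pipeline_sketch *pipeline)
{
   (void) pipeline->batch.next;
}

/* ...while type-specific emitters take the derived type, which is why
 * the diff below rewrites &pipeline->batch as &pipeline->base.batch and
 * passes &pipeline->base to generic helpers. */
static void
emit_graphics(struct anv_graphics_pipeline_sketch *pipeline)
{
   emit_common(&pipeline->base);
   (void) pipeline->topology;
}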
Diffstat (limited to 'src/intel/vulkan/genX_pipeline.c')
-rw-r--r--   src/intel/vulkan/genX_pipeline.c | 183
1 file changed, 90 insertions, 93 deletions
diff --git a/src/intel/vulkan/genX_pipeline.c b/src/intel/vulkan/genX_pipeline.c
index 95fabdaf158..d65fd216636 100644
--- a/src/intel/vulkan/genX_pipeline.c
+++ b/src/intel/vulkan/genX_pipeline.c
@@ -85,7 +85,7 @@ vertex_element_comp_control(enum isl_format format, unsigned comp)
 }
 
 static void
-emit_vertex_input(struct anv_pipeline *pipeline,
+emit_vertex_input(struct anv_graphics_pipeline *pipeline,
                   const VkPipelineVertexInputStateCreateInfo *info)
 {
    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
@@ -111,7 +111,7 @@ emit_vertex_input(struct anv_pipeline *pipeline,
    uint32_t *p;
 
    const uint32_t num_dwords = 1 + total_elems * 2;
-   p = anv_batch_emitn(&pipeline->batch, num_dwords,
+   p = anv_batch_emitn(&pipeline->base.batch, num_dwords,
                        GENX(3DSTATE_VERTEX_ELEMENTS));
    if (!p)
       return;
@@ -147,7 +147,7 @@ emit_vertex_input(struct anv_pipeline *pipeline,
    for (uint32_t i = 0; i < info->vertexAttributeDescriptionCount; i++) {
       const VkVertexInputAttributeDescription *desc =
          &info->pVertexAttributeDescriptions[i];
-      enum isl_format format = anv_get_isl_format(&pipeline->device->info,
+      enum isl_format format = anv_get_isl_format(&pipeline->base.device->info,
                                                   desc->format,
                                                   VK_IMAGE_ASPECT_COLOR_BIT,
                                                   VK_IMAGE_TILING_LINEAR);
@@ -180,7 +180,7 @@ emit_vertex_input(struct anv_pipeline *pipeline,
        * that controls instancing. On Haswell and prior, that's part of
        * VERTEX_BUFFER_STATE which we emit later.
        */
-      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_INSTANCING), vfi) {
+      anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_VF_INSTANCING), vfi) {
         vfi.InstancingEnable = pipeline->vb[desc->binding].instanced;
         vfi.VertexElementIndex = slot;
         vfi.InstanceDataStepRate =
@@ -222,7 +222,7 @@ emit_vertex_input(struct anv_pipeline *pipeline,
    }
 
 #if GEN_GEN >= 8
-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_SGVS), sgvs) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_VF_SGVS), sgvs) {
       sgvs.VertexIDEnable = vs_prog_data->uses_vertexid;
       sgvs.VertexIDComponentNumber = 2;
       sgvs.VertexIDElementOffset = id_slot;
@@ -248,7 +248,7 @@ emit_vertex_input(struct anv_pipeline *pipeline,
                                       &element);
 
 #if GEN_GEN >= 8
-      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_INSTANCING), vfi) {
+      anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_VF_INSTANCING), vfi) {
         vfi.VertexElementIndex = drawid_slot;
      }
 #endif
@@ -299,7 +299,7 @@ genX(emit_urb_setup)(struct anv_device *device, struct anv_batch *batch,
 }
 
 static void
-emit_urb_setup(struct anv_pipeline *pipeline,
+emit_urb_setup(struct anv_graphics_pipeline *pipeline,
                enum gen_urb_deref_block_size *deref_block_size)
 {
    unsigned entry_size[4];
@@ -311,21 +311,21 @@ emit_urb_setup(struct anv_pipeline *pipeline,
       entry_size[i] = prog_data ? prog_data->urb_entry_size : 1;
    }
 
-   genX(emit_urb_setup)(pipeline->device, &pipeline->batch,
-                        pipeline->l3_config,
+   genX(emit_urb_setup)(pipeline->base.device, &pipeline->base.batch,
+                        pipeline->base.l3_config,
                         pipeline->active_stages, entry_size,
                         deref_block_size);
 }
 
 static void
-emit_3dstate_sbe(struct anv_pipeline *pipeline)
+emit_3dstate_sbe(struct anv_graphics_pipeline *pipeline)
 {
    const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
 
    if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
-      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SBE), sbe);
+      anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_SBE), sbe);
 #if GEN_GEN >= 8
-      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SBE_SWIZ), sbe);
+      anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_SBE_SWIZ), sbe);
 #endif
       return;
    }
@@ -411,17 +411,17 @@ emit_3dstate_sbe(struct anv_pipeline *pipeline)
    sbe.ForceVertexURBEntryReadLength = true;
 #endif
 
-   uint32_t *dw = anv_batch_emit_dwords(&pipeline->batch,
+   uint32_t *dw = anv_batch_emit_dwords(&pipeline->base.batch,
                                         GENX(3DSTATE_SBE_length));
    if (!dw)
       return;
-   GENX(3DSTATE_SBE_pack)(&pipeline->batch, dw, &sbe);
+   GENX(3DSTATE_SBE_pack)(&pipeline->base.batch, dw, &sbe);
 
 #if GEN_GEN >= 8
-   dw = anv_batch_emit_dwords(&pipeline->batch, GENX(3DSTATE_SBE_SWIZ_length));
+   dw = anv_batch_emit_dwords(&pipeline->base.batch, GENX(3DSTATE_SBE_SWIZ_length));
    if (!dw)
       return;
-   GENX(3DSTATE_SBE_SWIZ_pack)(&pipeline->batch, dw, &swiz);
+   GENX(3DSTATE_SBE_SWIZ_pack)(&pipeline->base.batch, dw, &swiz);
 #endif
 }
 
@@ -468,7 +468,7 @@ vk_line_rasterization_mode(const VkPipelineRasterizationLineStateCreateInfoEXT *
  * different shader stages which might generate their own type of primitives.
  */
 static VkPolygonMode
-anv_raster_polygon_mode(struct anv_pipeline *pipeline,
+anv_raster_polygon_mode(struct anv_graphics_pipeline *pipeline,
                         const VkPipelineInputAssemblyStateCreateInfo *ia_info,
                         const VkPipelineRasterizationStateCreateInfo *rs_info)
 {
@@ -531,7 +531,7 @@ anv_raster_polygon_mode(struct anv_pipeline *pipeline,
 #if GEN_GEN <= 7
 
 static uint32_t
-gen7_ms_rast_mode(struct anv_pipeline *pipeline,
+gen7_ms_rast_mode(struct anv_graphics_pipeline *pipeline,
                   const VkPipelineInputAssemblyStateCreateInfo *ia_info,
                   const VkPipelineRasterizationStateCreateInfo *rs_info,
                   const VkPipelineMultisampleStateCreateInfo *ms_info)
@@ -562,7 +562,7 @@ gen7_ms_rast_mode(struct anv_pipeline *pipeline,
 #endif
 
 static void
-emit_rs_state(struct anv_pipeline *pipeline,
+emit_rs_state(struct anv_graphics_pipeline *pipeline,
               const VkPipelineInputAssemblyStateCreateInfo *ia_info,
               const VkPipelineRasterizationStateCreateInfo *rs_info,
               const VkPipelineMultisampleStateCreateInfo *ms_info,
@@ -693,7 +693,7 @@ emit_rs_state(struct anv_pipeline *pipeline,
       assert(vk_format_is_depth_or_stencil(vk_format));
       if (vk_format_aspects(vk_format) & VK_IMAGE_ASPECT_DEPTH_BIT) {
          enum isl_format isl_format =
-            anv_get_isl_format(&pipeline->device->info, vk_format,
+            anv_get_isl_format(&pipeline->base.device->info, vk_format,
                                VK_IMAGE_ASPECT_DEPTH_BIT,
                                VK_IMAGE_TILING_OPTIMAL);
          sf.DepthBufferSurfaceFormat =
@@ -712,7 +712,7 @@ emit_rs_state(struct anv_pipeline *pipeline,
 }
 
 static void
-emit_ms_state(struct anv_pipeline *pipeline,
+emit_ms_state(struct anv_graphics_pipeline *pipeline,
               const VkPipelineMultisampleStateCreateInfo *info)
 {
    uint32_t samples = 1;
@@ -738,7 +738,7 @@ emit_ms_state(struct anv_pipeline *pipeline,
    if (info && info->pSampleMask)
       sample_mask &= info->pSampleMask[0];
 
-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_MULTISAMPLE), ms) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_MULTISAMPLE), ms) {
       ms.NumberofMultisamples = log2_samples;
 
       ms.PixelLocation = CENTER;
@@ -770,7 +770,7 @@ emit_ms_state(struct anv_pipeline *pipeline,
 #endif
    }
 
-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SAMPLE_MASK), sm) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_SAMPLE_MASK), sm) {
       sm.SampleMask = sample_mask;
    }
 }
@@ -978,7 +978,7 @@ sanitize_ds_state(VkPipelineDepthStencilStateCreateInfo *state,
 }
 
 static void
-emit_ds_state(struct anv_pipeline *pipeline,
+emit_ds_state(struct anv_graphics_pipeline *pipeline,
               const VkPipelineDepthStencilStateCreateInfo *pCreateInfo,
               const struct anv_render_pass *pass,
               const struct anv_subpass *subpass)
@@ -1056,11 +1056,11 @@ is_dual_src_blend_factor(VkBlendFactor factor)
 }
 
 static void
-emit_cb_state(struct anv_pipeline *pipeline,
+emit_cb_state(struct anv_graphics_pipeline *pipeline,
               const VkPipelineColorBlendStateCreateInfo *info,
               const VkPipelineMultisampleStateCreateInfo *ms_info)
 {
-   struct anv_device *device = pipeline->device;
+   struct anv_device *device = pipeline->base.device;
    const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
 
    struct GENX(BLEND_STATE) blend_state = {
@@ -1201,7 +1201,7 @@ emit_cb_state(struct anv_pipeline *pipeline,
    }
 
 #if GEN_GEN >= 8
-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_BLEND), blend) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_PS_BLEND), blend) {
       blend.AlphaToCoverageEnable = blend_state.AlphaToCoverageEnable;
       blend.HasWriteableRT = has_writeable_rt;
       blend.ColorBufferBlendEnable = bs0.ColorBufferBlendEnable;
@@ -1219,7 +1219,7 @@ emit_cb_state(struct anv_pipeline *pipeline,
 
    GENX(BLEND_STATE_pack)(NULL, pipeline->blend_state.map, &blend_state);
 
-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_BLEND_STATE_POINTERS), bsp) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_BLEND_STATE_POINTERS), bsp) {
       bsp.BlendStatePointer = pipeline->blend_state.offset;
 #if GEN_GEN >= 8
       bsp.BlendStatePointerValid = true;
@@ -1228,14 +1228,14 @@ emit_cb_state(struct anv_pipeline *pipeline,
 }
 
 static void
-emit_3dstate_clip(struct anv_pipeline *pipeline,
+emit_3dstate_clip(struct anv_graphics_pipeline *pipeline,
                   const VkPipelineInputAssemblyStateCreateInfo *ia_info,
                   const VkPipelineViewportStateCreateInfo *vp_info,
                   const VkPipelineRasterizationStateCreateInfo *rs_info)
 {
    const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
    (void) wm_prog_data;
-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_CLIP), clip) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_CLIP), clip) {
       clip.ClipEnable = true;
       clip.StatisticsEnable = true;
       clip.EarlyCullEnable = true;
@@ -1302,7 +1302,7 @@ emit_3dstate_clip(struct anv_pipeline *pipeline,
 }
 
 static void
-emit_3dstate_streamout(struct anv_pipeline *pipeline,
+emit_3dstate_streamout(struct anv_graphics_pipeline *pipeline,
                        const VkPipelineRasterizationStateCreateInfo *rs_info)
 {
 #if GEN_GEN >= 8
@@ -1319,7 +1319,7 @@ emit_3dstate_streamout(struct anv_pipeline *pipeline,
       xfb_info = pipeline->shaders[MESA_SHADER_VERTEX]->xfb_info;
 #endif
 
-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_STREAMOUT), so) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_STREAMOUT), so) {
       so.RenderingDisable = rs_info->rasterizerDiscardEnable;
 
 #if GEN_GEN >= 8
@@ -1435,7 +1435,7 @@ emit_3dstate_streamout(struct anv_pipeline *pipeline,
            sbs[xfb_info->buffer_to_stream[b]] |= 1 << b;
       }
 
-      uint32_t *dw = anv_batch_emitn(&pipeline->batch, 3 + 2 * max_decls,
+      uint32_t *dw = anv_batch_emitn(&pipeline->base.batch, 3 + 2 * max_decls,
                                      GENX(3DSTATE_SO_DECL_LIST),
                                      .StreamtoBufferSelects0 = sbs[0],
                                      .StreamtoBufferSelects1 = sbs[1],
@@ -1497,16 +1497,16 @@ get_scratch_space(const struct anv_shader_bin *bin)
 }
 
 static void
-emit_3dstate_vs(struct anv_pipeline *pipeline)
+emit_3dstate_vs(struct anv_graphics_pipeline *pipeline)
 {
-   const struct gen_device_info *devinfo = &pipeline->device->info;
+   const struct gen_device_info *devinfo = &pipeline->base.device->info;
    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
    const struct anv_shader_bin *vs_bin =
       pipeline->shaders[MESA_SHADER_VERTEX];
 
    assert(anv_pipeline_has_stage(pipeline, MESA_SHADER_VERTEX));
 
-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS), vs) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_VS), vs) {
      vs.Enable = true;
      vs.StatisticsEnable = true;
      vs.KernelStartPointer = vs_bin->kernel.offset;
@@ -1568,22 +1568,22 @@ emit_3dstate_vs(struct anv_pipeline *pipeline)
 
      vs.PerThreadScratchSpace = get_scratch_space(vs_bin);
      vs.ScratchSpaceBasePointer =
-        get_scratch_address(pipeline, MESA_SHADER_VERTEX, vs_bin);
+        get_scratch_address(&pipeline->base, MESA_SHADER_VERTEX, vs_bin);
    }
 }
 
 static void
-emit_3dstate_hs_te_ds(struct anv_pipeline *pipeline,
+emit_3dstate_hs_te_ds(struct anv_graphics_pipeline *pipeline,
                       const VkPipelineTessellationStateCreateInfo *tess_info)
 {
    if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL)) {
-      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_HS), hs);
-      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_TE), te);
-      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_DS), ds);
+      anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_HS), hs);
+      anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_TE), te);
+      anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_DS), ds);
       return;
    }
 
-   const struct gen_device_info *devinfo = &pipeline->device->info;
+   const struct gen_device_info *devinfo = &pipeline->base.device->info;
    const struct anv_shader_bin *tcs_bin =
       pipeline->shaders[MESA_SHADER_TESS_CTRL];
    const struct anv_shader_bin *tes_bin =
@@ -1592,7 +1592,7 @@ emit_3dstate_hs_te_ds(struct anv_pipeline *pipeline,
    const struct brw_tcs_prog_data *tcs_prog_data = get_tcs_prog_data(pipeline);
    const struct brw_tes_prog_data *tes_prog_data = get_tes_prog_data(pipeline);
 
-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_HS), hs) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_HS), hs) {
       hs.Enable = true;
       hs.StatisticsEnable = true;
       hs.KernelStartPointer = tcs_bin->kernel.offset;
@@ -1621,7 +1621,7 @@ emit_3dstate_hs_te_ds(struct anv_pipeline *pipeline,
 
       hs.PerThreadScratchSpace = get_scratch_space(tcs_bin);
       hs.ScratchSpaceBasePointer =
-         get_scratch_address(pipeline, MESA_SHADER_TESS_CTRL, tcs_bin);
+         get_scratch_address(&pipeline->base, MESA_SHADER_TESS_CTRL, tcs_bin);
 
 #if GEN_GEN >= 9
       hs.DispatchMode = tcs_prog_data->base.dispatch_mode;
@@ -1636,7 +1636,7 @@ emit_3dstate_hs_te_ds(struct anv_pipeline *pipeline,
       domain_origin_state ? domain_origin_state->domainOrigin :
                             VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT;
 
-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_TE), te) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_TE), te) {
       te.Partitioning = tes_prog_data->partitioning;
 
       if (uv_origin == VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT) {
@@ -1658,7 +1658,7 @@ emit_3dstate_hs_te_ds(struct anv_pipeline *pipeline,
       te.MaximumTessellationFactorNotOdd = 64.0;
    }
 
-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_DS), ds) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_DS), ds) {
       ds.Enable = true;
       ds.StatisticsEnable = true;
       ds.KernelStartPointer = tes_bin->kernel.offset;
@@ -1694,25 +1694,25 @@ emit_3dstate_hs_te_ds(struct anv_pipeline *pipeline,
 
       ds.PerThreadScratchSpace = get_scratch_space(tes_bin);
       ds.ScratchSpaceBasePointer =
-         get_scratch_address(pipeline, MESA_SHADER_TESS_EVAL, tes_bin);
+         get_scratch_address(&pipeline->base, MESA_SHADER_TESS_EVAL, tes_bin);
    }
 }
 
 static void
-emit_3dstate_gs(struct anv_pipeline *pipeline)
+emit_3dstate_gs(struct anv_graphics_pipeline *pipeline)
 {
-   const struct gen_device_info *devinfo = &pipeline->device->info;
+   const struct gen_device_info *devinfo = &pipeline->base.device->info;
    const struct anv_shader_bin *gs_bin =
       pipeline->shaders[MESA_SHADER_GEOMETRY];
 
    if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY)) {
-      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), gs);
+      anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_GS), gs);
       return;
    }
 
    const struct brw_gs_prog_data *gs_prog_data = get_gs_prog_data(pipeline);
 
-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), gs) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_GS), gs) {
       gs.Enable = true;
       gs.StatisticsEnable = true;
       gs.KernelStartPointer = gs_bin->kernel.offset;
@@ -1762,12 +1762,12 @@ emit_3dstate_gs(struct anv_pipeline *pipeline)
 
       gs.PerThreadScratchSpace = get_scratch_space(gs_bin);
       gs.ScratchSpaceBasePointer =
-         get_scratch_address(pipeline, MESA_SHADER_GEOMETRY, gs_bin);
+         get_scratch_address(&pipeline->base, MESA_SHADER_GEOMETRY, gs_bin);
    }
 }
 
 static bool
-has_color_buffer_write_enabled(const struct anv_pipeline *pipeline,
+has_color_buffer_write_enabled(const struct anv_graphics_pipeline *pipeline,
                                const VkPipelineColorBlendStateCreateInfo *blend)
 {
    const struct anv_shader_bin *shader_bin =
@@ -1793,7 +1793,7 @@ has_color_buffer_write_enabled(const struct anv_pipeline *pipeline,
 }
 
 static void
-emit_3dstate_wm(struct anv_pipeline *pipeline, struct anv_subpass *subpass,
+emit_3dstate_wm(struct anv_graphics_pipeline *pipeline, struct anv_subpass *subpass,
                 const VkPipelineInputAssemblyStateCreateInfo *ia,
                 const VkPipelineRasterizationStateCreateInfo *raster,
                 const VkPipelineColorBlendStateCreateInfo *blend,
@@ -1802,7 +1802,7 @@ emit_3dstate_wm(struct anv_pipeline *pipeline, struct anv_subpass *subpass,
 {
    const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
 
-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_WM), wm) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_WM), wm) {
       wm.StatisticsEnable = true;
       wm.LineEndCapAntialiasingRegionWidth = _05pixels;
       wm.LineAntialiasingRegionWidth = _10pixels;
@@ -1883,16 +1883,16 @@ emit_3dstate_wm(struct anv_pipeline *pipeline, struct anv_subpass *subpass,
 }
 
 static void
-emit_3dstate_ps(struct anv_pipeline *pipeline,
+emit_3dstate_ps(struct anv_graphics_pipeline *pipeline,
                 const VkPipelineColorBlendStateCreateInfo *blend,
                 const VkPipelineMultisampleStateCreateInfo *multisample)
 {
-   UNUSED const struct gen_device_info *devinfo = &pipeline->device->info;
+   UNUSED const struct gen_device_info *devinfo = &pipeline->base.device->info;
    const struct anv_shader_bin *fs_bin =
       pipeline->shaders[MESA_SHADER_FRAGMENT];
 
    if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
-      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS), ps) {
+      anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_PS), ps) {
 #if GEN_GEN == 7
          /* Even if no fragments are ever dispatched, gen7 hardware hangs if
           * we don't at least set the maximum number of threads.
@@ -1927,7 +1927,7 @@ emit_3dstate_ps(struct anv_pipeline *pipeline,
    }
 #endif
 
-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS), ps) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_PS), ps) {
       ps._8PixelDispatchEnable = wm_prog_data->dispatch_8;
       ps._16PixelDispatchEnable = wm_prog_data->dispatch_16;
       ps._32PixelDispatchEnable = wm_prog_data->dispatch_32;
@@ -1992,23 +1992,23 @@ emit_3dstate_ps(struct anv_pipeline *pipeline,
 
       ps.PerThreadScratchSpace = get_scratch_space(fs_bin);
       ps.ScratchSpaceBasePointer =
-         get_scratch_address(pipeline, MESA_SHADER_FRAGMENT, fs_bin);
+         get_scratch_address(&pipeline->base, MESA_SHADER_FRAGMENT, fs_bin);
    }
 }
 
 #if GEN_GEN >= 8
 static void
-emit_3dstate_ps_extra(struct anv_pipeline *pipeline,
+emit_3dstate_ps_extra(struct anv_graphics_pipeline *pipeline,
                       struct anv_subpass *subpass)
 {
    const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
 
    if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
-      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_EXTRA), ps);
+      anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_PS_EXTRA), ps);
       return;
    }
 
-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_EXTRA), ps) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_PS_EXTRA), ps) {
      ps.PixelShaderValid = true;
      ps.AttributeEnable = wm_prog_data->num_varying_inputs > 0;
      ps.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
@@ -2044,24 +2044,24 @@ emit_3dstate_ps_extra(struct anv_pipeline *pipeline,
 }
 
 static void
-emit_3dstate_vf_topology(struct anv_pipeline *pipeline)
+emit_3dstate_vf_topology(struct anv_graphics_pipeline *pipeline)
 {
-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_TOPOLOGY), vft) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_VF_TOPOLOGY), vft) {
       vft.PrimitiveTopologyType = pipeline->topology;
    }
 }
 #endif
 
 static void
-emit_3dstate_vf_statistics(struct anv_pipeline *pipeline)
+emit_3dstate_vf_statistics(struct anv_graphics_pipeline *pipeline)
 {
-   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_STATISTICS), vfs) {
+   anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_VF_STATISTICS), vfs) {
       vfs.StatisticsEnable = true;
    }
 }
 
 static void
-compute_kill_pixel(struct anv_pipeline *pipeline,
+compute_kill_pixel(struct anv_graphics_pipeline *pipeline,
                    const VkPipelineMultisampleStateCreateInfo *ms_info,
                    const struct anv_subpass *subpass)
 {
@@ -2103,7 +2103,7 @@ genX(graphics_pipeline_create)(
    ANV_FROM_HANDLE(anv_device, device, _device);
    ANV_FROM_HANDLE(anv_render_pass, pass, pCreateInfo->renderPass);
    struct anv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];
-   struct anv_pipeline *pipeline;
+   struct anv_graphics_pipeline *pipeline;
    VkResult result;
 
    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
@@ -2201,9 +2201,9 @@ genX(graphics_pipeline_create)(
 #endif
    emit_3dstate_vf_statistics(pipeline);
 
-   *pPipeline = anv_pipeline_to_handle(pipeline);
+   *pPipeline = anv_pipeline_to_handle(&pipeline->base);
 
-   return pipeline->batch.status;
+   return pipeline->base.batch.status;
 }
 
 static VkResult
@@ -2216,7 +2216,7 @@ compute_pipeline_create(
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
    const struct gen_device_info *devinfo = &device->info;
-   struct anv_pipeline *pipeline;
+   struct anv_compute_pipeline *pipeline;
    VkResult result;
 
    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO);
@@ -2230,46 +2230,43 @@ compute_pipeline_create(
    if (pipeline == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
-   pipeline->device = device;
-   pipeline->type = ANV_PIPELINE_COMPUTE;
-
-   pipeline->blend_state.map = NULL;
+   pipeline->base.device = device;
+   pipeline->base.type = ANV_PIPELINE_COMPUTE;
 
    const VkAllocationCallbacks *alloc =
       pAllocator ? pAllocator : &device->alloc;
 
-   result = anv_reloc_list_init(&pipeline->batch_relocs, alloc);
+   result = anv_reloc_list_init(&pipeline->base.batch_relocs, alloc);
    if (result != VK_SUCCESS) {
      vk_free2(&device->alloc, pAllocator, pipeline);
     return result;
   }
-   pipeline->batch.alloc = alloc;
-   pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
-   pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
-   pipeline->batch.relocs = &pipeline->batch_relocs;
-   pipeline->batch.status = VK_SUCCESS;
-
-   pipeline->mem_ctx = ralloc_context(NULL);
-   pipeline->flags = pCreateInfo->flags;
+   pipeline->base.batch.alloc = alloc;
+   pipeline->base.batch.next = pipeline->base.batch.start = pipeline->base.batch_data;
+   pipeline->base.batch.end = pipeline->base.batch.start + sizeof(pipeline->base.batch_data);
+   pipeline->base.batch.relocs = &pipeline->base.batch_relocs;
+   pipeline->base.batch.status = VK_SUCCESS;
+
+   pipeline->base.mem_ctx = ralloc_context(NULL);
+   pipeline->base.flags = pCreateInfo->flags;
    pipeline->cs = NULL;
 
-   util_dynarray_init(&pipeline->executables, pipeline->mem_ctx);
+   util_dynarray_init(&pipeline->base.executables, pipeline->base.mem_ctx);
 
    assert(pCreateInfo->stage.stage == VK_SHADER_STAGE_COMPUTE_BIT);
-   pipeline->active_stages |= VK_SHADER_STAGE_COMPUTE_BIT;
    ANV_FROM_HANDLE(anv_shader_module, module, pCreateInfo->stage.module);
   result = anv_pipeline_compile_cs(pipeline, cache, pCreateInfo, module,
                                    pCreateInfo->stage.pName,
                                    pCreateInfo->stage.pSpecializationInfo);
   if (result != VK_SUCCESS) {
-      ralloc_free(pipeline->mem_ctx);
+      ralloc_free(pipeline->base.mem_ctx);
       vk_free2(&device->alloc, pAllocator, pipeline);
       return result;
    }
 
   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
 
-   anv_pipeline_setup_l3_config(pipeline, cs_prog_data->base.total_shared > 0);
+   anv_pipeline_setup_l3_config(&pipeline->base, cs_prog_data->base.total_shared > 0);
 
   uint32_t group_size = cs_prog_data->local_size[0] *
      cs_prog_data->local_size[1] * cs_prog_data->local_size[2];
@@ -2288,7 +2285,7 @@ compute_pipeline_create(
 
    const struct anv_shader_bin *cs_bin = pipeline->cs;
 
-   anv_batch_emit(&pipeline->batch, GENX(MEDIA_VFE_STATE), vfe) {
+   anv_batch_emit(&pipeline->base.batch, GENX(MEDIA_VFE_STATE), vfe) {
 #if GEN_GEN > 7
       vfe.StackSize = 0;
 #else
@@ -2327,7 +2324,7 @@ compute_pipeline_create(
             cs_bin->prog_data->total_scratch / 1024 - 1;
       }
       vfe.ScratchSpaceBasePointer =
-         get_scratch_address(pipeline, MESA_SHADER_COMPUTE, cs_bin);
+         get_scratch_address(&pipeline->base, MESA_SHADER_COMPUTE, cs_bin);
    }
 }
 
@@ -2370,9 +2367,9 @@ compute_pipeline_create(
                                                 pipeline->interface_descriptor_data,
                                                 &desc);
 
-   *pPipeline = anv_pipeline_to_handle(pipeline);
+   *pPipeline = anv_pipeline_to_handle(&pipeline->base);
 
-   return pipeline->batch.status;
+   return pipeline->base.batch.status;
 }
 
 VkResult genX(CreateGraphicsPipelines)(