author    | Jason Ekstrand <[email protected]> | 2017-06-24 15:14:50 -0700
committer | Jason Ekstrand <[email protected]> | 2017-07-05 14:22:40 -0700
commit    | 0673bbfd9ba16be81b6ab84a587735496af4fb16 (patch)
tree      | 80b6f4ec20ce520b1cebfc2d29eb3fbfc5c855a2 /src/mesa
parent    | 95731b7ccc605bbfe2c3cb3d533219bc0788cbaa (diff)
i965: Move surface resolves back to draw/dispatch time
This is effectively a revert of 388f02729bbf88ba104f4f8ee1fdf005a240969c,
though much code has been added since. Kristian initially moved it to
try to avoid locking problems with meta-based resolves. Now that meta
is gone from the resolve path (for good this time, we hope), we can move
it back. The problem with having it in intel_update_state was that the
UpdateState hook gets called by core Mesa directly, and all sorts of
things cause an UpdateState call, which may trigger resolves at
inopportune times. In particular, it gets called by _mesa_Clear and,
if we have a HiZ buffer in the INVALID_AUX state, triggers a HiZ resolve
right before the clear, which is pointless. By moving it back to
try_draw_prims time, we know it will only get called right before a draw,
which is where we want it.
Reviewed-by: Kenneth Graunke <[email protected]>
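
For readers skimming the patch, the following is a minimal standalone C sketch
(not driver code; the brw_* names and stub bodies are illustrative only) of the
ordering the commit message describes: the state-update hook merely accumulates
dirty flags, and resolves run from the draw path right before hardware state is
emitted. The real logic lives in brw_predraw_resolve_inputs() and
brw_predraw_resolve_framebuffer() in the diff below.

    /* Schematic model of "resolve at draw time, not at UpdateState time". */
    #include <stdio.h>

    struct brw_context { int new_gl_state; };

    static void brw_predraw_resolve_inputs(struct brw_context *brw)
    {
       /* Stub: in the driver this resolves textures and shader images. */
       (void)brw;
       printf("resolve inputs\n");
    }

    static void brw_predraw_resolve_framebuffer(struct brw_context *brw)
    {
       /* Stub: in the driver this resolves HiZ and color draw buffers. */
       (void)brw;
       printf("resolve framebuffer\n");
    }

    static void intel_update_state(struct brw_context *brw, int new_state)
    {
       /* After this patch: only accumulate dirty state here, so calls from
        * e.g. _mesa_Clear cannot trigger pointless resolves. */
       brw->new_gl_state |= new_state;
    }

    static void brw_try_draw_prims(struct brw_context *brw)
    {
       /* Resolves happen here, right before hardware state is emitted. */
       brw_predraw_resolve_inputs(brw);
       brw_predraw_resolve_framebuffer(brw);
       printf("emit state and draw\n");
    }

    int main(void)
    {
       struct brw_context brw = {0};
       intel_update_state(&brw, 0x1);   /* e.g. triggered by a Clear */
       brw_try_draw_prims(&brw);        /* resolves run only here */
       return 0;
    }
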
Diffstat (limited to 'src/mesa')
-rw-r--r-- | src/mesa/drivers/dri/i965/brw_compute.c |   2
-rw-r--r-- | src/mesa/drivers/dri/i965/brw_context.c | 121
-rw-r--r-- | src/mesa/drivers/dri/i965/brw_context.h |   2
-rw-r--r-- | src/mesa/drivers/dri/i965/brw_draw.c    | 139
4 files changed, 143 insertions, 121 deletions
diff --git a/src/mesa/drivers/dri/i965/brw_compute.c b/src/mesa/drivers/dri/i965/brw_compute.c
index 80461536359..2867a142f13 100644
--- a/src/mesa/drivers/dri/i965/brw_compute.c
+++ b/src/mesa/drivers/dri/i965/brw_compute.c
@@ -188,6 +188,8 @@ brw_dispatch_compute_common(struct gl_context *ctx)
 
    brw_validate_textures(brw);
 
+   brw_predraw_resolve_inputs(brw);
+
    const int sampler_state_size = 16; /* 16 bytes */
    estimated_buffer_space_needed = 512; /* batchbuffer commands */
    estimated_buffer_space_needed += (BRW_MAX_TEX_UNIT *
diff --git a/src/mesa/drivers/dri/i965/brw_context.c b/src/mesa/drivers/dri/i965/brw_context.c
index e921a41c827..0b3fdc68429 100644
--- a/src/mesa/drivers/dri/i965/brw_context.c
+++ b/src/mesa/drivers/dri/i965/brw_context.c
@@ -170,39 +170,17 @@ intel_update_framebuffer(struct gl_context *ctx,
                             fb->DefaultGeometry.NumSamples);
 }
 
-static bool
-intel_disable_rb_aux_buffer(struct brw_context *brw, const struct brw_bo *bo)
-{
-   const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
-   bool found = false;
-
-   for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
-      const struct intel_renderbuffer *irb =
-         intel_renderbuffer(fb->_ColorDrawBuffers[i]);
-
-      if (irb && irb->mt->bo == bo) {
-         found = brw->draw_aux_buffer_disabled[i] = true;
-      }
-   }
-
-   return found;
-}
-
 static void
 intel_update_state(struct gl_context * ctx)
 {
    GLuint new_state = ctx->NewState;
    struct brw_context *brw = brw_context(ctx);
-   struct intel_texture_object *tex_obj;
-   struct intel_renderbuffer *depth_irb;
 
    if (ctx->swrast_context)
       _swrast_InvalidateState(ctx, new_state);
 
    brw->NewGLState |= new_state;
 
-   _mesa_unlock_context_textures(ctx);
-
    if (new_state & (_NEW_SCISSOR | _NEW_BUFFERS | _NEW_VIEWPORT))
       _mesa_update_draw_buffer_bounds(ctx, ctx->DrawBuffer);
@@ -218,105 +196,6 @@ intel_update_state(struct gl_context * ctx)
 
    intel_prepare_render(brw);
 
-   /* Resolve the depth buffer's HiZ buffer. */
-   depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
-   if (depth_irb && depth_irb->mt) {
-      intel_miptree_prepare_depth(brw, depth_irb->mt,
-                                  depth_irb->mt_level,
-                                  depth_irb->mt_layer,
-                                  depth_irb->layer_count);
-   }
-
-   memset(brw->draw_aux_buffer_disabled, 0,
-          sizeof(brw->draw_aux_buffer_disabled));
-
-   /* Resolve depth buffer and render cache of each enabled texture. */
-   int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
-   for (int i = 0; i <= maxEnabledUnit; i++) {
-      if (!ctx->Texture.Unit[i]._Current)
-         continue;
-      tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
-      if (!tex_obj || !tex_obj->mt)
-         continue;
-
-      /* We need inte_texture_object::_Format to be valid */
-      intel_finalize_mipmap_tree(brw, i);
-
-      bool aux_supported;
-      intel_miptree_prepare_texture(brw, tex_obj->mt, tex_obj->_Format,
-                                    &aux_supported);
-
-      if (!aux_supported && brw->gen >= 9 &&
-          intel_disable_rb_aux_buffer(brw, tex_obj->mt->bo)) {
-         perf_debug("Sampling renderbuffer with non-compressible format - "
-                    "turning off compression");
-      }
-
-      brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
-
-      if (tex_obj->base.StencilSampling ||
-          tex_obj->mt->format == MESA_FORMAT_S_UINT8) {
-         intel_update_r8stencil(brw, tex_obj->mt);
-      }
-   }
-
-   /* Resolve color for each active shader image. */
-   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
-      const struct gl_program *prog = ctx->_Shader->CurrentProgram[i];
-
-      if (unlikely(prog && prog->info.num_images)) {
-         for (unsigned j = 0; j < prog->info.num_images; j++) {
-            struct gl_image_unit *u =
-               &ctx->ImageUnits[prog->sh.ImageUnits[j]];
-            tex_obj = intel_texture_object(u->TexObj);
-
-            if (tex_obj && tex_obj->mt) {
-               intel_miptree_prepare_image(brw, tex_obj->mt);
-
-               if (intel_miptree_is_lossless_compressed(brw, tex_obj->mt) &&
-                   intel_disable_rb_aux_buffer(brw, tex_obj->mt->bo)) {
-                  perf_debug("Using renderbuffer as shader image - turning "
-                             "off lossless compression");
-               }
-
-               brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
-            }
-         }
-      }
-   }
-
-   /* Resolve color buffers for non-coherent framebuffer fetch. */
-   if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
-       ctx->FragmentProgram._Current &&
-       ctx->FragmentProgram._Current->info.outputs_read) {
-      const struct gl_framebuffer *fb = ctx->DrawBuffer;
-
-      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
-         const struct intel_renderbuffer *irb =
-            intel_renderbuffer(fb->_ColorDrawBuffers[i]);
-
-         if (irb) {
-            intel_miptree_prepare_fb_fetch(brw, irb->mt, irb->mt_level,
-                                           irb->mt_layer, irb->layer_count);
-         }
-      }
-   }
-
-   struct gl_framebuffer *fb = ctx->DrawBuffer;
-   for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
-      struct intel_renderbuffer *irb =
-         intel_renderbuffer(fb->_ColorDrawBuffers[i]);
-
-      if (irb == NULL || irb->mt == NULL)
-         continue;
-
-      intel_miptree_prepare_render(brw, irb->mt, irb->mt_level,
-                                   irb->mt_layer, irb->layer_count,
-                                   ctx->Color.sRGBEnabled);
-   }
-
-   _mesa_lock_context_textures(ctx);
-
    if (new_state & _NEW_BUFFERS) {
       intel_update_framebuffer(ctx, ctx->DrawBuffer);
       if (ctx->DrawBuffer != ctx->ReadBuffer)
diff --git a/src/mesa/drivers/dri/i965/brw_context.h b/src/mesa/drivers/dri/i965/brw_context.h
index 817396dddf6..3a613ff63e6 100644
--- a/src/mesa/drivers/dri/i965/brw_context.h
+++ b/src/mesa/drivers/dri/i965/brw_context.h
@@ -1250,6 +1250,8 @@ void intel_update_renderbuffers(__DRIcontext *context,
                                 __DRIdrawable *drawable);
 void intel_prepare_render(struct brw_context *brw);
 
+void brw_predraw_resolve_inputs(struct brw_context *brw);
+
 void intel_resolve_for_dri2_flush(struct brw_context *brw,
                                   __DRIdrawable *drawable);
diff --git a/src/mesa/drivers/dri/i965/brw_draw.c b/src/mesa/drivers/dri/i965/brw_draw.c
index 821f1e24e94..2ac35032c94 100644
--- a/src/mesa/drivers/dri/i965/brw_draw.c
+++ b/src/mesa/drivers/dri/i965/brw_draw.c
@@ -341,6 +341,138 @@ brw_merge_inputs(struct brw_context *brw,
    }
 }
 
+static bool
+intel_disable_rb_aux_buffer(struct brw_context *brw, const struct brw_bo *bo)
+{
+   const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
+   bool found = false;
+
+   for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
+      const struct intel_renderbuffer *irb =
+         intel_renderbuffer(fb->_ColorDrawBuffers[i]);
+
+      if (irb && irb->mt->bo == bo) {
+         found = brw->draw_aux_buffer_disabled[i] = true;
+      }
+   }
+
+   return found;
+}
+
+/**
+ * \brief Resolve buffers before drawing.
+ *
+ * Resolve the depth buffer's HiZ buffer, resolve the depth buffer of each
+ * enabled depth texture, and flush the render cache for any dirty textures.
+ */
+void
+brw_predraw_resolve_inputs(struct brw_context *brw)
+{
+   struct gl_context *ctx = &brw->ctx;
+   struct intel_texture_object *tex_obj;
+
+   memset(brw->draw_aux_buffer_disabled, 0,
+          sizeof(brw->draw_aux_buffer_disabled));
+
+   /* Resolve depth buffer and render cache of each enabled texture. */
+   int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
+   for (int i = 0; i <= maxEnabledUnit; i++) {
+      if (!ctx->Texture.Unit[i]._Current)
+         continue;
+      tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
+      if (!tex_obj || !tex_obj->mt)
+         continue;
+
+      bool aux_supported;
+      intel_miptree_prepare_texture(brw, tex_obj->mt, tex_obj->_Format,
+                                    &aux_supported);
+
+      if (!aux_supported && brw->gen >= 9 &&
+          intel_disable_rb_aux_buffer(brw, tex_obj->mt->bo)) {
+         perf_debug("Sampling renderbuffer with non-compressible format - "
+                    "turning off compression");
+      }
+
+      brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
+
+      if (tex_obj->base.StencilSampling ||
+          tex_obj->mt->format == MESA_FORMAT_S_UINT8) {
+         intel_update_r8stencil(brw, tex_obj->mt);
+      }
+   }
+
+   /* Resolve color for each active shader image. */
+   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+      const struct gl_program *prog = ctx->_Shader->CurrentProgram[i];
+
+      if (unlikely(prog && prog->info.num_images)) {
+         for (unsigned j = 0; j < prog->info.num_images; j++) {
+            struct gl_image_unit *u =
+               &ctx->ImageUnits[prog->sh.ImageUnits[j]];
+            tex_obj = intel_texture_object(u->TexObj);
+
+            if (tex_obj && tex_obj->mt) {
+               intel_miptree_prepare_image(brw, tex_obj->mt);
+
+               if (intel_miptree_is_lossless_compressed(brw, tex_obj->mt) &&
+                   intel_disable_rb_aux_buffer(brw, tex_obj->mt->bo)) {
+                  perf_debug("Using renderbuffer as shader image - turning "
+                             "off lossless compression");
+               }
+
+               brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
+            }
+         }
+      }
+   }
+}
+
+static void
+brw_predraw_resolve_framebuffer(struct brw_context *brw)
+{
+   struct gl_context *ctx = &brw->ctx;
+   struct intel_renderbuffer *depth_irb;
+
+   /* Resolve the depth buffer's HiZ buffer. */
+   depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
+   if (depth_irb && depth_irb->mt) {
+      intel_miptree_prepare_depth(brw, depth_irb->mt,
+                                  depth_irb->mt_level,
+                                  depth_irb->mt_layer,
+                                  depth_irb->layer_count);
+   }
+
+   /* Resolve color buffers for non-coherent framebuffer fetch. */
+   if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
+       ctx->FragmentProgram._Current &&
+       ctx->FragmentProgram._Current->info.outputs_read) {
+      const struct gl_framebuffer *fb = ctx->DrawBuffer;
+
+      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
+         const struct intel_renderbuffer *irb =
+            intel_renderbuffer(fb->_ColorDrawBuffers[i]);
+
+         if (irb) {
+            intel_miptree_prepare_fb_fetch(brw, irb->mt, irb->mt_level,
+                                           irb->mt_layer, irb->layer_count);
+         }
+      }
+   }
+
+   struct gl_framebuffer *fb = ctx->DrawBuffer;
+   for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
+      struct intel_renderbuffer *irb =
+         intel_renderbuffer(fb->_ColorDrawBuffers[i]);
+
+      if (irb == NULL || irb->mt == NULL)
+         continue;
+
+      intel_miptree_prepare_render(brw, irb->mt, irb->mt_level,
+                                   irb->mt_layer, irb->layer_count,
+                                   ctx->Color.sRGBEnabled);
+   }
+}
+
 /**
  * \brief Call this after drawing to mark which buffers need resolving
  *
@@ -513,6 +645,13 @@ brw_try_draw_prims(struct gl_context *ctx,
     */
    brw_workaround_depthstencil_alignment(brw, 0);
 
+   /* Resolves must occur after updating renderbuffers, updating context state,
+    * and finalizing textures but before setting up any hardware state for
+    * this draw call.
+    */
+   brw_predraw_resolve_inputs(brw);
+   brw_predraw_resolve_framebuffer(brw);
+
    /* Bind all inputs, derive varying and size information:
    */
    brw_merge_inputs(brw, arrays);