author | Kenneth Graunke <[email protected]> | 2017-10-13 00:35:30 -0700 |
---|---|---|
committer | Kenneth Graunke <[email protected]> | 2017-10-13 11:16:41 -0700 |
commit | 77d3d71f23b32550b045b5f74b7a5e6ca54d83c5 (patch) | |
tree | d81c67a4f43e975277a11a79cd23f47f5dd59514 /src/mesa/drivers/dri | |
parent | d22bc4ba528b6e0e3f9ba08cfd0cc0ce980d1e58 (diff) |
i965: Rename brw->no_batch_wrap to intel_batchbuffer::no_wrap
This really makes more sense in the intel_batchbuffer struct.
Reviewed-by: Chris Wilson <[email protected]>
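The renamed flag guards spans of state emission that must not be split across a batch flush: while it is set, intel_batchbuffer_require_space() grows the buffer instead of flushing. The sketch below is a minimal, self-contained illustration of that pattern, assuming simplified stand-in types and helpers (batch_flush, batch_require_space, batch_emit and the sizes are invented for the example and are not Mesa's API); it only mirrors the no_wrap logic visible in the hunks below.

```c
#include <stdbool.h>
#include <stdio.h>

#define BATCH_SZ 64  /* deliberately tiny so the example hits both paths */

struct intel_batchbuffer {
   unsigned used;   /* bytes of commands written so far */
   unsigned size;   /* current capacity; may grow while no_wrap is set */
   bool no_wrap;    /* the flag this commit moves into the struct */
};

struct brw_context {
   struct intel_batchbuffer batch;
};

/* Stand-in for intel_batchbuffer_flush(): pretend to submit the batch and
 * start a fresh one. */
static void batch_flush(struct brw_context *brw)
{
   printf("flush: submitting %u bytes\n", brw->batch.used);
   brw->batch.used = 0;
   brw->batch.size = BATCH_SZ;
}

/* Mirrors the decision in intel_batchbuffer_require_space(): when the
 * request does not fit, flush in the normal case, but grow the buffer
 * instead while no_wrap is set, so an atomic run of state emission is
 * not split across two batches. */
static void batch_require_space(struct brw_context *brw, unsigned sz)
{
   struct intel_batchbuffer *batch = &brw->batch;

   if (batch->used + sz >= batch->size) {
      if (!batch->no_wrap)
         batch_flush(brw);
      else
         batch->size *= 2;   /* stand-in for growing the underlying BO */
   }
}

/* Stand-in for emitting sz bytes of commands. */
static void batch_emit(struct brw_context *brw, unsigned sz)
{
   batch_require_space(brw, sz);
   brw->batch.used += sz;
}

int main(void)
{
   struct brw_context brw = { .batch = { .used = 0, .size = BATCH_SZ } };

   batch_emit(&brw, 40);          /* free to flush if it had to */

   /* Bracket emission that must stay in one batch, the way brw_draw.c,
    * brw_compute.c and genX_blorp_exec.c do in this patch. */
   brw.batch.no_wrap = true;
   batch_emit(&brw, 40);          /* would have flushed; grows instead */
   batch_emit(&brw, 40);
   brw.batch.no_wrap = false;

   printf("batch holds %u bytes (capacity %u)\n",
          brw.batch.used, brw.batch.size);
   return 0;
}
```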
Diffstat (limited to 'src/mesa/drivers/dri')
-rw-r--r-- | src/mesa/drivers/dri/i965/brw_compute.c | 4 |
-rw-r--r-- | src/mesa/drivers/dri/i965/brw_context.h | 2 |
-rw-r--r-- | src/mesa/drivers/dri/i965/brw_draw.c | 4 |
-rw-r--r-- | src/mesa/drivers/dri/i965/genX_blorp_exec.c | 4 |
-rw-r--r-- | src/mesa/drivers/dri/i965/intel_batchbuffer.c | 10 |
5 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/src/mesa/drivers/dri/i965/brw_compute.c b/src/mesa/drivers/dri/i965/brw_compute.c
index 7f0278ac92b..c1b2df590b7 100644
--- a/src/mesa/drivers/dri/i965/brw_compute.c
+++ b/src/mesa/drivers/dri/i965/brw_compute.c
@@ -187,12 +187,12 @@ brw_dispatch_compute_common(struct gl_context *ctx)
    intel_batchbuffer_save_state(brw);

 retry:
-   brw->no_batch_wrap = true;
+   brw->batch.no_wrap = true;
    brw_upload_compute_state(brw);

    brw_emit_gpgpu_walker(brw);

-   brw->no_batch_wrap = false;
+   brw->batch.no_wrap = false;

    if (!brw_batch_has_aperture_space(brw, 0)) {
       if (!fail_next) {
diff --git a/src/mesa/drivers/dri/i965/brw_context.h b/src/mesa/drivers/dri/i965/brw_context.h
index 72d7b8a62d4..679b848c505 100644
--- a/src/mesa/drivers/dri/i965/brw_context.h
+++ b/src/mesa/drivers/dri/i965/brw_context.h
@@ -465,6 +465,7 @@ struct intel_batchbuffer {
    bool use_batch_first;
    bool needs_sol_reset;
    bool state_base_address_emitted;
+   bool no_wrap;

    struct brw_reloc_list batch_relocs;
    struct brw_reloc_list state_relocs;
@@ -695,7 +696,6 @@ struct brw_context
    uint32_t reset_count;

    struct intel_batchbuffer batch;
-   bool no_batch_wrap;

    struct {
       struct brw_bo *bo;
diff --git a/src/mesa/drivers/dri/i965/brw_draw.c b/src/mesa/drivers/dri/i965/brw_draw.c
index 0364d442f21..2721c722693 100644
--- a/src/mesa/drivers/dri/i965/brw_draw.c
+++ b/src/mesa/drivers/dri/i965/brw_draw.c
@@ -792,13 +792,13 @@ retry:
     * brw->ctx.NewDriverState.
     */
    if (brw->ctx.NewDriverState) {
-      brw->no_batch_wrap = true;
+      brw->batch.no_wrap = true;
       brw_upload_render_state(brw);
    }

    brw_emit_prim(brw, prim, brw->primitive, xfb_obj, stream);

-   brw->no_batch_wrap = false;
+   brw->batch.no_wrap = false;

    if (!brw_batch_has_aperture_space(brw, 0)) {
       if (!fail_next) {
diff --git a/src/mesa/drivers/dri/i965/genX_blorp_exec.c b/src/mesa/drivers/dri/i965/genX_blorp_exec.c
index 3fe81c7c6a1..3c7a7b47dbd 100644
--- a/src/mesa/drivers/dri/i965/genX_blorp_exec.c
+++ b/src/mesa/drivers/dri/i965/genX_blorp_exec.c
@@ -224,7 +224,7 @@ retry:
    intel_batchbuffer_require_space(brw, 1400, RENDER_RING);
    brw_require_statebuffer_space(brw, 600);
    intel_batchbuffer_save_state(brw);
-   brw->no_batch_wrap = true;
+   brw->batch.no_wrap = true;

 #if GEN_GEN == 6
    /* Emit workaround flushes when we switch from drawing to blorping. */
@@ -252,7 +252,7 @@ retry:

    blorp_exec(batch, params);

-   brw->no_batch_wrap = false;
+   brw->batch.no_wrap = false;

    /* Check if the blorp op we just did would make our batch likely to fail to
     * map all the BOs into the GPU at batch exec time later. If so, flush the
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.c b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
index d564510d06a..c96e2827f28 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
@@ -371,7 +371,7 @@ intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
    const unsigned batch_used = USED_BATCH(*batch) * 4;

    if (batch_used + sz >= BATCH_SZ) {
-      if (!brw->no_batch_wrap) {
+      if (!batch->no_wrap) {
         intel_batchbuffer_flush(brw);
      } else {
         const unsigned new_size =
@@ -631,7 +631,7 @@ brw_finish_batch(struct brw_context *brw)
 {
    const struct gen_device_info *devinfo = &brw->screen->devinfo;

-   brw->no_batch_wrap = true;
+   brw->batch.no_wrap = true;

    /* Capture the closing pipeline statistics register values necessary to
     * support query objects (in the non-hardware context world).
@@ -675,7 +675,7 @@ brw_finish_batch(struct brw_context *brw)
       intel_batchbuffer_emit_dword(&brw->batch, MI_NOOP);
    }

-   brw->no_batch_wrap = false;
+   brw->batch.no_wrap = false;
 }

 static void
@@ -891,7 +891,7 @@ _intel_batchbuffer_flush_fence(struct brw_context *brw,
       return 0;

    /* Check that we didn't just wrap our batchbuffer at a bad time. */
-   assert(!brw->no_batch_wrap);
+   assert(!brw->batch.no_wrap);

    brw_finish_batch(brw);
    intel_upload_finish(brw);
@@ -1048,7 +1048,7 @@ brw_state_batch(struct brw_context *brw,
    uint32_t offset = ALIGN(batch->state_used, alignment);

    if (offset + size >= STATE_SZ) {
-      if (!brw->no_batch_wrap) {
+      if (!batch->no_wrap) {
         intel_batchbuffer_flush(brw);
         offset = ALIGN(batch->state_used, alignment);
      } else {