author     Kenneth Graunke <[email protected]>  2013-07-03 14:21:19 -0700
committer  Kenneth Graunke <[email protected]>  2013-07-09 14:09:08 -0700
commit     329779a0b45b63be17627f026533c80b2c8f7991 (patch)
tree       88ee98c79bdfdacaf50b53d7303caf2f1d63688b /src
parent     5d8186ac1a22afbaa6ed68e0fb67d1f150f798cb (diff)
i965: Move intel_context::batch to brw_context.
Signed-off-by: Kenneth Graunke <[email protected]>
Acked-by: Chris Forbes <[email protected]>
Acked-by: Paul Berry <[email protected]>
Acked-by: Anuj Phogat <[email protected]>
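
The change is mechanical: the intel_batchbuffer member moves from the shared struct intel_context into struct brw_context, and every access of the form intel->batch.* or brw->intel.batch.* becomes brw->batch.*. Below is a minimal sketch of that before/after access pattern; the struct layouts are simplified stand-ins for the real Mesa headers (only the names batch, bo, and used come from the patch, the rest is illustrative).

/* Illustrative sketch only: simplified stand-ins for the driver structs,
 * showing the access-path change this patch applies throughout i965. */
struct intel_batchbuffer {
   void *bo;            /* batch buffer object (drm_intel_bo* in the driver) */
   unsigned int used;   /* DWORDs emitted so far */
};

struct intel_context {
   int gen;             /* the batch member is removed from here by this patch */
};

struct brw_context {
   struct intel_context intel;
   struct intel_batchbuffer batch;   /* now owned directly by brw_context */
};

static unsigned int
batch_dwords_used(struct brw_context *brw)
{
   /* old access path: brw->intel.batch.used (or intel->batch.used) */
   return brw->batch.used;
}

With the member owned by brw_context, the many call sites that previously needed a local "struct intel_context *intel = &brw->intel;" just to reach the batchbuffer can drop it, which is where most of the deleted lines in the diff below come from.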
Diffstat (limited to 'src')
-rw-r--r--  src/mesa/drivers/dri/i965/brw_blorp.cpp | 2
-rw-r--r--  src/mesa/drivers/dri/i965/brw_cc.c | 6
-rw-r--r--  src/mesa/drivers/dri/i965/brw_clip_state.c | 6
-rw-r--r--  src/mesa/drivers/dri/i965/brw_context.c | 2
-rw-r--r--  src/mesa/drivers/dri/i965/brw_context.h | 4
-rw-r--r--  src/mesa/drivers/dri/i965/brw_draw.c | 4
-rw-r--r--  src/mesa/drivers/dri/i965/brw_fs.cpp | 6
-rw-r--r--  src/mesa/drivers/dri/i965/brw_misc_state.c | 20
-rw-r--r--  src/mesa/drivers/dri/i965/brw_queryobj.c | 4
-rw-r--r--  src/mesa/drivers/dri/i965/brw_sf_state.c | 6
-rw-r--r--  src/mesa/drivers/dri/i965/brw_state_batch.c | 10
-rw-r--r--  src/mesa/drivers/dri/i965/brw_state_dump.c | 63
-rw-r--r--  src/mesa/drivers/dri/i965/brw_urb.c | 6
-rw-r--r--  src/mesa/drivers/dri/i965/brw_vec4.cpp | 6
-rw-r--r--  src/mesa/drivers/dri/i965/brw_vs_state.c | 2
-rw-r--r--  src/mesa/drivers/dri/i965/brw_vtbl.c | 4
-rw-r--r--  src/mesa/drivers/dri/i965/brw_wm_sampler_state.c | 6
-rw-r--r--  src/mesa/drivers/dri/i965/brw_wm_state.c | 8
-rw-r--r--  src/mesa/drivers/dri/i965/brw_wm_surface_state.c | 12
-rw-r--r--  src/mesa/drivers/dri/i965/gen6_blorp.cpp | 12
-rw-r--r--  src/mesa/drivers/dri/i965/gen6_queryobj.c | 5
-rw-r--r--  src/mesa/drivers/dri/i965/gen7_blorp.cpp | 2
-rw-r--r--  src/mesa/drivers/dri/i965/gen7_sol_state.c | 3
-rw-r--r--  src/mesa/drivers/dri/i965/gen7_wm_surface_state.c | 12
-rw-r--r--  src/mesa/drivers/dri/i965/intel_batchbuffer.c | 115
-rw-r--r--  src/mesa/drivers/dri/i965/intel_batchbuffer.h | 20
-rw-r--r--  src/mesa/drivers/dri/i965/intel_blit.c | 5
-rw-r--r--  src/mesa/drivers/dri/i965/intel_buffer_objects.c | 7
-rw-r--r--  src/mesa/drivers/dri/i965/intel_context.c | 9
-rw-r--r--  src/mesa/drivers/dri/i965/intel_context.h | 2
-rw-r--r--  src/mesa/drivers/dri/i965/intel_screen.c | 2
-rw-r--r--  src/mesa/drivers/dri/i965/intel_syncobj.c | 3
-rw-r--r--  src/mesa/drivers/dri/i965/intel_tex_subimage.c | 2
33 files changed, 170 insertions, 206 deletions
diff --git a/src/mesa/drivers/dri/i965/brw_blorp.cpp b/src/mesa/drivers/dri/i965/brw_blorp.cpp
index 2da46d67d18..cba0ce4fe8d 100644
--- a/src/mesa/drivers/dri/i965/brw_blorp.cpp
+++ b/src/mesa/drivers/dri/i965/brw_blorp.cpp
@@ -215,7 +215,7 @@ brw_blorp_exec(struct brw_context *brw, const brw_blorp_params *params)
brw->state.dirty.brw = ~0;
brw->state.dirty.cache = ~0;
brw->state_batch_count = 0;
- intel->batch.need_workaround_flush = true;
+ brw->batch.need_workaround_flush = true;
/* Flush the sampler cache so any texturing from the destination is
* coherent.
diff --git a/src/mesa/drivers/dri/i965/brw_cc.c b/src/mesa/drivers/dri/i965/brw_cc.c
index 698af6db87c..f3e5b7cfb4a 100644
--- a/src/mesa/drivers/dri/i965/brw_cc.c
+++ b/src/mesa/drivers/dri/i965/brw_cc.c
@@ -215,16 +215,16 @@ static void upload_cc_unit(struct brw_context *brw)
cc->cc5.statistics_enable = 1;
/* CACHE_NEW_CC_VP */
- cc->cc4.cc_viewport_state_offset = (intel->batch.bo->offset +
+ cc->cc4.cc_viewport_state_offset = (brw->batch.bo->offset +
brw->cc.vp_offset) >> 5; /* reloc */
brw->state.dirty.cache |= CACHE_NEW_CC_UNIT;
/* Emit CC viewport relocation */
- drm_intel_bo_emit_reloc(brw->intel.batch.bo,
+ drm_intel_bo_emit_reloc(brw->batch.bo,
(brw->cc.state_offset +
offsetof(struct brw_cc_unit_state, cc4)),
- intel->batch.bo, brw->cc.vp_offset,
+ brw->batch.bo, brw->cc.vp_offset,
I915_GEM_DOMAIN_INSTRUCTION, 0);
}
diff --git a/src/mesa/drivers/dri/i965/brw_clip_state.c b/src/mesa/drivers/dri/i965/brw_clip_state.c
index 7f6674322fe..d1c2938eee0 100644
--- a/src/mesa/drivers/dri/i965/brw_clip_state.c
+++ b/src/mesa/drivers/dri/i965/brw_clip_state.c
@@ -128,13 +128,13 @@ brw_upload_clip_unit(struct brw_context *brw)
{
clip->clip5.guard_band_enable = 1;
clip->clip6.clipper_viewport_state_ptr =
- (intel->batch.bo->offset + brw->clip.vp_offset) >> 5;
+ (brw->batch.bo->offset + brw->clip.vp_offset) >> 5;
/* emit clip viewport relocation */
- drm_intel_bo_emit_reloc(brw->intel.batch.bo,
+ drm_intel_bo_emit_reloc(brw->batch.bo,
(brw->clip.state_offset +
offsetof(struct brw_clip_unit_state, clip6)),
- intel->batch.bo, brw->clip.vp_offset,
+ brw->batch.bo, brw->clip.vp_offset,
I915_GEM_DOMAIN_INSTRUCTION, 0);
}
diff --git a/src/mesa/drivers/dri/i965/brw_context.c b/src/mesa/drivers/dri/i965/brw_context.c
index d170b24b76c..f0f472349d0 100644
--- a/src/mesa/drivers/dri/i965/brw_context.c
+++ b/src/mesa/drivers/dri/i965/brw_context.c
@@ -447,7 +447,7 @@ brwCreateContext(int api,
brw->emit_state_always = 0;
- intel->batch.need_workaround_flush = true;
+ brw->batch.need_workaround_flush = true;
ctx->VertexProgram._MaintainTnlProgram = true;
ctx->FragmentProgram._MaintainTexEnvProgram = true;
diff --git a/src/mesa/drivers/dri/i965/brw_context.h b/src/mesa/drivers/dri/i965/brw_context.h
index 414a009c68f..fc3208ba026 100644
--- a/src/mesa/drivers/dri/i965/brw_context.h
+++ b/src/mesa/drivers/dri/i965/brw_context.h
@@ -806,6 +806,8 @@ struct brw_context
drm_intel_context *hw_ctx;
+ struct intel_batchbuffer batch;
+
/**
* Set if rendering has occured to the drawable's front buffer.
*
@@ -1411,7 +1413,7 @@ brw_program_reloc(struct brw_context *brw, uint32_t state_offset,
return prog_offset;
}
- drm_intel_bo_emit_reloc(intel->batch.bo,
+ drm_intel_bo_emit_reloc(brw->batch.bo,
state_offset,
brw->cache.bo,
prog_offset,
diff --git a/src/mesa/drivers/dri/i965/brw_draw.c b/src/mesa/drivers/dri/i965/brw_draw.c
index 01e6b2443a7..b338fe0fd9f 100644
--- a/src/mesa/drivers/dri/i965/brw_draw.c
+++ b/src/mesa/drivers/dri/i965/brw_draw.c
@@ -210,7 +210,7 @@ static void brw_emit_prim(struct brw_context *brw,
OUT_BATCH(base_vertex_location);
ADVANCE_BATCH();
- intel->batch.need_workaround_flush = true;
+ brw->batch.need_workaround_flush = true;
if (brw->always_flush_cache) {
intel_batchbuffer_emit_mi_flush(brw);
@@ -454,7 +454,7 @@ retry:
intel->no_batch_wrap = false;
- if (dri_bufmgr_check_aperture_space(&intel->batch.bo, 1)) {
+ if (dri_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
if (!fail_next) {
intel_batchbuffer_reset_to_saved(brw);
intel_batchbuffer_flush(brw);
diff --git a/src/mesa/drivers/dri/i965/brw_fs.cpp b/src/mesa/drivers/dri/i965/brw_fs.cpp
index 97cd291f974..4ee91f613a2 100644
--- a/src/mesa/drivers/dri/i965/brw_fs.cpp
+++ b/src/mesa/drivers/dri/i965/brw_fs.cpp
@@ -3021,8 +3021,8 @@ brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c,
float start_time = 0;
if (unlikely(intel->perf_debug)) {
- start_busy = (intel->batch.last_bo &&
- drm_intel_bo_busy(intel->batch.last_bo));
+ start_busy = (brw->batch.last_bo &&
+ drm_intel_bo_busy(brw->batch.last_bo));
start_time = get_time();
}
@@ -3082,7 +3082,7 @@ brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c,
brw_wm_debug_recompile(brw, prog, &c->key);
shader->compiled_once = true;
- if (start_busy && !drm_intel_bo_busy(intel->batch.last_bo)) {
+ if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) {
perf_debug("FS compile took %.03f ms and stalled the GPU\n",
(get_time() - start_time) * 1000);
}
diff --git a/src/mesa/drivers/dri/i965/brw_misc_state.c b/src/mesa/drivers/dri/i965/brw_misc_state.c
index e7c14f05d04..7203802f913 100644
--- a/src/mesa/drivers/dri/i965/brw_misc_state.c
+++ b/src/mesa/drivers/dri/i965/brw_misc_state.c
@@ -152,20 +152,20 @@ static void upload_pipelined_state_pointers(struct brw_context *brw )
BEGIN_BATCH(7);
OUT_BATCH(_3DSTATE_PIPELINED_POINTERS << 16 | (7 - 2));
- OUT_RELOC(intel->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
+ OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
brw->vs.state_offset);
if (brw->gs.prog_active)
- OUT_RELOC(brw->intel.batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
+ OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
brw->gs.state_offset | 1);
else
OUT_BATCH(0);
- OUT_RELOC(brw->intel.batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
+ OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
brw->clip.state_offset | 1);
- OUT_RELOC(brw->intel.batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
+ OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
brw->sf.state_offset);
- OUT_RELOC(brw->intel.batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
+ OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
brw->wm.state_offset);
- OUT_RELOC(brw->intel.batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
+ OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
brw->cc.state_offset);
ADVANCE_BATCH();
@@ -1050,7 +1050,7 @@ static void upload_state_base_address( struct brw_context *brw )
* BINDING_TABLE_STATE
* SURFACE_STATE
*/
- OUT_RELOC(intel->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0, 1);
+ OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0, 1);
/* Dynamic state base address:
* SAMPLER_STATE
* SAMPLER_BORDER_COLOR_STATE
@@ -1061,7 +1061,7 @@ static void upload_state_base_address( struct brw_context *brw )
* Push constants (when INSTPM: CONSTANT_BUFFER Address Offset
* Disable is clear, which we rely on)
*/
- OUT_RELOC(intel->batch.bo, (I915_GEM_DOMAIN_RENDER |
+ OUT_RELOC(brw->batch.bo, (I915_GEM_DOMAIN_RENDER |
I915_GEM_DOMAIN_INSTRUCTION), 0, 1);
OUT_BATCH(1); /* Indirect object base address: MEDIA_OBJECT data */
@@ -1082,7 +1082,7 @@ static void upload_state_base_address( struct brw_context *brw )
BEGIN_BATCH(8);
OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
OUT_BATCH(1); /* General state base address */
- OUT_RELOC(intel->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0,
+ OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0,
1); /* Surface state base address */
OUT_BATCH(1); /* Indirect object base address */
OUT_RELOC(brw->cache.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
@@ -1095,7 +1095,7 @@ static void upload_state_base_address( struct brw_context *brw )
BEGIN_BATCH(6);
OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
OUT_BATCH(1); /* General state base address */
- OUT_RELOC(intel->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0,
+ OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0,
1); /* Surface state base address */
OUT_BATCH(1); /* Indirect object base address */
OUT_BATCH(1); /* General state upper bound */
diff --git a/src/mesa/drivers/dri/i965/brw_queryobj.c b/src/mesa/drivers/dri/i965/brw_queryobj.c
index 8165320c127..66026885884 100644
--- a/src/mesa/drivers/dri/i965/brw_queryobj.c
+++ b/src/mesa/drivers/dri/i965/brw_queryobj.c
@@ -137,7 +137,7 @@ brw_queryobj_get_results(struct gl_context *ctx,
* still contributing to it, flush it now so the results will be present
* when mapped.
*/
- if (drm_intel_bo_references(intel->batch.bo, query->bo))
+ if (drm_intel_bo_references(brw->batch.bo, query->bo))
intel_batchbuffer_flush(brw);
if (unlikely(intel->perf_debug)) {
@@ -402,7 +402,7 @@ static void brw_check_query(struct gl_context *ctx, struct gl_query_object *q)
* not ready yet on the first time it is queried. This ensures that
* the async query will return true in finite time.
*/
- if (query->bo && drm_intel_bo_references(intel->batch.bo, query->bo))
+ if (query->bo && drm_intel_bo_references(brw->batch.bo, query->bo))
intel_batchbuffer_flush(brw);
if (query->bo == NULL || !drm_intel_bo_busy(query->bo)) {
diff --git a/src/mesa/drivers/dri/i965/brw_sf_state.c b/src/mesa/drivers/dri/i965/brw_sf_state.c
index 4b5e7cc93f1..6515e28cf57 100644
--- a/src/mesa/drivers/dri/i965/brw_sf_state.c
+++ b/src/mesa/drivers/dri/i965/brw_sf_state.c
@@ -129,7 +129,7 @@ static void upload_sf_unit( struct brw_context *brw )
struct intel_context *intel = &brw->intel;
struct gl_context *ctx = &intel->ctx;
struct brw_sf_unit_state *sf;
- drm_intel_bo *bo = intel->batch.bo;
+ drm_intel_bo *bo = brw->batch.bo;
int chipset_max_threads;
bool render_to_fbo = _mesa_is_user_fbo(brw->intel.ctx.DrawBuffer);
@@ -175,7 +175,7 @@ static void upload_sf_unit( struct brw_context *brw )
sf->thread4.stats_enable = 1;
/* CACHE_NEW_SF_VP */
- sf->sf5.sf_viewport_state_offset = (intel->batch.bo->offset +
+ sf->sf5.sf_viewport_state_offset = (brw->batch.bo->offset +
brw->sf.vp_offset) >> 5; /* reloc */
sf->sf5.viewport_transform = 1;
@@ -290,7 +290,7 @@ static void upload_sf_unit( struct brw_context *brw )
/* Emit SF viewport relocation */
drm_intel_bo_emit_reloc(bo, (brw->sf.state_offset +
offsetof(struct brw_sf_unit_state, sf5)),
- intel->batch.bo, (brw->sf.vp_offset |
+ brw->batch.bo, (brw->sf.vp_offset |
sf->sf5.front_winding |
(sf->sf5.viewport_transform << 1)),
I915_GEM_DOMAIN_INSTRUCTION, 0);
diff --git a/src/mesa/drivers/dri/i965/brw_state_batch.c b/src/mesa/drivers/dri/i965/brw_state_batch.c
index ea1fe8148e5..c71d2f301d2 100644
--- a/src/mesa/drivers/dri/i965/brw_state_batch.c
+++ b/src/mesa/drivers/dri/i965/brw_state_batch.c
@@ -40,7 +40,7 @@ brw_track_state_batch(struct brw_context *brw,
uint32_t offset,
int size)
{
- struct intel_batchbuffer *batch = &brw->intel.batch;
+ struct intel_batchbuffer *batch = &brw->batch;
if (!brw->state_batch_list) {
/* Our structs are always aligned to at least 32 bytes, so
@@ -81,13 +81,11 @@ make_annotation(drm_intel_aub_annotation *annotation, uint32_t type,
void
brw_annotate_aub(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
-
unsigned annotation_count = 2 * brw->state_batch_count + 1;
drm_intel_aub_annotation annotations[annotation_count];
int a = 0;
make_annotation(&annotations[a++], AUB_TRACE_TYPE_BATCH, 0,
- 4*intel->batch.used);
+ 4*brw->batch.used);
for (int i = brw->state_batch_count; i-- > 0; ) {
uint32_t type = brw->state_batch_list[i].type;
uint32_t start_offset = brw->state_batch_list[i].offset;
@@ -98,7 +96,7 @@ brw_annotate_aub(struct brw_context *brw)
AUB_TRACE_SUBTYPE(type), end_offset);
}
assert(a == annotation_count);
- drm_intel_bufmgr_gem_set_aub_annotations(intel->batch.bo, annotations,
+ drm_intel_bufmgr_gem_set_aub_annotations(brw->batch.bo, annotations,
annotation_count);
}
@@ -123,7 +121,7 @@ brw_state_batch(struct brw_context *brw,
int alignment,
uint32_t *out_offset)
{
- struct intel_batchbuffer *batch = &brw->intel.batch;
+ struct intel_batchbuffer *batch = &brw->batch;
uint32_t offset;
assert(size < batch->bo->size);
diff --git a/src/mesa/drivers/dri/i965/brw_state_dump.c b/src/mesa/drivers/dri/i965/brw_state_dump.c
index 1b5a93355db..89df522bb19 100644
--- a/src/mesa/drivers/dri/i965/brw_state_dump.c
+++ b/src/mesa/drivers/dri/i965/brw_state_dump.c
@@ -39,8 +39,7 @@ static void
batch_out(struct brw_context *brw, const char *name, uint32_t offset,
int index, char *fmt, ...)
{
- struct intel_context *intel = &brw->intel;
- uint32_t *data = intel->batch.bo->virtual + offset;
+ uint32_t *data = brw->batch.bo->virtual + offset;
va_list va;
fprintf(stderr, "0x%08x: 0x%08x: %8s: ",
@@ -79,9 +78,8 @@ get_965_surface_format(unsigned int surface_format)
static void dump_vs_state(struct brw_context *brw, uint32_t offset)
{
- struct intel_context *intel = &brw->intel;
const char *name = "VS_STATE";
- struct brw_vs_unit_state *vs = intel->batch.bo->virtual + offset;
+ struct brw_vs_unit_state *vs = brw->batch.bo->virtual + offset;
batch_out(brw, name, offset, 0, "thread0\n");
batch_out(brw, name, offset, 1, "thread1\n");
@@ -95,9 +93,8 @@ static void dump_vs_state(struct brw_context *brw, uint32_t offset)
static void dump_gs_state(struct brw_context *brw, uint32_t offset)
{
- struct intel_context *intel = &brw->intel;
const char *name = "GS_STATE";
- struct brw_gs_unit_state *gs = intel->batch.bo->virtual + offset;
+ struct brw_gs_unit_state *gs = brw->batch.bo->virtual + offset;
batch_out(brw, name, offset, 0, "thread0\n");
batch_out(brw, name, offset, 1, "thread1\n");
@@ -111,9 +108,8 @@ static void dump_gs_state(struct brw_context *brw, uint32_t offset)
static void dump_clip_state(struct brw_context *brw, uint32_t offset)
{
- struct intel_context *intel = &brw->intel;
const char *name = "CLIP_STATE";
- struct brw_clip_unit_state *clip = intel->batch.bo->virtual + offset;
+ struct brw_clip_unit_state *clip = brw->batch.bo->virtual + offset;
batch_out(brw, name, offset, 0, "thread0\n");
batch_out(brw, name, offset, 1, "thread1\n");
@@ -131,9 +127,8 @@ static void dump_clip_state(struct brw_context *brw, uint32_t offset)
static void dump_sf_state(struct brw_context *brw, uint32_t offset)
{
- struct intel_context *intel = &brw->intel;
const char *name = "SF_STATE";
- struct brw_sf_unit_state *sf = intel->batch.bo->virtual + offset;
+ struct brw_sf_unit_state *sf = brw->batch.bo->virtual + offset;
batch_out(brw, name, offset, 0, "thread0\n");
batch_out(brw, name, offset, 1, "thread1\n");
@@ -148,9 +143,8 @@ static void dump_sf_state(struct brw_context *brw, uint32_t offset)
static void dump_wm_state(struct brw_context *brw, uint32_t offset)
{
- struct intel_context *intel = &brw->intel;
const char *name = "WM_STATE";
- struct brw_wm_unit_state *wm = intel->batch.bo->virtual + offset;
+ struct brw_wm_unit_state *wm = brw->batch.bo->virtual + offset;
batch_out(brw, name, offset, 0, "thread0\n");
batch_out(brw, name, offset, 1, "thread1\n");
@@ -177,7 +171,7 @@ static void dump_wm_state(struct brw_context *brw, uint32_t offset)
static void dump_surface_state(struct brw_context *brw, uint32_t offset)
{
const char *name = "SURF";
- uint32_t *surf = brw->intel.batch.bo->virtual + offset;
+ uint32_t *surf = brw->batch.bo->virtual + offset;
batch_out(brw, name, offset, 0, "%s %s\n",
get_965_surfacetype(GET_FIELD(surf[0], BRW_SURFACE_TYPE)),
@@ -201,7 +195,7 @@ static void dump_surface_state(struct brw_context *brw, uint32_t offset)
static void dump_gen7_surface_state(struct brw_context *brw, uint32_t offset)
{
const char *name = "SURF";
- uint32_t *surf = brw->intel.batch.bo->virtual + offset;
+ uint32_t *surf = brw->batch.bo->virtual + offset;
batch_out(brw, name, offset, 0, "%s %s\n",
get_965_surfacetype(GET_FIELD(surf[0], BRW_SURFACE_TYPE)),
@@ -228,7 +222,7 @@ dump_sdc(struct brw_context *brw, uint32_t offset)
struct intel_context *intel = &brw->intel;
if (intel->gen >= 5 && intel->gen <= 6) {
- struct gen5_sampler_default_color *sdc = (intel->batch.bo->virtual +
+ struct gen5_sampler_default_color *sdc = (brw->batch.bo->virtual +
offset);
batch_out(brw, name, offset, 0, "unorm rgba\n");
batch_out(brw, name, offset, 1, "r %f\n", sdc->f[0]);
@@ -243,7 +237,7 @@ dump_sdc(struct brw_context *brw, uint32_t offset)
batch_out(brw, name, offset, 10, "s16 ba\n");
batch_out(brw, name, offset, 11, "s8 rgba\n");
} else {
- struct brw_sampler_default_color *sdc = (intel->batch.bo->virtual +
+ struct brw_sampler_default_color *sdc = (brw->batch.bo->virtual +
offset);
batch_out(brw, name, offset, 0, "r %f\n", sdc->color[0]);
batch_out(brw, name, offset, 1, "g %f\n", sdc->color[1]);
@@ -257,7 +251,7 @@ static void dump_sampler_state(struct brw_context *brw,
{
struct intel_context *intel = &brw->intel;
int i;
- struct brw_sampler_state *samp = intel->batch.bo->virtual + offset;
+ struct brw_sampler_state *samp = brw->batch.bo->virtual + offset;
assert(intel->gen < 7);
@@ -279,7 +273,7 @@ static void dump_gen7_sampler_state(struct brw_context *brw,
uint32_t offset, uint32_t size)
{
struct intel_context *intel = &brw->intel;
- struct gen7_sampler_state *samp = intel->batch.bo->virtual + offset;
+ struct gen7_sampler_state *samp = brw->batch.bo->virtual + offset;
int i;
assert(intel->gen >= 7);
@@ -304,7 +298,7 @@ static void dump_sf_viewport_state(struct brw_context *brw,
{
struct intel_context *intel = &brw->intel;
const char *name = "SF VP";
- struct brw_sf_viewport *vp = intel->batch.bo->virtual + offset;
+ struct brw_sf_viewport *vp = brw->batch.bo->virtual + offset;
assert(intel->gen < 7);
@@ -326,7 +320,7 @@ static void dump_clip_viewport_state(struct brw_context *brw,
{
struct intel_context *intel = &brw->intel;
const char *name = "CLIP VP";
- struct brw_clipper_viewport *vp = intel->batch.bo->virtual + offset;
+ struct brw_clipper_viewport *vp = brw->batch.bo->virtual + offset;
assert(intel->gen < 7);
@@ -341,7 +335,7 @@ static void dump_sf_clip_viewport_state(struct brw_context *brw,
{
struct intel_context *intel = &brw->intel;
const char *name = "SF_CLIP VP";
- struct gen7_sf_clip_viewport *vp = intel->batch.bo->virtual + offset;
+ struct gen7_sf_clip_viewport *vp = brw->batch.bo->virtual + offset;
assert(intel->gen >= 7);
@@ -361,7 +355,7 @@ static void dump_sf_clip_viewport_state(struct brw_context *brw,
static void dump_cc_viewport_state(struct brw_context *brw, uint32_t offset)
{
const char *name = "CC VP";
- struct brw_cc_viewport *vp = brw->intel.batch.bo->virtual + offset;
+ struct brw_cc_viewport *vp = brw->batch.bo->virtual + offset;
batch_out(brw, name, offset, 0, "min_depth = %f\n", vp->min_depth);
batch_out(brw, name, offset, 1, "max_depth = %f\n", vp->max_depth);
@@ -370,7 +364,7 @@ static void dump_cc_viewport_state(struct brw_context *brw, uint32_t offset)
static void dump_depth_stencil_state(struct brw_context *brw, uint32_t offset)
{
const char *name = "D_S";
- struct gen6_depth_stencil_state *ds = brw->intel.batch.bo->virtual + offset;
+ struct gen6_depth_stencil_state *ds = brw->batch.bo->virtual + offset;
batch_out(brw, name, offset, 0,
"stencil %sable, func %d, write %sable\n",
@@ -404,7 +398,7 @@ static void dump_cc_state_gen4(struct brw_context *brw, uint32_t offset)
static void dump_cc_state_gen6(struct brw_context *brw, uint32_t offset)
{
const char *name = "CC";
- struct gen6_color_calc_state *cc = brw->intel.batch.bo->virtual + offset;
+ struct gen6_color_calc_state *cc = brw->batch.bo->virtual + offset;
batch_out(brw, name, offset, 0,
"alpha test format %s, round disable %d, stencil ref %d, "
@@ -432,8 +426,7 @@ static void
dump_scissor(struct brw_context *brw, uint32_t offset)
{
const char *name = "SCISSOR";
- struct intel_context *intel = &brw->intel;
- struct gen6_scissor_rect *scissor = intel->batch.bo->virtual + offset;
+ struct gen6_scissor_rect *scissor = brw->batch.bo->virtual + offset;
batch_out(brw, name, offset, 0, "xmin %d, ymin %d\n",
scissor->xmin, scissor->ymin);
@@ -445,9 +438,8 @@ static void
dump_vs_constants(struct brw_context *brw, uint32_t offset, uint32_t size)
{
const char *name = "VS_CONST";
- struct intel_context *intel = &brw->intel;
- uint32_t *as_uint = intel->batch.bo->virtual + offset;
- float *as_float = intel->batch.bo->virtual + offset;
+ uint32_t *as_uint = brw->batch.bo->virtual + offset;
+ float *as_float = brw->batch.bo->virtual + offset;
int i;
for (i = 0; i < size / 4; i += 4) {
@@ -462,9 +454,8 @@ static void
dump_wm_constants(struct brw_context *brw, uint32_t offset, uint32_t size)
{
const char *name = "WM_CONST";
- struct intel_context *intel = &brw->intel;
- uint32_t *as_uint = intel->batch.bo->virtual + offset;
- float *as_float = intel->batch.bo->virtual + offset;
+ uint32_t *as_uint = brw->batch.bo->virtual + offset;
+ float *as_float = brw->batch.bo->virtual + offset;
int i;
for (i = 0; i < size / 4; i += 4) {
@@ -480,7 +471,7 @@ static void dump_binding_table(struct brw_context *brw, uint32_t offset,
{
char name[20];
int i;
- uint32_t *data = brw->intel.batch.bo->virtual + offset;
+ uint32_t *data = brw->batch.bo->virtual + offset;
for (i = 0; i < size / 4; i++) {
if (data[i] == 0)
@@ -643,11 +634,9 @@ dump_state_batch(struct brw_context *brw)
*/
void brw_debug_batch(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
-
- drm_intel_bo_map(intel->batch.bo, false);
+ drm_intel_bo_map(brw->batch.bo, false);
dump_state_batch(brw);
- drm_intel_bo_unmap(intel->batch.bo);
+ drm_intel_bo_unmap(brw->batch.bo);
if (0)
dump_prog_cache(brw);
diff --git a/src/mesa/drivers/dri/i965/brw_urb.c b/src/mesa/drivers/dri/i965/brw_urb.c
index 3f42ba82ba5..43b9ba4d4a6 100644
--- a/src/mesa/drivers/dri/i965/brw_urb.c
+++ b/src/mesa/drivers/dri/i965/brw_urb.c
@@ -249,10 +249,10 @@ void brw_upload_urb_fence(struct brw_context *brw)
uf.bits1.cs_fence = brw->urb.size;
/* erratum: URB_FENCE must not cross a 64byte cacheline */
- if ((brw->intel.batch.used & 15) > 12) {
- int pad = 16 - (brw->intel.batch.used & 15);
+ if ((brw->batch.used & 15) > 12) {
+ int pad = 16 - (brw->batch.used & 15);
do
- brw->intel.batch.map[brw->intel.batch.used++] = MI_NOOP;
+ brw->batch.map[brw->batch.used++] = MI_NOOP;
while (--pad);
}
diff --git a/src/mesa/drivers/dri/i965/brw_vec4.cpp b/src/mesa/drivers/dri/i965/brw_vec4.cpp
index eb192a3df9b..04a26ea8aa2 100644
--- a/src/mesa/drivers/dri/i965/brw_vec4.cpp
+++ b/src/mesa/drivers/dri/i965/brw_vec4.cpp
@@ -1509,8 +1509,8 @@ brw_vs_emit(struct brw_context *brw,
float start_time = 0;
if (unlikely(intel->perf_debug)) {
- start_busy = (intel->batch.last_bo &&
- drm_intel_bo_busy(intel->batch.last_bo));
+ start_busy = (brw->batch.last_bo &&
+ drm_intel_bo_busy(brw->batch.last_bo));
start_time = get_time();
}
@@ -1552,7 +1552,7 @@ brw_vs_emit(struct brw_context *brw,
if (shader->compiled_once) {
brw_vs_debug_recompile(brw, prog, &c->key);
}
- if (start_busy && !drm_intel_bo_busy(intel->batch.last_bo)) {
+ if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) {
perf_debug("VS compile took %.03f ms and stalled the GPU\n",
(get_time() - start_time) * 1000);
}
diff --git a/src/mesa/drivers/dri/i965/brw_vs_state.c b/src/mesa/drivers/dri/i965/brw_vs_state.c
index bb42bd002df..722afc536ee 100644
--- a/src/mesa/drivers/dri/i965/brw_vs_state.c
+++ b/src/mesa/drivers/dri/i965/brw_vs_state.c
@@ -147,7 +147,7 @@ brw_upload_vs_unit(struct brw_context *brw)
/* Emit scratch space relocation */
if (brw->vs.prog_data->base.total_scratch != 0) {
- drm_intel_bo_emit_reloc(intel->batch.bo,
+ drm_intel_bo_emit_reloc(brw->batch.bo,
brw->vs.state_offset +
offsetof(struct brw_vs_unit_state, thread2),
brw->vs.scratch_bo,
diff --git a/src/mesa/drivers/dri/i965/brw_vtbl.c b/src/mesa/drivers/dri/i965/brw_vtbl.c
index d19ca0be513..3d62051b6d0 100644
--- a/src/mesa/drivers/dri/i965/brw_vtbl.c
+++ b/src/mesa/drivers/dri/i965/brw_vtbl.c
@@ -117,8 +117,6 @@ brw_finish_batch(struct brw_context *brw)
static void
brw_new_batch(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
-
/* If the kernel supports hardware contexts, then most hardware state is
* preserved between batches; we only need to re-emit state that is required
* to be in every batch. Otherwise we need to re-emit all the state that
@@ -133,7 +131,7 @@ brw_new_batch(struct brw_context *brw)
/* Assume that the last command before the start of our batch was a
* primitive, for safety.
*/
- intel->batch.need_workaround_flush = true;
+ brw->batch.need_workaround_flush = true;
brw->state_batch_count = 0;
diff --git a/src/mesa/drivers/dri/i965/brw_wm_sampler_state.c b/src/mesa/drivers/dri/i965/brw_wm_sampler_state.c
index 006aa68a0b4..b75882d20bf 100644
--- a/src/mesa/drivers/dri/i965/brw_wm_sampler_state.c
+++ b/src/mesa/drivers/dri/i965/brw_wm_sampler_state.c
@@ -342,14 +342,14 @@ static void brw_update_sampler_state(struct brw_context *brw,
sampler->ss2.default_color_pointer = brw->wm.sdc_offset[ss_index] >> 5;
} else {
/* reloc */
- sampler->ss2.default_color_pointer = (intel->batch.bo->offset +
+ sampler->ss2.default_color_pointer = (brw->batch.bo->offset +
brw->wm.sdc_offset[ss_index]) >> 5;
- drm_intel_bo_emit_reloc(intel->batch.bo,
+ drm_intel_bo_emit_reloc(brw->batch.bo,
brw->sampler.offset +
ss_index * sizeof(struct brw_sampler_state) +
offsetof(struct brw_sampler_state, ss2),
- intel->batch.bo, brw->wm.sdc_offset[ss_index],
+ brw->batch.bo, brw->wm.sdc_offset[ss_index],
I915_GEM_DOMAIN_SAMPLER, 0);
}
diff --git a/src/mesa/drivers/dri/i965/brw_wm_state.c b/src/mesa/drivers/dri/i965/brw_wm_state.c
index 4bac56ab747..63b8916deb5 100644
--- a/src/mesa/drivers/dri/i965/brw_wm_state.c
+++ b/src/mesa/drivers/dri/i965/brw_wm_state.c
@@ -142,7 +142,7 @@ brw_upload_wm_unit(struct brw_context *brw)
if (brw->sampler.count) {
/* reloc */
- wm->wm4.sampler_state_pointer = (intel->batch.bo->offset +
+ wm->wm4.sampler_state_pointer = (brw->batch.bo->offset +
brw->sampler.offset) >> 5;
} else {
wm->wm4.sampler_state_pointer = 0;
@@ -209,7 +209,7 @@ brw_upload_wm_unit(struct brw_context *brw)
/* Emit scratch space relocation */
if (brw->wm.prog_data->total_scratch != 0) {
- drm_intel_bo_emit_reloc(intel->batch.bo,
+ drm_intel_bo_emit_reloc(brw->batch.bo,
brw->wm.state_offset +
offsetof(struct brw_wm_unit_state, thread2),
brw->wm.scratch_bo,
@@ -219,10 +219,10 @@ brw_upload_wm_unit(struct brw_context *brw)
/* Emit sampler state relocation */
if (brw->sampler.count != 0) {
- drm_intel_bo_emit_reloc(intel->batch.bo,
+ drm_intel_bo_emit_reloc(brw->batch.bo,
brw->wm.state_offset +
offsetof(struct brw_wm_unit_state, wm4),
- intel->batch.bo, (brw->sampler.offset |
+ brw->batch.bo, (brw->sampler.offset |
wm->wm4.stats_enable |
(wm->wm4.sampler_count << 2)),
I915_GEM_DOMAIN_INSTRUCTION, 0);
diff --git a/src/mesa/drivers/dri/i965/brw_wm_surface_state.c b/src/mesa/drivers/dri/i965/brw_wm_surface_state.c
index 87ec97a93d7..ff333572c91 100644
--- a/src/mesa/drivers/dri/i965/brw_wm_surface_state.c
+++ b/src/mesa/drivers/dri/i965/brw_wm_surface_state.c
@@ -226,7 +226,7 @@ brw_update_buffer_texture_surface(struct gl_context *ctx,
surf[1] = bo->offset; /* reloc */
/* Emit relocation to surface contents. */
- drm_intel_bo_emit_reloc(brw->intel.batch.bo,
+ drm_intel_bo_emit_reloc(brw->batch.bo,
binding_table[surf_index] + 4,
bo, 0, I915_GEM_DOMAIN_SAMPLER, 0);
@@ -303,7 +303,7 @@ brw_update_texture_surface(struct gl_context *ctx,
(mt->align_h == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
/* Emit relocation to surface contents */
- drm_intel_bo_emit_reloc(brw->intel.batch.bo,
+ drm_intel_bo_emit_reloc(brw->batch.bo,
binding_table[surf_index] + 4,
intelObj->mt->region->bo,
surf[1] - intelObj->mt->region->bo->offset,
@@ -353,7 +353,7 @@ brw_create_constant_surface(struct brw_context *brw,
* bspec ("Data Cache") says that the data cache does not exist as
* a separate cache and is just the sampler cache.
*/
- drm_intel_bo_emit_reloc(brw->intel.batch.bo,
+ drm_intel_bo_emit_reloc(brw->batch.bo,
*out_offset + 4,
bo, offset,
I915_GEM_DOMAIN_SAMPLER, 0);
@@ -439,7 +439,7 @@ brw_update_sol_surface(struct brw_context *brw,
surf[5] = 0;
/* Emit relocation to surface contents. */
- drm_intel_bo_emit_reloc(brw->intel.batch.bo,
+ drm_intel_bo_emit_reloc(brw->batch.bo,
*out_offset + 4,
bo, offset_bytes,
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
@@ -591,7 +591,7 @@ brw_update_null_renderbuffer_surface(struct brw_context *brw, unsigned int unit)
surf[5] = 0;
if (bo) {
- drm_intel_bo_emit_reloc(brw->intel.batch.bo,
+ drm_intel_bo_emit_reloc(brw->batch.bo,
brw->wm.surf_offset[unit] + 4,
bo, 0,
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
@@ -697,7 +697,7 @@ brw_update_renderbuffer_surface(struct brw_context *brw,
}
}
- drm_intel_bo_emit_reloc(brw->intel.batch.bo,
+ drm_intel_bo_emit_reloc(brw->batch.bo,
brw->wm.surf_offset[unit] + 4,
region->bo,
surf[1] - region->bo->offset,
diff --git a/src/mesa/drivers/dri/i965/gen6_blorp.cpp b/src/mesa/drivers/dri/i965/gen6_blorp.cpp
index 6e7440ae893..50d89cc7873 100644
--- a/src/mesa/drivers/dri/i965/gen6_blorp.cpp
+++ b/src/mesa/drivers/dri/i965/gen6_blorp.cpp
@@ -74,15 +74,13 @@ void
gen6_blorp_emit_state_base_address(struct brw_context *brw,
const brw_blorp_params *params)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(10);
OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (10 - 2));
OUT_BATCH(1); /* GeneralStateBaseAddressModifyEnable */
/* SurfaceStateBaseAddress */
- OUT_RELOC(intel->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0, 1);
+ OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0, 1);
/* DynamicStateBaseAddress */
- OUT_RELOC(intel->batch.bo, (I915_GEM_DOMAIN_RENDER |
+ OUT_RELOC(brw->batch.bo, (I915_GEM_DOMAIN_RENDER |
I915_GEM_DOMAIN_INSTRUCTION), 0, 1);
OUT_BATCH(1); /* IndirectObjectBaseAddress */
if (params->use_wm_prog) {
@@ -170,10 +168,10 @@ gen6_blorp_emit_vertices(struct brw_context *brw,
OUT_BATCH((_3DSTATE_VERTEX_BUFFERS << 16) | (batch_length - 2));
OUT_BATCH(dw0);
/* start address */
- OUT_RELOC(intel->batch.bo, I915_GEM_DOMAIN_VERTEX, 0,
+ OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_VERTEX, 0,
vertex_offset);
/* end address */
- OUT_RELOC(intel->batch.bo, I915_GEM_DOMAIN_VERTEX, 0,
+ OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_VERTEX, 0,
vertex_offset + GEN6_BLORP_VBO_SIZE - 1);
OUT_BATCH(0);
ADVANCE_BATCH();
@@ -438,7 +436,7 @@ gen6_blorp_emit_surface_state(struct brw_context *brw,
BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
/* Emit relocation to surface contents */
- drm_intel_bo_emit_reloc(brw->intel.batch.bo,
+ drm_intel_bo_emit_reloc(brw->batch.bo,
wm_surf_offset + 4,
region->bo,
surf[1] - region->bo->offset,
diff --git a/src/mesa/drivers/dri/i965/gen6_queryobj.c b/src/mesa/drivers/dri/i965/gen6_queryobj.c
index 8a051b78a2e..a3af19e1535 100644
--- a/src/mesa/drivers/dri/i965/gen6_queryobj.c
+++ b/src/mesa/drivers/dri/i965/gen6_queryobj.c
@@ -166,7 +166,7 @@ gen6_queryobj_get_results(struct gl_context *ctx,
* still contributing to it, flush it now so the results will be present
* when mapped.
*/
- if (drm_intel_bo_references(intel->batch.bo, query->bo))
+ if (drm_intel_bo_references(brw->batch.bo, query->bo))
intel_batchbuffer_flush(brw);
if (unlikely(intel->perf_debug)) {
@@ -361,7 +361,6 @@ static void gen6_wait_query(struct gl_context *ctx, struct gl_query_object *q)
static void gen6_check_query(struct gl_context *ctx, struct gl_query_object *q)
{
struct brw_context *brw = brw_context(ctx);
- struct intel_context *intel = intel_context(ctx);
struct brw_query_object *query = (struct brw_query_object *)q;
/* From the GL_ARB_occlusion_query spec:
@@ -371,7 +370,7 @@ static void gen6_check_query(struct gl_context *ctx, struct gl_query_object *q)
* not ready yet on the first time it is queried. This ensures that
* the async query will return true in finite time.
*/
- if (query->bo && drm_intel_bo_references(intel->batch.bo, query->bo))
+ if (query->bo && drm_intel_bo_references(brw->batch.bo, query->bo))
intel_batchbuffer_flush(brw);
if (query->bo == NULL || !drm_intel_bo_busy(query->bo)) {
diff --git a/src/mesa/drivers/dri/i965/gen7_blorp.cpp b/src/mesa/drivers/dri/i965/gen7_blorp.cpp
index c8135632dd7..2d52211a4c2 100644
--- a/src/mesa/drivers/dri/i965/gen7_blorp.cpp
+++ b/src/mesa/drivers/dri/i965/gen7_blorp.cpp
@@ -202,7 +202,7 @@ gen7_blorp_emit_surface_state(struct brw_context *brw,
}
/* Emit relocation to surface contents */
- drm_intel_bo_emit_reloc(intel->batch.bo,
+ drm_intel_bo_emit_reloc(brw->batch.bo,
wm_surf_offset + 4,
region->bo,
surf[1] - region->bo->offset,
diff --git a/src/mesa/drivers/dri/i965/gen7_sol_state.c b/src/mesa/drivers/dri/i965/gen7_sol_state.c
index 2a9d8f1985c..ff14c2f4a6b 100644
--- a/src/mesa/drivers/dri/i965/gen7_sol_state.c
+++ b/src/mesa/drivers/dri/i965/gen7_sol_state.c
@@ -258,10 +258,9 @@ gen7_begin_transform_feedback(struct gl_context *ctx, GLenum mode,
struct gl_transform_feedback_object *obj)
{
struct brw_context *brw = brw_context(ctx);
- struct intel_context *intel = &brw->intel;
intel_batchbuffer_flush(brw);
- intel->batch.needs_sol_reset = true;
+ brw->batch.needs_sol_reset = true;
}
void
diff --git a/src/mesa/drivers/dri/i965/gen7_wm_surface_state.c b/src/mesa/drivers/dri/i965/gen7_wm_surface_state.c
index c983abaa264..ee1fe4ba3e7 100644
--- a/src/mesa/drivers/dri/i965/gen7_wm_surface_state.c
+++ b/src/mesa/drivers/dri/i965/gen7_wm_surface_state.c
@@ -128,7 +128,7 @@ gen7_set_surface_mcs_info(struct brw_context *brw,
SET_FIELD(pitch_tiles - 1, GEN7_SURFACE_MCS_PITCH) |
mcs_mt->region->bo->offset;
- drm_intel_bo_emit_reloc(brw->intel.batch.bo,
+ drm_intel_bo_emit_reloc(brw->batch.bo,
surf_offset + 6 * 4,
mcs_mt->region->bo,
surf[6] & 0xfff,
@@ -264,7 +264,7 @@ gen7_update_buffer_texture_surface(struct gl_context *ctx,
* bspec ("Data Cache") says that the data cache does not exist as
* a separate cache and is just the sampler cache.
*/
- drm_intel_bo_emit_reloc(intel->batch.bo,
+ drm_intel_bo_emit_reloc(brw->batch.bo,
binding_table[surf_index] + 4,
bo, 0,
I915_GEM_DOMAIN_SAMPLER, 0);
@@ -367,7 +367,7 @@ gen7_update_texture_surface(struct gl_context *ctx,
}
/* Emit relocation to surface contents */
- drm_intel_bo_emit_reloc(brw->intel.batch.bo,
+ drm_intel_bo_emit_reloc(brw->batch.bo,
binding_table[surf_index] + 4,
intelObj->mt->region->bo,
surf[1] - intelObj->mt->region->bo->offset,
@@ -421,7 +421,7 @@ gen7_create_constant_surface(struct brw_context *brw,
* bspec ("Data Cache") says that the data cache does not exist as
* a separate cache and is just the sampler cache.
*/
- drm_intel_bo_emit_reloc(intel->batch.bo,
+ drm_intel_bo_emit_reloc(brw->batch.bo,
*out_offset + 4,
bo, offset,
I915_GEM_DOMAIN_SAMPLER, 0);
@@ -462,7 +462,7 @@ gen7_create_shader_time_surface(struct brw_context *brw, uint32_t *out_offset)
* bspec ("Data Cache") says that the data cache does not exist as
* a separate cache and is just the sampler cache.
*/
- drm_intel_bo_emit_reloc(intel->batch.bo,
+ drm_intel_bo_emit_reloc(brw->batch.bo,
*out_offset + 4,
brw->shader_time.bo, 0,
I915_GEM_DOMAIN_SAMPLER, 0);
@@ -619,7 +619,7 @@ gen7_update_renderbuffer_surface(struct brw_context *brw,
SET_FIELD(HSW_SCS_ALPHA, GEN7_SURFACE_SCS_A));
}
- drm_intel_bo_emit_reloc(brw->intel.batch.bo,
+ drm_intel_bo_emit_reloc(brw->batch.bo,
brw->wm.surf_offset[unit] + 4,
region->bo,
surf[1] - region->bo->offset,
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.c b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
index 70a1270b1be..8f032183cf4 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
@@ -44,8 +44,7 @@ struct cached_batch_item {
static void
clear_cache(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
- struct cached_batch_item *item = intel->batch.cached_items;
+ struct cached_batch_item *item = brw->batch.cached_items;
while (item) {
struct cached_batch_item *next = item->next;
@@ -53,7 +52,7 @@ clear_cache(struct brw_context *brw)
item = next;
}
- intel->batch.cached_items = NULL;
+ brw->batch.cached_items = NULL;
}
void
@@ -67,14 +66,14 @@ intel_batchbuffer_init(struct brw_context *brw)
* the gen6 workaround because it involves actually writing to
* the buffer, and the kernel doesn't let us write to the batch.
*/
- intel->batch.workaround_bo = drm_intel_bo_alloc(brw->bufmgr,
+ brw->batch.workaround_bo = drm_intel_bo_alloc(brw->bufmgr,
"pipe_control workaround",
4096, 4096);
}
if (!intel->has_llc) {
- intel->batch.cpu_map = malloc(BATCH_SZ);
- intel->batch.map = intel->batch.cpu_map;
+ brw->batch.cpu_map = malloc(BATCH_SZ);
+ brw->batch.map = brw->batch.cpu_map;
}
}
@@ -82,43 +81,41 @@ static void
intel_batchbuffer_reset(struct brw_context *brw)
{
struct intel_context *intel = &brw->intel;
- if (intel->batch.last_bo != NULL) {
- drm_intel_bo_unreference(intel->batch.last_bo);
- intel->batch.last_bo = NULL;
+ if (brw->batch.last_bo != NULL) {
+ drm_intel_bo_unreference(brw->batch.last_bo);
+ brw->batch.last_bo = NULL;
}
- intel->batch.last_bo = intel->batch.bo;
+ brw->batch.last_bo = brw->batch.bo;
clear_cache(brw);
- intel->batch.bo = drm_intel_bo_alloc(brw->bufmgr, "batchbuffer",
+ brw->batch.bo = drm_intel_bo_alloc(brw->bufmgr, "batchbuffer",
BATCH_SZ, 4096);
if (intel->has_llc) {
- drm_intel_bo_map(intel->batch.bo, true);
- intel->batch.map = intel->batch.bo->virtual;
+ drm_intel_bo_map(brw->batch.bo, true);
+ brw->batch.map = brw->batch.bo->virtual;
}
- intel->batch.reserved_space = BATCH_RESERVED;
- intel->batch.state_batch_offset = intel->batch.bo->size;
- intel->batch.used = 0;
- intel->batch.needs_sol_reset = false;
+ brw->batch.reserved_space = BATCH_RESERVED;
+ brw->batch.state_batch_offset = brw->batch.bo->size;
+ brw->batch.used = 0;
+ brw->batch.needs_sol_reset = false;
}
void
intel_batchbuffer_save_state(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
- intel->batch.saved.used = intel->batch.used;
- intel->batch.saved.reloc_count =
- drm_intel_gem_bo_get_reloc_count(intel->batch.bo);
+ brw->batch.saved.used = brw->batch.used;
+ brw->batch.saved.reloc_count =
+ drm_intel_gem_bo_get_reloc_count(brw->batch.bo);
}
void
intel_batchbuffer_reset_to_saved(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
- drm_intel_gem_bo_clear_relocs(intel->batch.bo, intel->batch.saved.reloc_count);
+ drm_intel_gem_bo_clear_relocs(brw->batch.bo, brw->batch.saved.reloc_count);
- intel->batch.used = intel->batch.saved.used;
+ brw->batch.used = brw->batch.saved.used;
/* Cached batch state is dead, since we just cleared some unknown part of the
* batchbuffer. Assume that the caller resets any other state necessary.
@@ -129,11 +126,10 @@ intel_batchbuffer_reset_to_saved(struct brw_context *brw)
void
intel_batchbuffer_free(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
- free(intel->batch.cpu_map);
- drm_intel_bo_unreference(intel->batch.last_bo);
- drm_intel_bo_unreference(intel->batch.bo);
- drm_intel_bo_unreference(intel->batch.workaround_bo);
+ free(brw->batch.cpu_map);
+ drm_intel_bo_unreference(brw->batch.last_bo);
+ drm_intel_bo_unreference(brw->batch.bo);
+ drm_intel_bo_unreference(brw->batch.workaround_bo);
clear_cache(brw);
}
@@ -142,7 +138,7 @@ do_batch_dump(struct brw_context *brw)
{
struct intel_context *intel = &brw->intel;
struct drm_intel_decode *decode;
- struct intel_batchbuffer *batch = &intel->batch;
+ struct intel_batchbuffer *batch = &brw->batch;
int ret;
decode = drm_intel_decode_context_alloc(intel->intelScreen->deviceID);
@@ -183,7 +179,7 @@ static int
do_flush_locked(struct brw_context *brw)
{
struct intel_context *intel = &brw->intel;
- struct intel_batchbuffer *batch = &intel->batch;
+ struct intel_batchbuffer *batch = &brw->batch;
int ret = 0;
if (intel->has_llc) {
@@ -242,26 +238,26 @@ _intel_batchbuffer_flush(struct brw_context *brw,
struct intel_context *intel = &brw->intel;
int ret;
- if (intel->batch.used == 0)
+ if (brw->batch.used == 0)
return 0;
if (intel->first_post_swapbuffers_batch == NULL) {
- intel->first_post_swapbuffers_batch = intel->batch.bo;
+ intel->first_post_swapbuffers_batch = brw->batch.bo;
drm_intel_bo_reference(intel->first_post_swapbuffers_batch);
}
if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
- 4*intel->batch.used);
+ 4*brw->batch.used);
- intel->batch.reserved_space = 0;
+ brw->batch.reserved_space = 0;
if (brw->vtbl.finish_batch)
brw->vtbl.finish_batch(brw);
/* Mark the end of the buffer. */
intel_batchbuffer_emit_dword(brw, MI_BATCH_BUFFER_END);
- if (intel->batch.used & 1) {
+ if (brw->batch.used & 1) {
/* Round batchbuffer usage to 2 DWORDs. */
intel_batchbuffer_emit_dword(brw, MI_NOOP);
}
@@ -275,7 +271,7 @@ _intel_batchbuffer_flush(struct brw_context *brw,
if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
fprintf(stderr, "waiting for idle\n");
- drm_intel_bo_wait_rendering(intel->batch.bo);
+ drm_intel_bo_wait_rendering(brw->batch.bo);
}
/* Reset the buffer:
@@ -294,10 +290,9 @@ intel_batchbuffer_emit_reloc(struct brw_context *brw,
uint32_t read_domains, uint32_t write_domain,
uint32_t delta)
{
- struct intel_context *intel = &brw->intel;
int ret;
- ret = drm_intel_bo_emit_reloc(intel->batch.bo, 4*intel->batch.used,
+ ret = drm_intel_bo_emit_reloc(brw->batch.bo, 4*brw->batch.used,
buffer, delta,
read_domains, write_domain);
assert(ret == 0);
@@ -320,10 +315,9 @@ intel_batchbuffer_emit_reloc_fenced(struct brw_context *brw,
uint32_t write_domain,
uint32_t delta)
{
- struct intel_context *intel = &brw->intel;
int ret;
- ret = drm_intel_bo_emit_reloc_fence(intel->batch.bo, 4*intel->batch.used,
+ ret = drm_intel_bo_emit_reloc_fence(brw->batch.bo, 4*brw->batch.used,
buffer, delta,
read_domains, write_domain);
assert(ret == 0);
@@ -343,35 +337,33 @@ void
intel_batchbuffer_data(struct brw_context *brw,
const void *data, GLuint bytes, bool is_blit)
{
- struct intel_context *intel = &brw->intel;
assert((bytes & 3) == 0);
intel_batchbuffer_require_space(brw, bytes, is_blit);
- __memcpy(intel->batch.map + intel->batch.used, data, bytes);
- intel->batch.used += bytes >> 2;
+ __memcpy(brw->batch.map + brw->batch.used, data, bytes);
+ brw->batch.used += bytes >> 2;
}
void
intel_batchbuffer_cached_advance(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
- struct cached_batch_item **prev = &intel->batch.cached_items, *item;
- uint32_t sz = (intel->batch.used - intel->batch.emit) * sizeof(uint32_t);
- uint32_t *start = intel->batch.map + intel->batch.emit;
+ struct cached_batch_item **prev = &brw->batch.cached_items, *item;
+ uint32_t sz = (brw->batch.used - brw->batch.emit) * sizeof(uint32_t);
+ uint32_t *start = brw->batch.map + brw->batch.emit;
uint16_t op = *start >> 16;
while (*prev) {
uint32_t *old;
item = *prev;
- old = intel->batch.map + item->header;
+ old = brw->batch.map + item->header;
if (op == *old >> 16) {
if (item->size == sz && memcmp(old, start, sz) == 0) {
- if (prev != &intel->batch.cached_items) {
+ if (prev != &brw->batch.cached_items) {
*prev = item->next;
- item->next = intel->batch.cached_items;
- intel->batch.cached_items = item;
+ item->next = brw->batch.cached_items;
+ brw->batch.cached_items = item;
}
- intel->batch.used = intel->batch.emit;
+ brw->batch.used = brw->batch.emit;
return;
}
@@ -384,12 +376,12 @@ intel_batchbuffer_cached_advance(struct brw_context *brw)
if (item == NULL)
return;
- item->next = intel->batch.cached_items;
- intel->batch.cached_items = item;
+ item->next = brw->batch.cached_items;
+ brw->batch.cached_items = item;
emit:
item->size = sz;
- item->header = intel->batch.emit;
+ item->header = brw->batch.emit;
}
/**
@@ -449,7 +441,7 @@ gen7_emit_vs_workaround_flush(struct brw_context *brw)
BEGIN_BATCH(4);
OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
OUT_BATCH(PIPE_CONTROL_DEPTH_STALL | PIPE_CONTROL_WRITE_IMMEDIATE);
- OUT_RELOC(intel->batch.workaround_bo,
+ OUT_RELOC(brw->batch.workaround_bo,
I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 0);
OUT_BATCH(0); /* write data */
ADVANCE_BATCH();
@@ -495,8 +487,7 @@ gen7_emit_vs_workaround_flush(struct brw_context *brw)
void
intel_emit_post_sync_nonzero_flush(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
- if (!intel->batch.need_workaround_flush)
+ if (!brw->batch.need_workaround_flush)
return;
BEGIN_BATCH(4);
@@ -510,12 +501,12 @@ intel_emit_post_sync_nonzero_flush(struct brw_context *brw)
BEGIN_BATCH(4);
OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
OUT_BATCH(PIPE_CONTROL_WRITE_IMMEDIATE);
- OUT_RELOC(intel->batch.workaround_bo,
+ OUT_RELOC(brw->batch.workaround_bo,
I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 0);
OUT_BATCH(0); /* write data */
ADVANCE_BATCH();
- intel->batch.need_workaround_flush = false;
+ brw->batch.need_workaround_flush = false;
}
/* Emit a pipelined flush to either flush render and texture cache for
@@ -529,7 +520,7 @@ intel_batchbuffer_emit_mi_flush(struct brw_context *brw)
{
struct intel_context *intel = &brw->intel;
if (intel->gen >= 6) {
- if (intel->batch.is_blit) {
+ if (brw->batch.is_blit) {
BEGIN_BATCH_BLT(4);
OUT_BATCH(MI_FLUSH_DW);
OUT_BATCH(0);
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.h b/src/mesa/drivers/dri/i965/intel_batchbuffer.h
index 48439513d58..4e73f61db0d 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.h
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.h
@@ -78,20 +78,18 @@ static INLINE uint32_t float_as_int(float f)
static INLINE unsigned
intel_batchbuffer_space(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
- return (intel->batch.state_batch_offset - intel->batch.reserved_space)
- - intel->batch.used*4;
+ return (brw->batch.state_batch_offset - brw->batch.reserved_space)
+ - brw->batch.used*4;
}
static INLINE void
intel_batchbuffer_emit_dword(struct brw_context *brw, GLuint dword)
{
- struct intel_context *intel = &brw->intel;
#ifdef DEBUG
assert(intel_batchbuffer_space(brw) >= 4);
#endif
- intel->batch.map[intel->batch.used++] = dword;
+ brw->batch.map[brw->batch.used++] = dword;
}
static INLINE void
@@ -105,11 +103,11 @@ intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz, int is_blit)
{
struct intel_context *intel = &brw->intel;
if (intel->gen >= 6 &&
- intel->batch.is_blit != is_blit && intel->batch.used) {
+ brw->batch.is_blit != is_blit && brw->batch.used) {
intel_batchbuffer_flush(brw);
}
- intel->batch.is_blit = is_blit;
+ brw->batch.is_blit = is_blit;
#ifdef DEBUG
assert(sz < BATCH_SZ - BATCH_RESERVED);
@@ -121,12 +119,11 @@ intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz, int is_blit)
static INLINE void
intel_batchbuffer_begin(struct brw_context *brw, int n, bool is_blit)
{
- struct intel_context *intel = &brw->intel;
intel_batchbuffer_require_space(brw, n * 4, is_blit);
- intel->batch.emit = intel->batch.used;
+ brw->batch.emit = brw->batch.used;
#ifdef DEBUG
- intel->batch.total = n;
+ brw->batch.total = n;
#endif
}
@@ -134,8 +131,7 @@ static INLINE void
intel_batchbuffer_advance(struct brw_context *brw)
{
#ifdef DEBUG
- struct intel_context *intel = &brw->intel;
- struct intel_batchbuffer *batch = &intel->batch;
+ struct intel_batchbuffer *batch = &brw->batch;
unsigned int _n = batch->used - batch->emit;
assert(batch->total != 0);
if (_n != batch->total) {
diff --git a/src/mesa/drivers/dri/i965/intel_blit.c b/src/mesa/drivers/dri/i965/intel_blit.c
index 7d57d6e998d..3d2181de8b2 100644
--- a/src/mesa/drivers/dri/i965/intel_blit.c
+++ b/src/mesa/drivers/dri/i965/intel_blit.c
@@ -302,7 +302,7 @@ intelEmitCopyBlit(struct brw_context *brw,
/* do space check before going any further */
do {
- aper_array[0] = intel->batch.bo;
+ aper_array[0] = brw->batch.bo;
aper_array[1] = dst_buffer;
aper_array[2] = src_buffer;
@@ -537,7 +537,6 @@ intel_miptree_set_alpha_to_one(struct brw_context *brw,
struct intel_mipmap_tree *mt,
int x, int y, int width, int height)
{
- struct intel_context *intel = &brw->intel;
struct intel_region *region = mt->region;
uint32_t BR13, CMD;
int pitch, cpp;
@@ -561,7 +560,7 @@ intel_miptree_set_alpha_to_one(struct brw_context *brw,
BR13 |= pitch;
/* do space check before going any further */
- aper_array[0] = intel->batch.bo;
+ aper_array[0] = brw->batch.bo;
aper_array[1] = region->bo;
if (drm_intel_bufmgr_check_aperture_space(aper_array,
diff --git a/src/mesa/drivers/dri/i965/intel_buffer_objects.c b/src/mesa/drivers/dri/i965/intel_buffer_objects.c
index bec2c8fe177..aae9f9e0088 100644
--- a/src/mesa/drivers/dri/i965/intel_buffer_objects.c
+++ b/src/mesa/drivers/dri/i965/intel_buffer_objects.c
@@ -173,7 +173,7 @@ intel_bufferobj_subdata(struct gl_context * ctx,
busy =
drm_intel_bo_busy(intel_obj->buffer) ||
- drm_intel_bo_references(intel->batch.bo, intel_obj->buffer);
+ drm_intel_bo_references(brw->batch.bo, intel_obj->buffer);
if (busy) {
if (size == intel_obj->Base.Size) {
@@ -214,10 +214,9 @@ intel_bufferobj_get_subdata(struct gl_context * ctx,
{
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
struct brw_context *brw = brw_context(ctx);
- struct intel_context *intel = intel_context(ctx);
assert(intel_obj);
- if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
+ if (drm_intel_bo_references(brw->batch.bo, intel_obj->buffer)) {
intel_batchbuffer_flush(brw);
}
drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);
@@ -273,7 +272,7 @@ intel_bufferobj_map_range(struct gl_context * ctx,
* achieve the required synchronization.
*/
if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
- if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
+ if (drm_intel_bo_references(brw->batch.bo, intel_obj->buffer)) {
if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
drm_intel_bo_unreference(intel_obj->buffer);
intel_bufferobj_alloc_buffer(brw, intel_obj);
diff --git a/src/mesa/drivers/dri/i965/intel_context.c b/src/mesa/drivers/dri/i965/intel_context.c
index 9d95f765566..53ae742dfa7 100644
--- a/src/mesa/drivers/dri/i965/intel_context.c
+++ b/src/mesa/drivers/dri/i965/intel_context.c
@@ -341,9 +341,8 @@ void
_intel_flush(struct gl_context *ctx, const char *file, int line)
{
struct brw_context *brw = brw_context(ctx);
- struct intel_context *intel = intel_context(ctx);
- if (intel->batch.used)
+ if (brw->batch.used)
_intel_batchbuffer_flush(brw, file, line);
}
@@ -362,13 +361,13 @@ intel_glFlush(struct gl_context *ctx)
void
intelFinish(struct gl_context * ctx)
{
- struct intel_context *intel = intel_context(ctx);
+ struct brw_context *brw = brw_context(ctx);
intel_flush(ctx);
intel_flush_front(ctx);
- if (intel->batch.last_bo)
- drm_intel_bo_wait_rendering(intel->batch.last_bo);
+ if (brw->batch.last_bo)
+ drm_intel_bo_wait_rendering(brw->batch.last_bo);
}
void
diff --git a/src/mesa/drivers/dri/i965/intel_context.h b/src/mesa/drivers/dri/i965/intel_context.h
index 4ca5081f9ab..b1798cf3598 100644
--- a/src/mesa/drivers/dri/i965/intel_context.h
+++ b/src/mesa/drivers/dri/i965/intel_context.h
@@ -129,8 +129,6 @@ struct intel_context
bool has_llc;
bool has_swizzling;
- struct intel_batchbuffer batch;
-
drm_intel_bo *first_post_swapbuffers_batch;
bool need_throttle;
bool no_batch_wrap;
diff --git a/src/mesa/drivers/dri/i965/intel_screen.c b/src/mesa/drivers/dri/i965/intel_screen.c
index c122c46221e..41090e8bc59 100644
--- a/src/mesa/drivers/dri/i965/intel_screen.c
+++ b/src/mesa/drivers/dri/i965/intel_screen.c
@@ -164,7 +164,7 @@ intelDRI2Flush(__DRIdrawable *drawable)
intel_resolve_for_dri2_flush(brw, drawable);
intel->need_throttle = true;
- if (intel->batch.used)
+ if (brw->batch.used)
intel_batchbuffer_flush(brw);
if (INTEL_DEBUG & DEBUG_AUB) {
diff --git a/src/mesa/drivers/dri/i965/intel_syncobj.c b/src/mesa/drivers/dri/i965/intel_syncobj.c
index b6dfc5b059e..8f075dd6f6a 100644
--- a/src/mesa/drivers/dri/i965/intel_syncobj.c
+++ b/src/mesa/drivers/dri/i965/intel_syncobj.c
@@ -69,13 +69,12 @@ intel_fence_sync(struct gl_context *ctx, struct gl_sync_object *s,
GLenum condition, GLbitfield flags)
{
struct brw_context *brw = brw_context(ctx);
- struct intel_context *intel = intel_context(ctx);
struct intel_sync_object *sync = (struct intel_sync_object *)s;
assert(condition == GL_SYNC_GPU_COMMANDS_COMPLETE);
intel_batchbuffer_emit_mi_flush(brw);
- sync->bo = intel->batch.bo;
+ sync->bo = brw->batch.bo;
drm_intel_bo_reference(sync->bo);
intel_flush(ctx);
diff --git a/src/mesa/drivers/dri/i965/intel_tex_subimage.c b/src/mesa/drivers/dri/i965/intel_tex_subimage.c
index e96c29a1436..b5bf477848b 100644
--- a/src/mesa/drivers/dri/i965/intel_tex_subimage.c
+++ b/src/mesa/drivers/dri/i965/intel_tex_subimage.c
@@ -215,7 +215,7 @@ intel_texsubimage_tiled_memcpy(struct gl_context * ctx,
bo = image->mt->region->bo;
- if (drm_intel_bo_references(intel->batch.bo, bo)) {
+ if (drm_intel_bo_references(brw->batch.bo, bo)) {
perf_debug("Flushing before mapping a referenced bo.\n");
intel_batchbuffer_flush(brw);
}