author    | Kenneth Graunke <[email protected]> | 2013-07-06 00:36:46 -0700
committer | Kenneth Graunke <[email protected]> | 2013-07-09 14:09:34 -0700
commit    | 53631be4ebaa4fb13a7f129727c1cdd32fcc6f3d (patch)
tree      | ddad922e67aee2521ea03acb27bcf38085d836c8
parent    | 2e26afb37b83effe44b218d5b2a305020b8ad22f (diff)
i965: Move intel_context::gen and gt fields to brw_context.
Most functions no longer use intel_context, so this patch additionally
removes the local "intel" variables to avoid compiler warnings.
Signed-off-by: Kenneth Graunke <[email protected]>
Acked-by: Chris Forbes <[email protected]>
Acked-by: Paul Berry <[email protected]>
Acked-by: Anuj Phogat <[email protected]>
67 files changed, 483 insertions, 622 deletions
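The change is mostly mechanical: code that used to reach the generation/GT numbers through an embedded `intel_context` now reads them directly from `brw_context`, and the now-unneeded local `intel` pointers are deleted so they do not trigger unused-variable warnings. A minimal sketch of that pattern is below; the struct shapes, the helper name `max_clip_threads_*`, and the thread counts are simplified stand-ins for illustration, not the real Mesa definitions.

```c
/* Simplified stand-in structs, not the actual Mesa declarations. */
struct intel_context {
   int gen;   /* before the patch: generation lived here */
   int gt;
};

struct brw_context {
   struct intel_context intel;   /* embedded base context */
   int gen;                      /* after the patch: chipset generation */
   int gt;                       /* after the patch: GT variant within a generation */
};

/* Old style: fetch the embedded intel_context just to read ->gen.
 * Once every such read is converted, the local "intel" becomes an
 * unused variable, which is why the patch also deletes it. */
static int max_clip_threads_old(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   return intel->gen == 5 ? 16 : 2;
}

/* New style: read the field straight from brw_context. */
static int max_clip_threads_new(struct brw_context *brw)
{
   return brw->gen == 5 ? 16 : 2;
}
```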
diff --git a/src/mesa/drivers/dri/i965/brw_blorp.cpp b/src/mesa/drivers/dri/i965/brw_blorp.cpp index cba0ce4fe8d..20ea09e46f2 100644 --- a/src/mesa/drivers/dri/i965/brw_blorp.cpp +++ b/src/mesa/drivers/dri/i965/brw_blorp.cpp @@ -191,9 +191,7 @@ intel_hiz_exec(struct brw_context *brw, struct intel_mipmap_tree *mt, void brw_blorp_exec(struct brw_context *brw, const brw_blorp_params *params) { - struct intel_context *intel = &brw->intel; - - switch (intel->gen) { + switch (brw->gen) { case 6: gen6_blorp_exec(brw, params); break; diff --git a/src/mesa/drivers/dri/i965/brw_blorp_blit.cpp b/src/mesa/drivers/dri/i965/brw_blorp_blit.cpp index 28405927976..b56289c495b 100644 --- a/src/mesa/drivers/dri/i965/brw_blorp_blit.cpp +++ b/src/mesa/drivers/dri/i965/brw_blorp_blit.cpp @@ -359,7 +359,7 @@ brw_blorp_copytexsubimage(struct brw_context *brw, struct intel_mipmap_tree *dst_mt = intel_image->mt; /* BLORP is not supported before Gen6. */ - if (intel->gen < 6) + if (brw->gen < 6) return false; if (!color_formats_match(src_mt->format, dst_mt->format)) { @@ -435,10 +435,8 @@ brw_blorp_framebuffer(struct brw_context *brw, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter) { - struct intel_context *intel = &brw->intel; - /* BLORP is not supported before Gen6. */ - if (intel->gen < 6) + if (brw->gen < 6) return mask; static GLbitfield buffer_bits[] = { @@ -844,7 +842,7 @@ brw_blorp_blit_program::compile(struct brw_context *brw, * irrelevant, because we are going to fetch all samples. */ if (key->blend && !key->blit_scaled) { - if (brw->intel.gen == 6) { + if (brw->gen == 6) { /* Gen6 hardware an automatically blend using the SAMPLE message */ single_to_blend(); sample(texture_data[0]); @@ -1802,7 +1800,7 @@ brw_blorp_blit_program::texel_fetch(struct brw_reg dst) SAMPLER_MESSAGE_ARG_V_INT }; - switch (brw->intel.gen) { + switch (brw->gen) { case 6: texture_lookup(dst, GEN5_SAMPLER_MESSAGE_SAMPLE_LD, gen6_args, s_is_zero ? 2 : 5); @@ -2023,7 +2021,7 @@ compute_msaa_layout_for_pipeline(struct brw_context *brw, unsigned num_samples, } /* Prior to Gen7, all MSAA surfaces use IMS layout. */ - if (brw->intel.gen == 6) { + if (brw->gen == 6) { assert(true_layout == INTEL_MSAA_LAYOUT_IMS); } @@ -2078,7 +2076,7 @@ brw_blorp_blit_params::brw_blorp_blit_params(struct brw_context *brw, break; } - if (brw->intel.gen > 6) { + if (brw->gen > 6) { /* Gen7's rendering hardware only supports the IMS layout for depth and * stencil render targets. Blorp always maps its destination surface as * a color render target (even if it's actually a depth or stencil diff --git a/src/mesa/drivers/dri/i965/brw_clear.c b/src/mesa/drivers/dri/i965/brw_clear.c index b0a9fe74a28..216ab9e93a9 100644 --- a/src/mesa/drivers/dri/i965/brw_clear.c +++ b/src/mesa/drivers/dri/i965/brw_clear.c @@ -105,13 +105,12 @@ static bool brw_fast_clear_depth(struct gl_context *ctx) { struct brw_context *brw = brw_context(ctx); - struct intel_context *intel = intel_context(ctx); struct gl_framebuffer *fb = ctx->DrawBuffer; struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH); struct intel_mipmap_tree *mt = depth_irb->mt; - if (intel->gen < 6) + if (brw->gen < 6) return false; if (!intel_renderbuffer_has_hiz(depth_irb)) @@ -155,7 +154,7 @@ brw_fast_clear_depth(struct gl_context *ctx) * width of the map (LOD0) is not multiple of 16, fast clear * optimization must be disabled. 
*/ - if (intel->gen == 6 && (mt->level[depth_irb->mt_level].width % 16) != 0) + if (brw->gen == 6 && (mt->level[depth_irb->mt_level].width % 16) != 0) return false; /* FALLTHROUGH */ @@ -184,7 +183,7 @@ brw_fast_clear_depth(struct gl_context *ctx) intel_hiz_exec(brw, mt, depth_irb->mt_level, depth_irb->mt_layer, GEN6_HIZ_OP_DEPTH_CLEAR); - if (intel->gen == 6) { + if (brw->gen == 6) { /* From the Sandy Bridge PRM, volume 2 part 1, page 314: * * "DevSNB, DevSNB-B{W/A}]: Depth buffer clear pass must be followed @@ -231,7 +230,7 @@ brw_clear(struct gl_context *ctx, GLbitfield mask) } /* BLORP is currently only supported on Gen6+. */ - if (intel->gen >= 6) { + if (brw->gen >= 6) { if (mask & BUFFER_BITS_COLOR) { if (brw_blorp_clear_color(brw, fb, partial_clear)) { debug_mask("blorp color", mask & BUFFER_BITS_COLOR); diff --git a/src/mesa/drivers/dri/i965/brw_clip.c b/src/mesa/drivers/dri/i965/brw_clip.c index cdc3b7a0e4d..56a8f7c093d 100644 --- a/src/mesa/drivers/dri/i965/brw_clip.c +++ b/src/mesa/drivers/dri/i965/brw_clip.c @@ -51,7 +51,6 @@ static void compile_clip_prog( struct brw_context *brw, struct brw_clip_prog_key *key ) { - struct intel_context *intel = &brw->intel; struct brw_clip_compile c; const GLuint *program; void *mem_ctx; @@ -117,7 +116,7 @@ static void compile_clip_prog( struct brw_context *brw, printf("clip:\n"); for (i = 0; i < program_size / sizeof(struct brw_instruction); i++) brw_disasm(stdout, &((struct brw_instruction *)program)[i], - intel->gen); + brw->gen); printf("\n"); } @@ -153,7 +152,7 @@ brw_upload_clip_prog(struct brw_context *brw) /* _NEW_TRANSFORM (also part of VUE map)*/ key.nr_userclip = _mesa_bitcount_64(ctx->Transform.ClipPlanesEnabled); - if (intel->gen == 5) + if (brw->gen == 5) key.clip_mode = BRW_CLIPMODE_KERNEL_CLIP; else key.clip_mode = BRW_CLIPMODE_NORMAL; diff --git a/src/mesa/drivers/dri/i965/brw_clip_line.c b/src/mesa/drivers/dri/i965/brw_clip_line.c index f7c8d099a54..9ce80b83b9f 100644 --- a/src/mesa/drivers/dri/i965/brw_clip_line.c +++ b/src/mesa/drivers/dri/i965/brw_clip_line.c @@ -45,7 +45,7 @@ static void brw_clip_line_alloc_regs( struct brw_clip_compile *c ) { - struct intel_context *intel = &c->func.brw->intel; + struct brw_context *brw = c->func.brw; GLuint i = 0,j; /* Register usage is static, precompute here: @@ -85,7 +85,7 @@ static void brw_clip_line_alloc_regs( struct brw_clip_compile *c ) i++; } - if (intel->gen == 5) { + if (brw->gen == 5) { c->reg.ff_sync = retype(brw_vec1_grf(i, 0), BRW_REGISTER_TYPE_UD); i++; } diff --git a/src/mesa/drivers/dri/i965/brw_clip_state.c b/src/mesa/drivers/dri/i965/brw_clip_state.c index e0cc6499dd3..96a82a80fcb 100644 --- a/src/mesa/drivers/dri/i965/brw_clip_state.c +++ b/src/mesa/drivers/dri/i965/brw_clip_state.c @@ -105,7 +105,7 @@ brw_upload_clip_unit(struct brw_context *brw) /* Although up to 16 concurrent Clip threads are allowed on Ironlake, * only 2 threads can output VUEs at a time. 
*/ - if (intel->gen == 5) + if (brw->gen == 5) clip->thread4.max_threads = 16 - 1; else clip->thread4.max_threads = 2 - 1; diff --git a/src/mesa/drivers/dri/i965/brw_clip_tri.c b/src/mesa/drivers/dri/i965/brw_clip_tri.c index 72de3043476..bea08530ec0 100644 --- a/src/mesa/drivers/dri/i965/brw_clip_tri.c +++ b/src/mesa/drivers/dri/i965/brw_clip_tri.c @@ -50,7 +50,7 @@ static void release_tmps( struct brw_clip_compile *c ) void brw_clip_tri_alloc_regs( struct brw_clip_compile *c, GLuint nr_verts ) { - struct intel_context *intel = &c->func.brw->intel; + struct brw_context *brw = c->func.brw; GLuint i = 0,j; /* Register usage is static, precompute here: @@ -122,7 +122,7 @@ void brw_clip_tri_alloc_regs( struct brw_clip_compile *c, c->reg.vertex_src_mask = retype(brw_vec1_grf(i, 0), BRW_REGISTER_TYPE_UD); i++; - if (intel->gen == 5) { + if (brw->gen == 5) { c->reg.ff_sync = retype(brw_vec1_grf(i, 0), BRW_REGISTER_TYPE_UD); i++; } diff --git a/src/mesa/drivers/dri/i965/brw_clip_util.c b/src/mesa/drivers/dri/i965/brw_clip_util.c index 8d90017b046..37b77341648 100644 --- a/src/mesa/drivers/dri/i965/brw_clip_util.c +++ b/src/mesa/drivers/dri/i965/brw_clip_util.c @@ -362,11 +362,10 @@ void brw_clip_init_clipmask( struct brw_clip_compile *c ) void brw_clip_ff_sync(struct brw_clip_compile *c) { - struct intel_context *intel = &c->func.brw->intel; - - if (intel->gen == 5) { - struct brw_compile *p = &c->func; + struct brw_compile *p = &c->func; + struct brw_context *brw = p->brw; + if (brw->gen == 5) { brw_set_conditionalmod(p, BRW_CONDITIONAL_Z); brw_AND(p, brw_null_reg(), c->reg.ff_sync, brw_imm_ud(0x1)); brw_IF(p, BRW_EXECUTE_1); @@ -387,9 +386,9 @@ void brw_clip_ff_sync(struct brw_clip_compile *c) void brw_clip_init_ff_sync(struct brw_clip_compile *c) { - struct intel_context *intel = &c->func.brw->intel; + struct brw_context *brw = c->func.brw; - if (intel->gen == 5) { + if (brw->gen == 5) { struct brw_compile *p = &c->func; brw_MOV(p, c->reg.ff_sync, brw_imm_ud(0)); diff --git a/src/mesa/drivers/dri/i965/brw_context.c b/src/mesa/drivers/dri/i965/brw_context.c index 90198bc3cdd..12f107c5ab2 100644 --- a/src/mesa/drivers/dri/i965/brw_context.c +++ b/src/mesa/drivers/dri/i965/brw_context.c @@ -62,11 +62,11 @@ static size_t brw_query_samples_for_format(struct gl_context *ctx, GLenum target, GLenum internalFormat, int samples[16]) { - struct intel_context *intel = intel_context(ctx); + struct brw_context *brw = brw_context(ctx); (void) target; - switch (intel->gen) { + switch (brw->gen) { case 7: samples[0] = 8; samples[1] = 4; @@ -136,7 +136,7 @@ brw_initialize_context_constants(struct brw_context *brw) ctx->Const.Max3DTextureLevels = 9; ctx->Const.MaxCubeTextureLevels = 12; - if (intel->gen >= 7) + if (brw->gen >= 7) ctx->Const.MaxArrayTextureLayers = 2048; else ctx->Const.MaxArrayTextureLayers = 512; @@ -167,12 +167,12 @@ brw_initialize_context_constants(struct brw_context *brw) ctx->Const.MaxTransformFeedbackSeparateComponents = BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS; - if (intel->gen == 6) { + if (brw->gen == 6) { ctx->Const.MaxSamples = 4; ctx->Const.MaxColorTextureSamples = 4; ctx->Const.MaxDepthTextureSamples = 4; ctx->Const.MaxIntegerSamples = 4; - } else if (intel->gen >= 7) { + } else if (brw->gen >= 7) { ctx->Const.MaxSamples = 8; ctx->Const.MaxColorTextureSamples = 8; ctx->Const.MaxDepthTextureSamples = 8; @@ -191,7 +191,7 @@ brw_initialize_context_constants(struct brw_context *brw) ctx->Const.MaxPointSizeAA = 255.0; ctx->Const.PointSizeGranularity = 1.0; - if (intel->gen >= 6) + if 
(brw->gen >= 6) ctx->Const.MaxClipPlanes = 8; ctx->Const.VertexProgram.MaxNativeInstructions = 16 * 1024; @@ -235,7 +235,7 @@ brw_initialize_context_constants(struct brw_context *brw) * that affect provoking vertex decision. Always use last vertex * convention for quad primitive which works as expected for now. */ - if (intel->gen >= 6) + if (brw->gen >= 6) ctx->Const.QuadsFollowProvokingVertexConvention = false; ctx->Const.NativeIntegers = true; @@ -250,7 +250,7 @@ brw_initialize_context_constants(struct brw_context *brw) /* We want the GLSL compiler to emit code that uses condition codes */ for (int i = 0; i <= MESA_SHADER_FRAGMENT; i++) { - ctx->ShaderCompilerOptions[i].MaxIfDepth = intel->gen < 6 ? 16 : UINT_MAX; + ctx->ShaderCompilerOptions[i].MaxIfDepth = brw->gen < 6 ? 16 : UINT_MAX; ctx->ShaderCompilerOptions[i].EmitCondCodes = true; ctx->ShaderCompilerOptions[i].EmitNoNoise = true; ctx->ShaderCompilerOptions[i].EmitNoMainReturn = true; @@ -291,7 +291,7 @@ brwCreateContext(int api, /* brwInitVtbl needs to know the chipset generation so that it can set the * right pointers. */ - brw->intel.gen = screen->gen; + brw->gen = screen->gen; brwInitVtbl( brw ); @@ -313,7 +313,7 @@ brwCreateContext(int api, /* Reinitialize the context point state. It depends on ctx->Const values. */ _mesa_init_point(ctx); - if (intel->gen >= 6) { + if (brw->gen >= 6) { /* Create a new hardware context. Using a hardware context means that * our GPU state will be saved/restored on context switch, allowing us * to assume that the GPU is in the same state we left it in. @@ -341,11 +341,11 @@ brwCreateContext(int api, ctx->DriverFlags.NewRasterizerDiscard = BRW_NEW_RASTERIZER_DISCARD; ctx->DriverFlags.NewUniformBuffer = BRW_NEW_UNIFORM_BUFFER; - if (brw->is_g4x || intel->gen >= 5) { + if (brw->is_g4x || brw->gen >= 5) { brw->CMD_VF_STATISTICS = GM45_3DSTATE_VF_STATISTICS; brw->CMD_PIPELINE_SELECT = CMD_PIPELINE_SELECT_GM45; brw->has_surface_tile_offset = true; - if (intel->gen < 6) + if (brw->gen < 6) brw->has_compr4 = true; brw->has_aa_line_parameters = true; brw->has_pln = true; @@ -355,37 +355,37 @@ brwCreateContext(int api, } /* WM maximum threads is number of EUs times number of threads per EU. 
*/ - assert(intel->gen <= 7); + assert(brw->gen <= 7); if (brw->is_haswell) { - if (intel->gt == 1) { + if (brw->gt == 1) { brw->max_wm_threads = 102; brw->max_vs_threads = 70; brw->urb.size = 128; brw->urb.max_vs_entries = 640; brw->urb.max_gs_entries = 256; - } else if (intel->gt == 2) { + } else if (brw->gt == 2) { brw->max_wm_threads = 204; brw->max_vs_threads = 280; brw->urb.size = 256; brw->urb.max_vs_entries = 1664; brw->urb.max_gs_entries = 640; - } else if (intel->gt == 3) { + } else if (brw->gt == 3) { brw->max_wm_threads = 408; brw->max_vs_threads = 280; brw->urb.size = 512; brw->urb.max_vs_entries = 1664; brw->urb.max_gs_entries = 640; } - } else if (intel->gen == 7) { - if (intel->gt == 1) { + } else if (brw->gen == 7) { + if (brw->gt == 1) { brw->max_wm_threads = 48; brw->max_vs_threads = 36; brw->max_gs_threads = 36; brw->urb.size = 128; brw->urb.max_vs_entries = 512; brw->urb.max_gs_entries = 192; - } else if (intel->gt == 2) { + } else if (brw->gt == 2) { brw->max_wm_threads = 172; brw->max_vs_threads = 128; brw->max_gs_threads = 128; @@ -395,8 +395,8 @@ brwCreateContext(int api, } else { assert(!"Unknown gen7 device."); } - } else if (intel->gen == 6) { - if (intel->gt == 2) { + } else if (brw->gen == 6) { + if (brw->gt == 2) { brw->max_wm_threads = 80; brw->max_vs_threads = 60; brw->max_gs_threads = 60; @@ -412,7 +412,7 @@ brwCreateContext(int api, brw->urb.max_gs_entries = 256; } brw->urb.gen6_gs_previously_active = false; - } else if (intel->gen == 5) { + } else if (brw->gen == 5) { brw->urb.size = 1024; brw->max_vs_threads = 72; brw->max_gs_threads = 32; @@ -422,7 +422,7 @@ brwCreateContext(int api, brw->max_vs_threads = 32; brw->max_gs_threads = 2; brw->max_wm_threads = 10 * 5; - } else if (intel->gen < 6) { + } else if (brw->gen < 6) { brw->urb.size = 256; brw->max_vs_threads = 16; brw->max_gs_threads = 2; @@ -430,7 +430,7 @@ brwCreateContext(int api, brw->has_negative_rhw_bug = true; } - if (intel->gen <= 7) { + if (brw->gen <= 7) { brw->needs_unlit_centroid_workaround = true; } diff --git a/src/mesa/drivers/dri/i965/brw_context.h b/src/mesa/drivers/dri/i965/brw_context.h index d5c60602e4f..4e436f7cde9 100644 --- a/src/mesa/drivers/dri/i965/brw_context.h +++ b/src/mesa/drivers/dri/i965/brw_context.h @@ -877,6 +877,9 @@ struct brw_context bool emit_state_always; + int gen; + int gt; + bool is_g4x; bool is_baytrail; bool is_haswell; @@ -1445,9 +1448,7 @@ static inline uint32_t brw_program_reloc(struct brw_context *brw, uint32_t state_offset, uint32_t prog_offset) { - struct intel_context *intel = &brw->intel; - - if (intel->gen >= 5) { + if (brw->gen >= 5) { /* Using state base address. */ return prog_offset; } diff --git a/src/mesa/drivers/dri/i965/brw_draw.c b/src/mesa/drivers/dri/i965/brw_draw.c index 3651b93e161..ea28d9eaff1 100644 --- a/src/mesa/drivers/dri/i965/brw_draw.c +++ b/src/mesa/drivers/dri/i965/brw_draw.c @@ -160,7 +160,6 @@ static void brw_emit_prim(struct brw_context *brw, const struct _mesa_prim *prim, uint32_t hw_prim) { - struct intel_context *intel = &brw->intel; int verts_per_instance; int vertex_access_type; int start_vertex_location; @@ -181,7 +180,7 @@ static void brw_emit_prim(struct brw_context *brw, } /* We only need to trim the primitive count on pre-Gen6. 
*/ - if (intel->gen < 6) + if (brw->gen < 6) verts_per_instance = trim(prim->mode, prim->count); else verts_per_instance = prim->count; @@ -363,7 +362,6 @@ static bool brw_try_draw_prims( struct gl_context *ctx, GLuint min_index, GLuint max_index ) { - struct intel_context *intel = intel_context(ctx); struct brw_context *brw = brw_context(ctx); bool retval = true; GLuint i; @@ -431,7 +429,7 @@ static bool brw_try_draw_prims( struct gl_context *ctx, brw->basevertex = prim->basevertex; brw->state.dirty.brw |= BRW_NEW_VERTICES; } - if (intel->gen < 6) + if (brw->gen < 6) brw_set_prim(brw, &prim[i]); else gen6_set_prim(brw, &prim[i]); @@ -447,7 +445,7 @@ retry: brw_upload_state(brw); } - if (intel->gen >= 7) + if (brw->gen >= 7) gen7_emit_prim(brw, &prim[i], brw->primitive); else brw_emit_prim(brw, &prim[i], brw->primitive); diff --git a/src/mesa/drivers/dri/i965/brw_draw_upload.c b/src/mesa/drivers/dri/i965/brw_draw_upload.c index 55b07b56f45..17a5629de91 100644 --- a/src/mesa/drivers/dri/i965/brw_draw_upload.c +++ b/src/mesa/drivers/dri/i965/brw_draw_upload.c @@ -226,7 +226,6 @@ static unsigned get_surface_type(struct brw_context *brw, const struct gl_client_array *glarray) { - struct intel_context *intel = &brw->intel; int size = glarray->Size; if (unlikely(INTEL_DEBUG & DEBUG_VERTS)) @@ -265,7 +264,7 @@ get_surface_type(struct brw_context *brw, return ubyte_types_norm[size]; } case GL_FIXED: - if (intel->gen >= 8 || brw->is_haswell) + if (brw->gen >= 8 || brw->is_haswell) return fixed_point_types[size]; /* This produces GL_FIXED inputs as values between INT32_MIN and @@ -279,7 +278,7 @@ get_surface_type(struct brw_context *brw, */ case GL_INT_2_10_10_10_REV: assert(size == 4); - if (intel->gen >= 8 || brw->is_haswell) { + if (brw->gen >= 8 || brw->is_haswell) { return glarray->Format == GL_BGRA ? BRW_SURFACEFORMAT_B10G10R10A2_SNORM : BRW_SURFACEFORMAT_R10G10B10A2_SNORM; @@ -287,7 +286,7 @@ get_surface_type(struct brw_context *brw, return BRW_SURFACEFORMAT_R10G10B10A2_UINT; case GL_UNSIGNED_INT_2_10_10_10_REV: assert(size == 4); - if (intel->gen >= 8 || brw->is_haswell) { + if (brw->gen >= 8 || brw->is_haswell) { return glarray->Format == GL_BGRA ? BRW_SURFACEFORMAT_B10G10R10A2_UNORM : BRW_SURFACEFORMAT_R10G10B10A2_UNORM; @@ -304,7 +303,7 @@ get_surface_type(struct brw_context *brw, */ if (glarray->Type == GL_INT_2_10_10_10_REV) { assert(size == 4); - if (intel->gen >= 8 || brw->is_haswell) { + if (brw->gen >= 8 || brw->is_haswell) { return glarray->Format == GL_BGRA ? BRW_SURFACEFORMAT_B10G10R10A2_SSCALED : BRW_SURFACEFORMAT_R10G10B10A2_SSCALED; @@ -312,7 +311,7 @@ get_surface_type(struct brw_context *brw, return BRW_SURFACEFORMAT_R10G10B10A2_UINT; } else if (glarray->Type == GL_UNSIGNED_INT_2_10_10_10_REV) { assert(size == 4); - if (intel->gen >= 8 || brw->is_haswell) { + if (brw->gen >= 8 || brw->is_haswell) { return glarray->Format == GL_BGRA ? 
BRW_SURFACEFORMAT_B10G10R10A2_USCALED : BRW_SURFACEFORMAT_R10G10B10A2_USCALED; @@ -331,7 +330,7 @@ get_surface_type(struct brw_context *brw, case GL_UNSIGNED_SHORT: return ushort_types_scale[size]; case GL_UNSIGNED_BYTE: return ubyte_types_scale[size]; case GL_FIXED: - if (intel->gen >= 8 || brw->is_haswell) + if (brw->gen >= 8 || brw->is_haswell) return fixed_point_types[size]; /* This produces GL_FIXED inputs as values between INT32_MIN and @@ -401,7 +400,6 @@ copy_array_to_vbo_array(struct brw_context *brw, static void brw_prepare_vertices(struct brw_context *brw) { struct gl_context *ctx = &brw->intel.ctx; - struct intel_context *intel = intel_context(ctx); /* CACHE_NEW_VS_PROG */ GLbitfield64 vs_inputs = brw->vs.prog_data->inputs_read; const unsigned char *ptr = NULL; @@ -420,7 +418,7 @@ static void brw_prepare_vertices(struct brw_context *brw) * is passed sideband through the fixed function units. So, we need to * prepare the vertex buffer for it, but it's not present in inputs_read. */ - if (intel->gen >= 6 && (ctx->Polygon.FrontMode != GL_FILL || + if (brw->gen >= 6 && (ctx->Polygon.FrontMode != GL_FILL || ctx->Polygon.BackMode != GL_FILL)) { vs_inputs |= VERT_BIT_EDGEFLAG; } @@ -592,8 +590,6 @@ static void brw_prepare_vertices(struct brw_context *brw) static void brw_emit_vertices(struct brw_context *brw) { - struct gl_context *ctx = &brw->intel.ctx; - struct intel_context *intel = intel_context(ctx); GLuint i, nr_elements; brw_prepare_vertices(brw); @@ -612,7 +608,7 @@ static void brw_emit_vertices(struct brw_context *brw) if (nr_elements == 0) { BEGIN_BATCH(3); OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | 1); - if (intel->gen >= 6) { + if (brw->gen >= 6) { OUT_BATCH((0 << GEN6_VE0_INDEX_SHIFT) | GEN6_VE0_VALID | (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) | @@ -635,7 +631,7 @@ static void brw_emit_vertices(struct brw_context *brw) */ if (brw->vb.nr_buffers) { - if (intel->gen >= 6) { + if (brw->gen >= 6) { assert(brw->vb.nr_buffers <= 33); } else { assert(brw->vb.nr_buffers <= 17); @@ -647,7 +643,7 @@ static void brw_emit_vertices(struct brw_context *brw) struct brw_vertex_buffer *buffer = &brw->vb.buffers[i]; uint32_t dw0; - if (intel->gen >= 6) { + if (brw->gen >= 6) { dw0 = buffer->step_rate ? GEN6_VB0_ACCESS_INSTANCEDATA : GEN6_VB0_ACCESS_VERTEXDATA; @@ -659,12 +655,12 @@ static void brw_emit_vertices(struct brw_context *brw) dw0 |= i << BRW_VB0_INDEX_SHIFT; } - if (intel->gen >= 7) + if (brw->gen >= 7) dw0 |= GEN7_VB0_ADDRESS_MODIFYENABLE; OUT_BATCH(dw0 | (buffer->stride << BRW_VB0_PITCH_SHIFT)); OUT_RELOC(buffer->bo, I915_GEM_DOMAIN_VERTEX, 0, buffer->offset); - if (intel->gen >= 5) { + if (brw->gen >= 5) { OUT_RELOC(buffer->bo, I915_GEM_DOMAIN_VERTEX, 0, buffer->bo->size - 1); } else OUT_BATCH(0); @@ -676,7 +672,7 @@ static void brw_emit_vertices(struct brw_context *brw) /* The hardware allows one more VERTEX_ELEMENTS than VERTEX_BUFFERS, presumably * for VertexID/InstanceID. */ - if (intel->gen >= 6) { + if (brw->gen >= 6) { assert(nr_elements <= 34); } else { assert(nr_elements <= 18); @@ -705,7 +701,7 @@ static void brw_emit_vertices(struct brw_context *brw) * of in the VUE. We have to upload it sideband as the last vertex * element according to the B-Spec. 
*/ - if (intel->gen >= 6) { + if (brw->gen >= 6) { gen6_edgeflag_input = input; continue; } @@ -723,7 +719,7 @@ static void brw_emit_vertices(struct brw_context *brw) break; } - if (intel->gen >= 6) { + if (brw->gen >= 6) { OUT_BATCH((input->buffer << GEN6_VE0_INDEX_SHIFT) | GEN6_VE0_VALID | (format << BRW_VE0_FORMAT_SHIFT) | @@ -735,7 +731,7 @@ static void brw_emit_vertices(struct brw_context *brw) (input->offset << BRW_VE0_SRC_OFFSET_SHIFT)); } - if (intel->gen >= 5) + if (brw->gen >= 5) OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) | (comp1 << BRW_VE1_COMPONENT_1_SHIFT) | (comp2 << BRW_VE1_COMPONENT_2_SHIFT) | @@ -748,7 +744,7 @@ static void brw_emit_vertices(struct brw_context *brw) ((i * 4) << BRW_VE1_DST_OFFSET_SHIFT)); } - if (intel->gen >= 6 && gen6_edgeflag_input) { + if (brw->gen >= 6 && gen6_edgeflag_input) { uint32_t format = get_surface_type(brw, gen6_edgeflag_input->glarray); OUT_BATCH((gen6_edgeflag_input->buffer << GEN6_VE0_INDEX_SHIFT) | @@ -770,7 +766,7 @@ static void brw_emit_vertices(struct brw_context *brw) (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) | (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_3_SHIFT)); - if (intel->gen >= 6) { + if (brw->gen >= 6) { dw0 |= GEN6_VE0_VALID; } else { dw0 |= BRW_VE0_VALID; diff --git a/src/mesa/drivers/dri/i965/brw_eu.c b/src/mesa/drivers/dri/i965/brw_eu.c index bab56575db2..983aa4c4945 100644 --- a/src/mesa/drivers/dri/i965/brw_eu.c +++ b/src/mesa/drivers/dri/i965/brw_eu.c @@ -111,7 +111,7 @@ brw_set_compression_control(struct brw_compile *p, { p->compressed = (compression_control == BRW_COMPRESSION_COMPRESSED); - if (p->brw->intel.gen >= 6) { + if (p->brw->gen >= 6) { /* Since we don't use the 32-wide support in gen6, we translate * the pre-gen6 compression control here. */ @@ -154,7 +154,7 @@ void brw_set_saturate( struct brw_compile *p, bool enable ) void brw_set_acc_write_control(struct brw_compile *p, GLuint value) { - if (p->brw->intel.gen >= 6) + if (p->brw->gen >= 6) p->current->header.acc_wr_control = value; } @@ -260,6 +260,6 @@ brw_dump_compile(struct brw_compile *p, FILE *out, int start, int end) offset += 16; } - brw_disasm(stdout, insn, p->brw->intel.gen); + brw_disasm(stdout, insn, p->brw->gen); } } diff --git a/src/mesa/drivers/dri/i965/brw_eu_compact.c b/src/mesa/drivers/dri/i965/brw_eu_compact.c index c7ebf535df0..fa43444d5d1 100644 --- a/src/mesa/drivers/dri/i965/brw_eu_compact.c +++ b/src/mesa/drivers/dri/i965/brw_eu_compact.c @@ -330,7 +330,6 @@ set_control_index(struct brw_context *brw, struct brw_compact_instruction *dst, struct brw_instruction *src) { - struct intel_context *intel = &brw->intel; uint32_t *src_u32 = (uint32_t *)src; uint32_t uncompacted = 0; @@ -339,7 +338,7 @@ set_control_index(struct brw_context *brw, /* On gen7, the flag register number gets integrated into the control * index. 
*/ - if (intel->gen >= 7) + if (brw->gen >= 7) uncompacted |= ((src_u32[2] >> 25) & 0x3) << 17; for (int i = 0; i < 32; i++) { @@ -450,7 +449,6 @@ brw_try_compact_instruction(struct brw_compile *p, struct brw_instruction *src) { struct brw_context *brw = p->brw; - struct intel_context *intel = &brw->intel; struct brw_compact_instruction temp; if (src->header.opcode == BRW_OPCODE_IF || @@ -482,7 +480,7 @@ brw_try_compact_instruction(struct brw_compile *p, return false; temp.dw0.acc_wr_control = src->header.acc_wr_control; temp.dw0.conditionalmod = src->header.destreg__conditionalmod; - if (intel->gen <= 6) + if (brw->gen <= 6) temp.dw0.flag_subreg_nr = src->bits2.da1.flag_subreg_nr; temp.dw0.cmpt_ctrl = 1; if (!set_src0_index(&temp, src)) @@ -503,14 +501,13 @@ set_uncompacted_control(struct brw_context *brw, struct brw_instruction *dst, struct brw_compact_instruction *src) { - struct intel_context *intel = &brw->intel; uint32_t *dst_u32 = (uint32_t *)dst; uint32_t uncompacted = control_index_table[src->dw0.control_index]; dst_u32[0] |= ((uncompacted >> 0) & 0xffff) << 8; dst_u32[0] |= ((uncompacted >> 16) & 0x1) << 31; - if (intel->gen >= 7) + if (brw->gen >= 7) dst_u32[2] |= ((uncompacted >> 17) & 0x3) << 25; } @@ -561,7 +558,6 @@ brw_uncompact_instruction(struct brw_context *brw, struct brw_instruction *dst, struct brw_compact_instruction *src) { - struct intel_context *intel = &brw->intel; memset(dst, 0, sizeof(*dst)); dst->header.opcode = src->dw0.opcode; @@ -572,7 +568,7 @@ brw_uncompact_instruction(struct brw_context *brw, set_uncompacted_subreg(dst, src); dst->header.acc_wr_control = src->dw0.acc_wr_control; dst->header.destreg__conditionalmod = src->dw0.conditionalmod; - if (intel->gen <= 6) + if (brw->gen <= 6) dst->bits2.da1.flag_subreg_nr = src->dw0.flag_subreg_nr; set_uncompacted_src0(dst, src); set_uncompacted_src1(dst, src); @@ -585,15 +581,14 @@ void brw_debug_compact_uncompact(struct brw_context *brw, struct brw_instruction *orig, struct brw_instruction *uncompacted) { - struct intel_context *intel = &brw->intel; fprintf(stderr, "Instruction compact/uncompact changed (gen%d):\n", - intel->gen); + brw->gen); fprintf(stderr, " before: "); - brw_disasm(stderr, orig, intel->gen); + brw_disasm(stderr, orig, brw->gen); fprintf(stderr, " after: "); - brw_disasm(stderr, uncompacted, intel->gen); + brw_disasm(stderr, uncompacted, brw->gen); uint32_t *before_bits = (uint32_t *)orig; uint32_t *after_bits = (uint32_t *)uncompacted; @@ -638,7 +633,6 @@ update_uip_jip(struct brw_instruction *insn, int this_old_ip, void brw_init_compaction_tables(struct brw_context *brw) { - struct intel_context *intel = &brw->intel; assert(gen6_control_index_table[ARRAY_SIZE(gen6_control_index_table) - 1] != 0); assert(gen6_datatype_table[ARRAY_SIZE(gen6_datatype_table) - 1] != 0); assert(gen6_subreg_table[ARRAY_SIZE(gen6_subreg_table) - 1] != 0); @@ -648,7 +642,7 @@ brw_init_compaction_tables(struct brw_context *brw) assert(gen7_subreg_table[ARRAY_SIZE(gen6_subreg_table) - 1] != 0); assert(gen7_src_index_table[ARRAY_SIZE(gen6_src_index_table) - 1] != 0); - switch (intel->gen) { + switch (brw->gen) { case 7: control_index_table = gen7_control_index_table; datatype_table = gen7_datatype_table; @@ -670,7 +664,6 @@ void brw_compact_instructions(struct brw_compile *p) { struct brw_context *brw = p->brw; - struct intel_context *intel = &brw->intel; void *store = p->store; /* For an instruction at byte offset 8*i before compaction, this is the number * of compacted instructions that preceded it. 
@@ -681,7 +674,7 @@ brw_compact_instructions(struct brw_compile *p) */ int old_ip[p->next_insn_offset / 8]; - if (intel->gen < 6) + if (brw->gen < 6) return; int src_offset; @@ -759,7 +752,7 @@ brw_compact_instructions(struct brw_compile *p) case BRW_OPCODE_ELSE: case BRW_OPCODE_ENDIF: case BRW_OPCODE_WHILE: - if (intel->gen == 6) { + if (brw->gen == 6) { target_old_ip = this_old_ip + insn->bits1.branch_gen6.jump_count; target_compacted_count = compacted_counts[target_old_ip]; insn->bits1.branch_gen6.jump_count -= (target_compacted_count - diff --git a/src/mesa/drivers/dri/i965/brw_eu_emit.c b/src/mesa/drivers/dri/i965/brw_eu_emit.c index 41feaa99eb8..cecabc05134 100644 --- a/src/mesa/drivers/dri/i965/brw_eu_emit.c +++ b/src/mesa/drivers/dri/i965/brw_eu_emit.c @@ -63,8 +63,8 @@ gen6_resolve_implied_move(struct brw_compile *p, struct brw_reg *src, GLuint msg_reg_nr) { - struct intel_context *intel = &p->brw->intel; - if (intel->gen < 6) + struct brw_context *brw = p->brw; + if (brw->gen < 6) return; if (src->file == BRW_MESSAGE_REGISTER_FILE) @@ -92,8 +92,8 @@ gen7_convert_mrf_to_grf(struct brw_compile *p, struct brw_reg *reg) * Since we're pretending to have 16 MRFs anyway, we may as well use the * registers required for messages with EOT. */ - struct intel_context *intel = &p->brw->intel; - if (intel->gen == 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) { + struct brw_context *brw = p->brw; + if (brw->gen == 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) { reg->file = BRW_GENERAL_REGISTER_FILE; reg->nr += GEN7_MRF_HACK_START; } @@ -240,14 +240,13 @@ brw_set_src0(struct brw_compile *p, struct brw_instruction *insn, struct brw_reg reg) { struct brw_context *brw = p->brw; - struct intel_context *intel = &brw->intel; if (reg.type != BRW_ARCHITECTURE_REGISTER_FILE) assert(reg.nr < 128); gen7_convert_mrf_to_grf(p, ®); - if (intel->gen >= 6 && (insn->header.opcode == BRW_OPCODE_SEND || + if (brw->gen >= 6 && (insn->header.opcode == BRW_OPCODE_SEND || insn->header.opcode == BRW_OPCODE_SENDC)) { /* Any source modifiers or regions will be ignored, since this just * identifies the MRF/GRF to start reading the message contents from. 
@@ -416,17 +415,17 @@ brw_set_message_descriptor(struct brw_compile *p, bool header_present, bool end_of_thread) { - struct intel_context *intel = &p->brw->intel; + struct brw_context *brw = p->brw; brw_set_src1(p, inst, brw_imm_d(0)); - if (intel->gen >= 5) { + if (brw->gen >= 5) { inst->bits3.generic_gen5.header_present = header_present; inst->bits3.generic_gen5.response_length = response_length; inst->bits3.generic_gen5.msg_length = msg_length; inst->bits3.generic_gen5.end_of_thread = end_of_thread; - if (intel->gen >= 6) { + if (brw->gen >= 6) { /* On Gen6+ Message target/SFID goes in bits 27:24 of the header */ inst->header.destreg__conditionalmod = sfid; } else { @@ -450,7 +449,6 @@ static void brw_set_math_message( struct brw_compile *p, GLuint dataType ) { struct brw_context *brw = p->brw; - struct intel_context *intel = &brw->intel; unsigned msg_length; unsigned response_length; @@ -481,7 +479,7 @@ static void brw_set_math_message( struct brw_compile *p, brw_set_message_descriptor(p, insn, BRW_SFID_MATH, msg_length, response_length, false, false); - if (intel->gen == 5) { + if (brw->gen == 5) { insn->bits3.math_gen5.function = function; insn->bits3.math_gen5.int_type = integer_type; insn->bits3.math_gen5.precision = low_precision; @@ -527,11 +525,10 @@ static void brw_set_urb_message( struct brw_compile *p, GLuint swizzle_control ) { struct brw_context *brw = p->brw; - struct intel_context *intel = &brw->intel; brw_set_message_descriptor(p, insn, BRW_SFID_URB, msg_length, response_length, true, end_of_thread); - if (intel->gen == 7) { + if (brw->gen == 7) { insn->bits3.urb_gen7.opcode = 0; /* URB_WRITE_HWORD */ insn->bits3.urb_gen7.offset = offset; assert(swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE); @@ -539,7 +536,7 @@ static void brw_set_urb_message( struct brw_compile *p, /* per_slot_offset = 0 makes it ignore offsets in message header */ insn->bits3.urb_gen7.per_slot_offset = 0; insn->bits3.urb_gen7.complete = complete; - } else if (intel->gen >= 5) { + } else if (brw->gen >= 5) { insn->bits3.urb_gen5.opcode = 0; /* URB_WRITE */ insn->bits3.urb_gen5.offset = offset; insn->bits3.urb_gen5.swizzle_control = swizzle_control; @@ -570,16 +567,15 @@ brw_set_dp_write_message(struct brw_compile *p, GLuint send_commit_msg) { struct brw_context *brw = p->brw; - struct intel_context *intel = &brw->intel; unsigned sfid; - if (intel->gen >= 7) { + if (brw->gen >= 7) { /* Use the Render Cache for RT writes; otherwise use the Data Cache */ if (msg_type == GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE) sfid = GEN6_SFID_DATAPORT_RENDER_CACHE; else sfid = GEN7_SFID_DATAPORT_DATA_CACHE; - } else if (intel->gen == 6) { + } else if (brw->gen == 6) { /* Use the render cache for all write messages. 
*/ sfid = GEN6_SFID_DATAPORT_RENDER_CACHE; } else { @@ -589,18 +585,18 @@ brw_set_dp_write_message(struct brw_compile *p, brw_set_message_descriptor(p, insn, sfid, msg_length, response_length, header_present, end_of_thread); - if (intel->gen >= 7) { + if (brw->gen >= 7) { insn->bits3.gen7_dp.binding_table_index = binding_table_index; insn->bits3.gen7_dp.msg_control = msg_control; insn->bits3.gen7_dp.last_render_target = last_render_target; insn->bits3.gen7_dp.msg_type = msg_type; - } else if (intel->gen == 6) { + } else if (brw->gen == 6) { insn->bits3.gen6_dp.binding_table_index = binding_table_index; insn->bits3.gen6_dp.msg_control = msg_control; insn->bits3.gen6_dp.last_render_target = last_render_target; insn->bits3.gen6_dp.msg_type = msg_type; insn->bits3.gen6_dp.send_commit_msg = send_commit_msg; - } else if (intel->gen == 5) { + } else if (brw->gen == 5) { insn->bits3.dp_write_gen5.binding_table_index = binding_table_index; insn->bits3.dp_write_gen5.msg_control = msg_control; insn->bits3.dp_write_gen5.last_render_target = last_render_target; @@ -627,12 +623,11 @@ brw_set_dp_read_message(struct brw_compile *p, GLuint response_length) { struct brw_context *brw = p->brw; - struct intel_context *intel = &brw->intel; unsigned sfid; - if (intel->gen >= 7) { + if (brw->gen >= 7) { sfid = GEN7_SFID_DATAPORT_DATA_CACHE; - } else if (intel->gen == 6) { + } else if (brw->gen == 6) { if (target_cache == BRW_DATAPORT_READ_TARGET_RENDER_CACHE) sfid = GEN6_SFID_DATAPORT_RENDER_CACHE; else @@ -644,18 +639,18 @@ brw_set_dp_read_message(struct brw_compile *p, brw_set_message_descriptor(p, insn, sfid, msg_length, response_length, header_present, false); - if (intel->gen >= 7) { + if (brw->gen >= 7) { insn->bits3.gen7_dp.binding_table_index = binding_table_index; insn->bits3.gen7_dp.msg_control = msg_control; insn->bits3.gen7_dp.last_render_target = 0; insn->bits3.gen7_dp.msg_type = msg_type; - } else if (intel->gen == 6) { + } else if (brw->gen == 6) { insn->bits3.gen6_dp.binding_table_index = binding_table_index; insn->bits3.gen6_dp.msg_control = msg_control; insn->bits3.gen6_dp.last_render_target = 0; insn->bits3.gen6_dp.msg_type = msg_type; insn->bits3.gen6_dp.send_commit_msg = 0; - } else if (intel->gen == 5) { + } else if (brw->gen == 5) { insn->bits3.dp_read_gen5.binding_table_index = binding_table_index; insn->bits3.dp_read_gen5.msg_control = msg_control; insn->bits3.dp_read_gen5.msg_type = msg_type; @@ -686,17 +681,16 @@ brw_set_sampler_message(struct brw_compile *p, GLuint return_format) { struct brw_context *brw = p->brw; - struct intel_context *intel = &brw->intel; brw_set_message_descriptor(p, insn, BRW_SFID_SAMPLER, msg_length, response_length, header_present, false); - if (intel->gen >= 7) { + if (brw->gen >= 7) { insn->bits3.sampler_gen7.binding_table_index = binding_table_index; insn->bits3.sampler_gen7.sampler = sampler; insn->bits3.sampler_gen7.msg_type = msg_type; insn->bits3.sampler_gen7.simd_mode = simd_mode; - } else if (intel->gen >= 5) { + } else if (brw->gen >= 5) { insn->bits3.sampler_gen5.binding_table_index = binding_table_index; insn->bits3.sampler_gen5.sampler = sampler; insn->bits3.sampler_gen5.msg_type = msg_type; @@ -788,7 +782,7 @@ static struct brw_instruction *brw_alu3(struct brw_compile *p, struct brw_reg src1, struct brw_reg src2) { - struct intel_context *intel = &p->brw->intel; + struct brw_context *brw = p->brw; struct brw_instruction *insn = next_insn(p, opcode); gen7_convert_mrf_to_grf(p, &dest); @@ -839,7 +833,7 @@ static struct brw_instruction 
*brw_alu3(struct brw_compile *p, insn->bits1.da3src.src2_abs = src2.abs; insn->bits1.da3src.src2_negate = src2.negate; - if (intel->gen >= 7) { + if (brw->gen >= 7) { /* Set both the source and destination types based on dest.type, * ignoring the source register types. The MAD and LRP emitters ensure * that all four types are float. The BFE and BFI2 emitters, however, @@ -927,7 +921,7 @@ void brw_##OP(struct brw_compile *p, \ brw_set_dest(p, rnd, dest); \ brw_set_src0(p, rnd, src); \ \ - if (p->brw->intel.gen < 6) { \ + if (p->brw->gen < 6) { \ /* turn on round-increments */ \ rnd->header.destreg__conditionalmod = BRW_CONDITIONAL_R; \ add = brw_ADD(p, dest, dest, brw_imm_f(1.0f)); \ @@ -1145,18 +1139,18 @@ get_inner_do_insn(struct brw_compile *p) struct brw_instruction * brw_IF(struct brw_compile *p, GLuint execute_size) { - struct intel_context *intel = &p->brw->intel; + struct brw_context *brw = p->brw; struct brw_instruction *insn; insn = next_insn(p, BRW_OPCODE_IF); /* Override the defaults for this instruction: */ - if (intel->gen < 6) { + if (brw->gen < 6) { brw_set_dest(p, insn, brw_ip_reg()); brw_set_src0(p, insn, brw_ip_reg()); brw_set_src1(p, insn, brw_imm_d(0x0)); - } else if (intel->gen == 6) { + } else if (brw->gen == 6) { brw_set_dest(p, insn, brw_imm_w(0)); insn->bits1.branch_gen6.jump_count = 0; brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D))); @@ -1264,7 +1258,7 @@ patch_IF_ELSE(struct brw_compile *p, struct brw_instruction *else_inst, struct brw_instruction *endif_inst) { - struct intel_context *intel = &p->brw->intel; + struct brw_context *brw = p->brw; /* We shouldn't be patching IF and ELSE instructions in single program flow * mode when gen < 6, because in single program flow mode on those @@ -1278,7 +1272,7 @@ patch_IF_ELSE(struct brw_compile *p, * instructions to conditional ADDs. So we do patch IF and ELSE * instructions in single program flow mode on those platforms. */ - if (intel->gen < 6) + if (brw->gen < 6) assert(!p->single_program_flow); assert(if_inst != NULL && if_inst->header.opcode == BRW_OPCODE_IF); @@ -1289,7 +1283,7 @@ patch_IF_ELSE(struct brw_compile *p, /* Jump count is for 64bit data chunk each, so one 128bit instruction * requires 2 chunks. */ - if (intel->gen >= 5) + if (brw->gen >= 5) br = 2; assert(endif_inst->header.opcode == BRW_OPCODE_ENDIF); @@ -1297,7 +1291,7 @@ patch_IF_ELSE(struct brw_compile *p, if (else_inst == NULL) { /* Patch IF -> ENDIF */ - if (intel->gen < 6) { + if (brw->gen < 6) { /* Turn it into an IFF, which means no mask stack operations for * all-false and jumping past the ENDIF. */ @@ -1305,7 +1299,7 @@ patch_IF_ELSE(struct brw_compile *p, if_inst->bits3.if_else.jump_count = br * (endif_inst - if_inst + 1); if_inst->bits3.if_else.pop_count = 0; if_inst->bits3.if_else.pad0 = 0; - } else if (intel->gen == 6) { + } else if (brw->gen == 6) { /* As of gen6, there is no IFF and IF must point to the ENDIF. 
*/ if_inst->bits1.branch_gen6.jump_count = br * (endif_inst - if_inst); } else { @@ -1316,23 +1310,23 @@ patch_IF_ELSE(struct brw_compile *p, else_inst->header.execution_size = if_inst->header.execution_size; /* Patch IF -> ELSE */ - if (intel->gen < 6) { + if (brw->gen < 6) { if_inst->bits3.if_else.jump_count = br * (else_inst - if_inst); if_inst->bits3.if_else.pop_count = 0; if_inst->bits3.if_else.pad0 = 0; - } else if (intel->gen == 6) { + } else if (brw->gen == 6) { if_inst->bits1.branch_gen6.jump_count = br * (else_inst - if_inst + 1); } /* Patch ELSE -> ENDIF */ - if (intel->gen < 6) { + if (brw->gen < 6) { /* BRW_OPCODE_ELSE pre-gen6 should point just past the * matching ENDIF. */ else_inst->bits3.if_else.jump_count = br*(endif_inst - else_inst + 1); else_inst->bits3.if_else.pop_count = 1; else_inst->bits3.if_else.pad0 = 0; - } else if (intel->gen == 6) { + } else if (brw->gen == 6) { /* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */ else_inst->bits1.branch_gen6.jump_count = br*(endif_inst - else_inst); } else { @@ -1348,16 +1342,16 @@ patch_IF_ELSE(struct brw_compile *p, void brw_ELSE(struct brw_compile *p) { - struct intel_context *intel = &p->brw->intel; + struct brw_context *brw = p->brw; struct brw_instruction *insn; insn = next_insn(p, BRW_OPCODE_ELSE); - if (intel->gen < 6) { + if (brw->gen < 6) { brw_set_dest(p, insn, brw_ip_reg()); brw_set_src0(p, insn, brw_ip_reg()); brw_set_src1(p, insn, brw_imm_d(0x0)); - } else if (intel->gen == 6) { + } else if (brw->gen == 6) { brw_set_dest(p, insn, brw_imm_w(0)); insn->bits1.branch_gen6.jump_count = 0; brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D)); @@ -1381,7 +1375,7 @@ brw_ELSE(struct brw_compile *p) void brw_ENDIF(struct brw_compile *p) { - struct intel_context *intel = &p->brw->intel; + struct brw_context *brw = p->brw; struct brw_instruction *insn = NULL; struct brw_instruction *else_inst = NULL; struct brw_instruction *if_inst = NULL; @@ -1400,7 +1394,7 @@ brw_ENDIF(struct brw_compile *p) * instructions to conditional ADDs. So we only do this trick on Gen4 and * Gen5. 
*/ - if (intel->gen < 6 && p->single_program_flow) + if (brw->gen < 6 && p->single_program_flow) emit_endif = false; /* @@ -1426,11 +1420,11 @@ brw_ENDIF(struct brw_compile *p) return; } - if (intel->gen < 6) { + if (brw->gen < 6) { brw_set_dest(p, insn, retype(brw_vec4_grf(0,0), BRW_REGISTER_TYPE_UD)); brw_set_src0(p, insn, retype(brw_vec4_grf(0,0), BRW_REGISTER_TYPE_UD)); brw_set_src1(p, insn, brw_imm_d(0x0)); - } else if (intel->gen == 6) { + } else if (brw->gen == 6) { brw_set_dest(p, insn, brw_imm_w(0)); brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D)); brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D)); @@ -1445,11 +1439,11 @@ brw_ENDIF(struct brw_compile *p) insn->header.thread_control = BRW_THREAD_SWITCH; /* Also pop item off the stack in the endif instruction: */ - if (intel->gen < 6) { + if (brw->gen < 6) { insn->bits3.if_else.jump_count = 0; insn->bits3.if_else.pop_count = 1; insn->bits3.if_else.pad0 = 0; - } else if (intel->gen == 6) { + } else if (brw->gen == 6) { insn->bits1.branch_gen6.jump_count = 2; } else { insn->bits3.break_cont.jip = 2; @@ -1459,11 +1453,11 @@ brw_ENDIF(struct brw_compile *p) struct brw_instruction *brw_BREAK(struct brw_compile *p) { - struct intel_context *intel = &p->brw->intel; + struct brw_context *brw = p->brw; struct brw_instruction *insn; insn = next_insn(p, BRW_OPCODE_BREAK); - if (intel->gen >= 6) { + if (brw->gen >= 6) { brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D)); brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D)); brw_set_src1(p, insn, brw_imm_d(0x0)); @@ -1547,9 +1541,9 @@ struct brw_instruction *gen6_HALT(struct brw_compile *p) */ struct brw_instruction *brw_DO(struct brw_compile *p, GLuint execute_size) { - struct intel_context *intel = &p->brw->intel; + struct brw_context *brw = p->brw; - if (intel->gen >= 6 || p->single_program_flow) { + if (brw->gen >= 6 || p->single_program_flow) { push_loop_stack(p, &p->store[p->nr_insn]); return &p->store[p->nr_insn]; } else { @@ -1583,10 +1577,10 @@ struct brw_instruction *brw_DO(struct brw_compile *p, GLuint execute_size) static void brw_patch_break_cont(struct brw_compile *p, struct brw_instruction *while_inst) { - struct intel_context *intel = &p->brw->intel; + struct brw_context *brw = p->brw; struct brw_instruction *do_inst = get_inner_do_insn(p); struct brw_instruction *inst; - int br = (intel->gen == 5) ? 2 : 1; + int br = (brw->gen == 5) ? 
2 : 1; for (inst = while_inst - 1; inst != do_inst; inst--) { /* If the jump count is != 0, that means that this instruction has already @@ -1605,14 +1599,14 @@ brw_patch_break_cont(struct brw_compile *p, struct brw_instruction *while_inst) struct brw_instruction *brw_WHILE(struct brw_compile *p) { - struct intel_context *intel = &p->brw->intel; + struct brw_context *brw = p->brw; struct brw_instruction *insn, *do_insn; GLuint br = 1; - if (intel->gen >= 5) + if (brw->gen >= 5) br = 2; - if (intel->gen >= 7) { + if (brw->gen >= 7) { insn = next_insn(p, BRW_OPCODE_WHILE); do_insn = get_inner_do_insn(p); @@ -1622,7 +1616,7 @@ struct brw_instruction *brw_WHILE(struct brw_compile *p) insn->bits3.break_cont.jip = br * (do_insn - insn); insn->header.execution_size = BRW_EXECUTE_8; - } else if (intel->gen == 6) { + } else if (brw->gen == 6) { insn = next_insn(p, BRW_OPCODE_WHILE); do_insn = get_inner_do_insn(p); @@ -1672,11 +1666,11 @@ struct brw_instruction *brw_WHILE(struct brw_compile *p) */ void brw_land_fwd_jump(struct brw_compile *p, int jmp_insn_idx) { - struct intel_context *intel = &p->brw->intel; + struct brw_context *brw = p->brw; struct brw_instruction *jmp_insn = &p->store[jmp_insn_idx]; GLuint jmpi = 1; - if (intel->gen >= 5) + if (brw->gen >= 5) jmpi = 2; assert(jmp_insn->header.opcode == BRW_OPCODE_JMPI); @@ -1697,7 +1691,7 @@ void brw_CMP(struct brw_compile *p, struct brw_reg src0, struct brw_reg src1) { - struct intel_context *intel = &p->brw->intel; + struct brw_context *brw = p->brw; struct brw_instruction *insn = next_insn(p, BRW_OPCODE_CMP); insn->header.destreg__conditionalmod = conditional; @@ -1725,7 +1719,7 @@ void brw_CMP(struct brw_compile *p, * It also applies to other Gen7 platforms (IVB, BYT) even though it isn't * mentioned on their work-arounds pages. */ - if (intel->gen == 7) { + if (brw->gen == 7) { if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE && dest.nr == BRW_ARF_NULL) { insn->header.thread_control = BRW_THREAD_SWITCH; @@ -1763,21 +1757,21 @@ void brw_math( struct brw_compile *p, GLuint data_type, GLuint precision ) { - struct intel_context *intel = &p->brw->intel; + struct brw_context *brw = p->brw; - if (intel->gen >= 6) { + if (brw->gen >= 6) { struct brw_instruction *insn = next_insn(p, BRW_OPCODE_MATH); assert(dest.file == BRW_GENERAL_REGISTER_FILE || - (intel->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE)); + (brw->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE)); assert(src.file == BRW_GENERAL_REGISTER_FILE); assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1); - if (intel->gen == 6) + if (brw->gen == 6) assert(src.hstride == BRW_HORIZONTAL_STRIDE_1); /* Source modifiers are ignored for extended math instructions on Gen6. 
*/ - if (intel->gen == 6) { + if (brw->gen == 6) { assert(!src.negate); assert(!src.abs); } @@ -1826,20 +1820,16 @@ void brw_math2(struct brw_compile *p, struct brw_reg src0, struct brw_reg src1) { - struct intel_context *intel = &p->brw->intel; + struct brw_context *brw = p->brw; struct brw_instruction *insn = next_insn(p, BRW_OPCODE_MATH); - assert(intel->gen >= 6); - (void) intel; - - assert(dest.file == BRW_GENERAL_REGISTER_FILE || - (intel->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE)); + (brw->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE)); assert(src0.file == BRW_GENERAL_REGISTER_FILE); assert(src1.file == BRW_GENERAL_REGISTER_FILE); assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1); - if (intel->gen == 6) { + if (brw->gen == 6) { assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1); assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1); } @@ -1855,7 +1845,7 @@ void brw_math2(struct brw_compile *p, } /* Source modifiers are ignored for extended math instructions on Gen6. */ - if (intel->gen == 6) { + if (brw->gen == 6) { assert(!src0.negate); assert(!src0.abs); assert(!src1.negate); @@ -1885,11 +1875,11 @@ void brw_oword_block_write_scratch(struct brw_compile *p, int num_regs, GLuint offset) { - struct intel_context *intel = &p->brw->intel; + struct brw_context *brw = p->brw; uint32_t msg_control, msg_type; int mlen; - if (intel->gen >= 6) + if (brw->gen >= 6) offset /= 16; mrf = retype(mrf, BRW_REGISTER_TYPE_UD); @@ -1948,7 +1938,7 @@ void brw_oword_block_write_scratch(struct brw_compile *p, * protection. Our use of DP writes is all about register * spilling within a thread. */ - if (intel->gen >= 6) { + if (brw->gen >= 6) { dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW); send_commit_msg = 0; } else { @@ -1957,13 +1947,13 @@ void brw_oword_block_write_scratch(struct brw_compile *p, } brw_set_dest(p, insn, dest); - if (intel->gen >= 6) { + if (brw->gen >= 6) { brw_set_src0(p, insn, mrf); } else { brw_set_src0(p, insn, brw_null_reg()); } - if (intel->gen >= 6) + if (brw->gen >= 6) msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE; else msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE; @@ -1997,11 +1987,11 @@ brw_oword_block_read_scratch(struct brw_compile *p, int num_regs, GLuint offset) { - struct intel_context *intel = &p->brw->intel; + struct brw_context *brw = p->brw; uint32_t msg_control; int rlen; - if (intel->gen >= 6) + if (brw->gen >= 6) offset /= 16; mrf = retype(mrf, BRW_REGISTER_TYPE_UD); @@ -2040,7 +2030,7 @@ brw_oword_block_read_scratch(struct brw_compile *p, insn->header.destreg__conditionalmod = mrf.nr; brw_set_dest(p, insn, dest); /* UW? */ - if (intel->gen >= 6) { + if (brw->gen >= 6) { brw_set_src0(p, insn, mrf); } else { brw_set_src0(p, insn, brw_null_reg()); @@ -2069,10 +2059,10 @@ void brw_oword_block_read(struct brw_compile *p, uint32_t offset, uint32_t bind_table_index) { - struct intel_context *intel = &p->brw->intel; + struct brw_context *brw = p->brw; /* On newer hardware, offset is in units of owords. 
*/ - if (intel->gen >= 6) + if (brw->gen >= 6) offset /= 16; mrf = retype(mrf, BRW_REGISTER_TYPE_UD); @@ -2098,7 +2088,7 @@ void brw_oword_block_read(struct brw_compile *p, dest = retype(vec8(dest), BRW_REGISTER_TYPE_UW); brw_set_dest(p, insn, dest); - if (intel->gen >= 6) { + if (brw->gen >= 6) { brw_set_src0(p, insn, mrf); } else { brw_set_src0(p, insn, brw_null_reg()); @@ -2129,7 +2119,7 @@ void brw_fb_WRITE(struct brw_compile *p, bool eot, bool header_present) { - struct intel_context *intel = &p->brw->intel; + struct brw_context *brw = p->brw; struct brw_instruction *insn; GLuint msg_type; struct brw_reg dest; @@ -2139,7 +2129,7 @@ void brw_fb_WRITE(struct brw_compile *p, else dest = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW); - if (intel->gen >= 6) { + if (brw->gen >= 6) { insn = next_insn(p, BRW_OPCODE_SENDC); } else { insn = next_insn(p, BRW_OPCODE_SEND); @@ -2148,7 +2138,7 @@ void brw_fb_WRITE(struct brw_compile *p, insn->header.predicate_control = 0; insn->header.compression_control = BRW_COMPRESSION_NONE; - if (intel->gen >= 6) { + if (brw->gen >= 6) { /* headerless version, just submit color payload */ src0 = brw_message_reg(msg_reg_nr); @@ -2193,7 +2183,7 @@ void brw_SAMPLE(struct brw_compile *p, GLuint simd_mode, GLuint return_format) { - struct intel_context *intel = &p->brw->intel; + struct brw_context *brw = p->brw; struct brw_instruction *insn; gen6_resolve_implied_move(p, &src0, msg_reg_nr); @@ -2201,7 +2191,7 @@ void brw_SAMPLE(struct brw_compile *p, insn = next_insn(p, BRW_OPCODE_SEND); insn->header.predicate_control = 0; /* XXX */ insn->header.compression_control = BRW_COMPRESSION_NONE; - if (intel->gen < 6) + if (brw->gen < 6) insn->header.destreg__conditionalmod = msg_reg_nr; brw_set_dest(p, insn, dest); @@ -2234,12 +2224,12 @@ void brw_urb_WRITE(struct brw_compile *p, GLuint offset, GLuint swizzle) { - struct intel_context *intel = &p->brw->intel; + struct brw_context *brw = p->brw; struct brw_instruction *insn; gen6_resolve_implied_move(p, &src0, msg_reg_nr); - if (intel->gen == 7) { + if (brw->gen == 7) { /* Enable Channel Masks in the URB_WRITE_HWORD message header */ brw_push_insn_state(p); brw_set_access_mode(p, BRW_ALIGN_1); @@ -2259,7 +2249,7 @@ void brw_urb_WRITE(struct brw_compile *p, brw_set_src0(p, insn, src0); brw_set_src1(p, insn, brw_imm_d(0)); - if (intel->gen < 6) + if (brw->gen < 6) insn->header.destreg__conditionalmod = msg_reg_nr; brw_set_urb_message(p, @@ -2313,7 +2303,7 @@ brw_find_next_block_end(struct brw_compile *p, int start) static int brw_find_loop_end(struct brw_compile *p, int start) { - struct intel_context *intel = &p->brw->intel; + struct brw_context *brw = p->brw; int ip; int scale = 8; void *store = p->store; @@ -2325,7 +2315,7 @@ brw_find_loop_end(struct brw_compile *p, int start) struct brw_instruction *insn = store + ip; if (insn->header.opcode == BRW_OPCODE_WHILE) { - int jip = intel->gen == 6 ? insn->bits1.branch_gen6.jump_count + int jip = brw->gen == 6 ? 
insn->bits1.branch_gen6.jump_count : insn->bits3.break_cont.jip; if (ip + jip * scale <= start) return ip; @@ -2341,12 +2331,12 @@ brw_find_loop_end(struct brw_compile *p, int start) void brw_set_uip_jip(struct brw_compile *p) { - struct intel_context *intel = &p->brw->intel; + struct brw_context *brw = p->brw; int ip; int scale = 8; void *store = p->store; - if (intel->gen < 6) + if (brw->gen < 6) return; for (ip = 0; ip < p->next_insn_offset; ip = next_ip(p, ip)) { @@ -2368,7 +2358,7 @@ brw_set_uip_jip(struct brw_compile *p) /* Gen7 UIP points to WHILE; Gen6 points just after it */ insn->bits3.break_cont.uip = (brw_find_loop_end(p, ip) - ip + - (intel->gen == 6 ? 16 : 0)) / scale; + (brw->gen == 6 ? 16 : 0)) / scale; break; case BRW_OPCODE_CONTINUE: assert(block_end_ip != 0); @@ -2419,7 +2409,7 @@ void brw_ff_sync(struct brw_compile *p, GLuint response_length, bool eot) { - struct intel_context *intel = &p->brw->intel; + struct brw_context *brw = p->brw; struct brw_instruction *insn; gen6_resolve_implied_move(p, &src0, msg_reg_nr); @@ -2429,7 +2419,7 @@ void brw_ff_sync(struct brw_compile *p, brw_set_src0(p, insn, src0); brw_set_src1(p, insn, brw_imm_d(0)); - if (intel->gen < 6) + if (brw->gen < 6) insn->header.destreg__conditionalmod = msg_reg_nr; brw_set_ff_sync_message(p, @@ -2499,8 +2489,7 @@ void brw_shader_time_add(struct brw_compile *p, uint32_t surf_index) { struct brw_context *brw = p->brw; - struct intel_context *intel = &p->brw->intel; - assert(intel->gen >= 7); + assert(brw->gen >= 7); brw_push_insn_state(p); brw_set_access_mode(p, BRW_ALIGN_1); diff --git a/src/mesa/drivers/dri/i965/brw_fs.cpp b/src/mesa/drivers/dri/i965/brw_fs.cpp index 63e4b5a7ec8..afd29deaede 100644 --- a/src/mesa/drivers/dri/i965/brw_fs.cpp +++ b/src/mesa/drivers/dri/i965/brw_fs.cpp @@ -193,7 +193,7 @@ fs_visitor::IF(uint32_t predicate) fs_inst * fs_visitor::IF(fs_reg src0, fs_reg src1, uint32_t condition) { - assert(intel->gen >= 6); + assert(brw->gen >= 6); fs_inst *inst = new(mem_ctx) fs_inst(BRW_OPCODE_IF, reg_null_d, src0, src1); inst->conditional_mod = condition; @@ -222,7 +222,7 @@ fs_visitor::CMP(fs_reg dst, fs_reg src0, fs_reg src1, uint32_t condition) * mostly work out for float-interpreted-as-int since our comparisons are * for >0, =0, <0. */ - if (intel->gen == 4) { + if (brw->gen == 4) { dst.type = src0.type; if (dst.file == HW_REG) dst.fixed_hw_reg.type = dst.type; @@ -261,7 +261,7 @@ fs_visitor::VARYING_PULL_CONSTANT_LOAD(fs_reg dst, fs_reg surf_index, varying_offset, const_offset & ~3)); int scale = 1; - if (intel->gen == 4 && dispatch_width == 8) { + if (brw->gen == 4 && dispatch_width == 8) { /* Pre-gen5, we can either use a SIMD8 message that requires (header, * u, v, r) as parameters, or we can just use the SIMD16 message * consisting of (header, u). 
We choose the second, at the cost of a @@ -271,7 +271,7 @@ fs_visitor::VARYING_PULL_CONSTANT_LOAD(fs_reg dst, fs_reg surf_index, } enum opcode op; - if (intel->gen >= 7) + if (brw->gen >= 7) op = FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7; else op = FS_OPCODE_VARYING_PULL_CONSTANT_LOAD; @@ -280,10 +280,10 @@ fs_visitor::VARYING_PULL_CONSTANT_LOAD(fs_reg dst, fs_reg surf_index, inst->regs_written = 4 * scale; instructions.push_tail(inst); - if (intel->gen < 7) { + if (brw->gen < 7) { inst->base_mrf = 13; inst->header_present = true; - if (intel->gen == 4) + if (brw->gen == 4) inst->mlen = 3; else inst->mlen = 1 + dispatch_width / 8; @@ -357,7 +357,7 @@ fs_inst::is_send_from_grf() bool fs_visitor::can_do_source_mods(fs_inst *inst) { - if (intel->gen == 6 && inst->is_math()) + if (brw->gen == 6 && inst->is_math()) return false; if (inst->is_send_from_grf()) @@ -493,7 +493,7 @@ fs_visitor::type_size(const struct glsl_type *type) fs_reg fs_visitor::get_timestamp() { - assert(intel->gen >= 7); + assert(brw->gen >= 7); fs_reg ts = fs_reg(retype(brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE, BRW_ARF_TIMESTAMP, @@ -930,7 +930,7 @@ fs_visitor::emit_fragcoord_interpolation(ir_variable *ir) wpos.reg_offset++; /* gl_FragCoord.z */ - if (intel->gen >= 6) { + if (brw->gen >= 6) { emit(MOV(wpos, fs_reg(brw_vec8_grf(c->source_depth_reg, 0)))); } else { emit(FS_OPCODE_LINTERP, wpos, @@ -952,7 +952,7 @@ fs_visitor::emit_linterp(const fs_reg &attr, const fs_reg &interp, bool is_centroid) { brw_wm_barycentric_interp_mode barycoord_mode; - if (intel->gen >= 6) { + if (brw->gen >= 6) { if (is_centroid) { if (interpolation_mode == INTERP_QUALIFIER_SMOOTH) barycoord_mode = BRW_WM_PERSPECTIVE_CENTROID_BARYCENTRIC; @@ -1048,7 +1048,7 @@ fs_visitor::emit_general_interpolation(ir_variable *ir) inst->predicate = BRW_PREDICATE_NORMAL; inst->predicate_inverse = true; } - if (intel->gen < 6) { + if (brw->gen < 6) { emit(BRW_OPCODE_MUL, attr, attr, this->pixel_w); } attr.reg_offset++; @@ -1068,7 +1068,7 @@ fs_visitor::emit_frontfacing_interpolation(ir_variable *ir) fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type); /* The frontfacing comes in as a bit in the thread payload. */ - if (intel->gen >= 6) { + if (brw->gen >= 6) { emit(BRW_OPCODE_ASR, *reg, fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_D)), fs_reg(15)); @@ -1097,14 +1097,14 @@ fs_visitor::fix_math_operand(fs_reg src) * The hardware ignores source modifiers (negate and abs) on math * instructions, so we also move to a temp to set those up. */ - if (intel->gen == 6 && src.file != UNIFORM && src.file != IMM && + if (brw->gen == 6 && src.file != UNIFORM && src.file != IMM && !src.abs && !src.negate) return src; /* Gen7 relaxes most of the above restrictions, but still can't use IMM * operands to math */ - if (intel->gen >= 7 && src.file != IMM) + if (brw->gen >= 7 && src.file != IMM) return src; fs_reg expanded = fs_reg(this, glsl_type::float_type); @@ -1138,12 +1138,12 @@ fs_visitor::emit_math(enum opcode opcode, fs_reg dst, fs_reg src) * Gen 6 hardware ignores source modifiers (negate and abs) on math * instructions, so we also move to a temp to set those up. 
*/ - if (intel->gen >= 6) + if (brw->gen >= 6) src = fix_math_operand(src); fs_inst *inst = emit(opcode, dst, src); - if (intel->gen < 6) { + if (brw->gen < 6) { inst->base_mrf = 2; inst->mlen = dispatch_width / 8; } @@ -1160,7 +1160,7 @@ fs_visitor::emit_math(enum opcode opcode, fs_reg dst, fs_reg src0, fs_reg src1) switch (opcode) { case SHADER_OPCODE_INT_QUOTIENT: case SHADER_OPCODE_INT_REMAINDER: - if (intel->gen >= 7 && dispatch_width == 16) + if (brw->gen >= 7 && dispatch_width == 16) fail("16-wide INTDIV unsupported\n"); break; case SHADER_OPCODE_POW: @@ -1170,7 +1170,7 @@ fs_visitor::emit_math(enum opcode opcode, fs_reg dst, fs_reg src0, fs_reg src1) return NULL; } - if (intel->gen >= 6) { + if (brw->gen >= 6) { src0 = fix_math_operand(src0); src1 = fix_math_operand(src1); @@ -1235,7 +1235,7 @@ fs_visitor::calculate_urb_setup() int urb_next = 0; /* Figure out where each of the incoming setup attributes lands. */ - if (intel->gen >= 6) { + if (brw->gen >= 6) { for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) { if (fp->Base.InputsRead & BITFIELD64_BIT(i)) { urb_setup[i] = urb_next++; @@ -2248,7 +2248,7 @@ fs_visitor::compute_to_mrf() if (scan_inst->mlen) break; - if (intel->gen == 6) { + if (brw->gen == 6) { /* gen6 math instructions must have the destination be * GRF, so no compute-to-MRF for them. */ @@ -2599,7 +2599,7 @@ fs_visitor::insert_gen4_post_send_dependency_workarounds(fs_inst *inst) void fs_visitor::insert_gen4_send_dependency_workarounds() { - if (intel->gen != 4 || brw->is_g4x) + if (brw->gen != 4 || brw->is_g4x) return; /* Note that we're done with register allocation, so GRF fs_regs always @@ -2641,7 +2641,7 @@ fs_visitor::lower_uniform_pull_constant_loads() if (inst->opcode != FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD) continue; - if (intel->gen >= 7) { + if (brw->gen >= 7) { /* The offset arg before was a vec4-aligned byte offset. We need to * turn it into a dword offset. */ @@ -2701,7 +2701,7 @@ fs_visitor::dump_instruction(backend_instruction *be_inst) if (inst->conditional_mod) { printf(".cmod"); if (!inst->predicate && - (intel->gen < 5 || (inst->opcode != BRW_OPCODE_SEL && + (brw->gen < 5 || (inst->opcode != BRW_OPCODE_SEL && inst->opcode != BRW_OPCODE_IF && inst->opcode != BRW_OPCODE_WHILE))) { printf(".f0.%d\n", inst->flag_subreg); @@ -2826,7 +2826,7 @@ fs_visitor::setup_payload_gen6() (fp->Base.InputsRead & (1 << VARYING_SLOT_POS)) != 0; unsigned barycentric_interp_modes = c->prog_data.barycentric_interp_modes; - assert(intel->gen >= 6); + assert(brw->gen >= 6); /* R0-1: masks, pixel X/Y coordinates. 
*/ c->nr_payload_regs = 2; @@ -2882,7 +2882,7 @@ fs_visitor::run() sanity_param_count = fp->Base.Parameters->NumParameters; uint32_t orig_nr_params = c->prog_data.nr_params; - if (intel->gen >= 6) + if (brw->gen >= 6) setup_payload_gen6(); else setup_payload_gen4(); @@ -2894,7 +2894,7 @@ fs_visitor::run() emit_shader_time_begin(); calculate_urb_setup(); - if (intel->gen < 6) + if (brw->gen < 6) emit_interpolation_setup_gen4(); else emit_interpolation_setup_gen6(); @@ -3016,7 +3016,6 @@ brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c, struct gl_shader_program *prog, unsigned *final_assembly_size) { - struct intel_context *intel = &brw->intel; bool start_busy = false; float start_time = 0; @@ -3060,7 +3059,7 @@ brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c, exec_list *simd16_instructions = NULL; fs_visitor v2(brw, c, prog, fp, 16); bool no16 = INTEL_DEBUG & DEBUG_NO16; - if (intel->gen >= 5 && c->prog_data.nr_pull_params == 0 && likely(!no16)) { + if (brw->gen >= 5 && c->prog_data.nr_pull_params == 0 && likely(!no16)) { v2.import_uniforms(&v); if (!v2.run()) { perf_debug("16-wide shader failed to compile, falling back to " @@ -3095,7 +3094,6 @@ bool brw_fs_precompile(struct gl_context *ctx, struct gl_shader_program *prog) { struct brw_context *brw = brw_context(ctx); - struct intel_context *intel = &brw->intel; struct brw_wm_prog_key key; if (!prog->_LinkedShaders[MESA_SHADER_FRAGMENT]) @@ -3108,7 +3106,7 @@ brw_fs_precompile(struct gl_context *ctx, struct gl_shader_program *prog) memset(&key, 0, sizeof(key)); - if (intel->gen < 6) { + if (brw->gen < 6) { if (fp->UsesKill) key.iz_lookup |= IZ_PS_KILL_ALPHATEST_BIT; @@ -3120,14 +3118,14 @@ brw_fs_precompile(struct gl_context *ctx, struct gl_shader_program *prog) key.iz_lookup |= IZ_DEPTH_WRITE_ENABLE_BIT; } - if (intel->gen < 6) + if (brw->gen < 6) key.input_slots_valid |= BITFIELD64_BIT(VARYING_SLOT_POS); for (int i = 0; i < VARYING_SLOT_MAX; i++) { if (!(fp->Base.InputsRead & BITFIELD64_BIT(i))) continue; - if (intel->gen < 6) { + if (brw->gen < 6) { if (_mesa_varying_slot_in_fs((gl_varying_slot) i)) key.input_slots_valid |= BITFIELD64_BIT(i); } diff --git a/src/mesa/drivers/dri/i965/brw_fs_emit.cpp b/src/mesa/drivers/dri/i965/brw_fs_emit.cpp index 5fe231df2b4..9851a69ad65 100644 --- a/src/mesa/drivers/dri/i965/brw_fs_emit.cpp +++ b/src/mesa/drivers/dri/i965/brw_fs_emit.cpp @@ -62,7 +62,7 @@ fs_generator::~fs_generator() void fs_generator::patch_discard_jumps_to_fb_writes() { - if (intel->gen < 6 || this->discard_halt_patches.is_empty()) + if (brw->gen < 6 || this->discard_halt_patches.is_empty()) return; /* There is a somewhat strange undocumented requirement of using @@ -111,7 +111,7 @@ fs_generator::generate_fb_write(fs_inst *inst) if (fp->UsesKill) { struct brw_reg pixel_mask; - if (intel->gen >= 6) + if (brw->gen >= 6) pixel_mask = retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UW); else pixel_mask = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW); @@ -120,7 +120,7 @@ fs_generator::generate_fb_write(fs_inst *inst) } if (inst->header_present) { - if (intel->gen >= 6) { + if (brw->gen >= 6) { brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED); brw_MOV(p, retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD), @@ -222,7 +222,7 @@ fs_generator::generate_linterp(fs_inst *inst, if (brw->has_pln && delta_y.nr == delta_x.nr + 1 && - (intel->gen >= 6 || (delta_x.nr & 1) == 0)) { + (brw->gen >= 6 || (delta_x.nr & 1) == 0)) { brw_PLN(p, dst, interp, delta_x); } else { brw_LINE(p, brw_null_reg(), 
interp, delta_x); @@ -374,7 +374,7 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src if (dispatch_width == 16) simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16; - if (intel->gen >= 5) { + if (brw->gen >= 5) { switch (inst->opcode) { case SHADER_OPCODE_TEX: if (inst->shadow_compare) { @@ -413,7 +413,7 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD; break; case SHADER_OPCODE_TXF_MS: - if (intel->gen >= 7) + if (brw->gen >= 7) msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS; else msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD; @@ -596,7 +596,7 @@ fs_generator::generate_ddy(fs_inst *inst, struct brw_reg dst, struct brw_reg src void fs_generator::generate_discard_jump(fs_inst *inst) { - assert(intel->gen >= 6); + assert(brw->gen >= 6); /* This HALT will be patched up at FB write time to point UIP at the end of * the program, and at brw_uip_jip() JIP will be set to the end of the @@ -697,7 +697,7 @@ fs_generator::generate_varying_pull_constant_load(fs_inst *inst, struct brw_reg index, struct brw_reg offset) { - assert(intel->gen < 7); /* Should use the gen7 variant. */ + assert(brw->gen < 7); /* Should use the gen7 variant. */ assert(inst->header_present); assert(inst->mlen); @@ -714,7 +714,7 @@ fs_generator::generate_varying_pull_constant_load(fs_inst *inst, rlen = 4; } - if (intel->gen >= 5) + if (brw->gen >= 5) msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD; else { /* We always use the SIMD16 message so that we only have to load U, and @@ -738,7 +738,7 @@ fs_generator::generate_varying_pull_constant_load(fs_inst *inst, send->header.compression_control = BRW_COMPRESSION_NONE; brw_set_dest(p, send, dst); brw_set_src0(p, send, header); - if (intel->gen < 6) + if (brw->gen < 6) send->header.destreg__conditionalmod = inst->base_mrf; /* Our surface is set up as floats, regardless of what actual data is @@ -762,7 +762,7 @@ fs_generator::generate_varying_pull_constant_load_gen7(fs_inst *inst, struct brw_reg index, struct brw_reg offset) { - assert(intel->gen >= 7); + assert(brw->gen >= 7); /* Varying-offset pull constant loads are treated as a normal expression on * gen7, so the fact that it's a send message is hidden at the IR level. 
*/ @@ -810,7 +810,7 @@ fs_generator::generate_mov_dispatch_to_flags(fs_inst *inst) struct brw_reg flags = brw_flag_reg(0, inst->flag_subreg); struct brw_reg dispatch_mask; - if (intel->gen >= 6) + if (brw->gen >= 6) dispatch_mask = retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UW); else dispatch_mask = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW); @@ -946,7 +946,7 @@ fs_generator::generate_pack_half_2x16_split(fs_inst *inst, struct brw_reg x, struct brw_reg y) { - assert(intel->gen >= 7); + assert(brw->gen >= 7); assert(dst.type == BRW_REGISTER_TYPE_UD); assert(x.type == BRW_REGISTER_TYPE_F); assert(y.type == BRW_REGISTER_TYPE_F); @@ -984,7 +984,7 @@ fs_generator::generate_unpack_half_2x16_split(fs_inst *inst, struct brw_reg dst, struct brw_reg src) { - assert(intel->gen >= 7); + assert(brw->gen >= 7); assert(dst.type == BRW_REGISTER_TYPE_F); assert(src.type == BRW_REGISTER_TYPE_UD); @@ -1014,7 +1014,7 @@ fs_generator::generate_shader_time_add(fs_inst *inst, struct brw_reg offset, struct brw_reg value) { - assert(intel->gen >= 7); + assert(brw->gen >= 7); brw_push_insn_state(p); brw_set_mask_control(p, true); @@ -1281,7 +1281,7 @@ fs_generator::generate_code(exec_list *instructions) case BRW_OPCODE_IF: if (inst->src[0].file != BAD_FILE) { /* The instruction has an embedded compare (only allowed on gen6) */ - assert(intel->gen == 6); + assert(brw->gen == 6); gen6_IF(p, inst->conditional_mod, src[0], src[1]); } else { brw_IF(p, dispatch_width == 16 ? BRW_EXECUTE_16 : BRW_EXECUTE_8); @@ -1305,7 +1305,7 @@ fs_generator::generate_code(exec_list *instructions) break; case BRW_OPCODE_CONTINUE: /* FINISHME: We need to write the loop instruction support still. */ - if (intel->gen >= 6) + if (brw->gen >= 6) gen6_CONT(p); else brw_CONT(p); @@ -1323,11 +1323,11 @@ fs_generator::generate_code(exec_list *instructions) case SHADER_OPCODE_LOG2: case SHADER_OPCODE_SIN: case SHADER_OPCODE_COS: - if (intel->gen >= 7) { + if (brw->gen >= 7) { generate_math1_gen7(inst, dst, src[0]); - } else if (intel->gen == 6) { + } else if (brw->gen == 6) { generate_math1_gen6(inst, dst, src[0]); - } else if (intel->gen == 5 || brw->is_g4x) { + } else if (brw->gen == 5 || brw->is_g4x) { generate_math_g45(inst, dst, src[0]); } else { generate_math_gen4(inst, dst, src[0]); @@ -1336,9 +1336,9 @@ fs_generator::generate_code(exec_list *instructions) case SHADER_OPCODE_INT_QUOTIENT: case SHADER_OPCODE_INT_REMAINDER: case SHADER_OPCODE_POW: - if (intel->gen >= 7) { + if (brw->gen >= 7) { generate_math2_gen7(inst, dst, src[0], src[1]); - } else if (intel->gen == 6) { + } else if (brw->gen == 6) { generate_math2_gen6(inst, dst, src[0], src[1]); } else { generate_math_gen4(inst, dst, src[0]); diff --git a/src/mesa/drivers/dri/i965/brw_fs_fp.cpp b/src/mesa/drivers/dri/i965/brw_fs_fp.cpp index 5f92955bc7b..68531e3b2fa 100644 --- a/src/mesa/drivers/dri/i965/brw_fs_fp.cpp +++ b/src/mesa/drivers/dri/i965/brw_fs_fp.cpp @@ -500,9 +500,9 @@ fs_visitor::emit_fragment_program_code() } fs_inst *inst; - if (intel->gen >= 7) { + if (brw->gen >= 7) { inst = emit_texture_gen7(ir, dst, coordinate, shadow_c, lod, dpdy, sample_index); - } else if (intel->gen >= 5) { + } else if (brw->gen >= 5) { inst = emit_texture_gen5(ir, dst, coordinate, shadow_c, lod, dpdy, sample_index); } else { inst = emit_texture_gen4(ir, dst, coordinate, shadow_c, lod, dpdy); diff --git a/src/mesa/drivers/dri/i965/brw_fs_reg_allocate.cpp b/src/mesa/drivers/dri/i965/brw_fs_reg_allocate.cpp index 33d4dcef4f5..b9102d97e1f 100644 --- 
a/src/mesa/drivers/dri/i965/brw_fs_reg_allocate.cpp +++ b/src/mesa/drivers/dri/i965/brw_fs_reg_allocate.cpp @@ -73,7 +73,6 @@ fs_visitor::assign_regs_trivial() static void brw_alloc_reg_set(struct brw_context *brw, int reg_width) { - struct intel_context *intel = &brw->intel; int base_reg_count = BRW_MAX_GRF / reg_width; int index = reg_width - 1; @@ -107,7 +106,7 @@ brw_alloc_reg_set(struct brw_context *brw, int reg_width) uint8_t *ra_reg_to_grf = ralloc_array(brw, uint8_t, ra_reg_count); struct ra_regs *regs = ra_alloc_reg_set(brw, ra_reg_count); - if (intel->gen >= 6) + if (brw->gen >= 6) ra_set_allocate_round_robin(regs); int *classes = ralloc_array(brw, int, class_count); int aligned_pairs_class = -1; @@ -147,7 +146,7 @@ brw_alloc_reg_set(struct brw_context *brw, int reg_width) /* Add a special class for aligned pairs, which we'll put delta_x/y * in on gen5 so that we can do PLN. */ - if (brw->has_pln && reg_width == 1 && intel->gen < 6) { + if (brw->has_pln && reg_width == 1 && brw->gen < 6) { aligned_pairs_class = ra_alloc_reg_class(regs); for (int i = 0; i < pairs_reg_count; i++) { @@ -285,7 +284,7 @@ fs_visitor::setup_payload_interference(struct ra_graph *g, * two in the arguments (1 node). Pre-gen6, the deltas are computed * in normal VGRFs. */ - if (intel->gen >= 6) { + if (brw->gen >= 6) { int delta_x_arg = 0; if (inst->src[delta_x_arg].file == HW_REG && inst->src[delta_x_arg].fixed_hw_reg.file == @@ -406,7 +405,7 @@ fs_visitor::assign_regs() int first_payload_node = node_count; node_count += payload_node_count; int first_mrf_hack_node = node_count; - if (intel->gen >= 7) + if (brw->gen >= 7) node_count += BRW_MAX_GRF - GEN7_MRF_HACK_START; struct ra_graph *g = ra_alloc_interference_graph(brw->wm.reg_sets[rsi].regs, node_count); @@ -448,7 +447,7 @@ fs_visitor::assign_regs() } setup_payload_interference(g, payload_node_count, first_payload_node); - if (intel->gen >= 7) + if (brw->gen >= 7) setup_mrf_hack_interference(g, first_mrf_hack_node); if (!ra_allocate_no_spills(g)) { diff --git a/src/mesa/drivers/dri/i965/brw_fs_visitor.cpp b/src/mesa/drivers/dri/i965/brw_fs_visitor.cpp index 3ba4ec84bbf..387a91a58ed 100644 --- a/src/mesa/drivers/dri/i965/brw_fs_visitor.cpp +++ b/src/mesa/drivers/dri/i965/brw_fs_visitor.cpp @@ -200,7 +200,7 @@ fs_visitor::visit(ir_dereference_array *ir) void fs_visitor::emit_lrp(fs_reg dst, fs_reg x, fs_reg y, fs_reg a) { - if (intel->gen < 6 || + if (brw->gen < 6 || !x.is_valid_3src() || !y.is_valid_3src() || !a.is_valid_3src()) { @@ -230,7 +230,7 @@ fs_visitor::emit_minmax(uint32_t conditionalmod, fs_reg dst, { fs_inst *inst; - if (intel->gen >= 6) { + if (brw->gen >= 6) { inst = emit(BRW_OPCODE_SEL, dst, src0, src1); inst->conditional_mod = conditionalmod; } else { @@ -280,7 +280,7 @@ bool fs_visitor::try_emit_mad(ir_expression *ir, int mul_arg) { /* 3-src instructions were introduced in gen6. */ - if (intel->gen < 6) + if (brw->gen < 6) return false; /* MAD can only handle floating-point data. */ @@ -429,7 +429,7 @@ fs_visitor::visit(ir_expression *ir) * FINISHME: Emit just the MUL if we know an operand is small * enough. */ - if (intel->gen >= 7 && dispatch_width == 16) + if (brw->gen >= 7 && dispatch_width == 16) fail("16-wide explicit accumulator operands unsupported\n"); struct brw_reg acc = retype(brw_acc_reg(), BRW_REGISTER_TYPE_D); @@ -1321,8 +1321,8 @@ fs_visitor::rescale_texcoord(ir_texture *ir, fs_reg coordinate, * tracking to get the scaling factor. 
*/ if (is_rect && - (intel->gen < 6 || - (intel->gen >= 6 && (c->key.tex.gl_clamp_mask[0] & (1 << sampler) || + (brw->gen < 6 || + (brw->gen >= 6 && (c->key.tex.gl_clamp_mask[0] & (1 << sampler) || c->key.tex.gl_clamp_mask[1] & (1 << sampler))))) { struct gl_program_parameter_list *params = fp->Base.Parameters; int tokens[STATE_LENGTH] = { @@ -1353,7 +1353,7 @@ fs_visitor::rescale_texcoord(ir_texture *ir, fs_reg coordinate, * texture coordinates. We use the program parameter state * tracking to get the scaling factor. */ - if (intel->gen < 6 && is_rect) { + if (brw->gen < 6 && is_rect) { fs_reg dst = fs_reg(this, ir->coordinate->type); fs_reg src = coordinate; coordinate = dst; @@ -1478,10 +1478,10 @@ fs_visitor::visit(ir_texture *ir) */ fs_reg dst = fs_reg(this, glsl_type::get_instance(ir->type->base_type, 4, 1)); - if (intel->gen >= 7) { + if (brw->gen >= 7) { inst = emit_texture_gen7(ir, dst, coordinate, shadow_comparitor, lod, lod2, sample_index); - } else if (intel->gen >= 5) { + } else if (brw->gen >= 5) { inst = emit_texture_gen5(ir, dst, coordinate, shadow_comparitor, lod, lod2, sample_index); } else { @@ -1607,7 +1607,7 @@ fs_visitor::visit(ir_discard *ir) cmp->predicate = BRW_PREDICATE_NORMAL; cmp->flag_subreg = 1; - if (intel->gen >= 6) { + if (brw->gen >= 6) { /* For performance, after a discard, jump to the end of the shader. * However, many people will do foliage by discarding based on a * texture's alpha mask, and then continue on to texture with the @@ -1722,7 +1722,7 @@ fs_visitor::emit_bool_to_cond_code(ir_rvalue *ir) goto out; case ir_unop_f2b: - if (intel->gen >= 6) { + if (brw->gen >= 6) { emit(CMP(reg_null_d, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ)); } else { inst = emit(MOV(reg_null_f, op[0])); @@ -1731,7 +1731,7 @@ fs_visitor::emit_bool_to_cond_code(ir_rvalue *ir) break; case ir_unop_i2b: - if (intel->gen >= 6) { + if (brw->gen >= 6) { emit(CMP(reg_null_d, op[0], fs_reg(0), BRW_CONDITIONAL_NZ)); } else { inst = emit(MOV(reg_null_d, op[0])); @@ -1841,7 +1841,7 @@ fs_visitor::emit_if_gen6(ir_if *ir) void fs_visitor::visit(ir_if *ir) { - if (intel->gen < 6 && dispatch_width == 16) { + if (brw->gen < 6 && dispatch_width == 16) { fail("Can't support (non-uniform) control flow on 16-wide\n"); } @@ -1850,7 +1850,7 @@ fs_visitor::visit(ir_if *ir) */ this->base_ir = ir->condition; - if (intel->gen == 6) { + if (brw->gen == 6) { emit_if_gen6(ir); } else { emit_bool_to_cond_code(ir->condition); @@ -1884,7 +1884,7 @@ fs_visitor::visit(ir_loop *ir) { fs_reg counter = reg_undef; - if (intel->gen < 6 && dispatch_width == 16) { + if (brw->gen < 6 && dispatch_width == 16) { fail("Can't support (non-uniform) control flow on 16-wide\n"); } @@ -2158,7 +2158,7 @@ fs_visitor::emit_color_write(int target, int index, int first_color_mrf) color.reg_offset += index; - if (dispatch_width == 8 || intel->gen >= 6) { + if (dispatch_width == 8 || brw->gen >= 6) { /* SIMD8 write looks like: * m + 0: r0 * m + 1: r1 @@ -2244,7 +2244,7 @@ fs_visitor::emit_fb_writes() * dispatched. This field is only required for the end-of- * thread message and on all dual-source messages." 
*/ - if (intel->gen >= 6 && + if (brw->gen >= 6 && !this->fp->UsesKill && !do_dual_src && c->key.nr_color_regions == 1) { @@ -2252,7 +2252,7 @@ fs_visitor::emit_fb_writes() } if (header_present) { - src0_alpha_to_render_target = intel->gen >= 6 && + src0_alpha_to_render_target = brw->gen >= 6 && !do_dual_src && c->key.replicate_alpha; /* m2, m3 header */ @@ -2275,7 +2275,7 @@ fs_visitor::emit_fb_writes() nr += reg_width; if (c->source_depth_to_render_target) { - if (intel->gen == 6 && dispatch_width == 16) { + if (brw->gen == 6 && dispatch_width == 16) { /* For outputting oDepth on gen6, SIMD8 writes have to be * used. This would require 8-wide moves of each half to * message regs, kind of like pre-gen5 SIMD16 FB writes. @@ -2449,7 +2449,7 @@ fs_visitor::fs_visitor(struct brw_context *brw, memset(this->outputs, 0, sizeof(this->outputs)); memset(this->output_components, 0, sizeof(this->output_components)); this->first_non_payload_grf = 0; - this->max_grf = intel->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF; + this->max_grf = brw->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF; this->current_annotation = NULL; this->base_ir = NULL; diff --git a/src/mesa/drivers/dri/i965/brw_gs.c b/src/mesa/drivers/dri/i965/brw_gs.c index f354dd9f625..1a3d1884b89 100644 --- a/src/mesa/drivers/dri/i965/brw_gs.c +++ b/src/mesa/drivers/dri/i965/brw_gs.c @@ -48,7 +48,6 @@ static void compile_gs_prog( struct brw_context *brw, struct brw_gs_prog_key *key ) { - struct intel_context *intel = &brw->intel; struct brw_gs_compile c; const GLuint *program; void *mem_ctx; @@ -73,7 +72,7 @@ static void compile_gs_prog( struct brw_context *brw, */ brw_set_mask_control(&c.func, BRW_MASK_DISABLE); - if (intel->gen >= 6) { + if (brw->gen >= 6) { unsigned num_verts; bool check_edge_flag; /* On Sandybridge, we use the GS for implementing transform feedback @@ -139,7 +138,7 @@ static void compile_gs_prog( struct brw_context *brw, printf("gs:\n"); for (i = 0; i < program_size / sizeof(struct brw_instruction); i++) brw_disasm(stdout, &((struct brw_instruction *)program)[i], - intel->gen); + brw->gen); printf("\n"); } @@ -162,7 +161,6 @@ static void populate_key( struct brw_context *brw, }; struct gl_context *ctx = &brw->intel.ctx; - struct intel_context *intel = &brw->intel; memset(key, 0, sizeof(*key)); @@ -181,10 +179,10 @@ static void populate_key( struct brw_context *brw, key->pv_first = true; } - if (intel->gen >= 7) { + if (brw->gen >= 7) { /* On Gen7 and later, we don't use GS (yet). */ key->need_gs_prog = false; - } else if (intel->gen == 6) { + } else if (brw->gen == 6) { /* On Gen6, GS is used for transform feedback. */ /* BRW_NEW_TRANSFORM_FEEDBACK */ if (_mesa_is_xfb_active_and_unpaused(ctx)) { diff --git a/src/mesa/drivers/dri/i965/brw_gs_emit.c b/src/mesa/drivers/dri/i965/brw_gs_emit.c index e85dcc190c9..6034a9df0b1 100644 --- a/src/mesa/drivers/dri/i965/brw_gs_emit.c +++ b/src/mesa/drivers/dri/i965/brw_gs_emit.c @@ -229,14 +229,14 @@ static void brw_gs_ff_sync(struct brw_gs_compile *c, int num_prim) void brw_gs_quads( struct brw_gs_compile *c, struct brw_gs_prog_key *key ) { - struct intel_context *intel = &c->func.brw->intel; + struct brw_context *brw = c->func.brw; brw_gs_alloc_regs(c, 4, false); brw_gs_initialize_header(c); /* Use polygons for correct edgeflag behaviour. 
Note that vertex 3 * is the PV for quads, but vertex 0 for polygons: */ - if (intel->gen == 5) + if (brw->gen == 5) brw_gs_ff_sync(c, 1); brw_gs_overwrite_header_dw2( c, ((_3DPRIM_POLYGON << URB_WRITE_PRIM_TYPE_SHIFT) @@ -267,12 +267,12 @@ void brw_gs_quads( struct brw_gs_compile *c, struct brw_gs_prog_key *key ) void brw_gs_quad_strip( struct brw_gs_compile *c, struct brw_gs_prog_key *key ) { - struct intel_context *intel = &c->func.brw->intel; + struct brw_context *brw = c->func.brw; brw_gs_alloc_regs(c, 4, false); brw_gs_initialize_header(c); - if (intel->gen == 5) + if (brw->gen == 5) brw_gs_ff_sync(c, 1); brw_gs_overwrite_header_dw2( c, ((_3DPRIM_POLYGON << URB_WRITE_PRIM_TYPE_SHIFT) @@ -303,12 +303,12 @@ void brw_gs_quad_strip( struct brw_gs_compile *c, struct brw_gs_prog_key *key ) void brw_gs_lines( struct brw_gs_compile *c ) { - struct intel_context *intel = &c->func.brw->intel; + struct brw_context *brw = c->func.brw; brw_gs_alloc_regs(c, 2, false); brw_gs_initialize_header(c); - if (intel->gen == 5) + if (brw->gen == 5) brw_gs_ff_sync(c, 1); brw_gs_overwrite_header_dw2( c, ((_3DPRIM_LINESTRIP << URB_WRITE_PRIM_TYPE_SHIFT) diff --git a/src/mesa/drivers/dri/i965/brw_gs_state.c b/src/mesa/drivers/dri/i965/brw_gs_state.c index b2845962175..bee0214eb65 100644 --- a/src/mesa/drivers/dri/i965/brw_gs_state.c +++ b/src/mesa/drivers/dri/i965/brw_gs_state.c @@ -38,7 +38,6 @@ static void brw_upload_gs_unit(struct brw_context *brw) { - struct intel_context *intel = &brw->intel; struct brw_gs_unit_state *gs; gs = brw_state_batch(brw, AUB_TRACE_GS_STATE, @@ -77,7 +76,7 @@ brw_upload_gs_unit(struct brw_context *brw) gs->thread4.max_threads = 0; } - if (intel->gen == 5) + if (brw->gen == 5) gs->thread4.rendering_enable = 1; if (unlikely(INTEL_DEBUG & DEBUG_STATS)) diff --git a/src/mesa/drivers/dri/i965/brw_lower_texture_gradients.cpp b/src/mesa/drivers/dri/i965/brw_lower_texture_gradients.cpp index e4e10b0d76a..1589a20488e 100644 --- a/src/mesa/drivers/dri/i965/brw_lower_texture_gradients.cpp +++ b/src/mesa/drivers/dri/i965/brw_lower_texture_gradients.cpp @@ -168,8 +168,7 @@ bool brw_lower_texture_gradients(struct brw_context *brw, struct exec_list *instructions) { - struct intel_context *intel = &brw->intel; - bool has_sample_d_c = intel->gen >= 8 || brw->is_haswell; + bool has_sample_d_c = brw->gen >= 8 || brw->is_haswell; lower_texture_grad_visitor v(has_sample_d_c); visit_list_elements(&v, instructions); diff --git a/src/mesa/drivers/dri/i965/brw_misc_state.c b/src/mesa/drivers/dri/i965/brw_misc_state.c index 37175204f03..2be9c3d385b 100644 --- a/src/mesa/drivers/dri/i965/brw_misc_state.c +++ b/src/mesa/drivers/dri/i965/brw_misc_state.c @@ -141,9 +141,7 @@ const struct brw_tracked_state gen6_binding_table_pointers = { */ static void upload_pipelined_state_pointers(struct brw_context *brw ) { - struct intel_context *intel = &brw->intel; - - if (intel->gen == 5) { + if (brw->gen == 5) { /* Need to flush before changing clip max threads for errata. */ BEGIN_BATCH(1); OUT_BATCH(MI_FLUSH); @@ -222,7 +220,7 @@ brw_depthbuffer_format(struct brw_context *brw) case MESA_FORMAT_Z32_FLOAT: return BRW_DEPTHFORMAT_D32_FLOAT; case MESA_FORMAT_X8_Z24: - if (intel->gen >= 6) { + if (brw->gen >= 6) { return BRW_DEPTHFORMAT_D24_UNORM_X8_UINT; } else { /* Use D24_UNORM_S8, not D24_UNORM_X8. @@ -385,7 +383,7 @@ brw_workaround_depthstencil_alignment(struct brw_context *brw, rebase_depth = true; /* We didn't even have intra-tile offsets before g45. 
*/ - if (intel->gen == 4 && !brw->is_g4x) { + if (brw->gen == 4 && !brw->is_g4x) { if (tile_x || tile_y) rebase_depth = true; } @@ -444,7 +442,7 @@ brw_workaround_depthstencil_alignment(struct brw_context *brw, if (stencil_tile_x & 7 || stencil_tile_y & 7) rebase_stencil = true; - if (intel->gen == 4 && !brw->is_g4x) { + if (brw->gen == 4 && !brw->is_g4x) { if (stencil_tile_x || stencil_tile_y) rebase_stencil = true; } @@ -582,7 +580,7 @@ brw_emit_depthbuffer(struct brw_context *brw) separate_stencil = stencil_mt->format == MESA_FORMAT_S8; /* Gen7 supports only separate stencil */ - assert(separate_stencil || intel->gen < 7); + assert(separate_stencil || brw->gen < 7); } /* If there's a packed depth/stencil bound to stencil only, we need to @@ -602,14 +600,14 @@ brw_emit_depthbuffer(struct brw_context *brw) * set to the same value. Gens after 7 implicitly always set * Separate_Stencil_Enable; software cannot disable it. */ - if ((intel->gen < 7 && hiz) || intel->gen >= 7) { + if ((brw->gen < 7 && hiz) || brw->gen >= 7) { assert(!_mesa_is_format_packed_depth_stencil(depth_mt->format)); } /* Prior to Gen7, if using separate stencil, hiz must be enabled. */ - assert(intel->gen >= 7 || !separate_stencil || hiz); + assert(brw->gen >= 7 || !separate_stencil || hiz); - assert(intel->gen < 6 || depth_mt->region->tiling == I915_TILING_Y); + assert(brw->gen < 6 || depth_mt->region->tiling == I915_TILING_Y); assert(!hiz || depth_mt->region->tiling == I915_TILING_Y); depthbuffer_format = brw_depthbuffer_format(brw); @@ -652,8 +650,6 @@ brw_emit_depth_stencil_hiz(struct brw_context *brw, uint32_t width, uint32_t height, uint32_t tile_x, uint32_t tile_y) { - struct intel_context *intel = &brw->intel; - /* Enable the hiz bit if we're doing separate stencil, because it and the * separate stencil bit must have the same value. From Section 2.11.5.6.1.1 * 3DSTATE_DEPTH_BUFFER, Bit 1.21 "Separate Stencil Enable": @@ -669,15 +665,15 @@ brw_emit_depth_stencil_hiz(struct brw_context *brw, /* 3DSTATE_DEPTH_BUFFER, 3DSTATE_STENCIL_BUFFER are both * non-pipelined state that will need the PIPE_CONTROL workaround. */ - if (intel->gen == 6) { + if (brw->gen == 6) { intel_emit_post_sync_nonzero_flush(brw); intel_emit_depth_stall_flushes(brw); } unsigned int len; - if (intel->gen >= 6) + if (brw->gen >= 6) len = 7; - else if (brw->is_g4x || intel->gen == 5) + else if (brw->is_g4x || brw->gen == 5) len = 6; else len = 5; @@ -705,12 +701,12 @@ brw_emit_depth_stencil_hiz(struct brw_context *brw, ((height + tile_y - 1) << 19)); OUT_BATCH(0); - if (brw->is_g4x || intel->gen >= 5) + if (brw->is_g4x || brw->gen >= 5) OUT_BATCH(tile_x | (tile_y << 16)); else assert(tile_x == 0 && tile_y == 0); - if (intel->gen >= 6) + if (brw->gen >= 6) OUT_BATCH(0); ADVANCE_BATCH(); @@ -775,8 +771,8 @@ brw_emit_depth_stencil_hiz(struct brw_context *brw, * 3DSTATE_CLEAR_PARAMS packet must follow the DEPTH_BUFFER_STATE packet * when HiZ is enabled and the DEPTH_BUFFER_STATE changes. 
*/ - if (intel->gen >= 6 || hiz) { - if (intel->gen == 6) + if (brw->gen >= 6 || hiz) { + if (brw->gen == 6) intel_emit_post_sync_nonzero_flush(brw); BEGIN_BATCH(2); @@ -805,7 +801,6 @@ const struct brw_tracked_state brw_depthbuffer = { static void upload_polygon_stipple(struct brw_context *brw) { - struct intel_context *intel = &brw->intel; struct gl_context *ctx = &brw->intel.ctx; GLuint i; @@ -813,7 +808,7 @@ static void upload_polygon_stipple(struct brw_context *brw) if (!ctx->Polygon.StippleFlag) return; - if (intel->gen == 6) + if (brw->gen == 6) intel_emit_post_sync_nonzero_flush(brw); BEGIN_BATCH(33); @@ -854,14 +849,13 @@ const struct brw_tracked_state brw_polygon_stipple = { static void upload_polygon_stipple_offset(struct brw_context *brw) { - struct intel_context *intel = &brw->intel; struct gl_context *ctx = &brw->intel.ctx; /* _NEW_POLYGON */ if (!ctx->Polygon.StippleFlag) return; - if (intel->gen == 6) + if (brw->gen == 6) intel_emit_post_sync_nonzero_flush(brw); BEGIN_BATCH(2); @@ -897,13 +891,12 @@ const struct brw_tracked_state brw_polygon_stipple_offset = { */ static void upload_aa_line_parameters(struct brw_context *brw) { - struct intel_context *intel = &brw->intel; struct gl_context *ctx = &brw->intel.ctx; if (!ctx->Line.SmoothFlag || !brw->has_aa_line_parameters) return; - if (intel->gen == 6) + if (brw->gen == 6) intel_emit_post_sync_nonzero_flush(brw); OUT_BATCH(_3DSTATE_AA_LINE_PARAMETERS << 16 | (3 - 2)); @@ -928,7 +921,6 @@ const struct brw_tracked_state brw_aa_line_parameters = { static void upload_line_stipple(struct brw_context *brw) { - struct intel_context *intel = &brw->intel; struct gl_context *ctx = &brw->intel.ctx; GLfloat tmp; GLint tmpi; @@ -936,14 +928,14 @@ static void upload_line_stipple(struct brw_context *brw) if (!ctx->Line.StippleFlag) return; - if (intel->gen == 6) + if (brw->gen == 6) intel_emit_post_sync_nonzero_flush(brw); BEGIN_BATCH(3); OUT_BATCH(_3DSTATE_LINE_STIPPLE_PATTERN << 16 | (3 - 2)); OUT_BATCH(ctx->Line.StipplePattern); - if (intel->gen >= 7) { + if (brw->gen >= 7) { /* in U1.16 */ tmp = 1.0 / (GLfloat) ctx->Line.StippleFactor; tmpi = tmp * (1<<16); @@ -976,10 +968,8 @@ const struct brw_tracked_state brw_line_stipple = { void brw_upload_invariant_state(struct brw_context *brw) { - struct intel_context *intel = &brw->intel; - /* 3DSTATE_SIP, 3DSTATE_MULTISAMPLE, etc. are nonpipelined. */ - if (intel->gen == 6) + if (brw->gen == 6) intel_emit_post_sync_nonzero_flush(brw); /* Select the 3D pipeline (as opposed to media) */ @@ -987,7 +977,7 @@ brw_upload_invariant_state(struct brw_context *brw) OUT_BATCH(brw->CMD_PIPELINE_SELECT << 16 | 0); ADVANCE_BATCH(); - if (intel->gen < 6) { + if (brw->gen < 6) { /* Disable depth offset clamping. */ BEGIN_BATCH(2); OUT_BATCH(_3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP << 16 | (2 - 2)); @@ -1027,8 +1017,6 @@ const struct brw_tracked_state brw_invariant_state = { */ static void upload_state_base_address( struct brw_context *brw ) { - struct intel_context *intel = &brw->intel; - /* FINISHME: According to section 3.6.1 "STATE_BASE_ADDRESS" of * vol1a of the G45 PRM, MI_FLUSH with the ISC invalidate should be * programmed prior to STATE_BASE_ADDRESS. @@ -1038,8 +1026,8 @@ static void upload_state_base_address( struct brw_context *brw ) * maybe this isn't required for us in particular. 
*/ - if (intel->gen >= 6) { - if (intel->gen == 6) + if (brw->gen >= 6) { + if (brw->gen == 6) intel_emit_post_sync_nonzero_flush(brw); BEGIN_BATCH(10); @@ -1078,7 +1066,7 @@ static void upload_state_base_address( struct brw_context *brw ) OUT_BATCH(1); /* Indirect object upper bound */ OUT_BATCH(1); /* Instruction access upper bound */ ADVANCE_BATCH(); - } else if (intel->gen == 5) { + } else if (brw->gen == 5) { BEGIN_BATCH(8); OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2)); OUT_BATCH(1); /* General state base address */ diff --git a/src/mesa/drivers/dri/i965/brw_primitive_restart.c b/src/mesa/drivers/dri/i965/brw_primitive_restart.c index a98556b6328..2af9d32dc45 100644 --- a/src/mesa/drivers/dri/i965/brw_primitive_restart.c +++ b/src/mesa/drivers/dri/i965/brw_primitive_restart.c @@ -80,10 +80,9 @@ can_cut_index_handle_prims(struct gl_context *ctx, const struct _mesa_index_buffer *ib) { struct brw_context *brw = brw_context(ctx); - struct intel_context *intel = intel_context(ctx); /* Otherwise Haswell can do it all. */ - if (intel->gen >= 8 || brw->is_haswell) + if (brw->gen >= 8 || brw->is_haswell) return true; if (!can_cut_index_handle_restart_index(ctx, ib)) { diff --git a/src/mesa/drivers/dri/i965/brw_queryobj.c b/src/mesa/drivers/dri/i965/brw_queryobj.c index 3d84b67d2f7..127720f33aa 100644 --- a/src/mesa/drivers/dri/i965/brw_queryobj.c +++ b/src/mesa/drivers/dri/i965/brw_queryobj.c @@ -49,10 +49,9 @@ static void write_timestamp(struct brw_context *brw, drm_intel_bo *query_bo, int idx) { - struct intel_context *intel = &brw->intel; - if (intel->gen >= 6) { + if (brw->gen >= 6) { /* Emit workaround flushes: */ - if (intel->gen == 6) { + if (brw->gen == 6) { /* The timestamp write below is a non-zero post-sync op, which on * Gen6 necessitates a CS stall. CS stalls need stall at scoreboard * set. See the comments for intel_emit_post_sync_nonzero_flush(). 
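Editorial note: the hunks above all follow one mechanical pattern. A generation check that used to go through the embedded intel_context is now read straight off brw_context, and the local "intel" variable disappears. The following is a schematic before/after sketch of that pattern only; emit_foo() and its per-gen helpers are invented names for illustration, not functions touched by this patch.

    /* Before: reach the generation number through the embedded
     * intel_context (hypothetical example).
     */
    static void emit_foo(struct brw_context *brw)
    {
       struct intel_context *intel = &brw->intel;

       if (intel->gen >= 6)
          emit_foo_gen6(brw);
       else
          emit_foo_gen4(brw);
    }

    /* After: gen is a field of brw_context itself, so the local
     * "intel" variable is no longer needed.
     */
    static void emit_foo(struct brw_context *brw)
    {
       if (brw->gen >= 6)
          emit_foo_gen6(brw);
       else
          emit_foo_gen4(brw);
    }

The call sites keep their shape; only the object the gen field hangs off changes, which is why the diff is almost entirely one-line substitutions.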
@@ -95,8 +94,7 @@ write_timestamp(struct brw_context *brw, drm_intel_bo *query_bo, int idx) static void write_depth_count(struct brw_context *brw, drm_intel_bo *query_bo, int idx) { - struct intel_context *intel = &brw->intel; - assert(intel->gen < 6); + assert(brw->gen < 6); BEGIN_BATCH(4); OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2) | @@ -123,12 +121,11 @@ brw_queryobj_get_results(struct gl_context *ctx, struct brw_query_object *query) { struct brw_context *brw = brw_context(ctx); - struct intel_context *intel = intel_context(ctx); int i; uint64_t *results; - assert(intel->gen < 6); + assert(brw->gen < 6); if (query->bo == NULL) return; @@ -245,10 +242,9 @@ static void brw_begin_query(struct gl_context *ctx, struct gl_query_object *q) { struct brw_context *brw = brw_context(ctx); - struct intel_context *intel = intel_context(ctx); struct brw_query_object *query = (struct brw_query_object *)q; - assert(intel->gen < 6); + assert(brw->gen < 6); switch (query->Base.Target) { case GL_TIME_ELAPSED_EXT: @@ -318,10 +314,9 @@ static void brw_end_query(struct gl_context *ctx, struct gl_query_object *q) { struct brw_context *brw = brw_context(ctx); - struct intel_context *intel = intel_context(ctx); struct brw_query_object *query = (struct brw_query_object *)q; - assert(intel->gen < 6); + assert(brw->gen < 6); switch (query->Base.Target) { case GL_TIME_ELAPSED_EXT: @@ -375,7 +370,7 @@ static void brw_wait_query(struct gl_context *ctx, struct gl_query_object *q) { struct brw_query_object *query = (struct brw_query_object *)q; - assert(intel_context(ctx)->gen < 6); + assert(brw_context(ctx)->gen < 6); brw_queryobj_get_results(ctx, query); query->Base.Ready = true; @@ -390,10 +385,9 @@ static void brw_wait_query(struct gl_context *ctx, struct gl_query_object *q) static void brw_check_query(struct gl_context *ctx, struct gl_query_object *q) { struct brw_context *brw = brw_context(ctx); - struct intel_context *intel = intel_context(ctx); struct brw_query_object *query = (struct brw_query_object *)q; - assert(intel->gen < 6); + assert(brw->gen < 6); /* From the GL_ARB_occlusion_query spec: * @@ -421,9 +415,8 @@ static void ensure_bo_has_space(struct gl_context *ctx, struct brw_query_object *query) { struct brw_context *brw = brw_context(ctx); - struct intel_context *intel = intel_context(ctx); - assert(intel->gen < 6); + assert(brw->gen < 6); if (!query->bo || query->last_index * 2 + 1 >= 4096 / sizeof(uint64_t)) { @@ -463,8 +456,7 @@ ensure_bo_has_space(struct gl_context *ctx, struct brw_query_object *query) void brw_emit_query_begin(struct brw_context *brw) { - struct intel_context *intel = &brw->intel; - struct gl_context *ctx = &intel->ctx; + struct gl_context *ctx = &brw->intel.ctx; struct brw_query_object *query = brw->query.obj; if (brw->hw_ctx) diff --git a/src/mesa/drivers/dri/i965/brw_schedule_instructions.cpp b/src/mesa/drivers/dri/i965/brw_schedule_instructions.cpp index 66593586d8b..37e7db5f22e 100644 --- a/src/mesa/drivers/dri/i965/brw_schedule_instructions.cpp +++ b/src/mesa/drivers/dri/i965/brw_schedule_instructions.cpp @@ -61,8 +61,6 @@ class schedule_node : public exec_node public: schedule_node(backend_instruction *inst, const struct brw_context *brw) { - const struct intel_context *intel = &brw->intel; - this->inst = inst; this->child_array_size = 0; this->children = NULL; @@ -74,7 +72,7 @@ public: /* We can't measure Gen6 timings directly but expect them to be much * closer to Gen7 than Gen4. 
*/ - if (intel->gen >= 6) + if (brw->gen >= 6) set_latency_gen7(brw->is_haswell); else set_latency_gen4(); diff --git a/src/mesa/drivers/dri/i965/brw_sf.c b/src/mesa/drivers/dri/i965/brw_sf.c index 291a2896c87..1943388c4ad 100644 --- a/src/mesa/drivers/dri/i965/brw_sf.c +++ b/src/mesa/drivers/dri/i965/brw_sf.c @@ -50,7 +50,6 @@ static void compile_sf_prog( struct brw_context *brw, struct brw_sf_prog_key *key ) { - struct intel_context *intel = &brw->intel; struct brw_sf_compile c; const GLuint *program; void *mem_ctx; @@ -118,7 +117,7 @@ static void compile_sf_prog( struct brw_context *brw, printf("sf:\n"); for (i = 0; i < program_size / sizeof(struct brw_instruction); i++) brw_disasm(stdout, &((struct brw_instruction *)program)[i], - intel->gen); + brw->gen); printf("\n"); } diff --git a/src/mesa/drivers/dri/i965/brw_sf_emit.c b/src/mesa/drivers/dri/i965/brw_sf_emit.c index 579adb68acb..bd68f688474 100644 --- a/src/mesa/drivers/dri/i965/brw_sf_emit.c +++ b/src/mesa/drivers/dri/i965/brw_sf_emit.c @@ -165,7 +165,7 @@ static void copy_colors( struct brw_sf_compile *c, static void do_flatshade_triangle( struct brw_sf_compile *c ) { struct brw_compile *p = &c->func; - struct intel_context *intel = &p->brw->intel; + struct brw_context *brw = p->brw; struct brw_reg ip = brw_ip_reg(); GLuint nr = _mesa_bitcount_64(c->key.attrs & VARYING_SLOT_COLOR_BITS); GLuint jmpi = 1; @@ -178,7 +178,7 @@ static void do_flatshade_triangle( struct brw_sf_compile *c ) if (c->key.primitive == SF_UNFILLED_TRIS) return; - if (intel->gen == 5) + if (brw->gen == 5) jmpi = 2; brw_push_insn_state(p); @@ -204,7 +204,7 @@ static void do_flatshade_triangle( struct brw_sf_compile *c ) static void do_flatshade_line( struct brw_sf_compile *c ) { struct brw_compile *p = &c->func; - struct intel_context *intel = &p->brw->intel; + struct brw_context *brw = p->brw; struct brw_reg ip = brw_ip_reg(); GLuint nr = _mesa_bitcount_64(c->key.attrs & VARYING_SLOT_COLOR_BITS); GLuint jmpi = 1; @@ -217,7 +217,7 @@ static void do_flatshade_line( struct brw_sf_compile *c ) if (c->key.primitive == SF_UNFILLED_TRIS) return; - if (intel->gen == 5) + if (brw->gen == 5) jmpi = 2; brw_push_insn_state(p); diff --git a/src/mesa/drivers/dri/i965/brw_sf_state.c b/src/mesa/drivers/dri/i965/brw_sf_state.c index 86fee6a47b2..7752cb51933 100644 --- a/src/mesa/drivers/dri/i965/brw_sf_state.c +++ b/src/mesa/drivers/dri/i965/brw_sf_state.c @@ -162,7 +162,7 @@ static void upload_sf_unit( struct brw_context *brw ) /* Each SF thread produces 1 PUE, and there can be up to 24 (Pre-Ironlake) or * 48 (Ironlake) threads. */ - if (intel->gen == 5) + if (brw->gen == 5) chipset_max_threads = 48; else chipset_max_threads = 24; diff --git a/src/mesa/drivers/dri/i965/brw_shader.cpp b/src/mesa/drivers/dri/i965/brw_shader.cpp index 822ce377470..3322e805542 100644 --- a/src/mesa/drivers/dri/i965/brw_shader.cpp +++ b/src/mesa/drivers/dri/i965/brw_shader.cpp @@ -89,7 +89,7 @@ brw_lower_packing_builtins(struct brw_context *brw, | LOWER_PACK_UNORM_4x8 | LOWER_UNPACK_UNORM_4x8; - if (brw->intel.gen >= 7) { + if (brw->gen >= 7) { /* Gen7 introduced the f32to16 and f16to32 instructions, which can be * used to execute packHalf2x16 and unpackHalf2x16. For AOS code, no * lowering is needed. 
For SOA code, the Half2x16 ops must be @@ -111,7 +111,6 @@ GLboolean brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg) { struct brw_context *brw = brw_context(ctx); - struct intel_context *intel = &brw->intel; unsigned int stage; for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) { @@ -146,10 +145,10 @@ brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg) */ brw_lower_packing_builtins(brw, (gl_shader_type) stage, shader->ir); do_mat_op_to_vec(shader->ir); - const int bitfield_insert = intel->gen >= 7 + const int bitfield_insert = brw->gen >= 7 ? BITFIELD_INSERT_TO_BFM_BFI : 0; - const int lrp_to_arith = intel->gen < 6 ? LRP_TO_ARITH : 0; + const int lrp_to_arith = brw->gen < 6 ? LRP_TO_ARITH : 0; lower_instructions(shader->ir, MOD_TO_FRACT | DIV_TO_MUL_RCP | @@ -162,7 +161,7 @@ brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg) /* Pre-gen6 HW can only nest if-statements 16 deep. Beyond this, * if-statements need to be flattened. */ - if (intel->gen < 6) + if (brw->gen < 6) lower_if_to_cond_assign(shader->ir, 16); do_lower_texture_projection(shader->ir); diff --git a/src/mesa/drivers/dri/i965/brw_state_dump.c b/src/mesa/drivers/dri/i965/brw_state_dump.c index 89df522bb19..a42a0491f59 100644 --- a/src/mesa/drivers/dri/i965/brw_state_dump.c +++ b/src/mesa/drivers/dri/i965/brw_state_dump.c @@ -219,9 +219,8 @@ static void dump_sdc(struct brw_context *brw, uint32_t offset) { const char *name = "SDC"; - struct intel_context *intel = &brw->intel; - if (intel->gen >= 5 && intel->gen <= 6) { + if (brw->gen >= 5 && brw->gen <= 6) { struct gen5_sampler_default_color *sdc = (brw->batch.bo->virtual + offset); batch_out(brw, name, offset, 0, "unorm rgba\n"); @@ -249,11 +248,10 @@ dump_sdc(struct brw_context *brw, uint32_t offset) static void dump_sampler_state(struct brw_context *brw, uint32_t offset, uint32_t size) { - struct intel_context *intel = &brw->intel; int i; struct brw_sampler_state *samp = brw->batch.bo->virtual + offset; - assert(intel->gen < 7); + assert(brw->gen < 7); for (i = 0; i < size / sizeof(*samp); i++) { char name[20]; @@ -272,11 +270,10 @@ static void dump_sampler_state(struct brw_context *brw, static void dump_gen7_sampler_state(struct brw_context *brw, uint32_t offset, uint32_t size) { - struct intel_context *intel = &brw->intel; struct gen7_sampler_state *samp = brw->batch.bo->virtual + offset; int i; - assert(intel->gen >= 7); + assert(brw->gen >= 7); for (i = 0; i < size / sizeof(*samp); i++) { char name[20]; @@ -296,11 +293,10 @@ static void dump_gen7_sampler_state(struct brw_context *brw, static void dump_sf_viewport_state(struct brw_context *brw, uint32_t offset) { - struct intel_context *intel = &brw->intel; const char *name = "SF VP"; struct brw_sf_viewport *vp = brw->batch.bo->virtual + offset; - assert(intel->gen < 7); + assert(brw->gen < 7); batch_out(brw, name, offset, 0, "m00 = %f\n", vp->viewport.m00); batch_out(brw, name, offset, 1, "m11 = %f\n", vp->viewport.m11); @@ -318,11 +314,10 @@ static void dump_sf_viewport_state(struct brw_context *brw, static void dump_clip_viewport_state(struct brw_context *brw, uint32_t offset) { - struct intel_context *intel = &brw->intel; const char *name = "CLIP VP"; struct brw_clipper_viewport *vp = brw->batch.bo->virtual + offset; - assert(intel->gen < 7); + assert(brw->gen < 7); batch_out(brw, name, offset, 0, "xmin = %f\n", vp->xmin); batch_out(brw, name, offset, 1, "xmax = %f\n", vp->xmax); @@ -333,11 +328,10 @@ static void 
dump_clip_viewport_state(struct brw_context *brw, static void dump_sf_clip_viewport_state(struct brw_context *brw, uint32_t offset) { - struct intel_context *intel = &brw->intel; const char *name = "SF_CLIP VP"; struct gen7_sf_clip_viewport *vp = brw->batch.bo->virtual + offset; - assert(intel->gen >= 7); + assert(brw->gen >= 7); batch_out(brw, name, offset, 0, "m00 = %f\n", vp->viewport.m00); batch_out(brw, name, offset, 1, "m11 = %f\n", vp->viewport.m11); @@ -485,7 +479,6 @@ static void dump_binding_table(struct brw_context *brw, uint32_t offset, static void dump_prog_cache(struct brw_context *brw) { - struct intel_context *intel = &brw->intel; struct brw_cache *cache = &brw->cache; unsigned int b, i; uint32_t *data; @@ -528,7 +521,7 @@ dump_prog_cache(struct brw_context *brw) name, data[i * 4], data[i * 4 + 1], data[i * 4 + 2], data[i * 4 + 3]); - brw_disasm(stderr, (void *)(data + i * 4), intel->gen); + brw_disasm(stderr, (void *)(data + i * 4), brw->gen); } } } @@ -539,7 +532,6 @@ dump_prog_cache(struct brw_context *brw) static void dump_state_batch(struct brw_context *brw) { - struct intel_context *intel = &brw->intel; int i; for (i = 0; i < brw->state_batch_count; i++) { @@ -566,7 +558,7 @@ dump_state_batch(struct brw_context *brw) dump_clip_viewport_state(brw, offset); break; case AUB_TRACE_SF_VP_STATE: - if (intel->gen >= 7) { + if (brw->gen >= 7) { dump_sf_clip_viewport_state(brw, offset); } else { dump_sf_viewport_state(brw, offset); @@ -579,7 +571,7 @@ dump_state_batch(struct brw_context *brw) dump_depth_stencil_state(brw, offset); break; case AUB_TRACE_CC_STATE: - if (intel->gen >= 6) + if (brw->gen >= 6) dump_cc_state_gen6(brw, offset); else dump_cc_state_gen4(brw, offset); @@ -591,14 +583,14 @@ dump_state_batch(struct brw_context *brw) dump_binding_table(brw, offset, size); break; case AUB_TRACE_SURFACE_STATE: - if (intel->gen < 7) { + if (brw->gen < 7) { dump_surface_state(brw, offset); } else { dump_gen7_surface_state(brw, offset); } break; case AUB_TRACE_SAMPLER_STATE: - if (intel->gen < 7) { + if (brw->gen < 7) { dump_sampler_state(brw, offset, size); } else { dump_gen7_sampler_state(brw, offset, size); diff --git a/src/mesa/drivers/dri/i965/brw_state_upload.c b/src/mesa/drivers/dri/i965/brw_state_upload.c index 39fe6d26564..e7d88377a77 100644 --- a/src/mesa/drivers/dri/i965/brw_state_upload.c +++ b/src/mesa/drivers/dri/i965/brw_state_upload.c @@ -237,8 +237,6 @@ static const struct brw_tracked_state *gen7_atoms[] = static void brw_upload_initial_gpu_state(struct brw_context *brw) { - struct intel_context *intel = &brw->intel; - /* On platforms with hardware contexts, we can set our initial GPU state * right away rather than doing it via state atoms. This saves a small * amount of overhead on every draw call. 
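Editorial note: the same brw->gen field also drives coarse dispatch between per-generation code paths, as in the state-dump hunks above and the atom-table selection in brw_init_state() below. A hedged sketch of that dispatch style follows; the dump_viewport() wrapper is invented, while dump_sf_clip_viewport_state() and dump_sf_viewport_state() are the real helpers visible in this diff.

    /* Illustrative only: choose a per-generation implementation from
     * brw->gen, mirroring how dump_state_batch() picks between the
     * gen7 SF_CLIP viewport dump and the older SF viewport dump.
     */
    static void dump_viewport(struct brw_context *brw, uint32_t offset)
    {
       if (brw->gen >= 7)
          dump_sf_clip_viewport_state(brw, offset);  /* gen7+: combined SF_CLIP VP */
       else
          dump_sf_viewport_state(brw, offset);       /* gen4-6: separate SF VP */
    }

Because every such branch now reads brw->gen directly, files that only needed intel_context for this one field can drop it entirely, as the deleted declarations throughout these hunks show.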
@@ -248,7 +246,7 @@ brw_upload_initial_gpu_state(struct brw_context *brw) brw_upload_invariant_state(brw); - if (intel->gen >= 7) { + if (brw->gen >= 7) { gen7_allocate_push_constants(brw); } } @@ -260,10 +258,10 @@ void brw_init_state( struct brw_context *brw ) brw_init_caches(brw); - if (brw->intel.gen >= 7) { + if (brw->gen >= 7) { atoms = gen7_atoms; num_atoms = ARRAY_SIZE(gen7_atoms); - } else if (brw->intel.gen == 6) { + } else if (brw->gen == 6) { atoms = gen6_atoms; num_atoms = ARRAY_SIZE(gen6_atoms); } else { diff --git a/src/mesa/drivers/dri/i965/brw_surface_formats.c b/src/mesa/drivers/dri/i965/brw_surface_formats.c index f4feed4dc0b..2f23296d4bd 100644 --- a/src/mesa/drivers/dri/i965/brw_surface_formats.c +++ b/src/mesa/drivers/dri/i965/brw_surface_formats.c @@ -527,12 +527,11 @@ brw_format_for_mesa_format(gl_format mesa_format) void brw_init_surface_formats(struct brw_context *brw) { - struct intel_context *intel = &brw->intel; - struct gl_context *ctx = &intel->ctx; + struct gl_context *ctx = &brw->intel.ctx; int gen; gl_format format; - gen = intel->gen * 10; + gen = brw->gen * 10; if (brw->is_g4x) gen += 5; @@ -652,7 +651,6 @@ bool brw_render_target_supported(struct brw_context *brw, struct gl_renderbuffer *rb) { - struct intel_context *intel = &brw->intel; gl_format format = rb->Format; /* Many integer formats are promoted to RGBA (like XRGB8888 is), which means @@ -671,7 +669,7 @@ brw_render_target_supported(struct brw_context *brw, */ if (rb->NumSamples > 0 && _mesa_get_format_bytes(format) > 8) { /* Gen6: MSAA on >64 bit formats is unsupported. */ - if (intel->gen <= 6) + if (brw->gen <= 6) return false; /* Gen7: 8x MSAA on >64 bit formats is unsupported. */ @@ -688,7 +686,6 @@ translate_tex_format(struct brw_context *brw, GLenum depth_mode, GLenum srgb_decode) { - struct intel_context *intel = &brw->intel; struct gl_context *ctx = &brw->intel.ctx; if (srgb_decode == GL_SKIP_DECODE_EXT) mesa_format = _mesa_get_srgb_format_linear(mesa_format); @@ -715,7 +712,7 @@ translate_tex_format(struct brw_context *brw, return BRW_SURFACEFORMAT_R32G32B32A32_FLOAT; case MESA_FORMAT_SRGB_DXT1: - if (intel->gen == 4 && !brw->is_g4x) { + if (brw->gen == 4 && !brw->is_g4x) { /* Work around missing SRGB DXT1 support on original gen4 by just * skipping SRGB decode. It's not worth not supporting sRGB in * general to prevent this. diff --git a/src/mesa/drivers/dri/i965/brw_tex_layout.c b/src/mesa/drivers/dri/i965/brw_tex_layout.c index a2870a25536..ebc67b1d9d2 100644 --- a/src/mesa/drivers/dri/i965/brw_tex_layout.c +++ b/src/mesa/drivers/dri/i965/brw_tex_layout.c @@ -42,7 +42,6 @@ static unsigned int intel_horizontal_texture_alignment_unit(struct brw_context *brw, gl_format format) { - struct intel_context *intel = &brw->intel; /** * From the "Alignment Unit Size" section of various specs, namely: * - Gen3 Spec: "Memory Data Formats" Volume, Section 1.20.1.4 @@ -86,7 +85,7 @@ intel_horizontal_texture_alignment_unit(struct brw_context *brw, * offset workaround blits we do, align the X to 8, which depth texturing * can handle (sadly, it can't handle 8 in the Y direction). 
*/ - if (intel->gen >= 7 && + if (brw->gen >= 7 && _mesa_get_format_base_format(format) == GL_DEPTH_COMPONENT) return 8; @@ -97,7 +96,6 @@ static unsigned int intel_vertical_texture_alignment_unit(struct brw_context *brw, gl_format format) { - struct intel_context *intel = &brw->intel; /** * From the "Alignment Unit Size" section of various specs, namely: * - Gen3 Spec: "Memory Data Formats" Volume, Section 1.20.1.4 @@ -127,11 +125,11 @@ intel_vertical_texture_alignment_unit(struct brw_context *brw, return 4; if (format == MESA_FORMAT_S8) - return intel->gen >= 7 ? 8 : 4; + return brw->gen >= 7 ? 8 : 4; GLenum base_format = _mesa_get_format_base_format(format); - if (intel->gen >= 6 && + if (brw->gen >= 6 && (base_format == GL_DEPTH_COMPONENT || base_format == GL_DEPTH_STENCIL)) { return 4; @@ -210,7 +208,6 @@ static void brw_miptree_layout_texture_array(struct brw_context *brw, struct intel_mipmap_tree *mt) { - struct intel_context *intel = &brw->intel; unsigned qpitch = 0; int h0, h1; @@ -219,7 +216,7 @@ brw_miptree_layout_texture_array(struct brw_context *brw, if (mt->array_spacing_lod0) qpitch = h0; else - qpitch = (h0 + h1 + (intel->gen >= 7 ? 12 : 11) * mt->align_h); + qpitch = (h0 + h1 + (brw->gen >= 7 ? 12 : 11) * mt->align_h); if (mt->compressed) qpitch /= 4; @@ -314,13 +311,12 @@ brw_miptree_layout_texture_3d(struct brw_context *brw, void brw_miptree_layout(struct brw_context *brw, struct intel_mipmap_tree *mt) { - struct intel_context *intel = &brw->intel; mt->align_w = intel_horizontal_texture_alignment_unit(brw, mt->format); mt->align_h = intel_vertical_texture_alignment_unit(brw, mt->format); switch (mt->target) { case GL_TEXTURE_CUBE_MAP: - if (intel->gen == 4) { + if (brw->gen == 4) { /* Gen4 stores cube maps as 3D textures. */ assert(mt->physical_depth0 == 6); brw_miptree_layout_texture_3d(brw, mt); diff --git a/src/mesa/drivers/dri/i965/brw_urb.c b/src/mesa/drivers/dri/i965/brw_urb.c index 3ac5573257a..c0273c68fa7 100644 --- a/src/mesa/drivers/dri/i965/brw_urb.c +++ b/src/mesa/drivers/dri/i965/brw_urb.c @@ -114,7 +114,6 @@ static bool check_urb_layout(struct brw_context *brw) */ static void recalculate_urb_fence( struct brw_context *brw ) { - struct intel_context *intel = &brw->intel; GLuint csize = brw->curbe.total_size; GLuint vsize = brw->vs.prog_data->base.urb_entry_size; GLuint sfsize = brw->sf.prog_data->urb_entry_size; @@ -148,7 +147,7 @@ static void recalculate_urb_fence( struct brw_context *brw ) brw->urb.constrained = 0; - if (intel->gen == 5) { + if (brw->gen == 5) { brw->urb.nr_vs_entries = 128; brw->urb.nr_sf_entries = 48; if (check_urb_layout(brw)) { diff --git a/src/mesa/drivers/dri/i965/brw_vec4.cpp b/src/mesa/drivers/dri/i965/brw_vec4.cpp index c9367063769..535eca48e7d 100644 --- a/src/mesa/drivers/dri/i965/brw_vec4.cpp +++ b/src/mesa/drivers/dri/i965/brw_vec4.cpp @@ -215,7 +215,7 @@ vec4_instruction::is_send_from_grf() bool vec4_visitor::can_do_source_mods(vec4_instruction *inst) { - if (intel->gen == 6 && inst->is_math()) + if (brw->gen == 6 && inst->is_math()) return false; if (inst->is_send_from_grf()) @@ -878,7 +878,7 @@ vec4_visitor::opt_register_coalesce() if (scan_inst->mlen) break; - if (intel->gen == 6) { + if (brw->gen == 6) { /* gen6 math instructions must have the destination be * GRF, so no compute-to-MRF for them. 
*/ @@ -1248,7 +1248,7 @@ vec4_vs_visitor::setup_attributes(int payload_reg) unsigned vue_entries = MAX2(nr_attributes, prog_data->vue_map.num_slots); - if (intel->gen == 6) + if (brw->gen == 6) prog_data->urb_entry_size = ALIGN(vue_entries, 8) / 8; else prog_data->urb_entry_size = ALIGN(vue_entries, 4) / 4; @@ -1262,7 +1262,7 @@ vec4_visitor::setup_uniforms(int reg) /* The pre-gen6 VS requires that some push constants get loaded no * matter what, or the GPU would hang. */ - if (intel->gen < 6 && this->uniforms == 0) { + if (brw->gen < 6 && this->uniforms == 0) { this->uniform_vector_size[this->uniforms] = 1; for (unsigned int i = 0; i < 4; i++) { @@ -1305,7 +1305,7 @@ vec4_visitor::setup_payload(void) src_reg vec4_visitor::get_timestamp() { - assert(intel->gen >= 7); + assert(brw->gen >= 7); src_reg ts = src_reg(brw_reg(BRW_ARCHITECTURE_REGISTER_FILE, BRW_ARF_TIMESTAMP, diff --git a/src/mesa/drivers/dri/i965/brw_vec4_emit.cpp b/src/mesa/drivers/dri/i965/brw_vec4_emit.cpp index b75155be8ac..a4c96452bb5 100644 --- a/src/mesa/drivers/dri/i965/brw_vec4_emit.cpp +++ b/src/mesa/drivers/dri/i965/brw_vec4_emit.cpp @@ -268,7 +268,7 @@ vec4_generator::generate_tex(vec4_instruction *inst, { int msg_type = -1; - if (intel->gen >= 5) { + if (brw->gen >= 5) { switch (inst->opcode) { case SHADER_OPCODE_TEX: case SHADER_OPCODE_TXL: @@ -291,7 +291,7 @@ vec4_generator::generate_tex(vec4_instruction *inst, msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD; break; case SHADER_OPCODE_TXF_MS: - if (intel->gen >= 7) + if (brw->gen >= 7) msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS; else msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD; @@ -410,7 +410,7 @@ vec4_generator::generate_oword_dual_block_offsets(struct brw_reg m1, { int second_vertex_offset; - if (intel->gen >= 6) + if (brw->gen >= 6) second_vertex_offset = 1; else second_vertex_offset = 16; @@ -455,9 +455,9 @@ vec4_generator::generate_scratch_read(vec4_instruction *inst, uint32_t msg_type; - if (intel->gen >= 6) + if (brw->gen >= 6) msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ; - else if (intel->gen == 5 || brw->is_g4x) + else if (brw->gen == 5 || brw->is_g4x) msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ; else msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ; @@ -468,7 +468,7 @@ vec4_generator::generate_scratch_read(vec4_instruction *inst, struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND); brw_set_dest(p, send, dst); brw_set_src0(p, send, header); - if (intel->gen < 6) + if (brw->gen < 6) send->header.destreg__conditionalmod = inst->base_mrf; brw_set_dp_read_message(p, send, 255, /* binding table index: stateless access */ @@ -505,9 +505,9 @@ vec4_generator::generate_scratch_write(vec4_instruction *inst, uint32_t msg_type; - if (intel->gen >= 7) + if (brw->gen >= 7) msg_type = GEN7_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE; - else if (intel->gen == 6) + else if (brw->gen == 6) msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE; else msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE; @@ -519,7 +519,7 @@ vec4_generator::generate_scratch_write(vec4_instruction *inst, * guaranteed and write commits only matter for inter-thread * synchronization. */ - if (intel->gen >= 6) { + if (brw->gen >= 6) { write_commit = false; } else { /* The visitor set up our destination register to be g0. 
This @@ -539,7 +539,7 @@ vec4_generator::generate_scratch_write(vec4_instruction *inst, struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND); brw_set_dest(p, send, dst); brw_set_src0(p, send, header); - if (intel->gen < 6) + if (brw->gen < 6) send->header.destreg__conditionalmod = inst->base_mrf; brw_set_dp_write_message(p, send, 255, /* binding table index: stateless access */ @@ -559,7 +559,7 @@ vec4_generator::generate_pull_constant_load(vec4_instruction *inst, struct brw_reg index, struct brw_reg offset) { - assert(intel->gen <= 7); + assert(brw->gen <= 7); assert(index.file == BRW_IMMEDIATE_VALUE && index.type == BRW_REGISTER_TYPE_UD); uint32_t surf_index = index.dw1.ud; @@ -573,9 +573,9 @@ vec4_generator::generate_pull_constant_load(vec4_instruction *inst, uint32_t msg_type; - if (intel->gen >= 6) + if (brw->gen >= 6) msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ; - else if (intel->gen == 5 || brw->is_g4x) + else if (brw->gen == 5 || brw->is_g4x) msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ; else msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ; @@ -586,7 +586,7 @@ vec4_generator::generate_pull_constant_load(vec4_instruction *inst, struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND); brw_set_dest(p, send, dst); brw_set_src0(p, send, header); - if (intel->gen < 6) + if (brw->gen < 6) send->header.destreg__conditionalmod = inst->base_mrf; brw_set_dp_read_message(p, send, surf_index, @@ -757,7 +757,7 @@ vec4_generator::generate_vec4_instruction(vec4_instruction *instruction, case BRW_OPCODE_IF: if (inst->src[0].file != BAD_FILE) { /* The instruction has an embedded compare (only allowed on gen6) */ - assert(intel->gen == 6); + assert(brw->gen == 6); gen6_IF(p, inst->conditional_mod, src[0], src[1]); } else { struct brw_instruction *brw_inst = brw_IF(p, BRW_EXECUTE_8); @@ -782,7 +782,7 @@ vec4_generator::generate_vec4_instruction(vec4_instruction *instruction, break; case BRW_OPCODE_CONTINUE: /* FINISHME: We need to write the loop instruction support still. */ - if (intel->gen >= 6) + if (brw->gen >= 6) gen6_CONT(p); else brw_CONT(p); @@ -800,7 +800,7 @@ vec4_generator::generate_vec4_instruction(vec4_instruction *instruction, case SHADER_OPCODE_LOG2: case SHADER_OPCODE_SIN: case SHADER_OPCODE_COS: - if (intel->gen == 6) { + if (brw->gen == 6) { generate_math1_gen6(inst, dst, src[0]); } else { /* Also works for Gen7. */ @@ -811,9 +811,9 @@ vec4_generator::generate_vec4_instruction(vec4_instruction *instruction, case SHADER_OPCODE_POW: case SHADER_OPCODE_INT_QUOTIENT: case SHADER_OPCODE_INT_REMAINDER: - if (intel->gen >= 7) { + if (brw->gen >= 7) { generate_math2_gen7(inst, dst, src[0], src[1]); - } else if (intel->gen == 6) { + } else if (brw->gen == 6) { generate_math2_gen6(inst, dst, src[0], src[1]); } else { generate_math2_gen4(inst, dst, src[0], src[1]); diff --git a/src/mesa/drivers/dri/i965/brw_vec4_reg_allocate.cpp b/src/mesa/drivers/dri/i965/brw_vec4_reg_allocate.cpp index abba0f2f9b5..10a9c573307 100644 --- a/src/mesa/drivers/dri/i965/brw_vec4_reg_allocate.cpp +++ b/src/mesa/drivers/dri/i965/brw_vec4_reg_allocate.cpp @@ -101,8 +101,6 @@ brw_alloc_reg_set_for_classes(struct brw_context *brw, int class_count, int base_reg_count) { - struct intel_context *intel = &brw->intel; - /* Compute the total number of registers across all classes. 
*/ int ra_reg_count = 0; for (int i = 0; i < class_count; i++) { @@ -113,7 +111,7 @@ brw_alloc_reg_set_for_classes(struct brw_context *brw, brw->vs.ra_reg_to_grf = ralloc_array(brw, uint8_t, ra_reg_count); ralloc_free(brw->vs.regs); brw->vs.regs = ra_alloc_reg_set(brw, ra_reg_count); - if (intel->gen >= 6) + if (brw->gen >= 6) ra_set_allocate_round_robin(brw->vs.regs); ralloc_free(brw->vs.classes); brw->vs.classes = ralloc_array(brw, int, class_count + 1); diff --git a/src/mesa/drivers/dri/i965/brw_vec4_visitor.cpp b/src/mesa/drivers/dri/i965/brw_vec4_visitor.cpp index 162fd55f429..13c10718b7a 100644 --- a/src/mesa/drivers/dri/i965/brw_vec4_visitor.cpp +++ b/src/mesa/drivers/dri/i965/brw_vec4_visitor.cpp @@ -160,7 +160,7 @@ vec4_visitor::IF(uint32_t predicate) vec4_instruction * vec4_visitor::IF(src_reg src0, src_reg src1, uint32_t condition) { - assert(intel->gen >= 6); + assert(brw->gen >= 6); vec4_instruction *inst; @@ -188,7 +188,7 @@ vec4_visitor::CMP(dst_reg dst, src_reg src0, src_reg src1, uint32_t condition) * before before comparison, producing garbage results for floating * point comparisons. */ - if (intel->gen == 4) { + if (brw->gen == 4) { dst.type = src0.type; if (dst.file == HW_REG) dst.fixed_hw_reg.type = dst.type; @@ -276,7 +276,7 @@ vec4_visitor::fix_math_operand(src_reg src) * can't use. */ - if (intel->gen == 7 && src.file != IMM) + if (brw->gen == 7 && src.file != IMM) return src; dst_reg expanded = dst_reg(this, glsl_type::vec4_type); @@ -329,7 +329,7 @@ vec4_visitor::emit_math(opcode opcode, dst_reg dst, src_reg src) return; } - if (intel->gen >= 6) { + if (brw->gen >= 6) { return emit_math1_gen6(opcode, dst, src); } else { return emit_math1_gen4(opcode, dst, src); @@ -381,7 +381,7 @@ vec4_visitor::emit_math(enum opcode opcode, return; } - if (intel->gen >= 6) { + if (brw->gen >= 6) { return emit_math2_gen6(opcode, dst, src0, src1); } else { return emit_math2_gen4(opcode, dst, src0, src1); @@ -391,7 +391,7 @@ vec4_visitor::emit_math(enum opcode opcode, void vec4_visitor::emit_pack_half_2x16(dst_reg dst, src_reg src0) { - if (intel->gen < 7) + if (brw->gen < 7) assert(!"ir_unop_pack_half_2x16 should be lowered"); assert(dst.type == BRW_REGISTER_TYPE_UD); @@ -467,7 +467,7 @@ vec4_visitor::emit_pack_half_2x16(dst_reg dst, src_reg src0) void vec4_visitor::emit_unpack_half_2x16(dst_reg dst, src_reg src0) { - if (intel->gen < 7) + if (brw->gen < 7) assert(!"ir_unop_unpack_half_2x16 should be lowered"); assert(dst.type == BRW_REGISTER_TYPE_F); @@ -662,7 +662,7 @@ vec4_visitor::setup_uniform_clipplane_values() { gl_clip_plane *clip_planes = brw_select_clip_planes(ctx); - if (intel->gen < 6) { + if (brw->gen < 6) { /* Pre-Gen6, we compact clip planes. 
For example, if the user * enables just clip planes 0, 1, and 3, we will enable clip planes * 0, 1, and 2 in the hardware, and we'll move clip plane 3 to clip @@ -783,7 +783,7 @@ vec4_visitor::emit_bool_to_cond_code(ir_rvalue *ir, uint32_t *predicate) break; case ir_unop_f2b: - if (intel->gen >= 6) { + if (brw->gen >= 6) { emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ)); } else { inst = emit(MOV(dst_null_f(), op[0])); @@ -792,7 +792,7 @@ vec4_visitor::emit_bool_to_cond_code(ir_rvalue *ir, uint32_t *predicate) break; case ir_unop_i2b: - if (intel->gen >= 6) { + if (brw->gen >= 6) { emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ)); } else { inst = emit(MOV(dst_null_d(), op[0])); @@ -836,7 +836,7 @@ vec4_visitor::emit_bool_to_cond_code(ir_rvalue *ir, uint32_t *predicate) resolve_ud_negate(&this->result); - if (intel->gen >= 6) { + if (brw->gen >= 6) { vec4_instruction *inst = emit(AND(dst_null_d(), this->result, src_reg(1))); inst->conditional_mod = BRW_CONDITIONAL_NZ; @@ -1254,7 +1254,7 @@ bool vec4_visitor::try_emit_mad(ir_expression *ir, int mul_arg) { /* 3-src instructions were introduced in gen6. */ - if (intel->gen < 6) + if (brw->gen < 6) return false; /* MAD can only handle floating-point data. */ @@ -1287,7 +1287,7 @@ vec4_visitor::emit_bool_comparison(unsigned int op, dst_reg dst, src_reg src0, src_reg src1) { /* original gen4 does destination conversion before comparison. */ - if (intel->gen < 5) + if (brw->gen < 5) dst.type = src0.type; emit(CMP(dst, src0, src1, brw_conditional_for_comparison(op))); @@ -1302,7 +1302,7 @@ vec4_visitor::emit_minmax(uint32_t conditionalmod, dst_reg dst, { vec4_instruction *inst; - if (intel->gen >= 6) { + if (brw->gen >= 6) { inst = emit(BRW_OPCODE_SEL, dst, src0, src1); inst->conditional_mod = conditionalmod; } else { @@ -1493,12 +1493,12 @@ vec4_visitor::visit(ir_expression *ir) * 16 bits, though, we can just emit a single MUL. */ if (is_16bit_constant(ir->operands[0])) { - if (intel->gen < 7) + if (brw->gen < 7) emit(MUL(result_dst, op[0], op[1])); else emit(MUL(result_dst, op[1], op[0])); } else if (is_16bit_constant(ir->operands[1])) { - if (intel->gen < 7) + if (brw->gen < 7) emit(MUL(result_dst, op[1], op[0])); else emit(MUL(result_dst, op[0], op[1])); @@ -2355,7 +2355,7 @@ vec4_visitor::visit(ir_texture *ir) bool use_texture_offset = ir->offset != NULL && ir->op != ir_txf; /* Texel offsets go in the message header; Gen4 also requires headers. */ - inst->header_present = use_texture_offset || intel->gen < 5; + inst->header_present = use_texture_offset || brw->gen < 5; inst->base_mrf = 2; inst->mlen = inst->header_present + 1; /* always at least one */ inst->sampler = sampler; @@ -2370,7 +2370,7 @@ vec4_visitor::visit(ir_texture *ir) int param_base = inst->base_mrf + inst->header_present; if (ir->op == ir_txs) { - int writemask = intel->gen == 4 ? WRITEMASK_W : WRITEMASK_X; + int writemask = brw->gen == 4 ? 
WRITEMASK_W : WRITEMASK_X; emit(MOV(dst_reg(MRF, param_base, lod_type, writemask), lod)); } else { int i, coord_mask = 0, zero_mask = 0; @@ -2416,7 +2416,7 @@ vec4_visitor::visit(ir_texture *ir) /* Load the LOD info */ if (ir->op == ir_tex || ir->op == ir_txl) { int mrf, writemask; - if (intel->gen >= 5) { + if (brw->gen >= 5) { mrf = param_base + 1; if (ir->shadow_comparitor) { writemask = WRITEMASK_Y; @@ -2425,7 +2425,7 @@ vec4_visitor::visit(ir_texture *ir) writemask = WRITEMASK_X; inst->mlen++; } - } else /* intel->gen == 4 */ { + } else /* brw->gen == 4 */ { mrf = param_base; writemask = WRITEMASK_Z; } @@ -2445,7 +2445,7 @@ vec4_visitor::visit(ir_texture *ir) } else if (ir->op == ir_txd) { const glsl_type *type = lod_type; - if (intel->gen >= 5) { + if (brw->gen >= 5) { dPdx.swizzle = BRW_SWIZZLE4(SWIZZLE_X,SWIZZLE_X,SWIZZLE_Y,SWIZZLE_Y); dPdy.swizzle = BRW_SWIZZLE4(SWIZZLE_X,SWIZZLE_X,SWIZZLE_Y,SWIZZLE_Y); emit(MOV(dst_reg(MRF, param_base + 1, type, WRITEMASK_XZ), dPdx)); @@ -2465,7 +2465,7 @@ vec4_visitor::visit(ir_texture *ir) shadow_comparitor)); } } - } else /* intel->gen == 4 */ { + } else /* brw->gen == 4 */ { emit(MOV(dst_reg(MRF, param_base + 1, type, WRITEMASK_XYZ), dPdx)); emit(MOV(dst_reg(MRF, param_base + 2, type, WRITEMASK_XYZ), dPdy)); inst->mlen += 2; @@ -2560,7 +2560,7 @@ vec4_visitor::visit(ir_if *ir) */ this->base_ir = ir->condition; - if (intel->gen == 6) { + if (brw->gen == 6) { emit_if_gen6(ir); } else { uint32_t predicate; @@ -2607,7 +2607,7 @@ vec4_visitor::emit_ndc_computation() void vec4_visitor::emit_psiz_and_flags(struct brw_reg reg) { - if (intel->gen < 6 && + if (brw->gen < 6 && ((prog_data->vue_map.slots_valid & VARYING_BIT_PSIZ) || key->userclip_active || brw->has_negative_rhw_bug)) { dst_reg header1 = dst_reg(this, glsl_type::uvec4_type); @@ -2660,7 +2660,7 @@ vec4_visitor::emit_psiz_and_flags(struct brw_reg reg) } emit(MOV(retype(reg, BRW_REGISTER_TYPE_UD), src_reg(header1))); - } else if (intel->gen < 6) { + } else if (brw->gen < 6) { emit(MOV(retype(reg, BRW_REGISTER_TYPE_UD), 0u)); } else { emit(MOV(retype(reg, BRW_REGISTER_TYPE_D), src_reg(0))); @@ -2678,7 +2678,7 @@ vec4_visitor::emit_psiz_and_flags(struct brw_reg reg) void vec4_visitor::emit_clip_distances(struct brw_reg reg, int offset) { - if (intel->gen < 6) { + if (brw->gen < 6) { /* Clip distance slots are set aside in gen5, but they are not used. It * is not clear whether we actually need to set aside space for them, * but the performance cost is negligible. @@ -2782,9 +2782,7 @@ vec4_visitor::emit_urb_slot(int mrf, int varying) static int align_interleaved_urb_mlen(struct brw_context *brw, int mlen) { - struct intel_context *intel = &brw->intel; - - if (intel->gen >= 6) { + if (brw->gen >= 6) { /* URB data written (does not include the message header reg) must * be a multiple of 256 bits, or 2 VS registers. See vol5c.5, * section 5.4.3.2.2: URB_INTERLEAVED. @@ -2855,7 +2853,7 @@ vec4_visitor::emit_vertex() */ emit_urb_write_header(mrf++); - if (intel->gen < 6) { + if (brw->gen < 6) { emit_ndc_computation(); } @@ -2924,7 +2922,7 @@ vec4_visitor::get_scratch_offset(vec4_instruction *inst, /* Pre-gen6, the message header uses byte offsets instead of vec4 * (16-byte) offset units. */ - if (intel->gen < 6) + if (brw->gen < 6) message_header_scale *= 16; if (reladdr) { @@ -2952,13 +2950,13 @@ vec4_visitor::get_pull_constant_offset(vec4_instruction *inst, /* Pre-gen6, the message header uses byte offsets instead of vec4 * (16-byte) offset units. 
*/ - if (intel->gen < 6) { + if (brw->gen < 6) { emit_before(inst, MUL(dst_reg(index), index, src_reg(16))); } return index; } else { - int message_header_scale = intel->gen < 6 ? 16 : 1; + int message_header_scale = brw->gen < 6 ? 16 : 1; return src_reg(reg_offset * message_header_scale); } } @@ -3111,7 +3109,7 @@ vec4_visitor::emit_pull_constant_load(vec4_instruction *inst, src_reg offset = get_pull_constant_offset(inst, orig_src.reladdr, reg_offset); vec4_instruction *load; - if (intel->gen >= 7) { + if (brw->gen >= 7) { dst_reg grf_offset = dst_reg(this, glsl_type::int_type); grf_offset.type = offset.type; emit_before(inst, MOV(grf_offset, offset)); @@ -3256,7 +3254,7 @@ vec4_visitor::vec4_visitor(struct brw_context *brw, this->virtual_grf_array_size = 0; this->live_intervals_valid = false; - this->max_grf = intel->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF; + this->max_grf = brw->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF; this->uniforms = 0; } diff --git a/src/mesa/drivers/dri/i965/brw_vec4_vp.cpp b/src/mesa/drivers/dri/i965/brw_vec4_vp.cpp index 95b39702444..eedf59bc2ee 100644 --- a/src/mesa/drivers/dri/i965/brw_vec4_vp.cpp +++ b/src/mesa/drivers/dri/i965/brw_vec4_vp.cpp @@ -111,7 +111,7 @@ vec4_vs_visitor::emit_program_code() break; case OPCODE_ARL: - if (intel->gen >= 6) { + if (brw->gen >= 6) { dst.writemask = WRITEMASK_X; dst_reg dst_f = dst; dst_f.type = BRW_REGISTER_TYPE_F; @@ -547,7 +547,7 @@ vec4_vs_visitor::get_vp_src_reg(const prog_src_register &src) dst_reladdr.writemask = WRITEMASK_X; emit(ADD(dst_reladdr, this->vp_addr_reg, src_reg(src.Index))); - if (intel->gen < 6) + if (brw->gen < 6) emit(MUL(dst_reladdr, reladdr, src_reg(16))); #if 0 diff --git a/src/mesa/drivers/dri/i965/brw_vs.c b/src/mesa/drivers/dri/i965/brw_vs.c index 03fc72019ea..99784c9a63d 100644 --- a/src/mesa/drivers/dri/i965/brw_vs.c +++ b/src/mesa/drivers/dri/i965/brw_vs.c @@ -61,8 +61,6 @@ void brw_compute_vue_map(struct brw_context *brw, struct brw_vue_map *vue_map, GLbitfield64 slots_valid, bool userclip_active) { - const struct intel_context *intel = &brw->intel; - vue_map->slots_valid = slots_valid; int i; @@ -83,7 +81,7 @@ brw_compute_vue_map(struct brw_context *brw, struct brw_vue_map *vue_map, /* VUE header: format depends on chip generation and whether clipping is * enabled. */ - switch (intel->gen) { + switch (brw->gen) { case 4: case 5: /* There are 8 dwords in VUE header pre-Ironlake: @@ -220,7 +218,6 @@ do_vs_prog(struct brw_context *brw, struct brw_vertex_program *vp, struct brw_vs_prog_key *key) { - struct intel_context *intel = &brw->intel; GLuint program_size; const GLuint *program; struct brw_vs_compile c; @@ -269,7 +266,7 @@ do_vs_prog(struct brw_context *brw, prog_data.inputs_read |= VERT_BIT_EDGEFLAG; } - if (intel->gen < 6) { + if (brw->gen < 6) { /* Put dummy slots into the VUE for the SF to put the replaced * point sprite coords in. 
We shouldn't need these dummy slots, * which take up precious URB space, but it would mean that the SF @@ -406,8 +403,7 @@ brw_vs_debug_recompile(struct brw_context *brw, static void brw_upload_vs_prog(struct brw_context *brw) { - struct intel_context *intel = &brw->intel; - struct gl_context *ctx = &intel->ctx; + struct gl_context *ctx = &brw->intel.ctx; struct brw_vs_prog_key key; /* BRW_NEW_VERTEX_PROGRAM */ struct brw_vertex_program *vp = @@ -424,7 +420,7 @@ static void brw_upload_vs_prog(struct brw_context *brw) key.base.userclip_active = (ctx->Transform.ClipPlanesEnabled != 0); key.base.uses_clip_distance = vp->program.UsesClipDistance; if (key.base.userclip_active && !key.base.uses_clip_distance) { - if (intel->gen < 6) { + if (brw->gen < 6) { key.base.nr_userclip_plane_consts = _mesa_bitcount_64(ctx->Transform.ClipPlanesEnabled); key.base.userclip_planes_enabled_gen_4_5 @@ -436,7 +432,7 @@ static void brw_upload_vs_prog(struct brw_context *brw) } /* _NEW_POLYGON */ - if (intel->gen < 6) { + if (brw->gen < 6) { key.copy_edgeflag = (ctx->Polygon.FrontMode != GL_FILL || ctx->Polygon.BackMode != GL_FILL); } @@ -445,7 +441,7 @@ static void brw_upload_vs_prog(struct brw_context *brw) key.base.clamp_vertex_color = ctx->Light._ClampVertexColor; /* _NEW_POINT */ - if (intel->gen < 6 && ctx->Point.PointSprite) { + if (brw->gen < 6 && ctx->Point.PointSprite) { for (i = 0; i < 8; i++) { if (ctx->Point.CoordReplace[i]) key.point_coord_replace |= (1 << i); @@ -456,7 +452,7 @@ static void brw_upload_vs_prog(struct brw_context *brw) brw_populate_sampler_prog_key_data(ctx, prog, &key.base.tex); /* BRW_NEW_VERTICES */ - if (intel->gen < 8 && !brw->is_haswell) { + if (brw->gen < 8 && !brw->is_haswell) { /* Prior to Haswell, the hardware can't natively support GL_FIXED or * 2_10_10_10_REV vertex formats. Set appropriate workaround flags. 
*/ diff --git a/src/mesa/drivers/dri/i965/brw_vs_state.c b/src/mesa/drivers/dri/i965/brw_vs_state.c index fe832ced6fc..54ff754b1dd 100644 --- a/src/mesa/drivers/dri/i965/brw_vs_state.c +++ b/src/mesa/drivers/dri/i965/brw_vs_state.c @@ -39,7 +39,6 @@ static void brw_upload_vs_unit(struct brw_context *brw) { - struct intel_context *intel = &brw->intel; struct brw_vs_unit_state *vs; vs = brw_state_batch(brw, AUB_TRACE_VS_STATE, @@ -69,7 +68,7 @@ brw_upload_vs_unit(struct brw_context *brw) * The most notable and reliably failing application is the Humus * demo "CelShading" */ - vs->thread1.single_program_flow = (intel->gen == 5); + vs->thread1.single_program_flow = (brw->gen == 5); vs->thread1.binding_table_entry_count = 0; @@ -93,7 +92,7 @@ brw_upload_vs_unit(struct brw_context *brw) vs->thread3.const_urb_entry_read_offset = brw->curbe.vs_start * 2; /* BRW_NEW_URB_FENCE */ - if (intel->gen == 5) { + if (brw->gen == 5) { switch (brw->urb.nr_vs_entries) { case 8: case 12: diff --git a/src/mesa/drivers/dri/i965/brw_vtbl.c b/src/mesa/drivers/dri/i965/brw_vtbl.c index 3d62051b6d0..9cddbc28fb6 100644 --- a/src/mesa/drivers/dri/i965/brw_vtbl.c +++ b/src/mesa/drivers/dri/i965/brw_vtbl.c @@ -158,11 +158,11 @@ void brwInitVtbl( struct brw_context *brw ) brw->vtbl.finish_batch = brw_finish_batch; brw->vtbl.destroy = brw_destroy_context; - assert(brw->intel.gen >= 4); - if (brw->intel.gen >= 7) { + assert(brw->gen >= 4); + if (brw->gen >= 7) { gen7_init_vtable_surface_functions(brw); brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz; - } else if (brw->intel.gen >= 4) { + } else if (brw->gen >= 4) { gen4_init_vtable_surface_functions(brw); brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz; } diff --git a/src/mesa/drivers/dri/i965/brw_wm.c b/src/mesa/drivers/dri/i965/brw_wm.c index 7ba0ee0265b..51f990d90df 100644 --- a/src/mesa/drivers/dri/i965/brw_wm.c +++ b/src/mesa/drivers/dri/i965/brw_wm.c @@ -349,7 +349,6 @@ static void brw_wm_populate_key( struct brw_context *brw, struct brw_wm_prog_key *key ) { struct gl_context *ctx = &brw->intel.ctx; - struct intel_context *intel = &brw->intel; /* BRW_NEW_FRAGMENT_PROGRAM */ const struct brw_fragment_program *fp = (struct brw_fragment_program *)brw->fragment_program; @@ -362,7 +361,7 @@ static void brw_wm_populate_key( struct brw_context *brw, /* Build the index for table lookup */ - if (intel->gen < 6) { + if (brw->gen < 6) { /* _NEW_COLOR */ if (fp->program.UsesKill || ctx->Color.AlphaEnabled) lookup |= IZ_PS_KILL_ALPHATEST_BIT; @@ -416,7 +415,7 @@ static void brw_wm_populate_key( struct brw_context *brw, key->line_aa = line_aa; - if (intel->gen < 6) + if (brw->gen < 6) key->stats_wm = brw->stats_wm; /* _NEW_LIGHT */ @@ -465,7 +464,7 @@ static void brw_wm_populate_key( struct brw_context *brw, (ctx->Multisample.SampleAlphaToCoverage || ctx->Color.AlphaEnabled); /* BRW_NEW_VUE_MAP_GEOM_OUT */ - if (intel->gen < 6) + if (brw->gen < 6) key->input_slots_valid = brw->vue_map_geom_out.slots_valid; /* The unique fragment program ID */ diff --git a/src/mesa/drivers/dri/i965/brw_wm_sampler_state.c b/src/mesa/drivers/dri/i965/brw_wm_sampler_state.c index b75882d20bf..4b171d58ff7 100644 --- a/src/mesa/drivers/dri/i965/brw_wm_sampler_state.c +++ b/src/mesa/drivers/dri/i965/brw_wm_sampler_state.c @@ -139,7 +139,7 @@ upload_default_color(struct brw_context *brw, struct gl_sampler_object *sampler, if (firstImage->_BaseFormat == GL_RGB) color[3] = 1.0; - if (intel->gen == 5 || intel->gen == 6) { + if (brw->gen == 5 || brw->gen == 6) { struct 
gen5_sampler_default_color *sdc; sdc = brw_state_batch(brw, AUB_TRACE_SAMPLER_DEFAULT_COLOR, @@ -268,7 +268,7 @@ static void brw_update_sampler_state(struct brw_context *brw, sampler->ss1.t_wrap_mode = translate_wrap_mode(gl_sampler->WrapT, using_nearest); - if (intel->gen >= 6 && + if (brw->gen >= 6 && sampler->ss0.min_filter != sampler->ss0.mag_filter) sampler->ss0.min_mag_neq = 1; @@ -332,13 +332,13 @@ static void brw_update_sampler_state(struct brw_context *brw, /* On Gen6+, the sampler can handle non-normalized texture * rectangle coordinates natively */ - if (intel->gen >= 6 && texObj->Target == GL_TEXTURE_RECTANGLE) { + if (brw->gen >= 6 && texObj->Target == GL_TEXTURE_RECTANGLE) { sampler->ss3.non_normalized_coord = 1; } upload_default_color(brw, gl_sampler, unit, ss_index); - if (intel->gen >= 6) { + if (brw->gen >= 6) { sampler->ss2.default_color_pointer = brw->wm.sdc_offset[ss_index] >> 5; } else { /* reloc */ diff --git a/src/mesa/drivers/dri/i965/brw_wm_state.c b/src/mesa/drivers/dri/i965/brw_wm_state.c index 79205a83740..660b03247f2 100644 --- a/src/mesa/drivers/dri/i965/brw_wm_state.c +++ b/src/mesa/drivers/dri/i965/brw_wm_state.c @@ -133,7 +133,7 @@ brw_upload_wm_unit(struct brw_context *brw) /* BRW_NEW_CURBE_OFFSETS */ wm->thread3.const_urb_entry_read_offset = brw->curbe.wm_start * 2; - if (intel->gen == 5) + if (brw->gen == 5) wm->wm4.sampler_count = 0; /* hardware requirement */ else { /* CACHE_NEW_SAMPLER */ diff --git a/src/mesa/drivers/dri/i965/brw_wm_surface_state.c b/src/mesa/drivers/dri/i965/brw_wm_surface_state.c index ff333572c91..3b67bd2884e 100644 --- a/src/mesa/drivers/dri/i965/brw_wm_surface_state.c +++ b/src/mesa/drivers/dri/i965/brw_wm_surface_state.c @@ -198,7 +198,6 @@ brw_update_buffer_texture_surface(struct gl_context *ctx, unsigned surf_index) { struct brw_context *brw = brw_context(ctx); - struct intel_context *intel = &brw->intel; struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current; uint32_t *surf; struct intel_buffer_object *intel_obj = @@ -219,7 +218,7 @@ brw_update_buffer_texture_surface(struct gl_context *ctx, surf[0] = (BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT | (brw_format_for_mesa_format(format) << BRW_SURFACE_FORMAT_SHIFT)); - if (intel->gen >= 6) + if (brw->gen >= 6) surf[0] |= BRW_SURFACE_RC_READ_WRITE; if (bo) { @@ -322,7 +321,6 @@ brw_create_constant_surface(struct brw_context *brw, uint32_t *out_offset, bool dword_pitch) { - struct intel_context *intel = &brw->intel; uint32_t stride = dword_pitch ? 
4 : 16; uint32_t elements = ALIGN(size, stride) / stride; const GLint w = elements - 1; @@ -335,7 +333,7 @@ brw_create_constant_surface(struct brw_context *brw, BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT | BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_SURFACE_FORMAT_SHIFT); - if (intel->gen >= 6) + if (brw->gen >= 6) surf[0] |= BRW_SURFACE_RC_READ_WRITE; surf[1] = bo->offset + offset; /* reloc */ @@ -371,7 +369,6 @@ brw_update_sol_surface(struct brw_context *brw, uint32_t *out_offset, unsigned num_vector_components, unsigned stride_dwords, unsigned offset_dwords) { - struct intel_context *intel = &brw->intel; struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj); drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo, INTEL_WRITE_PART); uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, @@ -455,7 +452,6 @@ static void brw_upload_wm_pull_constants(struct brw_context *brw) { struct gl_context *ctx = &brw->intel.ctx; - struct intel_context *intel = &brw->intel; /* BRW_NEW_FRAGMENT_PROGRAM */ struct brw_fragment_program *fp = (struct brw_fragment_program *) brw->fragment_program; @@ -527,8 +523,7 @@ brw_update_null_renderbuffer_surface(struct brw_context *brw, unsigned int unit) * * - Surface Format must be R8G8B8A8_UNORM. */ - struct intel_context *intel = &brw->intel; - struct gl_context *ctx = &intel->ctx; + struct gl_context *ctx = &brw->intel.ctx; uint32_t *surf; unsigned surface_type = BRW_SURFACE_NULL; drm_intel_bo *bo = NULL; @@ -570,7 +565,7 @@ brw_update_null_renderbuffer_surface(struct brw_context *brw, unsigned int unit) surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT | BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT); - if (intel->gen < 6) { + if (brw->gen < 6) { surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT | 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT | 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT | @@ -609,8 +604,7 @@ brw_update_renderbuffer_surface(struct brw_context *brw, bool layered, unsigned int unit) { - struct intel_context *intel = &brw->intel; - struct gl_context *ctx = &intel->ctx; + struct gl_context *ctx = &brw->intel.ctx; struct intel_renderbuffer *irb = intel_renderbuffer(rb); struct intel_mipmap_tree *mt = irb->mt; struct intel_region *region; @@ -675,7 +669,7 @@ brw_update_renderbuffer_surface(struct brw_context *brw, (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT | (mt->align_h == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0)); - if (intel->gen < 6) { + if (brw->gen < 6) { /* _NEW_COLOR */ if (!ctx->Color.ColorLogicOpEnabled && (ctx->Color.BlendEnabled & (1 << unit))) @@ -756,8 +750,7 @@ const struct brw_tracked_state gen6_renderbuffer_surfaces = { static void brw_update_texture_surfaces(struct brw_context *brw) { - struct intel_context *intel = &brw->intel; - struct gl_context *ctx = &intel->ctx; + struct gl_context *ctx = &brw->intel.ctx; /* BRW_NEW_VERTEX_PROGRAM and BRW_NEW_FRAGMENT_PROGRAM: * Unfortunately, we're stuck using the gl_program structs until the diff --git a/src/mesa/drivers/dri/i965/gen6_blorp.cpp b/src/mesa/drivers/dri/i965/gen6_blorp.cpp index 50d89cc7873..7355d31f535 100644 --- a/src/mesa/drivers/dri/i965/gen6_blorp.cpp +++ b/src/mesa/drivers/dri/i965/gen6_blorp.cpp @@ -106,7 +106,6 @@ void gen6_blorp_emit_vertices(struct brw_context *brw, const brw_blorp_params *params) { - struct intel_context *intel = &brw->intel; uint32_t vertex_offset; /* Setup VBO for the rectangle primitive.. 
@@ -161,7 +160,7 @@ gen6_blorp_emit_vertices(struct brw_context *brw, uint32_t dw0 = GEN6_VB0_ACCESS_VERTEXDATA | (GEN6_BLORP_NUM_VUE_ELEMS * sizeof(float)) << BRW_VB0_PITCH_SHIFT; - if (intel->gen >= 7) + if (brw->gen >= 7) dw0 |= GEN7_VB0_ADDRESS_MODIFYENABLE; BEGIN_BATCH(batch_length); @@ -554,9 +553,7 @@ void gen6_blorp_emit_vs_disable(struct brw_context *brw, const brw_blorp_params *params) { - struct intel_context *intel = &brw->intel; - - if (intel->gen == 6) { + if (brw->gen == 6) { /* From the BSpec, Volume 2a, Part 3 "Vertex Shader", Section * 3DSTATE_VS, Dword 5.0 "VS Function Enable": * @@ -816,8 +813,7 @@ static void gen6_blorp_emit_depth_stencil_config(struct brw_context *brw, const brw_blorp_params *params) { - struct intel_context *intel = &brw->intel; - struct gl_context *ctx = &intel->ctx; + struct gl_context *ctx = &brw->intel.ctx; uint32_t draw_x = params->depth.x_offset; uint32_t draw_y = params->depth.y_offset; uint32_t tile_mask_x, tile_mask_y; diff --git a/src/mesa/drivers/dri/i965/gen6_cc.c b/src/mesa/drivers/dri/i965/gen6_cc.c index b4a570960c8..cac32e5a053 100644 --- a/src/mesa/drivers/dri/i965/gen6_cc.c +++ b/src/mesa/drivers/dri/i965/gen6_cc.c @@ -39,7 +39,6 @@ static void gen6_upload_blend_state(struct brw_context *brw) { bool is_buffer_zero_integer_format = false; - struct intel_context *intel = &brw->intel; struct gl_context *ctx = &brw->intel.ctx; struct gen6_blend_state *blend; int b; @@ -216,7 +215,7 @@ gen6_upload_blend_state(struct brw_context *brw) blend[b].blend1.alpha_to_one = ctx->Multisample._Enabled && ctx->Multisample.SampleAlphaToOne; - blend[b].blend1.alpha_to_coverage_dither = (brw->intel.gen >= 7); + blend[b].blend1.alpha_to_coverage_dither = (brw->gen >= 7); } else { blend[b].blend1.alpha_to_coverage = false; @@ -225,7 +224,7 @@ gen6_upload_blend_state(struct brw_context *brw) } /* Point the GPU at the new indirect state. */ - if (intel->gen == 6) { + if (brw->gen == 6) { BEGIN_BATCH(4); OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (4 - 2)); OUT_BATCH(brw->cc.blend_state_offset | 1); @@ -255,7 +254,6 @@ static void gen6_upload_color_calc_state(struct brw_context *brw) { struct gl_context *ctx = &brw->intel.ctx; - struct intel_context *intel = &brw->intel; struct gen6_color_calc_state *cc; cc = brw_state_batch(brw, AUB_TRACE_CC_STATE, @@ -277,7 +275,7 @@ gen6_upload_color_calc_state(struct brw_context *brw) cc->constant_a = ctx->Color.BlendColorUnclamped[3]; /* Point the GPU at the new indirect state. */ - if (intel->gen == 6) { + if (brw->gen == 6) { BEGIN_BATCH(4); OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (4 - 2)); OUT_BATCH(0); diff --git a/src/mesa/drivers/dri/i965/gen6_depthstencil.c b/src/mesa/drivers/dri/i965/gen6_depthstencil.c index 8c471477938..01710180087 100644 --- a/src/mesa/drivers/dri/i965/gen6_depthstencil.c +++ b/src/mesa/drivers/dri/i965/gen6_depthstencil.c @@ -35,7 +35,6 @@ static void gen6_upload_depth_stencil_state(struct brw_context *brw) { struct gl_context *ctx = &brw->intel.ctx; - struct intel_context *intel = &brw->intel; struct gen6_depth_stencil_state *ds; struct intel_renderbuffer *depth_irb; @@ -88,7 +87,7 @@ gen6_upload_depth_stencil_state(struct brw_context *brw) } /* Point the GPU at the new indirect state. 
*/ - if (intel->gen == 6) { + if (brw->gen == 6) { BEGIN_BATCH(4); OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (4 - 2)); OUT_BATCH(0); diff --git a/src/mesa/drivers/dri/i965/gen6_multisample_state.c b/src/mesa/drivers/dri/i965/gen6_multisample_state.c index 3247bb90c08..ce0fdfc87dd 100644 --- a/src/mesa/drivers/dri/i965/gen6_multisample_state.c +++ b/src/mesa/drivers/dri/i965/gen6_multisample_state.c @@ -105,8 +105,6 @@ void gen6_emit_3dstate_multisample(struct brw_context *brw, unsigned num_samples) { - struct intel_context *intel = &brw->intel; - uint32_t number_of_multisamples = 0; uint32_t sample_positions_3210 = 0; uint32_t sample_positions_7654 = 0; @@ -130,12 +128,12 @@ gen6_emit_3dstate_multisample(struct brw_context *brw, break; } - int len = intel->gen >= 7 ? 4 : 3; + int len = brw->gen >= 7 ? 4 : 3; BEGIN_BATCH(len); OUT_BATCH(_3DSTATE_MULTISAMPLE << 16 | (len - 2)); OUT_BATCH(MS_PIXEL_LOCATION_CENTER | number_of_multisamples); OUT_BATCH(sample_positions_3210); - if (intel->gen >= 7) + if (brw->gen >= 7) OUT_BATCH(sample_positions_7654); ADVANCE_BATCH(); } @@ -166,8 +164,7 @@ gen6_emit_3dstate_sample_mask(struct brw_context *brw, static void upload_multisample_state(struct brw_context *brw) { - struct intel_context *intel = &brw->intel; - struct gl_context *ctx = &intel->ctx; + struct gl_context *ctx = &brw->intel.ctx; float coverage = 1.0; float coverage_invert = false; unsigned sample_mask = ~0u; diff --git a/src/mesa/drivers/dri/i965/gen6_queryobj.c b/src/mesa/drivers/dri/i965/gen6_queryobj.c index 814363068a2..6f4f6f5de8c 100644 --- a/src/mesa/drivers/dri/i965/gen6_queryobj.c +++ b/src/mesa/drivers/dri/i965/gen6_queryobj.c @@ -45,9 +45,8 @@ static void write_timestamp(struct brw_context *brw, drm_intel_bo *query_bo, int idx) { - struct intel_context *intel = &brw->intel; /* Emit workaround flushes: */ - if (intel->gen == 6) { + if (brw->gen == 6) { /* The timestamp write below is a non-zero post-sync op, which on * Gen6 necessitates a CS stall. CS stalls need stall at scoreboard * set. See the comments for intel_emit_post_sync_nonzero_flush(). 
@@ -78,9 +77,8 @@ write_timestamp(struct brw_context *brw, drm_intel_bo *query_bo, int idx) static void write_depth_count(struct brw_context *brw, drm_intel_bo *query_bo, int idx) { - struct intel_context *intel = &brw->intel; /* Emit Sandybridge workaround flush: */ - if (intel->gen == 6) + if (brw->gen == 6) intel_emit_post_sync_nonzero_flush(brw); BEGIN_BATCH(5); @@ -107,8 +105,7 @@ static void write_reg(struct brw_context *brw, drm_intel_bo *query_bo, uint32_t reg, int idx) { - struct intel_context *intel = &brw->intel; - assert(intel->gen >= 6); + assert(brw->gen >= 6); intel_batchbuffer_emit_mi_flush(brw); @@ -141,8 +138,7 @@ static void write_xfb_primitives_written(struct brw_context *brw, drm_intel_bo *query_bo, int idx) { - struct intel_context *intel = &brw->intel; - if (intel->gen >= 7) { + if (brw->gen >= 7) { write_reg(brw, query_bo, SO_NUM_PRIMS_WRITTEN0_IVB, idx); } else { write_reg(brw, query_bo, SO_NUM_PRIMS_WRITTEN, idx); diff --git a/src/mesa/drivers/dri/i965/gen6_sol.c b/src/mesa/drivers/dri/i965/gen6_sol.c index 35944ef35e0..cf945fd3d5b 100644 --- a/src/mesa/drivers/dri/i965/gen6_sol.c +++ b/src/mesa/drivers/dri/i965/gen6_sol.c @@ -137,7 +137,6 @@ brw_begin_transform_feedback(struct gl_context *ctx, GLenum mode, struct gl_transform_feedback_object *obj) { struct brw_context *brw = brw_context(ctx); - struct intel_context *intel = &brw->intel; const struct gl_shader_program *vs_prog = ctx->Shader.CurrentVertexProgram; const struct gl_transform_feedback_info *linked_xfb_info = @@ -145,7 +144,7 @@ brw_begin_transform_feedback(struct gl_context *ctx, GLenum mode, struct gl_transform_feedback_object *xfb_obj = ctx->TransformFeedback.CurrentObject; - assert(intel->gen == 6); + assert(brw->gen == 6); /* Compute the maximum number of vertices that we can write without * overflowing any of the buffers currently being used for feedback. diff --git a/src/mesa/drivers/dri/i965/gen7_urb.c b/src/mesa/drivers/dri/i965/gen7_urb.c index 862e8153f6e..ce01ddab66a 100644 --- a/src/mesa/drivers/dri/i965/gen7_urb.c +++ b/src/mesa/drivers/dri/i965/gen7_urb.c @@ -56,10 +56,8 @@ void gen7_allocate_push_constants(struct brw_context *brw) { - struct intel_context *intel = &brw->intel; - unsigned size = 8; - if (brw->is_haswell && intel->gt == 3) + if (brw->is_haswell && brw->gt == 3) size = 16; BEGIN_BATCH(2); @@ -76,8 +74,7 @@ gen7_allocate_push_constants(struct brw_context *brw) static void gen7_upload_urb(struct brw_context *brw) { - struct intel_context *intel = &brw->intel; - const int push_size_kB = brw->is_haswell && intel->gt == 3 ? 32 : 16; + const int push_size_kB = brw->is_haswell && brw->gt == 3 ? 32 : 16; /* Total space for entries is URB size - 16kB for push constants */ int handle_region_size = (brw->urb.size - push_size_kB) * 1024; /* bytes */ diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.c b/src/mesa/drivers/dri/i965/intel_batchbuffer.c index e40fe5224a1..ab7a9a37031 100644 --- a/src/mesa/drivers/dri/i965/intel_batchbuffer.c +++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.c @@ -58,10 +58,9 @@ clear_cache(struct brw_context *brw) void intel_batchbuffer_init(struct brw_context *brw) { - struct intel_context *intel = &brw->intel; intel_batchbuffer_reset(brw); - if (intel->gen >= 6) { + if (brw->gen >= 6) { /* We can't just use brw_state_batch to get a chunk of space for * the gen6 workaround because it involves actually writing to * the buffer, and the kernel doesn't let us write to the batch. 
@@ -176,7 +175,6 @@ do_batch_dump(struct brw_context *brw) static int do_flush_locked(struct brw_context *brw) { - struct intel_context *intel = &brw->intel; struct intel_batchbuffer *batch = &brw->batch; int ret = 0; @@ -195,7 +193,7 @@ do_flush_locked(struct brw_context *brw) if (!brw->intelScreen->no_hw) { int flags; - if (intel->gen < 6 || !batch->is_blit) { + if (brw->gen < 6 || !batch->is_blit) { flags = I915_EXEC_RENDER; } else { flags = I915_EXEC_BLT; @@ -396,8 +394,7 @@ emit: void intel_emit_depth_stall_flushes(struct brw_context *brw) { - struct intel_context *intel = &brw->intel; - assert(intel->gen >= 6 && intel->gen <= 7); + assert(brw->gen >= 6 && brw->gen <= 7); BEGIN_BATCH(4); OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2)); @@ -432,8 +429,7 @@ intel_emit_depth_stall_flushes(struct brw_context *brw) void gen7_emit_vs_workaround_flush(struct brw_context *brw) { - struct intel_context *intel = &brw->intel; - assert(intel->gen == 7); + assert(brw->gen == 7); BEGIN_BATCH(4); OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2)); @@ -515,8 +511,7 @@ intel_emit_post_sync_nonzero_flush(struct brw_context *brw) void intel_batchbuffer_emit_mi_flush(struct brw_context *brw) { - struct intel_context *intel = &brw->intel; - if (intel->gen >= 6) { + if (brw->gen >= 6) { if (brw->batch.is_blit) { BEGIN_BATCH_BLT(4); OUT_BATCH(MI_FLUSH_DW); @@ -525,7 +520,7 @@ intel_batchbuffer_emit_mi_flush(struct brw_context *brw) OUT_BATCH(0); ADVANCE_BATCH(); } else { - if (intel->gen == 6) { + if (brw->gen == 6) { /* Hardware workaround: SNB B-Spec says: * * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.h b/src/mesa/drivers/dri/i965/intel_batchbuffer.h index 4e73f61db0d..e151eb6d281 100644 --- a/src/mesa/drivers/dri/i965/intel_batchbuffer.h +++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.h @@ -101,8 +101,7 @@ intel_batchbuffer_emit_float(struct brw_context *brw, float f) static INLINE void intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz, int is_blit) { - struct intel_context *intel = &brw->intel; - if (intel->gen >= 6 && + if (brw->gen >= 6 && brw->batch.is_blit != is_blit && brw->batch.used) { intel_batchbuffer_flush(brw); } diff --git a/src/mesa/drivers/dri/i965/intel_blit.c b/src/mesa/drivers/dri/i965/intel_blit.c index 9806e5001e6..1f08fbfa7fb 100644 --- a/src/mesa/drivers/dri/i965/intel_blit.c +++ b/src/mesa/drivers/dri/i965/intel_blit.c @@ -104,8 +104,7 @@ static void set_blitter_tiling(struct brw_context *brw, bool dst_y_tiled, bool src_y_tiled) { - struct intel_context *intel = &brw->intel; - assert(intel->gen >= 6); + assert(brw->gen >= 6); /* Idle the blitter before we update how tiling is interpreted. 
*/ OUT_BATCH(MI_FLUSH_DW); @@ -279,7 +278,6 @@ intelEmitCopyBlit(struct brw_context *brw, GLshort w, GLshort h, GLenum logic_op) { - struct intel_context *intel = &brw->intel; GLuint CMD, BR13, pass = 0; int dst_y2 = dst_y + h; int dst_x2 = dst_x + w; @@ -296,7 +294,7 @@ intelEmitCopyBlit(struct brw_context *brw, if (src_offset & 4095) return false; } - if ((dst_y_tiled || src_y_tiled) && intel->gen < 6) + if ((dst_y_tiled || src_y_tiled) && brw->gen < 6) return false; /* do space check before going any further */ diff --git a/src/mesa/drivers/dri/i965/intel_context.c b/src/mesa/drivers/dri/i965/intel_context.c index f2717c45090..86bbd6113cb 100644 --- a/src/mesa/drivers/dri/i965/intel_context.c +++ b/src/mesa/drivers/dri/i965/intel_context.c @@ -94,8 +94,7 @@ void intel_resolve_for_dri2_flush(struct brw_context *brw, __DRIdrawable *drawable) { - struct intel_context *intel = &brw->intel; - if (intel->gen < 6) { + if (brw->gen < 6) { /* MSAA and fast color clear are not supported, so don't waste time * checking whether a resolve is needed. */ @@ -474,30 +473,30 @@ intelInitContext(struct brw_context *brw, driContextPriv->driverPrivate = brw; brw->driContext = driContextPriv; - intel->gen = intelScreen->gen; + brw->gen = intelScreen->gen; const int devID = intelScreen->deviceID; if (IS_SNB_GT1(devID) || IS_IVB_GT1(devID) || IS_HSW_GT1(devID)) - intel->gt = 1; + brw->gt = 1; else if (IS_SNB_GT2(devID) || IS_IVB_GT2(devID) || IS_HSW_GT2(devID)) - intel->gt = 2; + brw->gt = 2; else if (IS_HSW_GT3(devID)) - intel->gt = 3; + brw->gt = 3; else - intel->gt = 0; + brw->gt = 0; if (IS_HASWELL(devID)) { brw->is_haswell = true; } else if (IS_BAYTRAIL(devID)) { brw->is_baytrail = true; - intel->gt = 1; + brw->gt = 1; } else if (IS_G4X(devID)) { brw->is_g4x = true; } brw->has_separate_stencil = brw->intelScreen->hw_has_separate_stencil; brw->must_use_separate_stencil = brw->intelScreen->hw_must_use_separate_stencil; - brw->has_hiz = intel->gen >= 6; + brw->has_hiz = brw->gen >= 6; brw->has_llc = brw->intelScreen->hw_has_llc; brw->has_swizzling = brw->intelScreen->hw_has_swizzling; @@ -560,7 +559,7 @@ intelInitContext(struct brw_context *brw, INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control); if (INTEL_DEBUG & DEBUG_BUFMGR) dri_bufmgr_set_debug(brw->bufmgr, true); - if ((INTEL_DEBUG & DEBUG_SHADER_TIME) && intel->gen < 7) { + if ((INTEL_DEBUG & DEBUG_SHADER_TIME) && brw->gen < 7) { fprintf(stderr, "shader_time debugging requires gen7 (Ivybridge) or better.\n"); INTEL_DEBUG &= ~DEBUG_SHADER_TIME; @@ -578,7 +577,7 @@ intelInitContext(struct brw_context *brw, if (!driQueryOptionb(&brw->optionCache, "hiz")) { brw->has_hiz = false; /* On gen6, you can only do separate stencil with HIZ. */ - if (intel->gen == 6) + if (brw->gen == 6) brw->has_separate_stencil = false; } diff --git a/src/mesa/drivers/dri/i965/intel_context.h b/src/mesa/drivers/dri/i965/intel_context.h index ade9f32c16d..f4e7bf4ac35 100644 --- a/src/mesa/drivers/dri/i965/intel_context.h +++ b/src/mesa/drivers/dri/i965/intel_context.h @@ -112,12 +112,6 @@ struct intel_batchbuffer { struct intel_context { struct gl_context ctx; /**< base class, must be first field */ - - /** - * Generation number of the hardware: 2 is 8xx, 3 is 9xx pre-965, 4 is 965. 
- */ - int gen; - int gt; }; /** diff --git a/src/mesa/drivers/dri/i965/intel_extensions.c b/src/mesa/drivers/dri/i965/intel_extensions.c index 47fd53e6fef..1e762ef4f01 100644 --- a/src/mesa/drivers/dri/i965/intel_extensions.c +++ b/src/mesa/drivers/dri/i965/intel_extensions.c @@ -40,9 +40,8 @@ void intelInitExtensions(struct gl_context *ctx) { struct brw_context *brw = brw_context(ctx); - struct intel_context *intel = intel_context(ctx); - assert(intel->gen >= 4); + assert(brw->gen >= 4); ctx->Extensions.ARB_depth_buffer_float = true; ctx->Extensions.ARB_depth_clamp = true; @@ -124,13 +123,13 @@ intelInitExtensions(struct gl_context *ctx) ctx->Extensions.OES_draw_texture = true; ctx->Extensions.OES_standard_derivatives = true; - if (intel->gen >= 6) + if (brw->gen >= 6) ctx->Const.GLSLVersion = 140; else ctx->Const.GLSLVersion = 120; _mesa_override_glsl_version(ctx); - if (intel->gen >= 6) { + if (brw->gen >= 6) { uint64_t dummy; ctx->Extensions.EXT_framebuffer_multisample = true; @@ -152,7 +151,7 @@ intelInitExtensions(struct gl_context *ctx) ctx->Extensions.ARB_timer_query = true; } - if (intel->gen >= 5) { + if (brw->gen >= 5) { ctx->Extensions.ARB_texture_query_lod = true; ctx->Extensions.EXT_timer_query = true; } diff --git a/src/mesa/drivers/dri/i965/intel_fbo.c b/src/mesa/drivers/dri/i965/intel_fbo.c index 02eca0aaf13..0a89492fc69 100644 --- a/src/mesa/drivers/dri/i965/intel_fbo.c +++ b/src/mesa/drivers/dri/i965/intel_fbo.c @@ -550,7 +550,6 @@ static void intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb) { struct brw_context *brw = brw_context(ctx); - struct intel_context *intel = intel_context(ctx); struct intel_renderbuffer *depthRb = intel_get_renderbuffer(fb, BUFFER_DEPTH); struct intel_renderbuffer *stencilRb = @@ -596,7 +595,7 @@ intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb) "instead of S8\n", _mesa_get_format_name(stencil_mt->format)); } - if (intel->gen < 7 && !intel_renderbuffer_has_hiz(depthRb)) { + if (brw->gen < 7 && !intel_renderbuffer_has_hiz(depthRb)) { /* Before Gen7, separate depth and stencil buffers can be used * only if HiZ is enabled. From the Sandybridge PRM, Volume 2, * Part 1, Bit 3DSTATE_DEPTH_BUFFER.SeparateStencilBufferEnable: diff --git a/src/mesa/drivers/dri/i965/intel_mipmap_tree.c b/src/mesa/drivers/dri/i965/intel_mipmap_tree.c index 25ba85bff2a..058048b24ed 100644 --- a/src/mesa/drivers/dri/i965/intel_mipmap_tree.c +++ b/src/mesa/drivers/dri/i965/intel_mipmap_tree.c @@ -71,9 +71,8 @@ target_to_target(GLenum target) static enum intel_msaa_layout compute_msaa_layout(struct brw_context *brw, gl_format format, GLenum target) { - struct intel_context *intel = &brw->intel; /* Prior to Gen7, all MSAA surfaces used IMS layout. */ - if (intel->gen < 7) + if (brw->gen < 7) return INTEL_MSAA_LAYOUT_IMS; /* In Gen7, IMS layout is only used for depth and stencil buffers. */ @@ -96,7 +95,7 @@ compute_msaa_layout(struct brw_context *brw, gl_format format, GLenum target) */ if (_mesa_get_format_datatype(format) == GL_INT) { /* TODO: is this workaround needed for future chipsets? 
*/ - assert(intel->gen == 7); + assert(brw->gen == 7); return INTEL_MSAA_LAYOUT_UMS; } else { /* For now, if we're going to be texturing from this surface, @@ -201,10 +200,8 @@ bool intel_is_non_msrt_mcs_buffer_supported(struct brw_context *brw, struct intel_mipmap_tree *mt) { - struct intel_context *intel = &brw->intel; - /* MCS support does not exist prior to Gen7 */ - if (intel->gen < 7) + if (brw->gen < 7) return false; /* MCS is only supported for color buffers */ @@ -415,7 +412,6 @@ intel_miptree_choose_tiling(struct brw_context *brw, enum intel_miptree_tiling_mode requested, struct intel_mipmap_tree *mt) { - struct intel_context *intel = &brw->intel; if (format == MESA_FORMAT_S8) { /* The stencil buffer is W tiled. However, we request from the kernel a * non-tiled buffer because the GTT is incapable of W fencing. @@ -469,7 +465,7 @@ intel_miptree_choose_tiling(struct brw_context *brw, } /* Pre-gen6 doesn't have BLORP to handle Y-tiling, so use X-tiling. */ - if (intel->gen < 6) + if (brw->gen < 6) return I915_TILING_X; return I915_TILING_Y | I915_TILING_X; @@ -1131,8 +1127,7 @@ intel_miptree_alloc_mcs(struct brw_context *brw, struct intel_mipmap_tree *mt, GLuint num_samples) { - struct intel_context *intel = &brw->intel; - assert(intel->gen >= 7); /* MCS only used on Gen7+ */ + assert(brw->gen >= 7); /* MCS only used on Gen7+ */ assert(mt->mcs_mt == NULL); /* Choose the correct format for the MCS buffer. All that really matters @@ -2104,7 +2099,6 @@ intel_miptree_map_singlesample(struct brw_context *brw, void **out_ptr, int *out_stride) { - struct intel_context *intel = &brw->intel; struct intel_miptree_map *map; assert(mt->num_samples <= 1); @@ -2134,7 +2128,7 @@ intel_miptree_map_singlesample(struct brw_context *brw, !(mode & GL_MAP_WRITE_BIT) && !mt->compressed && (mt->region->tiling == I915_TILING_X || - (intel->gen >= 6 && mt->region->tiling == I915_TILING_Y)) && + (brw->gen >= 6 && mt->region->tiling == I915_TILING_Y)) && mt->region->pitch < 32768) { intel_miptree_map_blit(brw, mt, map, level, slice); } else if (mt->region->tiling != I915_TILING_NONE && diff --git a/src/mesa/drivers/dri/i965/intel_tex_subimage.c b/src/mesa/drivers/dri/i965/intel_tex_subimage.c index 5604a7d8e79..05e684cf57b 100644 --- a/src/mesa/drivers/dri/i965/intel_tex_subimage.c +++ b/src/mesa/drivers/dri/i965/intel_tex_subimage.c @@ -52,7 +52,6 @@ intel_blit_texsubimage(struct gl_context * ctx, const struct gl_pixelstore_attrib *packing) { struct brw_context *brw = brw_context(ctx); - struct intel_context *intel = intel_context(ctx); struct intel_texture_image *intelImage = intel_texture_image(texImage); /* Try to do a blit upload of the subimage if the texture is @@ -71,7 +70,7 @@ intel_blit_texsubimage(struct gl_context * ctx, /* On gen6, it's probably not worth swapping to the blit ring to do * this because of all the overhead involved. 
*/ - if (intel->gen >= 6) + if (brw->gen >= 6) return false; if (!drm_intel_bo_busy(intelImage->mt->region->bo)) diff --git a/src/mesa/drivers/dri/i965/test_eu_compact.c b/src/mesa/drivers/dri/i965/test_eu_compact.c index 6259969e379..e6e4ef818f1 100644 --- a/src/mesa/drivers/dri/i965/test_eu_compact.c +++ b/src/mesa/drivers/dri/i965/test_eu_compact.c @@ -52,7 +52,7 @@ test_compact_instruction(struct brw_compile *p, struct brw_instruction src) if (memcmp(&unchanged, &dst, sizeof(dst))) { fprintf(stderr, "Failed to compact, but dst changed\n"); fprintf(stderr, " Instruction: "); - brw_disasm(stderr, &src, intel->gen); + brw_disasm(stderr, &src, brw->gen); return false; } } @@ -297,10 +297,10 @@ main(int argc, char **argv) { struct brw_context *brw = calloc(1, sizeof(*brw)); struct intel_context *intel = &brw->intel; - intel->gen = 6; + brw->gen = 6; bool fail = false; - for (intel->gen = 6; intel->gen <= 7; intel->gen++) { + for (brw->gen = 6; brw->gen <= 7; brw->gen++) { fail |= run_tests(brw); } diff --git a/src/mesa/drivers/dri/i965/test_vec4_register_coalesce.cpp b/src/mesa/drivers/dri/i965/test_vec4_register_coalesce.cpp index 71f6b1aaa86..2603e72570f 100644 --- a/src/mesa/drivers/dri/i965/test_vec4_register_coalesce.cpp +++ b/src/mesa/drivers/dri/i965/test_vec4_register_coalesce.cpp @@ -107,7 +107,7 @@ void register_coalesce_test::SetUp() _mesa_init_vertex_program(ctx, &vp->program, GL_VERTEX_SHADER, 0); - intel->gen = 4; + brw->gen = 4; } static void |
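For readers skimming the mechanical hunks above, the following is a minimal, self-contained C sketch of the pattern the commit applies throughout: gen and gt are read straight from brw_context at each call site. The struct layouts and the helper names (urb_entry_size, push_constant_size_kB) are simplified stand-ins invented for illustration, not the real Mesa definitions; only the field accesses and the two generation checks are taken from the hunks shown above.

#include <assert.h>
#include <stdbool.h>

/* Round x up to a multiple of a (same behaviour as Mesa's ALIGN macro). */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* Simplified stand-ins for the real structs: after this change, gen and gt
 * live directly in brw_context, and intel_context keeps only the state that
 * is still shared.
 */
struct intel_context {
   int placeholder;              /* remaining shared state, elided here */
};

struct brw_context {
   struct intel_context intel;   /* base class, must be first field */
   int gen;                      /* hardware generation; >= 4 for this driver */
   int gt;                       /* GT variant: 1, 2, 3, or 0 if unknown */
   bool is_haswell;
};

/* Hypothetical helper mirroring the converted setup_attributes() hunk at the
 * top of this section: the generation check reads brw->gen directly, with no
 * local "struct intel_context *intel = &brw->intel" needed.
 */
static unsigned
urb_entry_size(const struct brw_context *brw, unsigned vue_entries)
{
   if (brw->gen == 6)
      return ALIGN(vue_entries, 8) / 8;
   else
      return ALIGN(vue_entries, 4) / 4;
}

/* Hypothetical helper mirroring the gen7_upload_urb() hunk: the gt field is
 * read the same way (Haswell GT3 gets 32kB of push constant space, else 16kB).
 */
static int
push_constant_size_kB(const struct brw_context *brw)
{
   return (brw->is_haswell && brw->gt == 3) ? 32 : 16;
}

int
main(void)
{
   struct brw_context brw = { .gen = 6, .gt = 2, .is_haswell = false };

   assert(urb_entry_size(&brw, 9) == 2);      /* ALIGN(9, 8) / 8 == 2 */
   brw.gen = 7;
   assert(urb_entry_size(&brw, 9) == 3);      /* ALIGN(9, 4) / 4 == 3 */
   assert(push_constant_size_kB(&brw) == 16);
   return 0;
}

The main() asserts exist only to make the sketch runnable; they check the same arithmetic the gen == 6 and gen != 6 branches perform in the first hunk of this section.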