diff options
author | Connor Abbott <[email protected]> | 2015-07-15 16:47:51 -0700 |
---|---|---|
committer | Connor Abbott <[email protected]> | 2015-07-15 17:18:48 -0700 |
commit | 9fa0989ff2266315d7dc8469dab601ebc2289fea (patch) | |
tree | ddde34d725636b7f5c736401c2a91ff539878a33 | |
parent | 5520221118cee12c7d8af378ca11978c776156ae (diff) |
nir: move to two-level binding model for UBOs
The GLSL layer above is still hacky, so we're really just moving the
hack into GLSL-to-NIR. I'd rather not go all the way and make GLSL
support the Vulkan binding model too, since presumably we'll be
switching to SPIR-V exclusively, and so working on proper GLSL support
will be a waste of time. For now, though, doing this keeps things working
as we add SPIR-V->NIR support.
-rw-r--r-- | src/glsl/nir/glsl_to_nir.cpp | 26 | ||||
-rw-r--r-- | src/glsl/nir/nir_intrinsics.h | 23 | ||||
-rw-r--r-- | src/mesa/drivers/dri/i965/brw_fs_nir.cpp | 9 |
3 files changed, 36 insertions, 22 deletions
diff --git a/src/glsl/nir/glsl_to_nir.cpp b/src/glsl/nir/glsl_to_nir.cpp index 0338af67567..54e56145c89 100644 --- a/src/glsl/nir/glsl_to_nir.cpp +++ b/src/glsl/nir/glsl_to_nir.cpp @@ -43,7 +43,7 @@ namespace { class nir_visitor : public ir_visitor { public: - nir_visitor(nir_shader *shader, gl_shader_stage stage); + nir_visitor(nir_shader *shader, struct gl_shader *sh, gl_shader_stage stage); ~nir_visitor(); virtual void visit(ir_variable *); @@ -83,6 +83,8 @@ private: bool supports_ints; + struct gl_shader *sh; + nir_shader *shader; gl_shader_stage stage; nir_function_impl *impl; @@ -133,7 +135,7 @@ glsl_to_nir(struct gl_shader *sh, const nir_shader_compiler_options *options) { nir_shader *shader = nir_shader_create(NULL, options); - nir_visitor v1(shader, sh->Stage); + nir_visitor v1(shader, sh, sh->Stage); nir_function_visitor v2(&v1); v2.run(sh->ir); visit_exec_list(sh->ir, &v1); @@ -141,10 +143,12 @@ glsl_to_nir(struct gl_shader *sh, const nir_shader_compiler_options *options) return shader; } -nir_visitor::nir_visitor(nir_shader *shader, gl_shader_stage stage) +nir_visitor::nir_visitor(nir_shader *shader, struct gl_shader *sh, + gl_shader_stage stage) { this->supports_ints = shader->options->native_integers; this->shader = shader; + this->sh = sh; this->stage = stage; this->is_global = true; this->var_table = _mesa_hash_table_create(NULL, _mesa_hash_pointer, @@ -987,11 +991,21 @@ nir_visitor::visit(ir_expression *ir) } else { op = nir_intrinsic_load_ubo_indirect; } + + ir_constant *const_block = ir->operands[0]->as_constant(); + assert(const_block && "can't figure out descriptor set index"); + unsigned index = const_block->value.u[0]; + unsigned set = sh->UniformBlocks[index].Set; + unsigned binding = sh->UniformBlocks[index].Binding; + nir_intrinsic_instr *load = nir_intrinsic_instr_create(this->shader, op); load->num_components = ir->type->vector_elements; - load->const_index[0] = const_index ? 
const_index->value.u[0] : 0; /* base offset */ - load->const_index[1] = 1; /* number of vec4's */ - load->src[0] = evaluate_rvalue(ir->operands[0]); + load->const_index[0] = set; + load->const_index[1] = const_index ? const_index->value.u[0] : 0; /* base offset */ + nir_load_const_instr *load_binding = nir_load_const_instr_create(shader, 1); + load_binding->value.u[0] = binding; + nir_instr_insert_after_cf_list(this->cf_node_list, &load_binding->instr); + load->src[0] = nir_src_for_ssa(&load_binding->def); if (!const_index) load->src[1] = evaluate_rvalue(ir->operands[1]); add_instr(&load->instr, ir->type->vector_elements); diff --git a/src/glsl/nir/nir_intrinsics.h b/src/glsl/nir/nir_intrinsics.h index bc6e6b8f498..64861300b55 100644 --- a/src/glsl/nir/nir_intrinsics.h +++ b/src/glsl/nir/nir_intrinsics.h @@ -139,11 +139,12 @@ SYSTEM_VALUE(sample_mask_in, 1) SYSTEM_VALUE(invocation_id, 1) /* - * The first and only index is the base address to load from. Indirect - * loads have an additional register input, which is added to the constant - * address to compute the final address to load from. For UBO's (and - * SSBO's), the first source is the (possibly constant) UBO buffer index - * and the indirect (if it exists) is the second source. + * The last index is the base address to load from. Indirect loads have an + * additional register input, which is added to the constant address to + * compute the final address to load from. For UBO's (and SSBO's), the first + * source is the (possibly constant) UBO buffer index and the indirect (if it + * exists) is the second source, and the first index is the descriptor set + * index. * * For vector backends, the address is in terms of one vec4, and so each array * element is +4 scalar components from the previous array element. For scalar @@ -151,14 +152,14 @@ SYSTEM_VALUE(invocation_id, 1) * elements begin immediately after the previous array element. 
*/ -#define LOAD(name, extra_srcs, flags) \ - INTRINSIC(load_##name, extra_srcs, ARR(1), true, 0, 0, 1, flags) \ +#define LOAD(name, extra_srcs, extra_indices, flags) \ + INTRINSIC(load_##name, extra_srcs, ARR(1), true, 0, 0, 1 + extra_indices, flags) \ INTRINSIC(load_##name##_indirect, extra_srcs + 1, ARR(1, 1), \ - true, 0, 0, 1, flags) + true, 0, 0, 1 + extra_indices, flags) -LOAD(uniform, 0, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER) -LOAD(ubo, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER) -LOAD(input, 0, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER) +LOAD(uniform, 0, 0, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER) +LOAD(ubo, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER) +LOAD(input, 0, 0, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER) /* LOAD(ssbo, 1, 0) */ /* diff --git a/src/mesa/drivers/dri/i965/brw_fs_nir.cpp b/src/mesa/drivers/dri/i965/brw_fs_nir.cpp index 46c30fcae26..4d98b048433 100644 --- a/src/mesa/drivers/dri/i965/brw_fs_nir.cpp +++ b/src/mesa/drivers/dri/i965/brw_fs_nir.cpp @@ -1364,13 +1364,12 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr has_indirect = true; /* fallthrough */ case nir_intrinsic_load_ubo: { + uint32_t set = instr->const_index[0]; nir_const_value *const_index = nir_src_as_const_value(instr->src[0]); fs_reg surf_index; if (const_index) { - uint32_t index = const_index->u[0]; - uint32_t set = shader->base.UniformBlocks[index].Set; - uint32_t binding = shader->base.UniformBlocks[index].Binding; + uint32_t binding = const_index->u[0]; /* FIXME: We should probably assert here, but dota2 seems to hit * it and we'd like to keep going. 
@@ -1405,7 +1404,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr BRW_REGISTER_TYPE_D), fs_reg(2)); - unsigned vec4_offset = instr->const_index[0] / 4; + unsigned vec4_offset = instr->const_index[1] / 4; for (int i = 0; i < instr->num_components; i++) VARYING_PULL_CONSTANT_LOAD(bld, offset(dest, i), surf_index, base_offset, vec4_offset + i); @@ -1413,7 +1412,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr fs_reg packed_consts = vgrf(glsl_type::float_type); packed_consts.type = dest.type; - fs_reg const_offset_reg((unsigned) instr->const_index[0] & ~15); + fs_reg const_offset_reg((unsigned) instr->const_index[1] & ~15); bld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts, surf_index, const_offset_reg); |