author    Timothy Arceri <[email protected]>  2017-11-02 12:30:33 +1100
committer Timothy Arceri <[email protected]>  2017-11-03 14:54:45 +1100
commit    bd59a0bb8b860aacaba9a137cecb8021408304cd (patch)
tree      0af755469a0cf9f90a5c9e748cf6f15ff9436e2a
parent    439a2febc4a658fdf38275268b6b5f566f3a3253 (diff)
ac: use the ac i32 llvm type
Reviewed-by: Dave Airlie <[email protected]>
Reviewed-by: Marek Olšák <[email protected]>
Acked-by: Nicolai Hähnle <[email protected]>
-rw-r--r--  src/amd/common/ac_nir_to_llvm.c  360
1 file changed, 179 insertions, 181 deletions
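The change is a mechanical cleanup: struct nir_to_llvm_context embeds a struct ac_llvm_context as its `ac` member, and that shared context already defines the common LLVM integer types, so the duplicate `i32` field can be dropped and every use switched from ctx->i32 to ctx->ac.i32. A minimal illustrative sketch of the pattern is below; the *_sketch struct names and the make_dword_stride helper are hypothetical stand-ins for this note, not part of the patch, and only LLVM-C calls the patch itself already uses appear in it.

#include <stdbool.h>
#include <llvm-c/Core.h>

struct ac_llvm_context_sketch {        /* stand-in for struct ac_llvm_context */
	LLVMContextRef context;
	LLVMTypeRef i32;                   /* set up once, e.g. LLVMIntTypeInContext(context, 32) */
};

struct nir_to_llvm_context_sketch {    /* stand-in for struct nir_to_llvm_context */
	struct ac_llvm_context_sketch ac;  /* shared helper context; already provides i32 */
	LLVMBuilderRef builder;
	/* LLVMTypeRef i32;  <- the duplicate member this patch removes */
};

static LLVMValueRef make_dword_stride(struct nir_to_llvm_context_sketch *ctx)
{
	/* Before the patch: LLVMConstInt(ctx->i32, 4, false)
	 * After the patch:  the same constant built from the shared type. */
	return LLVMConstInt(ctx->ac.i32, 4, false);
}

The vector types still owned by nir_to_llvm_context (v2i32, v3i32, v4i32, v8i32) are likewise built from ctx->ac.i32 in setup_types(), which is why that function appears in the diff below.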
diff --git a/src/amd/common/ac_nir_to_llvm.c b/src/amd/common/ac_nir_to_llvm.c
index 2ec30517e03..d7920429254 100644
--- a/src/amd/common/ac_nir_to_llvm.c
+++ b/src/amd/common/ac_nir_to_llvm.c
@@ -137,7 +137,6 @@ struct nir_to_llvm_context {
LLVMTypeRef i1;
LLVMTypeRef i8;
LLVMTypeRef i16;
- LLVMTypeRef i32;
LLVMTypeRef i64;
LLVMTypeRef v2i32;
LLVMTypeRef v3i32;
@@ -468,7 +467,7 @@ get_tcs_out_patch0_offset(struct nir_to_llvm_context *ctx)
{
return LLVMBuildMul(ctx->builder,
unpack_param(&ctx->ac, ctx->tcs_out_offsets, 0, 16),
- LLVMConstInt(ctx->i32, 4, false), "");
+ LLVMConstInt(ctx->ac.i32, 4, false), "");
}
static LLVMValueRef
@@ -476,7 +475,7 @@ get_tcs_out_patch0_patch_data_offset(struct nir_to_llvm_context *ctx)
{
return LLVMBuildMul(ctx->builder,
unpack_param(&ctx->ac, ctx->tcs_out_offsets, 16, 16),
- LLVMConstInt(ctx->i32, 4, false), "");
+ LLVMConstInt(ctx->ac.i32, 4, false), "");
}
static LLVMValueRef
@@ -670,7 +669,7 @@ radv_define_common_user_sgprs_phase2(struct nir_to_llvm_context *ctx,
for (unsigned i = 0; i < num_sets; ++i) {
if (ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
set_userdata_location_indirect(&ctx->shader_info->user_sgprs_locs.descriptor_sets[i], desc_sgpr_idx, 2, i * 8);
- ctx->descriptor_sets[i] = ac_build_load_to_sgpr(&ctx->ac, desc_sets, LLVMConstInt(ctx->i32, i, false));
+ ctx->descriptor_sets[i] = ac_build_load_to_sgpr(&ctx->ac, desc_sets, LLVMConstInt(ctx->ac.i32, i, false));
} else
ctx->descriptor_sets[i] = NULL;
@@ -693,10 +692,10 @@ radv_define_vs_user_sgprs_phase1(struct nir_to_llvm_context *ctx,
if (!ctx->is_gs_copy_shader && (stage == MESA_SHADER_VERTEX || (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
if (ctx->shader_info->info.vs.has_vertex_buffers)
add_user_sgpr_argument(args, const_array(ctx->v4i32, 16), &ctx->vertex_buffers); /* vertex buffers */
- add_user_sgpr_argument(args, ctx->i32, &ctx->abi.base_vertex); // base vertex
- add_user_sgpr_argument(args, ctx->i32, &ctx->abi.start_instance);// start instance
+ add_user_sgpr_argument(args, ctx->ac.i32, &ctx->abi.base_vertex); // base vertex
+ add_user_sgpr_argument(args, ctx->ac.i32, &ctx->abi.start_instance);// start instance
if (ctx->shader_info->info.vs.needs_draw_id)
- add_user_sgpr_argument(args, ctx->i32, &ctx->abi.draw_id); // draw id
+ add_user_sgpr_argument(args, ctx->ac.i32, &ctx->abi.draw_id); // draw id
}
}
@@ -740,149 +739,149 @@ static void create_function(struct nir_to_llvm_context *ctx,
case MESA_SHADER_COMPUTE:
radv_define_common_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, &args, &desc_sets);
if (ctx->shader_info->info.cs.grid_components_used)
- add_user_sgpr_argument(&args, LLVMVectorType(ctx->i32, ctx->shader_info->info.cs.grid_components_used), &ctx->num_work_groups); /* grid size */
- add_sgpr_argument(&args, LLVMVectorType(ctx->i32, 3), &ctx->workgroup_ids);
- add_sgpr_argument(&args, ctx->i32, &ctx->tg_size);
- add_vgpr_argument(&args, LLVMVectorType(ctx->i32, 3), &ctx->local_invocation_ids);
+ add_user_sgpr_argument(&args, LLVMVectorType(ctx->ac.i32, ctx->shader_info->info.cs.grid_components_used), &ctx->num_work_groups); /* grid size */
+ add_sgpr_argument(&args, LLVMVectorType(ctx->ac.i32, 3), &ctx->workgroup_ids);
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->tg_size);
+ add_vgpr_argument(&args, LLVMVectorType(ctx->ac.i32, 3), &ctx->local_invocation_ids);
break;
case MESA_SHADER_VERTEX:
radv_define_common_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, &args, &desc_sets);
radv_define_vs_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &args);
if (ctx->shader_info->info.needs_multiview_view_index || (!ctx->options->key.vs.as_es && !ctx->options->key.vs.as_ls && ctx->options->key.has_multiview_view_index))
- add_user_sgpr_argument(&args, ctx->i32, &ctx->view_index);
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->view_index);
if (ctx->options->key.vs.as_es)
- add_sgpr_argument(&args, ctx->i32, &ctx->es2gs_offset); // es2gs offset
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->es2gs_offset); // es2gs offset
else if (ctx->options->key.vs.as_ls)
- add_user_sgpr_argument(&args, ctx->i32, &ctx->ls_out_layout); // ls out layout
- add_vgpr_argument(&args, ctx->i32, &ctx->abi.vertex_id); // vertex id
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->ls_out_layout); // ls out layout
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.vertex_id); // vertex id
if (!ctx->is_gs_copy_shader) {
- add_vgpr_argument(&args, ctx->i32, &ctx->rel_auto_id); // rel auto id
- add_vgpr_argument(&args, ctx->i32, &ctx->vs_prim_id); // vs prim id
- add_vgpr_argument(&args, ctx->i32, &ctx->abi.instance_id); // instance id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->rel_auto_id); // rel auto id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->vs_prim_id); // vs prim id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.instance_id); // instance id
}
break;
case MESA_SHADER_TESS_CTRL:
if (has_previous_stage) {
// First 6 system regs
- add_sgpr_argument(&args, ctx->i32, &ctx->oc_lds); // param oc lds
- add_sgpr_argument(&args, ctx->i32, &ctx->merged_wave_info); // merged wave info
- add_sgpr_argument(&args, ctx->i32, &ctx->tess_factor_offset); // tess factor offset
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->oc_lds); // param oc lds
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->merged_wave_info); // merged wave info
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->tess_factor_offset); // tess factor offset
- add_sgpr_argument(&args, ctx->i32, NULL); // scratch offset
- add_sgpr_argument(&args, ctx->i32, NULL); // unknown
- add_sgpr_argument(&args, ctx->i32, NULL); // unknown
+ add_sgpr_argument(&args, ctx->ac.i32, NULL); // scratch offset
+ add_sgpr_argument(&args, ctx->ac.i32, NULL); // unknown
+ add_sgpr_argument(&args, ctx->ac.i32, NULL); // unknown
radv_define_common_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, &args, &desc_sets);
radv_define_vs_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &args);
- add_user_sgpr_argument(&args, ctx->i32, &ctx->ls_out_layout); // ls out layout
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->ls_out_layout); // ls out layout
- add_user_sgpr_argument(&args, ctx->i32, &ctx->tcs_offchip_layout); // tcs offchip layout
- add_user_sgpr_argument(&args, ctx->i32, &ctx->tcs_out_offsets); // tcs out offsets
- add_user_sgpr_argument(&args, ctx->i32, &ctx->tcs_out_layout); // tcs out layout
- add_user_sgpr_argument(&args, ctx->i32, &ctx->tcs_in_layout); // tcs in layout
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_offchip_layout); // tcs offchip layout
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_out_offsets); // tcs out offsets
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_out_layout); // tcs out layout
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_in_layout); // tcs in layout
if (ctx->shader_info->info.needs_multiview_view_index)
- add_user_sgpr_argument(&args, ctx->i32, &ctx->view_index);
-
- add_vgpr_argument(&args, ctx->i32, &ctx->tcs_patch_id); // patch id
- add_vgpr_argument(&args, ctx->i32, &ctx->tcs_rel_ids); // rel ids;
- add_vgpr_argument(&args, ctx->i32, &ctx->abi.vertex_id); // vertex id
- add_vgpr_argument(&args, ctx->i32, &ctx->rel_auto_id); // rel auto id
- add_vgpr_argument(&args, ctx->i32, &ctx->vs_prim_id); // vs prim id
- add_vgpr_argument(&args, ctx->i32, &ctx->abi.instance_id); // instance id
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->view_index);
+
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->tcs_patch_id); // patch id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->tcs_rel_ids); // rel ids;
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.vertex_id); // vertex id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->rel_auto_id); // rel auto id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->vs_prim_id); // vs prim id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.instance_id); // instance id
} else {
radv_define_common_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, &args, &desc_sets);
- add_user_sgpr_argument(&args, ctx->i32, &ctx->tcs_offchip_layout); // tcs offchip layout
- add_user_sgpr_argument(&args, ctx->i32, &ctx->tcs_out_offsets); // tcs out offsets
- add_user_sgpr_argument(&args, ctx->i32, &ctx->tcs_out_layout); // tcs out layout
- add_user_sgpr_argument(&args, ctx->i32, &ctx->tcs_in_layout); // tcs in layout
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_offchip_layout); // tcs offchip layout
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_out_offsets); // tcs out offsets
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_out_layout); // tcs out layout
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_in_layout); // tcs in layout
if (ctx->shader_info->info.needs_multiview_view_index)
- add_user_sgpr_argument(&args, ctx->i32, &ctx->view_index);
- add_sgpr_argument(&args, ctx->i32, &ctx->oc_lds); // param oc lds
- add_sgpr_argument(&args, ctx->i32, &ctx->tess_factor_offset); // tess factor offset
- add_vgpr_argument(&args, ctx->i32, &ctx->tcs_patch_id); // patch id
- add_vgpr_argument(&args, ctx->i32, &ctx->tcs_rel_ids); // rel ids;
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->view_index);
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->oc_lds); // param oc lds
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->tess_factor_offset); // tess factor offset
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->tcs_patch_id); // patch id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->tcs_rel_ids); // rel ids;
}
break;
case MESA_SHADER_TESS_EVAL:
radv_define_common_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, &args, &desc_sets);
- add_user_sgpr_argument(&args, ctx->i32, &ctx->tcs_offchip_layout); // tcs offchip layout
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_offchip_layout); // tcs offchip layout
if (ctx->shader_info->info.needs_multiview_view_index || (!ctx->options->key.tes.as_es && ctx->options->key.has_multiview_view_index))
- add_user_sgpr_argument(&args, ctx->i32, &ctx->view_index);
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->view_index);
if (ctx->options->key.tes.as_es) {
- add_sgpr_argument(&args, ctx->i32, &ctx->oc_lds); // OC LDS
- add_sgpr_argument(&args, ctx->i32, NULL); //
- add_sgpr_argument(&args, ctx->i32, &ctx->es2gs_offset); // es2gs offset
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->oc_lds); // OC LDS
+ add_sgpr_argument(&args, ctx->ac.i32, NULL); //
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->es2gs_offset); // es2gs offset
} else {
- add_sgpr_argument(&args, ctx->i32, NULL); //
- add_sgpr_argument(&args, ctx->i32, &ctx->oc_lds); // OC LDS
+ add_sgpr_argument(&args, ctx->ac.i32, NULL); //
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->oc_lds); // OC LDS
}
add_vgpr_argument(&args, ctx->f32, &ctx->tes_u); // tes_u
add_vgpr_argument(&args, ctx->f32, &ctx->tes_v); // tes_v
- add_vgpr_argument(&args, ctx->i32, &ctx->tes_rel_patch_id); // tes rel patch id
- add_vgpr_argument(&args, ctx->i32, &ctx->tes_patch_id); // tes patch id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->tes_rel_patch_id); // tes rel patch id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->tes_patch_id); // tes patch id
break;
case MESA_SHADER_GEOMETRY:
if (has_previous_stage) {
// First 6 system regs
- add_sgpr_argument(&args, ctx->i32, &ctx->gs2vs_offset); // tess factor offset
- add_sgpr_argument(&args, ctx->i32, &ctx->merged_wave_info); // merged wave info
- add_sgpr_argument(&args, ctx->i32, &ctx->oc_lds); // param oc lds
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->gs2vs_offset); // tess factor offset
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->merged_wave_info); // merged wave info
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->oc_lds); // param oc lds
- add_sgpr_argument(&args, ctx->i32, NULL); // scratch offset
- add_sgpr_argument(&args, ctx->i32, NULL); // unknown
- add_sgpr_argument(&args, ctx->i32, NULL); // unknown
+ add_sgpr_argument(&args, ctx->ac.i32, NULL); // scratch offset
+ add_sgpr_argument(&args, ctx->ac.i32, NULL); // unknown
+ add_sgpr_argument(&args, ctx->ac.i32, NULL); // unknown
radv_define_common_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, &args, &desc_sets);
if (previous_stage == MESA_SHADER_TESS_EVAL)
- add_user_sgpr_argument(&args, ctx->i32, &ctx->tcs_offchip_layout); // tcs offchip layout
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_offchip_layout); // tcs offchip layout
else
radv_define_vs_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &args);
- add_user_sgpr_argument(&args, ctx->i32, &ctx->gsvs_ring_stride); // gsvs stride
- add_user_sgpr_argument(&args, ctx->i32, &ctx->gsvs_num_entries); // gsvs num entires
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->gsvs_ring_stride); // gsvs stride
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->gsvs_num_entries); // gsvs num entires
if (ctx->shader_info->info.needs_multiview_view_index)
- add_user_sgpr_argument(&args, ctx->i32, &ctx->view_index);
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->view_index);
- add_vgpr_argument(&args, ctx->i32, &ctx->gs_vtx_offset[0]); // vtx01
- add_vgpr_argument(&args, ctx->i32, &ctx->gs_vtx_offset[2]); // vtx23
- add_vgpr_argument(&args, ctx->i32, &ctx->gs_prim_id); // prim id
- add_vgpr_argument(&args, ctx->i32, &ctx->gs_invocation_id);
- add_vgpr_argument(&args, ctx->i32, &ctx->gs_vtx_offset[4]);
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[0]); // vtx01
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[2]); // vtx23
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_prim_id); // prim id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_invocation_id);
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[4]);
if (previous_stage == MESA_SHADER_VERTEX) {
- add_vgpr_argument(&args, ctx->i32, &ctx->abi.vertex_id); // vertex id
- add_vgpr_argument(&args, ctx->i32, &ctx->rel_auto_id); // rel auto id
- add_vgpr_argument(&args, ctx->i32, &ctx->vs_prim_id); // vs prim id
- add_vgpr_argument(&args, ctx->i32, &ctx->abi.instance_id); // instance id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.vertex_id); // vertex id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->rel_auto_id); // rel auto id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->vs_prim_id); // vs prim id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.instance_id); // instance id
} else {
add_vgpr_argument(&args, ctx->f32, &ctx->tes_u); // tes_u
add_vgpr_argument(&args, ctx->f32, &ctx->tes_v); // tes_v
- add_vgpr_argument(&args, ctx->i32, &ctx->tes_rel_patch_id); // tes rel patch id
- add_vgpr_argument(&args, ctx->i32, &ctx->tes_patch_id); // tes patch id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->tes_rel_patch_id); // tes rel patch id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->tes_patch_id); // tes patch id
}
} else {
radv_define_common_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, &args, &desc_sets);
radv_define_vs_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &args);
- add_user_sgpr_argument(&args, ctx->i32, &ctx->gsvs_ring_stride); // gsvs stride
- add_user_sgpr_argument(&args, ctx->i32, &ctx->gsvs_num_entries); // gsvs num entires
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->gsvs_ring_stride); // gsvs stride
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->gsvs_num_entries); // gsvs num entires
if (ctx->shader_info->info.needs_multiview_view_index)
- add_user_sgpr_argument(&args, ctx->i32, &ctx->view_index);
- add_sgpr_argument(&args, ctx->i32, &ctx->gs2vs_offset); // gs2vs offset
- add_sgpr_argument(&args, ctx->i32, &ctx->gs_wave_id); // wave id
- add_vgpr_argument(&args, ctx->i32, &ctx->gs_vtx_offset[0]); // vtx0
- add_vgpr_argument(&args, ctx->i32, &ctx->gs_vtx_offset[1]); // vtx1
- add_vgpr_argument(&args, ctx->i32, &ctx->gs_prim_id); // prim id
- add_vgpr_argument(&args, ctx->i32, &ctx->gs_vtx_offset[2]);
- add_vgpr_argument(&args, ctx->i32, &ctx->gs_vtx_offset[3]);
- add_vgpr_argument(&args, ctx->i32, &ctx->gs_vtx_offset[4]);
- add_vgpr_argument(&args, ctx->i32, &ctx->gs_vtx_offset[5]);
- add_vgpr_argument(&args, ctx->i32, &ctx->gs_invocation_id);
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->view_index);
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->gs2vs_offset); // gs2vs offset
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->gs_wave_id); // wave id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[0]); // vtx0
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[1]); // vtx1
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_prim_id); // prim id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[2]);
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[3]);
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[4]);
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[5]);
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_invocation_id);
}
break;
case MESA_SHADER_FRAGMENT:
radv_define_common_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, &args, &desc_sets);
if (ctx->shader_info->info.ps.needs_sample_positions)
- add_user_sgpr_argument(&args, ctx->i32, &ctx->sample_pos_offset); /* sample position offset */
- add_sgpr_argument(&args, ctx->i32, &ctx->prim_mask); /* prim mask */
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->sample_pos_offset); /* sample position offset */
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->prim_mask); /* prim mask */
add_vgpr_argument(&args, ctx->v2i32, &ctx->persp_sample); /* persp sample */
add_vgpr_argument(&args, ctx->v2i32, &ctx->persp_center); /* persp center */
add_vgpr_argument(&args, ctx->v2i32, &ctx->persp_centroid); /* persp centroid */
@@ -895,10 +894,10 @@ static void create_function(struct nir_to_llvm_context *ctx,
add_vgpr_argument(&args, ctx->f32, &ctx->abi.frag_pos[1]); /* pos y float */
add_vgpr_argument(&args, ctx->f32, &ctx->abi.frag_pos[2]); /* pos z float */
add_vgpr_argument(&args, ctx->f32, &ctx->abi.frag_pos[3]); /* pos w float */
- add_vgpr_argument(&args, ctx->i32, &ctx->abi.front_face); /* front face */
- add_vgpr_argument(&args, ctx->i32, &ctx->abi.ancillary); /* ancillary */
- add_vgpr_argument(&args, ctx->i32, &ctx->abi.sample_coverage); /* sample coverage */
- add_vgpr_argument(&args, ctx->i32, NULL); /* fixed pt */
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.front_face); /* front face */
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.ancillary); /* ancillary */
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.sample_coverage); /* sample coverage */
+ add_vgpr_argument(&args, ctx->ac.i32, NULL); /* fixed pt */
break;
default:
unreachable("Shader stage not implemented");
@@ -1002,12 +1001,11 @@ static void setup_types(struct nir_to_llvm_context *ctx)
ctx->i1 = LLVMIntTypeInContext(ctx->context, 1);
ctx->i8 = LLVMIntTypeInContext(ctx->context, 8);
ctx->i16 = LLVMIntTypeInContext(ctx->context, 16);
- ctx->i32 = LLVMIntTypeInContext(ctx->context, 32);
ctx->i64 = LLVMIntTypeInContext(ctx->context, 64);
- ctx->v2i32 = LLVMVectorType(ctx->i32, 2);
- ctx->v3i32 = LLVMVectorType(ctx->i32, 3);
- ctx->v4i32 = LLVMVectorType(ctx->i32, 4);
- ctx->v8i32 = LLVMVectorType(ctx->i32, 8);
+ ctx->v2i32 = LLVMVectorType(ctx->ac.i32, 2);
+ ctx->v3i32 = LLVMVectorType(ctx->ac.i32, 3);
+ ctx->v4i32 = LLVMVectorType(ctx->ac.i32, 4);
+ ctx->v8i32 = LLVMVectorType(ctx->ac.i32, 8);
ctx->f32 = LLVMFloatTypeInContext(ctx->context);
ctx->f16 = LLVMHalfTypeInContext(ctx->context);
ctx->f64 = LLVMDoubleTypeInContext(ctx->context);
@@ -1344,7 +1342,7 @@ static LLVMValueRef emit_f2f16(struct nir_to_llvm_context *ctx,
LLVMValueRef args[2];
/* Check if the result is a denormal - and flush to 0 if so. */
args[0] = result;
- args[1] = LLVMConstInt(ctx->i32, N_SUBNORMAL | P_SUBNORMAL, false);
+ args[1] = LLVMConstInt(ctx->ac.i32, N_SUBNORMAL | P_SUBNORMAL, false);
cond = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.class.f16", ctx->i1, args, 2, AC_FUNC_ATTR_READNONE);
}
@@ -1362,7 +1360,7 @@ static LLVMValueRef emit_f2f16(struct nir_to_llvm_context *ctx,
temp = emit_intrin_1f_param(&ctx->ac, "llvm.fabs",
ctx->f32, result);
cond = LLVMBuildFCmp(ctx->builder, LLVMRealUGT,
- LLVMBuildBitCast(ctx->builder, LLVMConstInt(ctx->i32, 0x38800000, false), ctx->f32, ""),
+ LLVMBuildBitCast(ctx->builder, LLVMConstInt(ctx->ac.i32, 0x38800000, false), ctx->f32, ""),
temp, "");
cond2 = LLVMBuildFCmp(ctx->builder, LLVMRealUNE,
temp, ctx->ac.f32_0, "");
@@ -2243,11 +2241,11 @@ static LLVMValueRef visit_vulkan_resource_index(struct nir_to_llvm_context *ctx,
layout->binding[binding].dynamic_offset_offset;
desc_ptr = ctx->push_constants;
base_offset = pipeline_layout->push_constant_size + 16 * idx;
- stride = LLVMConstInt(ctx->i32, 16, false);
+ stride = LLVMConstInt(ctx->ac.i32, 16, false);
} else
- stride = LLVMConstInt(ctx->i32, layout->binding[binding].size, false);
+ stride = LLVMConstInt(ctx->ac.i32, layout->binding[binding].size, false);
- offset = LLVMConstInt(ctx->i32, base_offset, false);
+ offset = LLVMConstInt(ctx->ac.i32, base_offset, false);
index = LLVMBuildMul(ctx->builder, index, stride, "");
offset = LLVMBuildAdd(ctx->builder, offset, index, "");
@@ -2263,7 +2261,7 @@ static LLVMValueRef visit_load_push_constant(struct nir_to_llvm_context *ctx,
{
LLVMValueRef ptr, addr;
- addr = LLVMConstInt(ctx->i32, nir_intrinsic_base(instr), 0);
+ addr = LLVMConstInt(ctx->ac.i32, nir_intrinsic_base(instr), 0);
addr = LLVMBuildAdd(ctx->builder, addr, get_src(ctx->nir, instr->src[0]), "");
ptr = ac_build_gep0(&ctx->ac, ctx->push_constants, addr);
@@ -2627,7 +2625,7 @@ static LLVMValueRef get_tcs_tes_buffer_address(struct nir_to_llvm_context *ctx,
total_vertices = LLVMBuildMul(ctx->builder, vertices_per_patch,
num_patches, "");
- constant16 = LLVMConstInt(ctx->i32, 16, false);
+ constant16 = LLVMConstInt(ctx->ac.i32, 16, false);
if (vertex_index) {
base_addr = LLVMBuildMul(ctx->builder, rel_patch_id,
vertices_per_patch, "");
@@ -2667,12 +2665,12 @@ static LLVMValueRef get_tcs_tes_buffer_address_params(struct nir_to_llvm_context
LLVMValueRef param_index;
if (indir_index)
- param_index = LLVMBuildAdd(ctx->builder, LLVMConstInt(ctx->i32, param, false),
+ param_index = LLVMBuildAdd(ctx->builder, LLVMConstInt(ctx->ac.i32, param, false),
indir_index, "");
else {
if (const_index && !is_compact)
param += const_index;
- param_index = LLVMConstInt(ctx->i32, param, false);
+ param_index = LLVMConstInt(ctx->ac.i32, param, false);
}
return get_tcs_tes_buffer_address(ctx, vertex_index, param_index);
}
@@ -2710,17 +2708,17 @@ get_dw_address(struct nir_to_llvm_context *ctx,
if (indir_index)
dw_addr = LLVMBuildAdd(ctx->builder, dw_addr,
LLVMBuildMul(ctx->builder, indir_index,
- LLVMConstInt(ctx->i32, 4, false), ""), "");
+ LLVMConstInt(ctx->ac.i32, 4, false), ""), "");
else if (const_index && !compact_const_index)
dw_addr = LLVMBuildAdd(ctx->builder, dw_addr,
- LLVMConstInt(ctx->i32, const_index, false), "");
+ LLVMConstInt(ctx->ac.i32, const_index, false), "");
dw_addr = LLVMBuildAdd(ctx->builder, dw_addr,
- LLVMConstInt(ctx->i32, param * 4, false), "");
+ LLVMConstInt(ctx->ac.i32, param * 4, false), "");
if (const_index && compact_const_index)
dw_addr = LLVMBuildAdd(ctx->builder, dw_addr,
- LLVMConstInt(ctx->i32, const_index, false), "");
+ LLVMConstInt(ctx->ac.i32, const_index, false), "");
return dw_addr;
}
@@ -2916,7 +2914,7 @@ load_tes_input(struct nir_to_llvm_context *ctx,
buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index,
is_compact, vertex_index, indir_index);
- LLVMValueRef comp_offset = LLVMConstInt(ctx->i32, comp * 4, false);
+ LLVMValueRef comp_offset = LLVMConstInt(ctx->ac.i32, comp * 4, false);
buf_addr = LLVMBuildAdd(ctx->builder, buf_addr, comp_offset, "");
result = ac_build_buffer_load(&ctx->ac, ctx->hs_ring_tess_offchip, instr->num_components, NULL,
@@ -2942,7 +2940,7 @@ load_gs_input(struct nir_to_llvm_context *ctx,
vtx_offset_param = vertex_index;
assert(vtx_offset_param < 6);
vtx_offset = LLVMBuildMul(ctx->builder, ctx->gs_vtx_offset[vtx_offset_param],
- LLVMConstInt(ctx->i32, 4, false), "");
+ LLVMConstInt(ctx->ac.i32, 4, false), "");
param = shader_io_get_unique_index(instr->variables[0]->var->data.location);
@@ -2956,7 +2954,7 @@ load_gs_input(struct nir_to_llvm_context *ctx,
} else {
args[0] = ctx->esgs_ring;
args[1] = vtx_offset;
- args[2] = LLVMConstInt(ctx->i32, (param * 4 + i + const_index) * 256, false);
+ args[2] = LLVMConstInt(ctx->ac.i32, (param * 4 + i + const_index) * 256, false);
args[3] = ctx->ac.i32_0;
args[4] = ctx->ac.i32_1; /* OFFEN */
args[5] = ctx->ac.i32_0; /* IDXEN */
@@ -2965,7 +2963,7 @@ load_gs_input(struct nir_to_llvm_context *ctx,
args[8] = ctx->ac.i32_0; /* TFE */
value[i] = ac_build_intrinsic(&ctx->ac, "llvm.SI.buffer.load.dword.i32.i32",
- ctx->i32, args, 9,
+ ctx->ac.i32, args, 9,
AC_FUNC_ATTR_READONLY |
AC_FUNC_ATTR_LEGACY);
}
@@ -3692,7 +3690,7 @@ static void emit_waitcnt(struct nir_to_llvm_context *ctx,
unsigned simm16)
{
LLVMValueRef args[1] = {
- LLVMConstInt(ctx->i32, simm16, false),
+ LLVMConstInt(ctx->ac.i32, simm16, false),
};
ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.s.waitcnt",
ctx->voidt, args, 1, 0);
@@ -3730,7 +3728,7 @@ visit_load_local_invocation_index(struct nir_to_llvm_context *ctx)
LLVMValueRef result;
LLVMValueRef thread_id = ac_get_thread_id(&ctx->ac);
result = LLVMBuildAnd(ctx->builder, ctx->tg_size,
- LLVMConstInt(ctx->i32, 0xfc0, false), "");
+ LLVMConstInt(ctx->ac.i32, 0xfc0, false), "");
return LLVMBuildAdd(ctx->builder, result, thread_id, "");
}
@@ -3826,7 +3824,7 @@ static LLVMValueRef load_sample_position(struct nir_to_llvm_context *ctx,
LLVMValueRef sample_id)
{
LLVMValueRef result;
- LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_PS_SAMPLE_POSITIONS, false));
+ LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_PS_SAMPLE_POSITIONS, false));
ptr = LLVMBuildBitCast(ctx->builder, ptr,
const_array(ctx->v2f32, 64), "");
@@ -3886,7 +3884,7 @@ static LLVMValueRef visit_interp(struct nir_to_llvm_context *ctx,
src_c1 = LLVMBuildFSub(ctx->builder, src_c1, halfval, "");
}
interp_param = lookup_interp_param(ctx, instr->variables[0]->var->data.interpolation, location);
- attr_number = LLVMConstInt(ctx->i32, input_index, false);
+ attr_number = LLVMConstInt(ctx->ac.i32, input_index, false);
if (location == INTERP_CENTER) {
LLVMValueRef ij_out[2];
@@ -3901,8 +3899,8 @@ static LLVMValueRef visit_interp(struct nir_to_llvm_context *ctx,
* interp_param.J = ddy * offset/sample.y + temp1;
*/
for (unsigned i = 0; i < 2; i++) {
- LLVMValueRef ix_ll = LLVMConstInt(ctx->i32, i, false);
- LLVMValueRef iy_ll = LLVMConstInt(ctx->i32, i + 2, false);
+ LLVMValueRef ix_ll = LLVMConstInt(ctx->ac.i32, i, false);
+ LLVMValueRef iy_ll = LLVMConstInt(ctx->ac.i32, i + 2, false);
LLVMValueRef ddx_el = LLVMBuildExtractElement(ctx->builder,
ddxy_out, ix_ll, "");
LLVMValueRef ddy_el = LLVMBuildExtractElement(ctx->builder,
@@ -3921,14 +3919,14 @@ static LLVMValueRef visit_interp(struct nir_to_llvm_context *ctx,
temp2 = LLVMBuildFAdd(ctx->builder, temp2, temp1, "");
ij_out[i] = LLVMBuildBitCast(ctx->builder,
- temp2, ctx->i32, "");
+ temp2, ctx->ac.i32, "");
}
interp_param = ac_build_gather_values(&ctx->ac, ij_out, 2);
}
for (chan = 0; chan < 4; chan++) {
- LLVMValueRef llvm_chan = LLVMConstInt(ctx->i32, chan, false);
+ LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
if (interp_param) {
interp_param = LLVMBuildBitCast(ctx->builder,
@@ -3943,7 +3941,7 @@ static LLVMValueRef visit_interp(struct nir_to_llvm_context *ctx,
ctx->prim_mask, i, j);
} else {
result[chan] = ac_build_fs_interp_mov(&ctx->ac,
- LLVMConstInt(ctx->i32, 2, false),
+ LLVMConstInt(ctx->ac.i32, 2, false),
llvm_chan, attr_number,
ctx->prim_mask);
}
@@ -3972,7 +3970,7 @@ visit_emit_vertex(struct nir_to_llvm_context *ctx,
* effects other than emitting vertices.
*/
can_emit = LLVMBuildICmp(ctx->builder, LLVMIntULT, gs_next_vertex,
- LLVMConstInt(ctx->i32, ctx->gs_max_out_vertices, false), "");
+ LLVMConstInt(ctx->ac.i32, ctx->gs_max_out_vertices, false), "");
ac_build_kill_if_false(&ctx->ac, can_emit);
/* loop num outputs */
@@ -3995,11 +3993,11 @@ visit_emit_vertex(struct nir_to_llvm_context *ctx,
for (unsigned j = 0; j < length; j++) {
LLVMValueRef out_val = LLVMBuildLoad(ctx->builder,
out_ptr[j], "");
- LLVMValueRef voffset = LLVMConstInt(ctx->i32, (slot * 4 + j) * ctx->gs_max_out_vertices, false);
+ LLVMValueRef voffset = LLVMConstInt(ctx->ac.i32, (slot * 4 + j) * ctx->gs_max_out_vertices, false);
voffset = LLVMBuildAdd(ctx->builder, voffset, gs_next_vertex, "");
- voffset = LLVMBuildMul(ctx->builder, voffset, LLVMConstInt(ctx->i32, 4, false), "");
+ voffset = LLVMBuildMul(ctx->builder, voffset, LLVMConstInt(ctx->ac.i32, 4, false), "");
- out_val = LLVMBuildBitCast(ctx->builder, out_val, ctx->i32, "");
+ out_val = LLVMBuildBitCast(ctx->builder, out_val, ctx->ac.i32, "");
ac_build_buffer_store_dword(&ctx->ac, ctx->gsvs_ring,
out_val, 1,
@@ -4302,10 +4300,10 @@ static LLVMValueRef radv_get_sampler_desc(struct ac_shader_abi *abi,
const uint32_t *samplers = radv_immutable_samplers(layout, binding);
LLVMValueRef constants[] = {
- LLVMConstInt(ctx->i32, samplers[constant_index * 4 + 0], 0),
- LLVMConstInt(ctx->i32, samplers[constant_index * 4 + 1], 0),
- LLVMConstInt(ctx->i32, samplers[constant_index * 4 + 2], 0),
- LLVMConstInt(ctx->i32, samplers[constant_index * 4 + 3], 0),
+ LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 0], 0),
+ LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 1], 0),
+ LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 2], 0),
+ LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 3], 0),
};
return ac_build_gather_values(&ctx->ac, constants, 4);
}
@@ -4315,9 +4313,9 @@ static LLVMValueRef radv_get_sampler_desc(struct ac_shader_abi *abi,
if (!index)
index = ctx->ac.i32_0;
- index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, stride / type_size, 0), "");
+ index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->ac.i32, stride / type_size, 0), "");
- list = ac_build_gep0(&ctx->ac, list, LLVMConstInt(ctx->i32, offset, 0));
+ list = ac_build_gep0(&ctx->ac, list, LLVMConstInt(ctx->ac.i32, offset, 0));
list = LLVMBuildPointerCast(builder, list, const_array(type, 0), "");
return ac_build_load_to_sgpr(&ctx->ac, list, index);
@@ -5028,17 +5026,17 @@ handle_vs_input_decl(struct nir_to_llvm_context *ctx,
ctx->abi.base_vertex, "");
for (unsigned i = 0; i < attrib_count; ++i, ++idx) {
- t_offset = LLVMConstInt(ctx->i32, index + i, false);
+ t_offset = LLVMConstInt(ctx->ac.i32, index + i, false);
t_list = ac_build_load_to_sgpr(&ctx->ac, t_list_ptr, t_offset);
input = ac_build_buffer_load_format(&ctx->ac, t_list,
buffer_index,
- LLVMConstInt(ctx->i32, 0, false),
+ LLVMConstInt(ctx->ac.i32, 0, false),
true);
for (unsigned chan = 0; chan < 4; chan++) {
- LLVMValueRef llvm_chan = LLVMConstInt(ctx->i32, chan, false);
+ LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
ctx->inputs[radeon_llvm_reg_index_soa(idx, chan)] =
ac_to_integer(&ctx->ac, LLVMBuildExtractElement(ctx->builder,
input, llvm_chan, ""));
@@ -5057,7 +5055,7 @@ static void interp_fs_input(struct nir_to_llvm_context *ctx,
LLVMValueRef i, j;
bool interp = interp_param != NULL;
- attr_number = LLVMConstInt(ctx->i32, attr, false);
+ attr_number = LLVMConstInt(ctx->ac.i32, attr, false);
/* fs.constant returns the param from the middle vertex, so it's not
* really useful for flat shading. It's meant to be used for custom
@@ -5080,7 +5078,7 @@ static void interp_fs_input(struct nir_to_llvm_context *ctx,
}
for (chan = 0; chan < 4; chan++) {
- LLVMValueRef llvm_chan = LLVMConstInt(ctx->i32, chan, false);
+ LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
if (interp) {
result[chan] = ac_build_fs_interp(&ctx->ac,
@@ -5089,7 +5087,7 @@ static void interp_fs_input(struct nir_to_llvm_context *ctx,
prim_mask, i, j);
} else {
result[chan] = ac_build_fs_interp_mov(&ctx->ac,
- LLVMConstInt(ctx->i32, 2, false),
+ LLVMConstInt(ctx->ac.i32, 2, false),
llvm_chan,
attr_number,
prim_mask);
@@ -5336,7 +5334,7 @@ glsl_base_to_llvm_type(struct nir_to_llvm_context *ctx,
case GLSL_TYPE_UINT:
case GLSL_TYPE_BOOL:
case GLSL_TYPE_SUBROUTINE:
- return ctx->i32;
+ return ctx->ac.i32;
case GLSL_TYPE_FLOAT: /* TODO handle mediump */
return ctx->f32;
case GLSL_TYPE_INT64:
@@ -5438,11 +5436,11 @@ emit_float_saturate(struct ac_llvm_context *ctx, LLVMValueRef v, float lo, float
static LLVMValueRef emit_pack_int16(struct nir_to_llvm_context *ctx,
LLVMValueRef src0, LLVMValueRef src1)
{
- LLVMValueRef const16 = LLVMConstInt(ctx->i32, 16, false);
+ LLVMValueRef const16 = LLVMConstInt(ctx->ac.i32, 16, false);
LLVMValueRef comp[2];
- comp[0] = LLVMBuildAnd(ctx->builder, src0, LLVMConstInt(ctx-> i32, 65535, 0), "");
- comp[1] = LLVMBuildAnd(ctx->builder, src1, LLVMConstInt(ctx-> i32, 65535, 0), "");
+ comp[0] = LLVMBuildAnd(ctx->builder, src0, LLVMConstInt(ctx->ac.i32, 65535, 0), "");
+ comp[1] = LLVMBuildAnd(ctx->builder, src1, LLVMConstInt(ctx->ac.i32, 65535, 0), "");
comp[1] = LLVMBuildShl(ctx->builder, comp[1], const16, "");
return LLVMBuildOr(ctx->builder, comp[0], comp[1], "");
}
@@ -5528,7 +5526,7 @@ si_llvm_init_export_args(struct nir_to_llvm_context *ctx,
val[chan] = LLVMBuildFAdd(ctx->builder, val[chan],
LLVMConstReal(ctx->f32, 0.5), "");
val[chan] = LLVMBuildFPToUI(ctx->builder, val[chan],
- ctx->i32, "");
+ ctx->ac.i32, "");
}
args->compr = 1;
@@ -5549,7 +5547,7 @@ si_llvm_init_export_args(struct nir_to_llvm_context *ctx,
val[chan], ctx->ac.f32_0, ""),
LLVMConstReal(ctx->f32, 0.5),
LLVMConstReal(ctx->f32, -0.5), ""), "");
- val[chan] = LLVMBuildFPToSI(ctx->builder, val[chan], ctx->i32, "");
+ val[chan] = LLVMBuildFPToSI(ctx->builder, val[chan], ctx->ac.i32, "");
}
args->compr = 1;
@@ -5558,9 +5556,9 @@ si_llvm_init_export_args(struct nir_to_llvm_context *ctx,
break;
case V_028714_SPI_SHADER_UINT16_ABGR: {
- LLVMValueRef max_rgb = LLVMConstInt(ctx->i32,
+ LLVMValueRef max_rgb = LLVMConstInt(ctx->ac.i32,
is_int8 ? 255 : is_int10 ? 1023 : 65535, 0);
- LLVMValueRef max_alpha = !is_int10 ? max_rgb : LLVMConstInt(ctx->i32, 3, 0);
+ LLVMValueRef max_alpha = !is_int10 ? max_rgb : LLVMConstInt(ctx->ac.i32, 3, 0);
for (unsigned chan = 0; chan < 4; chan++) {
val[chan] = ac_to_integer(&ctx->ac, values[chan]);
@@ -5574,12 +5572,12 @@ si_llvm_init_export_args(struct nir_to_llvm_context *ctx,
}
case V_028714_SPI_SHADER_SINT16_ABGR: {
- LLVMValueRef max_rgb = LLVMConstInt(ctx->i32,
+ LLVMValueRef max_rgb = LLVMConstInt(ctx->ac.i32,
is_int8 ? 127 : is_int10 ? 511 : 32767, 0);
- LLVMValueRef min_rgb = LLVMConstInt(ctx->i32,
+ LLVMValueRef min_rgb = LLVMConstInt(ctx->ac.i32,
is_int8 ? -128 : is_int10 ? -512 : -32768, 0);
LLVMValueRef max_alpha = !is_int10 ? max_rgb : ctx->ac.i32_1;
- LLVMValueRef min_alpha = !is_int10 ? min_rgb : LLVMConstInt(ctx->i32, -2, 0);
+ LLVMValueRef min_alpha = !is_int10 ? min_rgb : LLVMConstInt(ctx->ac.i32, -2, 0);
/* Clamp. */
for (unsigned chan = 0; chan < 4; chan++) {
@@ -5714,7 +5712,7 @@ handle_vs_outputs_post(struct nir_to_llvm_context *ctx,
LLVMValueRef v = viewport_index_value;
v = ac_to_integer(&ctx->ac, v);
v = LLVMBuildShl(ctx->builder, v,
- LLVMConstInt(ctx->i32, 16, false),
+ LLVMConstInt(ctx->ac.i32, 16, false),
"");
v = LLVMBuildOr(ctx->builder, v,
ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");
@@ -5833,9 +5831,9 @@ handle_es_outputs_post(struct nir_to_llvm_context *ctx,
LLVMConstInt(ctx->ac.i32, 4, false), false);
vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
LLVMBuildMul(ctx->ac.builder, wave_idx,
- LLVMConstInt(ctx->i32, 64, false), ""), "");
+ LLVMConstInt(ctx->ac.i32, 64, false), ""), "");
lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
- LLVMConstInt(ctx->i32, itemsize_dw, 0), "");
+ LLVMConstInt(ctx->ac.i32, itemsize_dw, 0), "");
}
for (unsigned i = 0; i < RADEON_LLVM_MAX_OUTPUTS; ++i) {
@@ -5854,12 +5852,12 @@ handle_es_outputs_post(struct nir_to_llvm_context *ctx,
if (lds_base) {
dw_addr = LLVMBuildAdd(ctx->builder, lds_base,
- LLVMConstInt(ctx->i32, param_index * 4, false),
+ LLVMConstInt(ctx->ac.i32, param_index * 4, false),
"");
}
for (j = 0; j < length; j++) {
LLVMValueRef out_val = LLVMBuildLoad(ctx->builder, out_ptr[j], "");
- out_val = LLVMBuildBitCast(ctx->builder, out_val, ctx->i32, "");
+ out_val = LLVMBuildBitCast(ctx->builder, out_val, ctx->ac.i32, "");
if (ctx->ac.chip_class >= GFX9) {
ac_lds_store(&ctx->ac, dw_addr,
@@ -5899,7 +5897,7 @@ handle_ls_outputs_post(struct nir_to_llvm_context *ctx)
if (length > 4)
mark_tess_output(ctx, false, param + 1);
LLVMValueRef dw_addr = LLVMBuildAdd(ctx->builder, base_dw_addr,
- LLVMConstInt(ctx->i32, param * 4, false),
+ LLVMConstInt(ctx->ac.i32, param * 4, false),
"");
for (unsigned j = 0; j < length; j++) {
ac_lds_store(&ctx->ac, dw_addr,
@@ -6044,33 +6042,33 @@ write_tess_factors(struct nir_to_llvm_context *ctx)
mark_tess_output(ctx, true, tess_outer_index);
lds_base = get_tcs_out_current_patch_data_offset(ctx);
lds_inner = LLVMBuildAdd(ctx->builder, lds_base,
- LLVMConstInt(ctx->i32, tess_inner_index * 4, false), "");
+ LLVMConstInt(ctx->ac.i32, tess_inner_index * 4, false), "");
lds_outer = LLVMBuildAdd(ctx->builder, lds_base,
- LLVMConstInt(ctx->i32, tess_outer_index * 4, false), "");
+ LLVMConstInt(ctx->ac.i32, tess_outer_index * 4, false), "");
for (i = 0; i < 4; i++) {
- inner[i] = LLVMGetUndef(ctx->i32);
- outer[i] = LLVMGetUndef(ctx->i32);
+ inner[i] = LLVMGetUndef(ctx->ac.i32);
+ outer[i] = LLVMGetUndef(ctx->ac.i32);
}
// LINES reverseal
if (ctx->options->key.tcs.primitive_mode == GL_ISOLINES) {
outer[0] = out[1] = ac_lds_load(&ctx->ac, lds_outer);
lds_outer = LLVMBuildAdd(ctx->builder, lds_outer,
- LLVMConstInt(ctx->i32, 1, false), "");
+ LLVMConstInt(ctx->ac.i32, 1, false), "");
outer[1] = out[0] = ac_lds_load(&ctx->ac, lds_outer);
} else {
for (i = 0; i < outer_comps; i++) {
outer[i] = out[i] =
ac_lds_load(&ctx->ac, lds_outer);
lds_outer = LLVMBuildAdd(ctx->builder, lds_outer,
- LLVMConstInt(ctx->i32, 1, false), "");
+ LLVMConstInt(ctx->ac.i32, 1, false), "");
}
for (i = 0; i < inner_comps; i++) {
inner[i] = out[outer_comps+i] =
ac_lds_load(&ctx->ac, lds_inner);
lds_inner = LLVMBuildAdd(ctx->builder, lds_inner,
- LLVMConstInt(ctx->i32, 1, false), "");
+ LLVMConstInt(ctx->ac.i32, 1, false), "");
}
}
@@ -6085,7 +6083,7 @@ write_tess_factors(struct nir_to_llvm_context *ctx)
buffer = ctx->hs_ring_tess_factor;
tf_base = ctx->tess_factor_offset;
byteoffset = LLVMBuildMul(ctx->builder, rel_patch_id,
- LLVMConstInt(ctx->i32, 4 * stride, false), "");
+ LLVMConstInt(ctx->ac.i32, 4 * stride, false), "");
unsigned tf_offset = 0;
if (ctx->options->chip_class <= VI) {
@@ -6095,7 +6093,7 @@ write_tess_factors(struct nir_to_llvm_context *ctx)
/* Store the dynamic HS control word. */
ac_build_buffer_store_dword(&ctx->ac, buffer,
- LLVMConstInt(ctx->i32, 0x80000000, false),
+ LLVMConstInt(ctx->ac.i32, 0x80000000, false),
1, ctx->ac.i32_0, tf_base,
0, 1, 0, true, false);
tf_offset += 4;
@@ -6120,7 +6118,7 @@ write_tess_factors(struct nir_to_llvm_context *ctx)
param_outer = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
tf_outer_offset = get_tcs_tes_buffer_address(ctx, NULL,
- LLVMConstInt(ctx->i32, param_outer, 0));
+ LLVMConstInt(ctx->ac.i32, param_outer, 0));
outer_vec = ac_build_gather_values(&ctx->ac, outer,
util_next_power_of_two(outer_comps));
@@ -6131,7 +6129,7 @@ write_tess_factors(struct nir_to_llvm_context *ctx)
if (inner_comps) {
param_inner = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
tf_inner_offset = get_tcs_tes_buffer_address(ctx, NULL,
- LLVMConstInt(ctx->i32, param_inner, 0));
+ LLVMConstInt(ctx->ac.i32, param_inner, 0));
inner_vec = inner_comps == 1 ? inner[0] :
ac_build_gather_values(&ctx->ac, inner, inner_comps);
@@ -6369,20 +6367,20 @@ ac_setup_rings(struct nir_to_llvm_context *ctx)
{
if ((ctx->stage == MESA_SHADER_VERTEX && ctx->options->key.vs.as_es) ||
(ctx->stage == MESA_SHADER_TESS_EVAL && ctx->options->key.tes.as_es)) {
- ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_ESGS_VS, false));
+ ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_ESGS_VS, false));
}
if (ctx->is_gs_copy_shader) {
- ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_GSVS_VS, false));
+ ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_GSVS_VS, false));
}
if (ctx->stage == MESA_SHADER_GEOMETRY) {
LLVMValueRef tmp;
- ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_ESGS_GS, false));
- ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_GSVS_GS, false));
+ ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_ESGS_GS, false));
+ ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_GSVS_GS, false));
ctx->gsvs_ring = LLVMBuildBitCast(ctx->builder, ctx->gsvs_ring, ctx->v4i32, "");
- ctx->gsvs_ring = LLVMBuildInsertElement(ctx->builder, ctx->gsvs_ring, ctx->gsvs_num_entries, LLVMConstInt(ctx->i32, 2, false), "");
+ ctx->gsvs_ring = LLVMBuildInsertElement(ctx->builder, ctx->gsvs_ring, ctx->gsvs_num_entries, LLVMConstInt(ctx->ac.i32, 2, false), "");
tmp = LLVMBuildExtractElement(ctx->builder, ctx->gsvs_ring, ctx->ac.i32_1, "");
tmp = LLVMBuildOr(ctx->builder, tmp, ctx->gsvs_ring_stride, "");
ctx->gsvs_ring = LLVMBuildInsertElement(ctx->builder, ctx->gsvs_ring, tmp, ctx->ac.i32_1, "");
@@ -6390,8 +6388,8 @@ ac_setup_rings(struct nir_to_llvm_context *ctx)
if (ctx->stage == MESA_SHADER_TESS_CTRL ||
ctx->stage == MESA_SHADER_TESS_EVAL) {
- ctx->hs_ring_tess_offchip = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_HS_TESS_OFFCHIP, false));
- ctx->hs_ring_tess_factor = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_HS_TESS_FACTOR, false));
+ ctx->hs_ring_tess_offchip = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_OFFCHIP, false));
+ ctx->hs_ring_tess_factor = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_FACTOR, false));
}
}
@@ -6561,7 +6559,7 @@ LLVMModuleRef ac_translate_nir_to_llvm(LLVMTargetMachineRef tm,
ctx.num_output_culls = shaders[i]->info.cull_distance_array_size;
if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
- ctx.gs_next_vertex = ac_build_alloca(&ctx.ac, ctx.i32, "gs_next_vertex");
+ ctx.gs_next_vertex = ac_build_alloca(&ctx.ac, ctx.ac.i32, "gs_next_vertex");
ctx.gs_max_out_vertices = shaders[i]->info.gs.vertices_out;
} else if (shaders[i]->info.stage == MESA_SHADER_TESS_EVAL) {
@@ -6826,7 +6824,7 @@ ac_gs_copy_shader_emit(struct nir_to_llvm_context *ctx)
{
LLVMValueRef args[9];
args[0] = ctx->gsvs_ring;
- args[1] = LLVMBuildMul(ctx->builder, ctx->abi.vertex_id, LLVMConstInt(ctx->i32, 4, false), "");
+ args[1] = LLVMBuildMul(ctx->builder, ctx->abi.vertex_id, LLVMConstInt(ctx->ac.i32, 4, false), "");
args[3] = ctx->ac.i32_0;
args[4] = ctx->ac.i32_1; /* OFFEN */
args[5] = ctx->ac.i32_0; /* IDXEN */
@@ -6852,13 +6850,13 @@ ac_gs_copy_shader_emit(struct nir_to_llvm_context *ctx)
for (unsigned j = 0; j < length; j++) {
LLVMValueRef value;
- args[2] = LLVMConstInt(ctx->i32,
+ args[2] = LLVMConstInt(ctx->ac.i32,
(slot * 4 + j) *
ctx->gs_max_out_vertices * 16 * 4, false);
value = ac_build_intrinsic(&ctx->ac,
"llvm.SI.buffer.load.dword.i32.i32",
- ctx->i32, args, 9,
+ ctx->ac.i32, args, 9,
AC_FUNC_ATTR_READONLY |
AC_FUNC_ATTR_LEGACY);