Diffstat (limited to 'src/intel')
-rw-r--r--  src/intel/compiler/brw_fs_nir.cpp                      | 22
-rw-r--r--  src/intel/compiler/brw_nir_analyze_boolean_resolves.c  |  2
-rw-r--r--  src/intel/compiler/brw_vec4_nir.cpp                    |  8
-rw-r--r--  src/intel/vulkan/anv_nir_lower_ycbcr_textures.c        | 50
4 files changed, 43 insertions, 39 deletions
diff --git a/src/intel/compiler/brw_fs_nir.cpp b/src/intel/compiler/brw_fs_nir.cpp
index ed8c479ca40..bbcbc0fa3d2 100644
--- a/src/intel/compiler/brw_fs_nir.cpp
+++ b/src/intel/compiler/brw_fs_nir.cpp
@@ -1694,17 +1694,17 @@ fs_visitor::nir_emit_load_const(const fs_builder &bld,
switch (instr->def.bit_size) {
case 8:
for (unsigned i = 0; i < instr->def.num_components; i++)
- bld.MOV(offset(reg, bld, i), setup_imm_b(bld, instr->value.i8[i]));
+ bld.MOV(offset(reg, bld, i), setup_imm_b(bld, instr->value[i].i8));
break;
case 16:
for (unsigned i = 0; i < instr->def.num_components; i++)
- bld.MOV(offset(reg, bld, i), brw_imm_w(instr->value.i16[i]));
+ bld.MOV(offset(reg, bld, i), brw_imm_w(instr->value[i].i16));
break;
case 32:
for (unsigned i = 0; i < instr->def.num_components; i++)
- bld.MOV(offset(reg, bld, i), brw_imm_d(instr->value.i32[i]));
+ bld.MOV(offset(reg, bld, i), brw_imm_d(instr->value[i].i32));
break;
case 64:
@@ -1713,11 +1713,11 @@ fs_visitor::nir_emit_load_const(const fs_builder &bld,
/* We don't get 64-bit integer types until gen8 */
for (unsigned i = 0; i < instr->def.num_components; i++) {
bld.MOV(retype(offset(reg, bld, i), BRW_REGISTER_TYPE_DF),
- setup_imm_df(bld, instr->value.f64[i]));
+ setup_imm_df(bld, instr->value[i].f64));
}
} else {
for (unsigned i = 0; i < instr->def.num_components; i++)
- bld.MOV(offset(reg, bld, i), brw_imm_q(instr->value.i64[i]));
+ bld.MOV(offset(reg, bld, i), brw_imm_q(instr->value[i].i64));
}
break;
@@ -3383,8 +3383,8 @@ fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld,
if (const_offset) {
assert(nir_src_bit_size(instr->src[0]) == 32);
- unsigned off_x = MIN2((int)(const_offset->f32[0] * 16), 7) & 0xf;
- unsigned off_y = MIN2((int)(const_offset->f32[1] * 16), 7) & 0xf;
+ unsigned off_x = MIN2((int)(const_offset[0].f32 * 16), 7) & 0xf;
+ unsigned off_y = MIN2((int)(const_offset[1].f32 * 16), 7) & 0xf;
emit_pixel_interpolater_send(bld,
FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET,
@@ -3674,14 +3674,14 @@ brw_nir_reduction_op_identity(const fs_builder &bld,
switch (type_sz(type)) {
case 2:
assert(type != BRW_REGISTER_TYPE_HF);
- return retype(brw_imm_uw(value.u16[0]), type);
+ return retype(brw_imm_uw(value.u16), type);
case 4:
- return retype(brw_imm_ud(value.u32[0]), type);
+ return retype(brw_imm_ud(value.u32), type);
case 8:
if (type == BRW_REGISTER_TYPE_DF)
- return setup_imm_df(bld, value.f64[0]);
+ return setup_imm_df(bld, value.f64);
else
- return retype(brw_imm_u64(value.u64[0]), type);
+ return retype(brw_imm_u64(value.u64), type);
default:
unreachable("Invalid type size");
}
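
For context on the pattern in the hunks above: this series follows the nir_const_value rework in which a load_const's constant is stored as an array of scalar unions (one per component) instead of a single union of per-component arrays, so every access moves from value.TYPE[i] to value[i].TYPE. A rough sketch of the two layouts, with illustrative names and abbreviated field lists (not the exact nir.h definitions):

/* Old layout: one union carrying a whole vector per type. */
union old_nir_const_value {
   int32_t i32[NIR_MAX_VEC_COMPONENTS];
   float   f32[NIR_MAX_VEC_COMPONENTS];
   double  f64[NIR_MAX_VEC_COMPONENTS];
   /* ... */
};

/* New layout: one scalar union per component; the instruction holds an
 * array of these, so component i of a 32-bit constant is read as
 * instr->value[i].i32 instead of instr->value.i32[i], which is exactly
 * the swap made in the hunks above. */
typedef union {
   int32_t i32;
   float   f32;
   double  f64;
   /* ... */
} new_nir_const_value;
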
diff --git a/src/intel/compiler/brw_nir_analyze_boolean_resolves.c b/src/intel/compiler/brw_nir_analyze_boolean_resolves.c
index b1be54d92ac..fd9e7740078 100644
--- a/src/intel/compiler/brw_nir_analyze_boolean_resolves.c
+++ b/src/intel/compiler/brw_nir_analyze_boolean_resolves.c
@@ -225,7 +225,7 @@ analyze_boolean_resolves_block(nir_block *block)
* have to worry about resolving them.
*/
instr->pass_flags &= ~BRW_NIR_BOOLEAN_MASK;
- if (load->value.u32[0] == NIR_TRUE || load->value.u32[0] == NIR_FALSE) {
+ if (load->value[0].u32 == NIR_TRUE || load->value[0].u32 == NIR_FALSE) {
instr->pass_flags |= BRW_NIR_BOOLEAN_NO_RESOLVE;
} else {
instr->pass_flags |= BRW_NIR_NON_BOOLEAN;
diff --git a/src/intel/compiler/brw_vec4_nir.cpp b/src/intel/compiler/brw_vec4_nir.cpp
index 53a0d97117c..7a8ae8158a3 100644
--- a/src/intel/compiler/brw_vec4_nir.cpp
+++ b/src/intel/compiler/brw_vec4_nir.cpp
@@ -353,18 +353,18 @@ vec4_visitor::nir_emit_load_const(nir_load_const_instr *instr)
for (unsigned j = i; j < instr->def.num_components; j++) {
if ((instr->def.bit_size == 32 &&
- instr->value.u32[i] == instr->value.u32[j]) ||
+ instr->value[i].u32 == instr->value[j].u32) ||
(instr->def.bit_size == 64 &&
- instr->value.f64[i] == instr->value.f64[j])) {
+ instr->value[i].f64 == instr->value[j].f64)) {
writemask |= 1 << j;
}
}
reg.writemask = writemask;
if (instr->def.bit_size == 64) {
- emit(MOV(reg, setup_imm_df(ibld, instr->value.f64[i])));
+ emit(MOV(reg, setup_imm_df(ibld, instr->value[i].f64)));
} else {
- emit(MOV(reg, brw_imm_d(instr->value.i32[i])));
+ emit(MOV(reg, brw_imm_d(instr->value[i].i32)));
}
remaining &= ~writemask;
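
The vec4 hunk keeps the existing writemask coalescing (components holding the same bits are emitted by a single MOV); only the component comparison moves to the per-component layout. A minimal standalone sketch of that test, using a hypothetical components_equal() helper that is not part of the patch:

/* Hypothetical helper mirroring the comparison above: two components of
 * a load_const match if their raw values compare equal at the def's bit
 * size (32-bit compared as u32, 64-bit compared as f64). */
static bool
components_equal(const nir_load_const_instr *instr, unsigned i, unsigned j)
{
   if (instr->def.bit_size == 32)
      return instr->value[i].u32 == instr->value[j].u32;
   else if (instr->def.bit_size == 64)
      return instr->value[i].f64 == instr->value[j].f64;
   return false;
}
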
diff --git a/src/intel/vulkan/anv_nir_lower_ycbcr_textures.c b/src/intel/vulkan/anv_nir_lower_ycbcr_textures.c
index 0567a1be939..799749d5db0 100644
--- a/src/intel/vulkan/anv_nir_lower_ycbcr_textures.c
+++ b/src/intel/vulkan/anv_nir_lower_ycbcr_textures.c
@@ -79,36 +79,40 @@ chroma_range(nir_builder *b,
}
}
-static const nir_const_value *
+typedef struct nir_const_value_3_4 {
+ nir_const_value v[3][4];
+} nir_const_value_3_4;
+
+static const nir_const_value_3_4 *
ycbcr_model_to_rgb_matrix(VkSamplerYcbcrModelConversion model)
{
switch (model) {
case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601: {
- static const nir_const_value bt601[3] = {
- { .f32 = { 1.402f, 1.0f, 0.0f, 0.0f } },
- { .f32 = { -0.714136286201022f, 1.0f, -0.344136286201022f, 0.0f } },
- { .f32 = { 0.0f, 1.0f, 1.772f, 0.0f } }
- };
+ static const nir_const_value_3_4 bt601 = { {
+ { { .f32 = 1.402f }, { .f32 = 1.0f }, { .f32 = 0.0f }, { .f32 = 0.0f } },
+ { { .f32 = -0.714136286201022f }, { .f32 = 1.0f }, { .f32 = -0.344136286201022f }, { .f32 = 0.0f } },
+ { { .f32 = 0.0f }, { .f32 = 1.0f }, { .f32 = 1.772f }, { .f32 = 0.0f } },
+ } };
- return bt601;
+ return &bt601;
}
case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709: {
- static const nir_const_value bt709[3] = {
- { .f32 = { 1.5748031496063f, 1.0f, 0.0, 0.0f } },
- { .f32 = { -0.468125209181067f, 1.0f, -0.187327487470334f, 0.0f } },
- { .f32 = { 0.0f, 1.0f, 1.85563184264242f, 0.0f } }
- };
+ static const nir_const_value_3_4 bt709 = { {
+ { { .f32 = 1.5748031496063f }, { .f32 = 1.0f }, { .f32 = 0.0f }, { .f32 = 0.0f } },
+ { { .f32 = -0.468125209181067f }, { .f32 = 1.0f }, { .f32 = -0.187327487470334f }, { .f32 = 0.0f } },
+ { { .f32 = 0.0f }, { .f32 = 1.0f }, { .f32 = 1.85563184264242f }, { .f32 = 0.0f } },
+ } };
- return bt709;
+ return &bt709;
}
case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020: {
- static const nir_const_value bt2020[3] = {
- { .f32 = { 1.4746f, 1.0f, 0.0f, 0.0f } },
- { .f32 = { -0.571353126843658f, 1.0f, -0.164553126843658f, 0.0f } },
- { .f32 = { 0.0f, 1.0f, 1.8814f, 0.0f } }
- };
+ static const nir_const_value_3_4 bt2020 = { {
+ { { .f32 = 1.4746f }, { .f32 = 1.0f }, { .f32 = 0.0f }, { .f32 = 0.0f } },
+ { { .f32 = -0.571353126843658f }, { .f32 = 1.0f }, { .f32 = -0.164553126843658f }, { .f32 = 0.0f } },
+ { { .f32 = 0.0f }, { .f32 = 1.0f }, { .f32 = 1.8814f }, { .f32 = 0.0f } },
+ } };
- return bt2020;
+ return &bt2020;
}
default:
unreachable("missing Ycbcr model");
@@ -137,13 +141,13 @@ convert_ycbcr(struct ycbcr_state *state,
if (conversion->ycbcr_model == VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY)
return expanded_channels;
- const nir_const_value *conversion_matrix =
+ const nir_const_value_3_4 *conversion_matrix =
ycbcr_model_to_rgb_matrix(conversion->ycbcr_model);
nir_ssa_def *converted_channels[] = {
- nir_fdot4(b, expanded_channels, nir_build_imm(b, 4, 32, conversion_matrix[0])),
- nir_fdot4(b, expanded_channels, nir_build_imm(b, 4, 32, conversion_matrix[1])),
- nir_fdot4(b, expanded_channels, nir_build_imm(b, 4, 32, conversion_matrix[2]))
+ nir_fdot4(b, expanded_channels, nir_build_imm(b, 4, 32, conversion_matrix->v[0])),
+ nir_fdot4(b, expanded_channels, nir_build_imm(b, 4, 32, conversion_matrix->v[1])),
+ nir_fdot4(b, expanded_channels, nir_build_imm(b, 4, 32, conversion_matrix->v[2]))
};
return nir_vec4(b,
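
In the YCbCr pass, a 4-wide matrix row is now nir_const_value[4] rather than a single nir_const_value, and a C function cannot return an array, so the 3x4 table is wrapped in the nir_const_value_3_4 struct and handed back by pointer. A rough usage sketch for one row, assuming nir_build_imm() takes a pointer to per-component values as in the hunk above (b, model and expanded_channels come from the surrounding pass):

/* Each row m->v[i] is four scalar nir_const_value entries; nir_build_imm
 * turns it into a 4-component, 32-bit immediate for the dot product. */
const nir_const_value_3_4 *m = ycbcr_model_to_rgb_matrix(model);
nir_ssa_def *row0 = nir_build_imm(b, 4, 32, m->v[0]);
nir_ssa_def *red  = nir_fdot4(b, expanded_channels, row0);
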