author     Dylan Baker <[email protected]>  2018-08-21 09:46:46 -0700
committer  Dylan Baker <[email protected]>  2018-09-07 10:21:26 -0700
commit     8396043f304bb2a752130230055605c5c966e89f (patch)
tree       ee2e8a5494b88bff3b5e67ece8ffdba70d12c087 /src/intel
parent     80825abb5d1a7491035880253ffd531c55acae6b (diff)
Replace uses of _mesa_bitcount with util_bitcount
and _mesa_bitcount_64 with util_bitcount64. This fixes a build problem in
nir for platforms that don't have popcount or popcountll, such as 32-bit
MSVC.

v2: - Fix additional uses of _mesa_bitcount added after this was
      originally written

Acked-by: Eric Engestrom <[email protected]> (v1)
Acked-by: Eric Anholt <[email protected]>
Reviewed-by: Ian Romanick <[email protected]>
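For readers unfamiliar with the replacement helpers: a portable bit-count of
the kind util_bitcount/util_bitcount64 provide can be sketched as below. This
is only an illustrative sketch; the compiler check and the helper names here
are assumptions, and the real implementations live in src/util and may differ.

/* Illustrative sketch only -- not the actual src/util implementation. */
#include <stdint.h>

static inline unsigned
sketch_bitcount(unsigned n)
{
#if defined(__GNUC__) || defined(__clang__)
   return __builtin_popcount(n);          /* hardware popcount where available */
#else
   /* SWAR fallback: works on compilers without popcount, e.g. 32-bit MSVC. */
   n = n - ((n >> 1) & 0x55555555u);
   n = (n & 0x33333333u) + ((n >> 2) & 0x33333333u);
   return (((n + (n >> 4)) & 0x0f0f0f0fu) * 0x01010101u) >> 24;
#endif
}

static inline unsigned
sketch_bitcount64(uint64_t n)
{
   return sketch_bitcount((unsigned)(n & 0xffffffffu)) +
          sketch_bitcount((unsigned)(n >> 32));
}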
Diffstat (limited to 'src/intel')
-rw-r--r--  src/intel/blorp/blorp_blit.c                 5
-rw-r--r--  src/intel/compiler/brw_fs.cpp                3
-rw-r--r--  src/intel/compiler/brw_fs_nir.cpp            3
-rw-r--r--  src/intel/compiler/brw_nir.c                 5
-rw-r--r--  src/intel/compiler/brw_vec4.cpp              3
-rw-r--r--  src/intel/compiler/brw_vec4_visitor.cpp      3
-rw-r--r--  src/intel/vulkan/anv_blorp.c                 2
-rw-r--r--  src/intel/vulkan/anv_image.c                 9
-rw-r--r--  src/intel/vulkan/anv_nir_lower_multiview.c   8
-rw-r--r--  src/intel/vulkan/anv_pipeline.c              2
-rw-r--r--  src/intel/vulkan/anv_private.h               7
-rw-r--r--  src/intel/vulkan/genX_cmd_buffer.c           4
-rw-r--r--  src/intel/vulkan/genX_query.c               14
13 files changed, 38 insertions, 30 deletions
diff --git a/src/intel/blorp/blorp_blit.c b/src/intel/blorp/blorp_blit.c
index 60cb32641d6..3c963c60eaf 100644
--- a/src/intel/blorp/blorp_blit.c
+++ b/src/intel/blorp/blorp_blit.c
@@ -29,6 +29,7 @@
#include "util/format_rgb9e5.h"
/* header-only include needed for _mesa_unorm_to_float and friends. */
#include "mesa/main/format_utils.h"
+#include "util/u_math.h"
#define FILE_DEBUG_FLAG DEBUG_BLORP
@@ -582,7 +583,7 @@ static inline int count_trailing_one_bits(unsigned value)
#ifdef HAVE___BUILTIN_CTZ
return __builtin_ctz(~value);
#else
- return _mesa_bitcount(value & ~(value + 1));
+ return util_bitcount(value & ~(value + 1));
#endif
}
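The non-builtin fallback works because value & ~(value + 1) keeps exactly the
trailing run of one bits, so counting them with util_bitcount matches
__builtin_ctz(~value). A minimal worked check (the value is an illustrative
assumption):

#include <assert.h>
#include "util/u_math.h"

static void
check_trailing_ones_example(void)
{
   unsigned value = 0xB7;                  /* 0b10110111: three trailing ones */
   /* value + 1 clears the trailing ones and sets the next bit, so the AND
    * below leaves only those trailing ones behind.                          */
   assert((value & ~(value + 1)) == 0x7);
   assert(util_bitcount(value & ~(value + 1)) == 3);
}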
@@ -634,7 +635,7 @@ blorp_nir_manual_blend_average(nir_builder *b, struct brw_blorp_blit_vars *v,
nir_ssa_def *texture_data[5];
unsigned stack_depth = 0;
for (unsigned i = 0; i < tex_samples; ++i) {
- assert(stack_depth == _mesa_bitcount(i)); /* Loop invariant */
+ assert(stack_depth == util_bitcount(i)); /* Loop invariant */
/* Push sample i onto the stack */
assert(stack_depth < ARRAY_SIZE(texture_data));
diff --git a/src/intel/compiler/brw_fs.cpp b/src/intel/compiler/brw_fs.cpp
index 02a7a33c4d7..3f7f2b4c984 100644
--- a/src/intel/compiler/brw_fs.cpp
+++ b/src/intel/compiler/brw_fs.cpp
@@ -39,6 +39,7 @@
#include "compiler/glsl_types.h"
#include "compiler/nir/nir_builder.h"
#include "program/prog_parameter.h"
+#include "util/u_math.h"
using namespace brw;
@@ -1534,7 +1535,7 @@ fs_visitor::calculate_urb_setup()
int urb_next = 0;
/* Figure out where each of the incoming setup attributes lands. */
if (devinfo->gen >= 6) {
- if (_mesa_bitcount_64(nir->info.inputs_read &
+ if (util_bitcount64(nir->info.inputs_read &
BRW_FS_VARYING_INPUT_MASK) <= 16) {
/* The SF/SBE pipeline stage can do arbitrary rearrangement of the
* first 16 varying inputs, so we can put them wherever we want.
diff --git a/src/intel/compiler/brw_fs_nir.cpp b/src/intel/compiler/brw_fs_nir.cpp
index ab915ee0a2d..7f453d75b64 100644
--- a/src/intel/compiler/brw_fs_nir.cpp
+++ b/src/intel/compiler/brw_fs_nir.cpp
@@ -25,6 +25,7 @@
#include "brw_fs.h"
#include "brw_fs_surface_builder.h"
#include "brw_nir.h"
+#include "util/u_math.h"
using namespace brw;
using namespace brw::surface_access;
@@ -751,7 +752,7 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr)
/* Since NIR is doing the scalarizing for us, we should only ever see
* vectorized operations with a single channel.
*/
- assert(_mesa_bitcount(instr->dest.write_mask) == 1);
+ assert(util_bitcount(instr->dest.write_mask) == 1);
channel = ffs(instr->dest.write_mask) - 1;
result = offset(result, bld, channel);
diff --git a/src/intel/compiler/brw_nir.c b/src/intel/compiler/brw_nir.c
index ce865e2ce71..b38c3ba383d 100644
--- a/src/intel/compiler/brw_nir.c
+++ b/src/intel/compiler/brw_nir.c
@@ -26,6 +26,7 @@
#include "common/gen_debug.h"
#include "compiler/glsl_types.h"
#include "compiler/nir/nir_builder.h"
+#include "util/u_math.h"
static bool
is_input(nir_intrinsic_instr *intrin)
@@ -243,7 +244,7 @@ brw_nir_lower_vs_inputs(nir_shader *nir,
BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
BITFIELD64_BIT(SYSTEM_VALUE_INSTANCE_ID));
- const unsigned num_inputs = _mesa_bitcount_64(nir->info.inputs_read);
+ const unsigned num_inputs = util_bitcount64(nir->info.inputs_read);
nir_foreach_function(function, nir) {
if (!function->impl)
@@ -322,7 +323,7 @@ brw_nir_lower_vs_inputs(nir_shader *nir,
* before it and counting the bits.
*/
int attr = nir_intrinsic_base(intrin);
- int slot = _mesa_bitcount_64(nir->info.inputs_read &
+ int slot = util_bitcount64(nir->info.inputs_read &
BITFIELD64_MASK(attr));
nir_intrinsic_set_base(intrin, slot);
break;
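The slot computation above packs the enabled vertex inputs densely:
util_bitcount64 of the mask bits below attr is exactly the number of enabled
attributes that precede it. A hedged example with an assumed mask:

/* Illustrative only -- the mask value is an assumption, not taken from a
 * real shader. With bits 0 and 18 enabled, attribute 18 lands in slot 1
 * because a single enabled attribute precedes it.                          */
uint64_t inputs_read = BITFIELD64_BIT(0) | BITFIELD64_BIT(18);
int attr = 18;
int slot = util_bitcount64(inputs_read & BITFIELD64_MASK(attr));  /* == 1 */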
diff --git a/src/intel/compiler/brw_vec4.cpp b/src/intel/compiler/brw_vec4.cpp
index 4e242e03032..5a86f30634a 100644
--- a/src/intel/compiler/brw_vec4.cpp
+++ b/src/intel/compiler/brw_vec4.cpp
@@ -31,6 +31,7 @@
#include "brw_dead_control_flow.h"
#include "common/gen_debug.h"
#include "program/prog_parameter.h"
+#include "util/u_math.h"
#define MAX_INSTRUCTION (1 << 30)
@@ -2845,7 +2846,7 @@ brw_compile_vs(const struct brw_compiler *compiler, void *log_data,
((1 << shader->info.cull_distance_array_size) - 1) <<
shader->info.clip_distance_array_size;
- unsigned nr_attribute_slots = _mesa_bitcount_64(prog_data->inputs_read);
+ unsigned nr_attribute_slots = util_bitcount64(prog_data->inputs_read);
/* gl_VertexID and gl_InstanceID are system values, but arrive via an
* incoming vertex attribute. So, add an extra slot.
diff --git a/src/intel/compiler/brw_vec4_visitor.cpp b/src/intel/compiler/brw_vec4_visitor.cpp
index 65e1c6d88e1..b2bb2c6b82a 100644
--- a/src/intel/compiler/brw_vec4_visitor.cpp
+++ b/src/intel/compiler/brw_vec4_visitor.cpp
@@ -24,6 +24,7 @@
#include "brw_vec4.h"
#include "brw_cfg.h"
#include "brw_eu.h"
+#include "util/u_math.h"
namespace brw {
@@ -1317,7 +1318,7 @@ vec4_visitor::emit_urb_slot(dst_reg reg, int varying)
* determine which edges should be drawn as wireframe.
*/
current_annotation = "edge flag";
- int edge_attr = _mesa_bitcount_64(nir->info.inputs_read &
+ int edge_attr = util_bitcount64(nir->info.inputs_read &
BITFIELD64_MASK(VERT_ATTRIB_EDGEFLAG));
emit(MOV(reg, src_reg(dst_reg(ATTR, edge_attr,
glsl_type::float_type, WRITEMASK_XYZW))));
diff --git a/src/intel/vulkan/anv_blorp.c b/src/intel/vulkan/anv_blorp.c
index bff54a0c736..9ab291eabec 100644
--- a/src/intel/vulkan/anv_blorp.c
+++ b/src/intel/vulkan/anv_blorp.c
@@ -296,7 +296,7 @@ void anv_CmdCopyImage(
assert(anv_image_aspects_compatible(src_mask, dst_mask));
- if (_mesa_bitcount(src_mask) > 1) {
+ if (util_bitcount(src_mask) > 1) {
uint32_t aspect_bit;
anv_foreach_image_aspect_bit(aspect_bit, src_image, src_mask) {
struct blorp_surf src_surf, dst_surf;
diff --git a/src/intel/vulkan/anv_image.c b/src/intel/vulkan/anv_image.c
index 36d4ac13c75..a3aecb93901 100644
--- a/src/intel/vulkan/anv_image.c
+++ b/src/intel/vulkan/anv_image.c
@@ -32,6 +32,7 @@
#include "anv_private.h"
#include "util/debug.h"
#include "vk_util.h"
+#include "util/u_math.h"
#include "vk_format_info.h"
@@ -814,7 +815,7 @@ anv_layout_to_aux_usage(const struct gen_device_info * const devinfo,
assert(image != NULL);
/* The aspect must be exactly one of the image aspects. */
- assert(_mesa_bitcount(aspect) == 1 && (aspect & image->aspects));
+ assert(util_bitcount(aspect) == 1 && (aspect & image->aspects));
/* Determine the optimal buffer. */
@@ -942,7 +943,7 @@ anv_layout_to_fast_clear_type(const struct gen_device_info * const devinfo,
const VkImageLayout layout)
{
/* The aspect must be exactly one of the image aspects. */
- assert(_mesa_bitcount(aspect) == 1 && (aspect & image->aspects));
+ assert(util_bitcount(aspect) == 1 && (aspect & image->aspects));
uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
@@ -1230,11 +1231,11 @@ static VkImageAspectFlags
remap_aspect_flags(VkImageAspectFlags view_aspects)
{
if (view_aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
- if (_mesa_bitcount(view_aspects) == 1)
+ if (util_bitcount(view_aspects) == 1)
return VK_IMAGE_ASPECT_COLOR_BIT;
VkImageAspectFlags color_aspects = 0;
- for (uint32_t i = 0; i < _mesa_bitcount(view_aspects); i++)
+ for (uint32_t i = 0; i < util_bitcount(view_aspects); i++)
color_aspects |= VK_IMAGE_ASPECT_PLANE_0_BIT << i;
return color_aspects;
}
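A short illustration of the remapping in this hunk (the non-color path lies
outside the hunk and is assumed unchanged): a single color aspect collapses to
the generic color bit, while a multi-planar view keeps one PLANE_i bit per
plane.

/* Illustrative expectations only; remap_aspect_flags() is static to
 * anv_image.c, so this is a sketch rather than a test.                     */
assert(remap_aspect_flags(VK_IMAGE_ASPECT_COLOR_BIT) ==
       VK_IMAGE_ASPECT_COLOR_BIT);
assert(remap_aspect_flags(VK_IMAGE_ASPECT_PLANE_0_BIT |
                          VK_IMAGE_ASPECT_PLANE_1_BIT) ==
       (VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT));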
diff --git a/src/intel/vulkan/anv_nir_lower_multiview.c b/src/intel/vulkan/anv_nir_lower_multiview.c
index bde7aade50f..498be05b7ba 100644
--- a/src/intel/vulkan/anv_nir_lower_multiview.c
+++ b/src/intel/vulkan/anv_nir_lower_multiview.c
@@ -57,7 +57,7 @@ build_instance_id(struct lower_multiview_state *state)
*/
state->instance_id =
nir_idiv(b, nir_load_instance_id(b),
- nir_imm_int(b, _mesa_bitcount(state->view_mask)));
+ nir_imm_int(b, util_bitcount(state->view_mask)));
}
return state->instance_id;
@@ -72,7 +72,7 @@ build_view_index(struct lower_multiview_state *state)
b->cursor = nir_before_block(nir_start_block(b->impl));
assert(state->view_mask != 0);
- if (_mesa_bitcount(state->view_mask) == 1) {
+ if (util_bitcount(state->view_mask) == 1) {
/* Set the view index directly. */
state->view_index = nir_imm_int(b, ffs(state->view_mask) - 1);
} else if (state->builder.shader->info.stage == MESA_SHADER_VERTEX) {
@@ -85,7 +85,7 @@ build_view_index(struct lower_multiview_state *state)
*/
nir_ssa_def *compacted =
nir_umod(b, nir_load_instance_id(b),
- nir_imm_int(b, _mesa_bitcount(state->view_mask)));
+ nir_imm_int(b, util_bitcount(state->view_mask)));
if (util_is_power_of_two_or_zero(state->view_mask + 1)) {
/* If we have a full view mask, then compacted is what we want */
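Worked numbers for the instance/view split in these hunks (the mask is an
illustrative assumption): with view_mask = 0xB there are util_bitcount(0xB)
== 3 views (0, 1 and 3), and the hardware instance id is divided into an
application-visible instance and a compacted view index.

#include <assert.h>
#include <stdint.h>
#include "util/u_math.h"

static void
check_multiview_split_example(void)
{
   uint32_t view_mask   = 0xB;                       /* views 0, 1 and 3     */
   uint32_t num_views   = util_bitcount(view_mask);  /* == 3                 */
   uint32_t hw_instance = 7;

   assert(hw_instance / num_views == 2);   /* what the application sees      */
   assert(hw_instance % num_views == 1);   /* compacted view index           */
   /* 0xB + 1 == 0xC is not a power of two, i.e. the mask is not "full",
    * so compacted values 0/1/2 must still be remapped to views 0/1/3.       */
}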
@@ -206,7 +206,7 @@ anv_nir_lower_multiview(nir_shader *shader, uint32_t view_mask)
/* Unless there is only one possible view index (that would be set
* directly), pass it to the next stage. */
- if (_mesa_bitcount(state.view_mask) != 1) {
+ if (util_bitcount(state.view_mask) != 1) {
nir_variable *view_index_out =
nir_variable_create(shader, nir_var_shader_out,
glsl_int_type(), "view index");
diff --git a/src/intel/vulkan/anv_pipeline.c b/src/intel/vulkan/anv_pipeline.c
index a3eb68769a2..be05c11f45d 100644
--- a/src/intel/vulkan/anv_pipeline.c
+++ b/src/intel/vulkan/anv_pipeline.c
@@ -364,7 +364,7 @@ populate_wm_prog_key(const struct gen_device_info *devinfo,
key->color_outputs_valid |= (1 << i);
}
- key->nr_color_regions = _mesa_bitcount(key->color_outputs_valid);
+ key->nr_color_regions = util_bitcount(key->color_outputs_valid);
key->replicate_alpha = key->nr_color_regions > 1 &&
ms_info && ms_info->alphaToCoverageEnable;
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index 5537c8cab57..d15a91dd014 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -54,6 +54,7 @@
#include "util/set.h"
#include "util/u_atomic.h"
#include "util/u_vector.h"
+#include "util/u_math.h"
#include "util/vma.h"
#include "vk_alloc.h"
#include "vk_debug_report.h"
@@ -2557,7 +2558,7 @@ anv_plane_to_aspect(VkImageAspectFlags image_aspects,
uint32_t plane)
{
if (image_aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
- if (_mesa_bitcount(image_aspects) > 1)
+ if (util_bitcount(image_aspects) > 1)
return VK_IMAGE_ASPECT_PLANE_0_BIT << plane;
return VK_IMAGE_ASPECT_COLOR_BIT;
}
@@ -2968,7 +2969,7 @@ anv_image_aspects_compatible(VkImageAspectFlags aspects1,
/* Only 1 color aspects are compatibles. */
if ((aspects1 & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) != 0 &&
(aspects2 & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) != 0 &&
- _mesa_bitcount(aspects1) == _mesa_bitcount(aspects2))
+ util_bitcount(aspects1) == util_bitcount(aspects2))
return true;
return false;
@@ -3179,7 +3180,7 @@ struct anv_subpass {
static inline unsigned
anv_subpass_view_count(const struct anv_subpass *subpass)
{
- return MAX2(1, _mesa_bitcount(subpass->view_mask));
+ return MAX2(1, util_bitcount(subpass->view_mask));
}
struct anv_render_pass_attachment {
diff --git a/src/intel/vulkan/genX_cmd_buffer.c b/src/intel/vulkan/genX_cmd_buffer.c
index 16168d415c3..1aeb32cab03 100644
--- a/src/intel/vulkan/genX_cmd_buffer.c
+++ b/src/intel/vulkan/genX_cmd_buffer.c
@@ -787,7 +787,7 @@ genX(cmd_buffer_mark_image_written)(struct anv_cmd_buffer *cmd_buffer,
uint32_t layer_count)
{
/* The aspect must be exactly one of the image aspects. */
- assert(_mesa_bitcount(aspect) == 1 && (aspect & image->aspects));
+ assert(util_bitcount(aspect) == 1 && (aspect & image->aspects));
/* The only compression types with more than just fast-clears are MCS,
* CCS_E, and HiZ. With HiZ we just trust the layout and don't actually
@@ -1859,7 +1859,7 @@ cmd_buffer_alloc_push_constants(struct anv_cmd_buffer *cmd_buffer)
#endif
const unsigned num_stages =
- _mesa_bitcount(stages & VK_SHADER_STAGE_ALL_GRAPHICS);
+ util_bitcount(stages & VK_SHADER_STAGE_ALL_GRAPHICS);
unsigned size_per_stage = push_constant_kb / num_stages;
/* Broadwell+ and Haswell gt3 require that the push constant sizes be in
diff --git a/src/intel/vulkan/genX_query.c b/src/intel/vulkan/genX_query.c
index e35e9b85844..011db549c08 100644
--- a/src/intel/vulkan/genX_query.c
+++ b/src/intel/vulkan/genX_query.c
@@ -70,7 +70,7 @@ VkResult genX(CreateQueryPool)(
pipeline_statistics &= ANV_PIPELINE_STATISTICS_MASK;
/* Statistics queries have a min and max for every statistic */
- uint64s_per_slot += 2 * _mesa_bitcount(pipeline_statistics);
+ uint64s_per_slot += 2 * util_bitcount(pipeline_statistics);
break;
default:
assert(!"Invalid query type");
@@ -272,7 +272,7 @@ VkResult genX(GetQueryPoolResults)(
idx++;
}
- assert(idx == _mesa_bitcount(pool->pipeline_statistics));
+ assert(idx == util_bitcount(pool->pipeline_statistics));
break;
}
@@ -289,7 +289,7 @@ VkResult genX(GetQueryPoolResults)(
if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
uint32_t idx = (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) ?
- _mesa_bitcount(pool->pipeline_statistics) : 1;
+ util_bitcount(pool->pipeline_statistics) : 1;
cpu_write_query_result(pData, flags, idx, available);
}
@@ -489,7 +489,7 @@ void genX(CmdEndQuery)(
*/
if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask) {
const uint32_t num_queries =
- _mesa_bitcount(cmd_buffer->state.subpass->view_mask);
+ util_bitcount(cmd_buffer->state.subpass->view_mask);
if (num_queries > 1)
emit_zero_queries(cmd_buffer, pool, query + 1, num_queries - 1);
}
@@ -546,7 +546,7 @@ void genX(CmdWriteTimestamp)(
*/
if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask) {
const uint32_t num_queries =
- _mesa_bitcount(cmd_buffer->state.subpass->view_mask);
+ util_bitcount(cmd_buffer->state.subpass->view_mask);
if (num_queries > 1)
emit_zero_queries(cmd_buffer, pool, query + 1, num_queries - 1);
}
@@ -778,7 +778,7 @@ void genX(CmdCopyQueryPoolResults)(
idx++;
}
- assert(idx == _mesa_bitcount(pool->pipeline_statistics));
+ assert(idx == util_bitcount(pool->pipeline_statistics));
break;
}
@@ -795,7 +795,7 @@ void genX(CmdCopyQueryPoolResults)(
if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
uint32_t idx = (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) ?
- _mesa_bitcount(pool->pipeline_statistics) : 1;
+ util_bitcount(pool->pipeline_statistics) : 1;
emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
&pool->bo, slot_offset);