Diffstat (limited to 'src')
-rw-r--r--  src/mesa/drivers/dri/i965/brw_compiler.h |  8
-rw-r--r--  src/mesa/drivers/dri/i965/brw_vue_map.c  | 10
2 files changed, 9 insertions, 9 deletions
diff --git a/src/mesa/drivers/dri/i965/brw_compiler.h b/src/mesa/drivers/dri/i965/brw_compiler.h
index 1b71c7f7f98..16d4d0ec525 100644
--- a/src/mesa/drivers/dri/i965/brw_compiler.h
+++ b/src/mesa/drivers/dri/i965/brw_compiler.h
@@ -546,7 +546,7 @@ struct brw_vue_map {
* map, and (b) actually written by the shader. Does not include any of
* the additional varying slots defined in brw_varying_slot.
*/
- GLbitfield64 slots_valid;
+ uint64_t slots_valid;

/**
* Is this VUE map for a separate shader pipeline?
@@ -616,12 +616,12 @@ GLuint brw_varying_to_offset(const struct brw_vue_map *vue_map, GLuint varying)
void brw_compute_vue_map(const struct gen_device_info *devinfo,
struct brw_vue_map *vue_map,
- GLbitfield64 slots_valid,
+ uint64_t slots_valid,
bool separate_shader);

void brw_compute_tess_vue_map(struct brw_vue_map *const vue_map,
- const GLbitfield64 slots_valid,
- const GLbitfield is_patch);
+ uint64_t slots_valid,
+ uint32_t is_patch);

/* brw_interpolation_map.c */
void brw_setup_vue_interpolation(struct brw_vue_map *vue_map,
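The header change swaps GL typedefs for fixed-width C types. To my understanding, GLbitfield64 is defined in Mesa's GL headers as a 64-bit unsigned integer (and GLbitfield as a 32-bit one), so the new spellings keep the same layout. The typedefs in this standalone sketch are stand-ins used only to illustrate that assumption, not the real header definitions:

    /* Standalone sketch: the typedefs below are stand-ins, not Mesa's real
     * definitions.  They model the assumption that GLbitfield64 is a 64-bit
     * unsigned integer, so replacing it with uint64_t keeps struct brw_vue_map
     * and the function signatures layout-compatible.
     */
    #include <stdint.h>

    typedef uint64_t GLuint64;       /* stand-in for the GL header typedef */
    typedef GLuint64 GLbitfield64;   /* stand-in for the typedef in Mesa's GL headers */

    _Static_assert(sizeof(GLbitfield64) == sizeof(uint64_t),
                   "swapping GLbitfield64 for uint64_t must not change size");

    int main(void) { return 0; }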
diff --git a/src/mesa/drivers/dri/i965/brw_vue_map.c b/src/mesa/drivers/dri/i965/brw_vue_map.c
index 0d8f6c700b6..178a4e5ee83 100644
--- a/src/mesa/drivers/dri/i965/brw_vue_map.c
+++ b/src/mesa/drivers/dri/i965/brw_vue_map.c
@@ -58,7 +58,7 @@ assign_vue_slot(struct brw_vue_map *vue_map, int varying, int slot)
void
brw_compute_vue_map(const struct gen_device_info *devinfo,
struct brw_vue_map *vue_map,
- GLbitfield64 slots_valid,
+ uint64_t slots_valid,
bool separate)
{
/* Keep using the packed/contiguous layout on old hardware - we only need
@@ -166,7 +166,7 @@ brw_compute_vue_map(const struct gen_device_info *devinfo,
* However, it may be output by transform feedback, and we'd rather not
* recompute state when TF changes, so we just always include it.
*/
- GLbitfield64 builtins = slots_valid & BITFIELD64_MASK(VARYING_SLOT_VAR0);
+ uint64_t builtins = slots_valid & BITFIELD64_MASK(VARYING_SLOT_VAR0);
while (builtins != 0) {
const int varying = ffsll(builtins) - 1;
if (vue_map->varying_to_slot[varying] == -1) {
@@ -176,7 +176,7 @@ brw_compute_vue_map(const struct gen_device_info *devinfo,
}

const int first_generic_slot = slot;
- GLbitfield64 generics = slots_valid & ~BITFIELD64_MASK(VARYING_SLOT_VAR0);
+ uint64_t generics = slots_valid & ~BITFIELD64_MASK(VARYING_SLOT_VAR0);
while (generics != 0) {
const int varying = ffsll(generics) - 1;
if (separate) {
@@ -197,8 +197,8 @@ brw_compute_vue_map(const struct gen_device_info *devinfo,
*/
void
brw_compute_tess_vue_map(struct brw_vue_map *vue_map,
- GLbitfield64 vertex_slots,
- GLbitfield patch_slots)
+ uint64_t vertex_slots,
+ uint32_t patch_slots)
{
/* I don't think anything actually uses this... */
vue_map->slots_valid = vertex_slots;
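The loops in brw_compute_vue_map() walk the slots_valid bitfield by taking the lowest set bit with ffsll() and clearing it each iteration. A minimal standalone sketch of that pattern follows; the example bit values and the printf are only illustrative stand-ins for the real slot assignment:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>   /* glibc declares ffsll() here */

    int
    main(void)
    {
       /* Pretend varyings 0, 4 and 33 were written (arbitrary example bits). */
       uint64_t slots_valid = (UINT64_C(1) << 0) |
                              (UINT64_C(1) << 4) |
                              (UINT64_C(1) << 33);

       /* Same shape as the loops above: ffsll() returns one plus the index of
        * the lowest set bit (0 when no bit is set), so ffsll(x) - 1 is the
        * next varying to handle; clearing that bit advances the loop.
        */
       while (slots_valid != 0) {
          const int varying = ffsll(slots_valid) - 1;
          printf("varying %d is present in the VUE map\n", varying);
          slots_valid &= ~(UINT64_C(1) << varying);
       }

       return 0;
    }

Compiled with gcc's default gnu dialect (so the glibc ffsll() declaration is visible), this prints 0, 4 and 33 in ascending order, matching the order in which the loops above process varyings.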