about summary refs log tree commit diff stats
path: root/src/compiler/nir
diff options
context:
space:
mode:
Diffstat (limited to 'src/compiler/nir')
-rw-r--r--src/compiler/nir/nir_opt_load_store_vectorize.c2
-rw-r--r--src/compiler/nir/tests/load_store_vectorizer_tests.cpp8
2 files changed, 6 insertions, 4 deletions
diff --git a/src/compiler/nir/nir_opt_load_store_vectorize.c b/src/compiler/nir/nir_opt_load_store_vectorize.c
index 6587251c7e7..414a97fec45 100644
--- a/src/compiler/nir/nir_opt_load_store_vectorize.c
+++ b/src/compiler/nir/nir_opt_load_store_vectorize.c
@@ -38,6 +38,8 @@
* - It won't turn four consecutive vec3 loads into 3 vec4 loads.
* - It doesn't do global vectorization.
* Handling these cases probably wouldn't provide much benefit though.
+ *
+ * This probably doesn't handle big-endian GPUs correctly.
*/
#include "nir.h"
diff --git a/src/compiler/nir/tests/load_store_vectorizer_tests.cpp b/src/compiler/nir/tests/load_store_vectorizer_tests.cpp
index cd623704fc3..0d749debd4c 100644
--- a/src/compiler/nir/tests/load_store_vectorizer_tests.cpp
+++ b/src/compiler/nir/tests/load_store_vectorizer_tests.cpp
@@ -961,10 +961,10 @@ TEST_F(nir_load_store_vectorize_test, ssbo_store_adjacent_8_8_16)
ASSERT_EQ(val->bit_size, 8);
ASSERT_EQ(val->num_components, 4);
nir_const_value *cv = nir_instr_as_load_const(val->parent_instr)->value;
- ASSERT_EQ(nir_const_value_as_uint(cv[0], 32), 0x10);
- ASSERT_EQ(nir_const_value_as_uint(cv[1], 32), 0x20);
- ASSERT_EQ(nir_const_value_as_uint(cv[2], 32), 0x30);
- ASSERT_EQ(nir_const_value_as_uint(cv[3], 32), 0x0);
+ ASSERT_EQ(nir_const_value_as_uint(cv[0], 8), 0x10);
+ ASSERT_EQ(nir_const_value_as_uint(cv[1], 8), 0x20);
+ ASSERT_EQ(nir_const_value_as_uint(cv[2], 8), 0x30);
+ ASSERT_EQ(nir_const_value_as_uint(cv[3], 8), 0x0);
}
TEST_F(nir_load_store_vectorize_test, ssbo_store_adjacent_32_32_64)