summaryrefslogtreecommitdiffstats
path: root/src/gallium/drivers/vc4/vc4_cl.h
diff options
context:
space:
mode:
authorEric Anholt <[email protected]>2014-12-25 12:22:02 -1000
committerEric Anholt <[email protected]>2014-12-25 15:47:39 -1000
commitcb5a37249c23084f057ece366bff0a0cf5e66297 (patch)
tree1b4fb3ec101896cf696b9caa5908bac882cf3498 /src/gallium/drivers/vc4/vc4_cl.h
parentdb6e054eb03421c401f5cff592c25c810e11d1f3 (diff)
vc4: Handle unaligned accesses in CL emits.
As of 229bf4475ff0a5dbeb9bc95250f7a40a983c2e28 we started getting SIGBUS faults from unaligned accesses on the hardware, for reasons I haven't figured out. However, we should be avoiding unaligned accesses anyway, and our CL setup certainly would have produced them.
Diffstat (limited to 'src/gallium/drivers/vc4/vc4_cl.h')
-rw-r--r--src/gallium/drivers/vc4/vc4_cl.h53
1 file changed, 52 insertions(+), 1 deletion(-)
diff --git a/src/gallium/drivers/vc4/vc4_cl.h b/src/gallium/drivers/vc4/vc4_cl.h
index 5c67f225749..32a2e717379 100644
--- a/src/gallium/drivers/vc4/vc4_cl.h
+++ b/src/gallium/drivers/vc4/vc4_cl.h
@@ -27,6 +27,7 @@
#include <stdint.h>
#include <string.h>

#include "util/u_math.h"
#include "util/macros.h"
#include "vc4_packet.h"
@@ -45,6 +46,23 @@ void vc4_reset_cl(struct vc4_cl *cl);
void vc4_dump_cl(void *cl, uint32_t size, bool is_render);
uint32_t vc4_gem_hindex(struct vc4_context *vc4, struct vc4_bo *bo);
+struct PACKED unaligned_16 { uint16_t x; };
+struct PACKED unaligned_32 { uint32_t x; };
+
/* Store a 32-bit value at a location that may not be 4-byte aligned.
 *
 * A fixed-size memcpy() is the standard-conformant unaligned-store
 * idiom: unlike a store through a cast pointer, it carries no
 * alignment precondition and no strict-aliasing hazard, and compilers
 * lower it to a plain store on targets where that is safe.
 */
static inline void
put_unaligned_32(void *ptr, uint32_t val)
{
        memcpy(ptr, &val, sizeof(val));
}

/* Store a 16-bit value at a location that may not be 2-byte aligned.
 *
 * Uses memcpy() rather than a store through a cast pointer for the
 * same reasons as put_unaligned_32(): no alignment precondition, no
 * strict-aliasing hazard, and it optimizes to a single store where
 * the target allows one.
 */
static inline void
put_unaligned_16(void *ptr, uint16_t val)
{
        memcpy(ptr, &val, sizeof(val));
}

static inline void
cl_u8(struct vc4_cl *cl, uint8_t n)
{
@@ -59,7 +77,7 @@ cl_u16(struct vc4_cl *cl, uint16_t n)
{
assert((cl->next - cl->base) + 2 <= cl->size);
- *(uint16_t *)cl->next = n;
+ put_unaligned_16(cl->next, n);
cl->next += 2;
}
@@ -68,6 +86,15 @@ cl_u32(struct vc4_cl *cl, uint32_t n)
{
assert((cl->next - cl->base) + 4 <= cl->size);
+ put_unaligned_32(cl->next, n);
+ cl->next += 4;
+}
+
+static inline void
+cl_aligned_u32(struct vc4_cl *cl, uint32_t n)
+{
+ assert((cl->next - cl->base) + 4 <= cl->size);
+
*(uint32_t *)cl->next = n;
cl->next += 4;
}
@@ -88,6 +115,12 @@ cl_f(struct vc4_cl *cl, float f)
}
/* Append a float (bit-cast to u32 by fui()) at an aligned cl->next. */
static inline void
cl_aligned_f(struct vc4_cl *cl, float f)
{
        cl_aligned_u32(cl, fui(f));
}

+static inline void
cl_start_reloc(struct vc4_cl *cl, uint32_t n)
{
assert(n == 1 || n == 2);
@@ -123,12 +156,30 @@ cl_reloc_hindex(struct vc4_cl *cl, uint32_t hindex, uint32_t offset)
}
static inline void
+cl_aligned_reloc_hindex(struct vc4_cl *cl, uint32_t hindex, uint32_t offset)
+{
+ *(uint32_t *)(cl->base + cl->reloc_next) = hindex;
+ cl->reloc_next += 4;
+
+ cl->reloc_count--;
+
+ cl_aligned_u32(cl, offset);
+}
+
/* Emit a relocation for a BO, translating the BO to its handle index
 * via vc4_gem_hindex().
 */
static inline void
cl_reloc(struct vc4_context *vc4, struct vc4_cl *cl,
         struct vc4_bo *bo, uint32_t offset)
{
        uint32_t hindex = vc4_gem_hindex(vc4, bo);

        cl_reloc_hindex(cl, hindex, offset);
}
/* Aligned-path counterpart of cl_reloc(): translates the BO to its
 * handle index and emits the reloc with an aligned offset store.
 */
static inline void
cl_aligned_reloc(struct vc4_context *vc4, struct vc4_cl *cl,
                 struct vc4_bo *bo, uint32_t offset)
{
        uint32_t hindex = vc4_gem_hindex(vc4, bo);

        cl_aligned_reloc_hindex(cl, hindex, offset);
}

void cl_ensure_space(struct vc4_cl *cl, uint32_t size);
#endif /* VC4_CL_H */