author     Lionel Landwerlin <[email protected]>   2020-02-02 14:25:16 +0100
committer  Lionel Landwerlin <[email protected]>   2020-05-20 14:02:27 +0300
commit  796fccce631bf8ecb6ce2fd1a68f219788693a6e (patch)
tree    0eea5e0705ad84dd299b6963a92ffa09fa26a741 /src/intel/common
parent  570bd760d3e1c2754fc045981d2162df67e81592 (diff)
intel/mi-builder: add framework for self modifying batches
v2: Use Jason's idea to store addresses to modify
v3: Add ALU flushes (Jason)
v4: Remove ALU flush from gen_mi_self_mod_barrier() (Jason)

Signed-off-by: Lionel Landwerlin <[email protected]>
Reviewed-by: Jason Ekstrand <[email protected]> (v2)
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/2775>
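A minimal usage sketch may help illustrate the intended flow of the new helpers. This is not part of the commit: the builder setup, the CS_GPR(0) offset 0x2600 and the emit_jump_target()/jump_address_field() helpers are illustrative assumptions; gen_mi_builder_init(), gen_mi_reg64() and the functions added below come from gen_mi_builder.h.

/* Hypothetical sketch: patch a 64-bit address field of a later instruction
 * with a value computed on the command streamer at execution time.
 */
struct gen_mi_builder b;
gen_mi_builder_init(&b, batch);   /* batch: driver-specific __gen_user_data */

/* ... MI_MATH/MI ops computing the target address into CS_GPR(0) ... */

/* Record where the contents of CS_GPR(0) must be written in the batch. */
struct gen_mi_address_token token =
   gen_mi_store_address(&b, gen_mi_reg64(0x2600 /* CS_GPR(0), assumed */));

/* Keep the CS prefetcher from reading the instruction we are about to
 * modify before the MI_STORE_REGISTER_MEMs above have executed.
 */
gen_mi_self_mod_barrier(&b);

/* Emit the instruction to be patched, then point the token at its address
 * field so the stores recorded above overwrite it at execution time.
 */
void *addr_field = jump_address_field(emit_jump_target(batch)); /* hypothetical */
_gen_mi_resolve_address_token(&b, token, addr_field);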
Diffstat (limited to 'src/intel/common')
-rw-r--r--  src/intel/common/gen_mi_builder.h  69
1 file changed, 69 insertions, 0 deletions
diff --git a/src/intel/common/gen_mi_builder.h b/src/intel/common/gen_mi_builder.h
index bcd685754a3..5f3f4e28c1f 100644
--- a/src/intel/common/gen_mi_builder.h
+++ b/src/intel/common/gen_mi_builder.h
@@ -24,6 +24,7 @@
#ifndef GEN_MI_BUILDER_H
#define GEN_MI_BUILDER_H
+#include "genxml/genX_bits.h"
#include "util/bitscan.h"
#include "util/fast_idiv_by_const.h"
#include "util/u_math.h"
@@ -45,6 +46,18 @@
* __gen_address_type
* __gen_address_offset(__gen_address_type addr, uint64_t offset);
*
+ *
+ * If self-modifying batches are supported, we must be able to pass batch
+ * addresses around as void*s, so pinning, batch chaining, or some other
+ * mechanism for ensuring batch pointers remain valid during building is
+ * required. The following function must also be defined; it returns an
+ * address in canonical form:
+ *
+ *   uint64_t
+ *   __gen_get_batch_address(__gen_user_data *user_data, void *location);
+ *
+ * Also, __gen_combine_address must accept a location value of NULL and
+ * return a fully valid 64-bit address.
*/
/*
@@ -831,4 +844,60 @@ gen_mi_udiv32_imm(struct gen_mi_builder *b,
#endif /* MI_MATH section */
+/* This assumes addresses of more than 32 bits (i.e. Gen8+). */
+#if GEN_MI_BUILDER_CAN_WRITE_BATCH
+
+struct gen_mi_address_token {
+   /* Pointers to address memory fields in the batch. */
+   uint64_t *ptrs[2];
+};
+
+static inline struct gen_mi_address_token
+gen_mi_store_address(struct gen_mi_builder *b,
+                     struct gen_mi_value addr_reg)
+{
+   gen_mi_builder_flush_math(b);
+
+   assert(addr_reg.type == GEN_MI_VALUE_TYPE_REG64);
+
+   struct gen_mi_address_token token = {};
+
+   for (unsigned i = 0; i < 2; i++) {
+      gen_mi_builder_emit(b, GENX(MI_STORE_REGISTER_MEM), srm) {
+         srm.RegisterAddress = addr_reg.reg + (i * 4);
+
+         const unsigned addr_dw =
+            GENX(MI_STORE_REGISTER_MEM_MemoryAddress_start) / 8;
+         token.ptrs[i] = (void *)_dst + addr_dw;
+      }
+   }
+
+   gen_mi_value_unref(b, addr_reg);
+   return token;
+}
+
+static inline void
+gen_mi_self_mod_barrier(struct gen_mi_builder *b)
+{
+   /* Documentation says Gen11+ should be able to invalidate the command cache
+    * but experiments show it doesn't work properly, so for now just pad past
+    * the CS prefetch.
+    */
+   for (uint32_t i = 0; i < 128; i++)
+      gen_mi_builder_emit(b, GENX(MI_NOOP), noop);
+}
+
+static inline void
+_gen_mi_resolve_address_token(struct gen_mi_builder *b,
+                              struct gen_mi_address_token token,
+                              void *batch_location)
+{
+   uint64_t addr_addr_u64 = __gen_get_batch_address(b->user_data,
+                                                    batch_location);
+   *(token.ptrs[0]) = addr_addr_u64;
+   *(token.ptrs[1]) = addr_addr_u64 + 4;
+}
+
+#endif /* GEN_MI_BUILDER_CAN_WRITE_BATCH */
+
#endif /* GEN_MI_BUILDER_H */
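
The new header comment above requires every user of the builder to supply __gen_get_batch_address(). A minimal sketch of such a hook, assuming a single pinned batch buffer whose GPU address is known up front; struct example_batch and its fields are hypothetical and not part of Mesa:

struct example_batch {
   void     *map;       /* CPU mapping of the batch buffer     */
   uint64_t  gpu_addr;  /* GPU address the buffer is pinned at */
};

static uint64_t
__gen_get_batch_address(__gen_user_data *user_data, void *location)
{
   struct example_batch *batch = (struct example_batch *)user_data;

   /* Translate the CPU pointer into the matching GPU address... */
   uint64_t addr = batch->gpu_addr +
                   (uint64_t)((char *)location - (char *)batch->map);

   /* ...and return it in canonical form (sign-extend from bit 47). */
   return (uint64_t)(((int64_t)addr << 16) >> 16);
}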