author     Lionel Landwerlin <[email protected]>  2018-03-14 15:44:56 +0000
committer  Lionel Landwerlin <[email protected]>  2018-03-22 20:14:22 +0000
commit     c1900f5b0fb7a6f22a13f67e2645f3754b5df245 (patch)
tree       fba566e29ff7954a161e0e29e9015f2a9d3b39f9 /src/intel/dev/gen_device_info.c
parent     2d26c9993389a8eb8f7125e2440a2e7c5729a405 (diff)
intel: devinfo: add helper functions to fill fusing mask values
There are a couple of ways we can get the fusing information from the kernel:

 - Through DRM_I915_GETPARAM with the SLICE_MASK/SUBSLICE_MASK parameters
 - Through the new DRM_IOCTL_I915_QUERY by requesting DRM_I915_QUERY_TOPOLOGY_INFO

The second method is more accurate and also gives us the EU fusing masks. It is also a requirement for CNL, as that platform has asymmetric subslices and the first method's SUBSLICE_MASK value is assumed uniform across slices.

v2: Change gen_device_info_update_from_masks() to generate a topology and call into gen_device_info_update_from_topology() (Lionel/Ken)

Signed-off-by: Lionel Landwerlin <[email protected]>
Reviewed-by: Kenneth Graunke <[email protected]>
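[Editor's note] For illustration, a minimal sketch of the first (legacy) path: querying the masks through DRM_I915_GETPARAM and feeding them to the new helper. The getparam() and update_from_getparam() names are hypothetical; the parameters and ioctl are the real i915 uAPI, and on kernels that lack them the caller would keep the static per-PCI-ID defaults.

#include <stdbool.h>

#include <i915_drm.h>
#include <xf86drm.h>

#include "gen_device_info.h"

/* Hypothetical wrapper around DRM_IOCTL_I915_GETPARAM. */
static bool
getparam(int fd, int param, int *value)
{
   struct drm_i915_getparam gp = {
      .param = param,
      .value = value,
   };

   return drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0;
}

/* Hypothetical caller: query the fusing masks, then update devinfo. */
static bool
update_from_getparam(int fd, struct gen_device_info *devinfo)
{
   int slice_mask = 0, subslice_mask = 0, n_eus = 0;

   if (!getparam(fd, I915_PARAM_SLICE_MASK, &slice_mask) ||
       !getparam(fd, I915_PARAM_SUBSLICE_MASK, &subslice_mask) ||
       !getparam(fd, I915_PARAM_EU_TOTAL, &n_eus))
      return false; /* older kernel: keep the static per-PCI-ID defaults */

   gen_device_info_update_from_masks(devinfo, slice_mask,
                                     subslice_mask, n_eus);
   return true;
}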
Diffstat (limited to 'src/intel/dev/gen_device_info.c')
-rw-r--r--  src/intel/dev/gen_device_info.c  | 126
1 file changed, 126 insertions(+), 0 deletions(-)
diff --git a/src/intel/dev/gen_device_info.c b/src/intel/dev/gen_device_info.c
index acf921b60ae..f7cb94f1795 100644
--- a/src/intel/dev/gen_device_info.c
+++ b/src/intel/dev/gen_device_info.c
@@ -28,8 +28,11 @@
#include <unistd.h>
#include "gen_device_info.h"
#include "compiler/shader_enums.h"
+#include "util/bitscan.h"
#include "util/macros.h"
+#include <i915_drm.h>
+
/**
* Get the PCI ID for the device name.
*
@@ -925,6 +928,129 @@ fill_masks(struct gen_device_info *devinfo)
}
}

+void
+gen_device_info_update_from_masks(struct gen_device_info *devinfo,
+ uint32_t slice_mask,
+ uint32_t subslice_mask,
+ uint32_t n_eus)
+{
+ struct {
+ struct drm_i915_query_topology_info base;
+ uint8_t data[100];
+ } topology;
+
+ assert((slice_mask & 0xff) == slice_mask);
+
+ memset(&topology, 0, sizeof(topology));
+
+ topology.base.max_slices = util_last_bit(slice_mask);
+ topology.base.max_subslices = util_last_bit(subslice_mask);
+
+ topology.base.subslice_offset = DIV_ROUND_UP(topology.base.max_slices, 8);
+ topology.base.subslice_stride = DIV_ROUND_UP(topology.base.max_subslices, 8);
+
+ uint32_t n_subslices = __builtin_popcount(slice_mask) *
+ __builtin_popcount(subslice_mask);
+ uint32_t num_eu_per_subslice = DIV_ROUND_UP(n_eus, n_subslices);
+ uint32_t eu_mask = (1U << num_eu_per_subslice) - 1;
+
+ topology.base.eu_offset = topology.base.subslice_offset +
+ DIV_ROUND_UP(topology.base.max_subslices, 8);
+ topology.base.eu_stride = DIV_ROUND_UP(num_eu_per_subslice, 8);
+
+ /* Set slice mask in topology */
+ for (int b = 0; b < topology.base.subslice_offset; b++)
+ topology.base.data[b] = (slice_mask >> (b * 8)) & 0xff;
+
+ for (int s = 0; s < topology.base.max_slices; s++) {
+
+ /* Set subslice mask in topology */
+ for (int b = 0; b < topology.base.subslice_stride; b++) {
+ int subslice_offset = topology.base.subslice_offset +
+ s * topology.base.subslice_stride + b;
+
+ topology.base.data[subslice_offset] = (subslice_mask >> (b * 8)) & 0xff;
+ }
+
+ /* Set eu mask in topology */
+ for (int ss = 0; ss < topology.base.max_subslices; ss++) {
+ for (int b = 0; b < topology.base.eu_stride; b++) {
+ int eu_offset = topology.base.eu_offset +
+ (s * topology.base.max_subslices + ss) * topology.base.eu_stride + b;
+
+ topology.base.data[eu_offset] = (eu_mask >> (b * 8)) & 0xff;
+ }
+ }
+ }
+
+ gen_device_info_update_from_topology(devinfo, &topology.base);
+}
+
+static void
+reset_masks(struct gen_device_info *devinfo)
+{
+ devinfo->subslice_slice_stride = 0;
+ devinfo->eu_subslice_stride = 0;
+ devinfo->eu_slice_stride = 0;
+
+ devinfo->num_slices = 0;
+ devinfo->num_eu_per_subslice = 0;
+ memset(devinfo->num_subslices, 0, sizeof(devinfo->num_subslices));
+
+ memset(&devinfo->slice_masks, 0, sizeof(devinfo->slice_masks));
+ memset(devinfo->subslice_masks, 0, sizeof(devinfo->subslice_masks));
+ memset(devinfo->eu_masks, 0, sizeof(devinfo->eu_masks));
+}
+
+void
+gen_device_info_update_from_topology(struct gen_device_info *devinfo,
+ const struct drm_i915_query_topology_info *topology)
+{
+ reset_masks(devinfo);
+
+ devinfo->subslice_slice_stride = topology->subslice_stride;
+
+ devinfo->eu_subslice_stride = DIV_ROUND_UP(topology->max_eus_per_subslice, 8);
+ devinfo->eu_slice_stride = topology->max_subslices * devinfo->eu_subslice_stride;
+
+ assert(sizeof(devinfo->slice_masks) >= DIV_ROUND_UP(topology->max_slices, 8));
+ memcpy(&devinfo->slice_masks, topology->data, DIV_ROUND_UP(topology->max_slices, 8));
+ devinfo->num_slices = __builtin_popcount(devinfo->slice_masks);
+
+ uint32_t subslice_mask_len =
+ topology->max_slices * topology->subslice_stride;
+ assert(sizeof(devinfo->subslice_masks) >= subslice_mask_len);
+ memcpy(devinfo->subslice_masks, &topology->data[topology->subslice_offset],
+ subslice_mask_len);
+
+ uint32_t n_subslices = 0;
+ for (int s = 0; s < topology->max_slices; s++) {
+ if ((devinfo->slice_masks & (1UL << s)) == 0)
+ continue;
+
+ for (int b = 0; b < devinfo->subslice_slice_stride; b++) {
+ devinfo->num_subslices[s] +=
+ __builtin_popcount(devinfo->subslice_masks[s * devinfo->subslice_slice_stride + b]);
+ }
+ n_subslices += devinfo->num_subslices[s];
+ }
+
+ uint32_t eu_mask_len =
+ topology->eu_stride * topology->max_subslices * topology->max_slices;
+ assert(sizeof(devinfo->eu_masks) >= eu_mask_len);
+ memcpy(devinfo->eu_masks, &topology->data[topology->eu_offset], eu_mask_len);
+
+ uint32_t n_eus = 0;
+ for (int b = 0; b < eu_mask_len; b++)
+ n_eus += __builtin_popcount(devinfo->eu_masks[b]);
+
+ /* We expect the total number of EUs to be uniformly distributed throughout
+ * the subslices.
+ */
+ assert((n_eus % n_subslices) == 0);
+ devinfo->num_eu_per_subslice = n_eus / n_subslices;
+}
+
bool
gen_get_device_info(int devid, struct gen_device_info *devinfo)
{
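[Editor's note] To round out the picture, a similar sketch of the second (preferred) path, using the two-call DRM_IOCTL_I915_QUERY protocol: the first ioctl, issued with item.length == 0, asks the kernel for the required buffer size, and the second fills the buffer. update_from_topology_query() is a hypothetical name; the query item and struct layout are the real i915 uAPI.

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#include <i915_drm.h>
#include <xf86drm.h>

#include "gen_device_info.h"

/* Hypothetical caller for the DRM_I915_QUERY_TOPOLOGY_INFO path. */
static bool
update_from_topology_query(int fd, struct gen_device_info *devinfo)
{
   struct drm_i915_query_item item = {
      .query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
   };
   struct drm_i915_query query = {
      .num_items = 1,
      .items_ptr = (uintptr_t) &item,
   };

   /* First call: the kernel reports the required size in item.length. */
   if (drmIoctl(fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0)
      return false;

   struct drm_i915_query_topology_info *topo = calloc(1, item.length);
   if (topo == NULL)
      return false;

   /* Second call: the kernel fills the topology buffer. */
   item.data_ptr = (uintptr_t) topo;
   if (drmIoctl(fd, DRM_IOCTL_I915_QUERY, &query)) {
      free(topo);
      return false;
   }

   gen_device_info_update_from_topology(devinfo, topo);
   free(topo);
   return true;
}

As a worked example of the layout gen_device_info_update_from_masks() builds: for slice_mask = 0x1, subslice_mask = 0x7 and n_eus = 24 (a 1-slice, 3-subslice, 24-EU part), subslice_offset and subslice_stride both come out to 1 byte, num_eu_per_subslice is DIV_ROUND_UP(24, 3) = 8 (so eu_mask = 0xff), eu_offset = 2 and eu_stride = 1, giving data[] = { 0x01, 0x07, 0xff, 0xff, 0xff }.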