Diffstat (limited to 'src/gallium/drivers/iris/iris_resolve.c')
-rw-r--r--   src/gallium/drivers/iris/iris_resolve.c   854
1 files changed, 854 insertions, 0 deletions
diff --git a/src/gallium/drivers/iris/iris_resolve.c b/src/gallium/drivers/iris/iris_resolve.c
index 737bcfe0e90..9fc32fc0482 100644
--- a/src/gallium/drivers/iris/iris_resolve.c
+++ b/src/gallium/drivers/iris/iris_resolve.c
@@ -286,3 +286,857 @@ iris_depth_cache_add_bo(struct iris_batch *batch, struct iris_bo *bo)
{
_mesa_set_add_pre_hashed(batch->cache.depth, bo->hash, bo);
}
+
+/**
+ * Return true if the format that will be used to access the resource is
+ * CCS_E-compatible with the resource's linear/non-sRGB format.
+ *
+ * Why use the linear format? Well, although the resource may be specified
+ * with an sRGB format, the usage of that color space/format can be toggled.
+ * Since our HW tends to support more linear formats than sRGB ones, we use
+ * this format variant to check for CCS_E compatibility.
+ */
+static bool
+format_ccs_e_compat_with_resource(const struct gen_device_info *devinfo,
+ const struct iris_resource *res,
+ enum isl_format access_format)
+{
+ assert(res->aux.usage == ISL_AUX_USAGE_CCS_E);
+
+ enum isl_format isl_format = isl_format_srgb_to_linear(res->surf.format);
+ return isl_formats_are_ccs_e_compatible(devinfo, isl_format, access_format);
+}
+
+static bool
+sample_with_hiz(const struct gen_device_info *devinfo,
+ const struct iris_resource *res)
+{
+ if (!devinfo->has_sample_with_hiz)
+ return false;
+
+ if (res->aux.usage != ISL_AUX_USAGE_HIZ)
+ return false;
+
+ /* It seems the hardware won't fall back to the depth buffer if some of the
+ * mipmap levels aren't available in the HiZ buffer. So we need all levels
+ * of the texture to be HiZ enabled.
+ */
+ for (unsigned level = 0; level < res->surf.levels; ++level) {
+ if (!iris_resource_level_has_hiz(res, level))
+ return false;
+ }
+
+ /* If compressed multisampling is enabled, then we use it for the auxiliary
+ * buffer instead.
+ *
+ * From the BDW PRM (Volume 2d: Command Reference: Structures
+ * RENDER_SURFACE_STATE.AuxiliarySurfaceMode):
+ *
+ * "If this field is set to AUX_HIZ, Number of Multisamples must be
+ * MULTISAMPLECOUNT_1, and Surface Type cannot be SURFTYPE_3D."
+ *
+ * There is no such blurb for 1D textures, but there is sufficient evidence
+ * that this is broken on SKL+.
+ */
+ // XXX: i965 disables this for arrays too, is that reasonable?
+ return res->surf.samples == 1 && res->surf.dim == ISL_SURF_DIM_2D;
+}
+
+/**
+ * Does the given miplevel of the resource have HiZ enabled?
+ */
+bool
+iris_resource_level_has_hiz(const struct iris_resource *res, uint32_t level)
+{
+ iris_resource_check_level_layer(res, level, 0);
+ // return res->level[level].has_hiz;
+ return false;
+}
+
+/** \brief Assert that the level and layer are valid for the resource. */
+void
+iris_resource_check_level_layer(UNUSED const struct iris_resource *res,
+ UNUSED uint32_t level, UNUSED uint32_t layer)
+{
+ assert(level < res->surf.levels);
+ assert(layer < util_num_layers(&res->base, level));
+}
+
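+/* Resolve INTEL_REMAINING_LEVELS to an explicit level count and assert that
+ * the requested range fits within the resource's miplevels.
+ */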
+static inline uint32_t
+miptree_level_range_length(const struct iris_resource *res,
+ uint32_t start_level, uint32_t num_levels)
+{
+ assert(start_level < res->surf.levels);
+
+ if (num_levels == INTEL_REMAINING_LEVELS)
+ num_levels = res->surf.levels - start_level;
+
+ /* Check for overflow */
+ assert(start_level + num_levels >= start_level);
+ assert(start_level + num_levels <= res->surf.levels);
+
+ return num_levels;
+}
+
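+/* Resolve INTEL_REMAINING_LAYERS to an explicit layer count for the given
+ * miplevel and assert that the range is within bounds.
+ */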
+static inline uint32_t
+miptree_layer_range_length(const struct iris_resource *res, uint32_t level,
+ uint32_t start_layer, uint32_t num_layers)
+{
+ assert(level <= res->base.last_level);
+
+ const uint32_t total_num_layers = iris_get_num_logical_layers(res, level);
+ assert(start_layer < total_num_layers);
+ if (num_layers == INTEL_REMAINING_LAYERS)
+ num_layers = total_num_layers - start_layer;
+ /* Check for overflow */
+ assert(start_layer + num_layers >= start_layer);
+ assert(start_layer + num_layers <= total_num_layers);
+
+ return num_layers;
+}
+
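+/* Return true if any slice in the given level/layer range still carries
+ * fast-clear or compressed data, i.e. is not in the pass-through state.
+ */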
+static bool
+has_color_unresolved(const struct iris_resource *res,
+ unsigned start_level, unsigned num_levels,
+ unsigned start_layer, unsigned num_layers)
+{
+ if (!res->aux.bo)
+ return false;
+
+ /* Clamp the level range to fit the resource */
+ num_levels = miptree_level_range_length(res, start_level, num_levels);
+
+ for (uint32_t l = 0; l < num_levels; l++) {
+ const uint32_t level = start_level + l;
+ const uint32_t level_layers =
+ miptree_layer_range_length(res, level, start_layer, num_layers);
+ for (unsigned a = 0; a < level_layers; a++) {
+ enum isl_aux_state aux_state =
+ iris_resource_get_aux_state(res, level, start_layer + a);
+ assert(aux_state != ISL_AUX_STATE_AUX_INVALID);
+ if (aux_state != ISL_AUX_STATE_PASS_THROUGH)
+ return true;
+ }
+ }
+
+ return false;
+}
+
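+/* Pick the resolve operation, if any, required before a CCS_D (fast-clear
+ * only) surface can be accessed with the given aux usage.
+ */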
+static enum isl_aux_op
+get_ccs_d_resolve_op(enum isl_aux_state aux_state,
+ enum isl_aux_usage aux_usage,
+ bool fast_clear_supported)
+{
+ assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_CCS_D);
+
+ const bool ccs_supported = aux_usage == ISL_AUX_USAGE_CCS_D;
+
+ assert(ccs_supported == fast_clear_supported);
+
+ switch (aux_state) {
+ case ISL_AUX_STATE_CLEAR:
+ case ISL_AUX_STATE_PARTIAL_CLEAR:
+ if (!ccs_supported)
+ return ISL_AUX_OP_FULL_RESOLVE;
+ else
+ return ISL_AUX_OP_NONE;
+
+ case ISL_AUX_STATE_PASS_THROUGH:
+ return ISL_AUX_OP_NONE;
+
+ case ISL_AUX_STATE_RESOLVED:
+ case ISL_AUX_STATE_AUX_INVALID:
+ case ISL_AUX_STATE_COMPRESSED_CLEAR:
+ case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
+ break;
+ }
+
+ unreachable("Invalid aux state for CCS_D");
+}
+
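+/* Pick the resolve operation, if any, required before a CCS_E (lossless
+ * compression) surface can be accessed with the given aux usage.
+ */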
+static enum isl_aux_op
+get_ccs_e_resolve_op(enum isl_aux_state aux_state,
+ enum isl_aux_usage aux_usage,
+ bool fast_clear_supported)
+{
+ /* CCS_E surfaces can be accessed as CCS_D if we're careful. */
+ assert(aux_usage == ISL_AUX_USAGE_NONE ||
+ aux_usage == ISL_AUX_USAGE_CCS_D ||
+ aux_usage == ISL_AUX_USAGE_CCS_E);
+
+ if (aux_usage == ISL_AUX_USAGE_CCS_D)
+ assert(fast_clear_supported);
+
+ switch (aux_state) {
+ case ISL_AUX_STATE_CLEAR:
+ case ISL_AUX_STATE_PARTIAL_CLEAR:
+ if (fast_clear_supported)
+ return ISL_AUX_OP_NONE;
+ else if (aux_usage == ISL_AUX_USAGE_CCS_E)
+ return ISL_AUX_OP_PARTIAL_RESOLVE;
+ else
+ return ISL_AUX_OP_FULL_RESOLVE;
+
+ case ISL_AUX_STATE_COMPRESSED_CLEAR:
+ if (aux_usage != ISL_AUX_USAGE_CCS_E)
+ return ISL_AUX_OP_FULL_RESOLVE;
+ else if (!fast_clear_supported)
+ return ISL_AUX_OP_PARTIAL_RESOLVE;
+ else
+ return ISL_AUX_OP_NONE;
+
+ case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
+ if (aux_usage != ISL_AUX_USAGE_CCS_E)
+ return ISL_AUX_OP_FULL_RESOLVE;
+ else
+ return ISL_AUX_OP_NONE;
+
+ case ISL_AUX_STATE_PASS_THROUGH:
+ return ISL_AUX_OP_NONE;
+
+ case ISL_AUX_STATE_RESOLVED:
+ case ISL_AUX_STATE_AUX_INVALID:
+ break;
+ }
+
+ unreachable("Invalid aux state for CCS_E");
+}
+
+static void
+iris_resource_prepare_ccs_access(struct iris_context *ice,
+ struct iris_resource *res,
+ uint32_t level, uint32_t layer,
+ enum isl_aux_usage aux_usage,
+ bool fast_clear_supported)
+{
+ enum isl_aux_state aux_state = iris_resource_get_aux_state(res, level, layer);
+
+ enum isl_aux_op resolve_op;
+ if (res->aux.usage == ISL_AUX_USAGE_CCS_E) {
+ resolve_op = get_ccs_e_resolve_op(aux_state, aux_usage,
+ fast_clear_supported);
+ } else {
+ assert(res->aux.usage == ISL_AUX_USAGE_CCS_D);
+ resolve_op = get_ccs_d_resolve_op(aux_state, aux_usage,
+ fast_clear_supported);
+ }
+
+ if (resolve_op != ISL_AUX_OP_NONE) {
+ // XXX: iris_blorp_resolve_color(ice, res, level, layer, resolve_op);
+
+ switch (resolve_op) {
+ case ISL_AUX_OP_FULL_RESOLVE:
+ /* The CCS full resolve operation destroys the CCS and sets it to the
+ * pass-through state. (You can also think of this as being both a
+ * resolve and an ambiguate in one operation.)
+ */
+ iris_resource_set_aux_state(res, level, layer, 1,
+ ISL_AUX_STATE_PASS_THROUGH);
+ break;
+
+ case ISL_AUX_OP_PARTIAL_RESOLVE:
+ iris_resource_set_aux_state(res, level, layer, 1,
+ ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
+ break;
+
+ default:
+ unreachable("Invalid resolve op");
+ }
+ }
+}
+
+static void
+iris_resource_finish_ccs_write(struct iris_context *ice,
+ struct iris_resource *res,
+ uint32_t level, uint32_t layer,
+ enum isl_aux_usage aux_usage)
+{
+ assert(aux_usage == ISL_AUX_USAGE_NONE ||
+ aux_usage == ISL_AUX_USAGE_CCS_D ||
+ aux_usage == ISL_AUX_USAGE_CCS_E);
+
+ enum isl_aux_state aux_state =
+ iris_resource_get_aux_state(res, level, layer);
+
+ if (res->aux.usage == ISL_AUX_USAGE_CCS_E) {
+ switch (aux_state) {
+ case ISL_AUX_STATE_CLEAR:
+ case ISL_AUX_STATE_PARTIAL_CLEAR:
+ assert(aux_usage == ISL_AUX_USAGE_CCS_E ||
+ aux_usage == ISL_AUX_USAGE_CCS_D);
+
+ if (aux_usage == ISL_AUX_USAGE_CCS_E) {
+ iris_resource_set_aux_state(res, level, layer, 1,
+ ISL_AUX_STATE_COMPRESSED_CLEAR);
+ } else if (aux_state != ISL_AUX_STATE_PARTIAL_CLEAR) {
+ iris_resource_set_aux_state(res, level, layer, 1,
+ ISL_AUX_STATE_PARTIAL_CLEAR);
+ }
+ break;
+
+ case ISL_AUX_STATE_COMPRESSED_CLEAR:
+ case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
+ assert(aux_usage == ISL_AUX_USAGE_CCS_E);
+ break; /* Nothing to do */
+
+ case ISL_AUX_STATE_PASS_THROUGH:
+ if (aux_usage == ISL_AUX_USAGE_CCS_E) {
+ iris_resource_set_aux_state(res, level, layer, 1,
+ ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
+ } else {
+ /* Nothing to do */
+ }
+ break;
+
+ case ISL_AUX_STATE_RESOLVED:
+ case ISL_AUX_STATE_AUX_INVALID:
+ unreachable("Invalid aux state for CCS_E");
+ }
+ } else {
+ assert(res->aux.usage == ISL_AUX_USAGE_CCS_D);
+ /* CCS_D is a bit simpler */
+ switch (aux_state) {
+ case ISL_AUX_STATE_CLEAR:
+ assert(aux_usage == ISL_AUX_USAGE_CCS_D);
+ iris_resource_set_aux_state(res, level, layer, 1,
+ ISL_AUX_STATE_PARTIAL_CLEAR);
+ break;
+
+ case ISL_AUX_STATE_PARTIAL_CLEAR:
+ assert(aux_usage == ISL_AUX_USAGE_CCS_D);
+ break; /* Nothing to do */
+
+ case ISL_AUX_STATE_PASS_THROUGH:
+ /* Nothing to do */
+ break;
+
+ case ISL_AUX_STATE_COMPRESSED_CLEAR:
+ case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
+ case ISL_AUX_STATE_RESOLVED:
+ case ISL_AUX_STATE_AUX_INVALID:
+ unreachable("Invalid aux state for CCS_D");
+ }
+ }
+}
+
+static void
+iris_resource_prepare_mcs_access(struct iris_context *ice,
+ struct iris_resource *res,
+ uint32_t layer,
+ enum isl_aux_usage aux_usage,
+ bool fast_clear_supported)
+{
+ assert(aux_usage == ISL_AUX_USAGE_MCS);
+
+ switch (iris_resource_get_aux_state(res, 0, layer)) {
+ case ISL_AUX_STATE_CLEAR:
+ case ISL_AUX_STATE_COMPRESSED_CLEAR:
+ if (!fast_clear_supported) {
+ // XXX: iris_blorp_mcs_partial_resolve(ice, res, layer, 1);
+ iris_resource_set_aux_state(res, 0, layer, 1,
+ ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
+ }
+ break;
+
+ case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
+ break; /* Nothing to do */
+
+ case ISL_AUX_STATE_RESOLVED:
+ case ISL_AUX_STATE_PASS_THROUGH:
+ case ISL_AUX_STATE_AUX_INVALID:
+ case ISL_AUX_STATE_PARTIAL_CLEAR:
+ unreachable("Invalid aux state for MCS");
+ }
+}
+
+static void
+iris_resource_finish_mcs_write(struct iris_context *ice,
+ struct iris_resource *res,
+ uint32_t layer,
+ enum isl_aux_usage aux_usage)
+{
+ assert(aux_usage == ISL_AUX_USAGE_MCS);
+
+ switch (iris_resource_get_aux_state(res, 0, layer)) {
+ case ISL_AUX_STATE_CLEAR:
+ iris_resource_set_aux_state(res, 0, layer, 1,
+ ISL_AUX_STATE_COMPRESSED_CLEAR);
+ break;
+
+ case ISL_AUX_STATE_COMPRESSED_CLEAR:
+ case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
+ break; /* Nothing to do */
+
+ case ISL_AUX_STATE_RESOLVED:
+ case ISL_AUX_STATE_PASS_THROUGH:
+ case ISL_AUX_STATE_AUX_INVALID:
+ case ISL_AUX_STATE_PARTIAL_CLEAR:
+ unreachable("Invalid aux state for MCS");
+ }
+}
+
+static void
+iris_resource_prepare_hiz_access(struct iris_context *ice,
+ struct iris_resource *res,
+ uint32_t level, uint32_t layer,
+ enum isl_aux_usage aux_usage,
+ bool fast_clear_supported)
+{
+ assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_HIZ);
+
+ enum isl_aux_op hiz_op = ISL_AUX_OP_NONE;
+ switch (iris_resource_get_aux_state(res, level, layer)) {
+ case ISL_AUX_STATE_CLEAR:
+ case ISL_AUX_STATE_COMPRESSED_CLEAR:
+ if (aux_usage != ISL_AUX_USAGE_HIZ || !fast_clear_supported)
+ hiz_op = ISL_AUX_OP_FULL_RESOLVE;
+ break;
+
+ case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
+ if (aux_usage != ISL_AUX_USAGE_HIZ)
+ hiz_op = ISL_AUX_OP_FULL_RESOLVE;
+ break;
+
+ case ISL_AUX_STATE_PASS_THROUGH:
+ case ISL_AUX_STATE_RESOLVED:
+ break;
+
+ case ISL_AUX_STATE_AUX_INVALID:
+ if (aux_usage == ISL_AUX_USAGE_HIZ)
+ hiz_op = ISL_AUX_OP_AMBIGUATE;
+ break;
+
+ case ISL_AUX_STATE_PARTIAL_CLEAR:
+ unreachable("Invalid HiZ state");
+ }
+
+ if (hiz_op != ISL_AUX_OP_NONE) {
+ // XXX: HiZ
+ //intel_hiz_exec(ice, res, level, layer, 1, hiz_op);
+
+ switch (hiz_op) {
+ case ISL_AUX_OP_FULL_RESOLVE:
+ iris_resource_set_aux_state(res, level, layer, 1,
+ ISL_AUX_STATE_RESOLVED);
+ break;
+
+ case ISL_AUX_OP_AMBIGUATE:
+ /* The HiZ resolve operation is actually an ambiguate */
+ iris_resource_set_aux_state(res, level, layer, 1,
+ ISL_AUX_STATE_PASS_THROUGH);
+ break;
+
+ default:
+ unreachable("Invalid HiZ op");
+ }
+ }
+}
+
+static void
+iris_resource_finish_hiz_write(struct iris_context *ice,
+ struct iris_resource *res,
+ uint32_t level, uint32_t layer,
+ enum isl_aux_usage aux_usage)
+{
+ assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_HIZ);
+
+ switch (iris_resource_get_aux_state(res, level, layer)) {
+ case ISL_AUX_STATE_CLEAR:
+ assert(aux_usage == ISL_AUX_USAGE_HIZ);
+ iris_resource_set_aux_state(res, level, layer, 1,
+ ISL_AUX_STATE_COMPRESSED_CLEAR);
+ break;
+
+ case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
+ case ISL_AUX_STATE_COMPRESSED_CLEAR:
+ assert(aux_usage == ISL_AUX_USAGE_HIZ);
+ break; /* Nothing to do */
+
+ case ISL_AUX_STATE_RESOLVED:
+ if (aux_usage == ISL_AUX_USAGE_HIZ) {
+ iris_resource_set_aux_state(res, level, layer, 1,
+ ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
+ } else {
+ iris_resource_set_aux_state(res, level, layer, 1,
+ ISL_AUX_STATE_AUX_INVALID);
+ }
+ break;
+
+ case ISL_AUX_STATE_PASS_THROUGH:
+ if (aux_usage == ISL_AUX_USAGE_HIZ) {
+ iris_resource_set_aux_state(res, level, layer, 1,
+ ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
+ }
+ break;
+
+ case ISL_AUX_STATE_AUX_INVALID:
+ assert(aux_usage != ISL_AUX_USAGE_HIZ);
+ break;
+
+ case ISL_AUX_STATE_PARTIAL_CLEAR:
+ unreachable("Invalid HiZ state");
+ }
+}
+
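+/**
+ * Perform any resolves needed before accessing the given range of the
+ * resource with the given aux usage.  \p fast_clear_supported says whether
+ * the access can consume fast-cleared blocks without a resolve.
+ */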
+void
+iris_resource_prepare_access(struct iris_context *ice,
+ struct iris_resource *res,
+ uint32_t start_level, uint32_t num_levels,
+ uint32_t start_layer, uint32_t num_layers,
+ enum isl_aux_usage aux_usage,
+ bool fast_clear_supported)
+{
+ num_levels = miptree_level_range_length(res, start_level, num_levels);
+
+ switch (res->aux.usage) {
+ case ISL_AUX_USAGE_NONE:
+ /* Nothing to do */
+ break;
+
+ case ISL_AUX_USAGE_MCS:
+ assert(start_level == 0 && num_levels == 1);
+ const uint32_t level_layers =
+ miptree_layer_range_length(res, 0, start_layer, num_layers);
+ for (uint32_t a = 0; a < level_layers; a++) {
+ iris_resource_prepare_mcs_access(ice, res, start_layer + a,
+ aux_usage, fast_clear_supported);
+ }
+ break;
+
+ case ISL_AUX_USAGE_CCS_D:
+ case ISL_AUX_USAGE_CCS_E:
+ for (uint32_t l = 0; l < num_levels; l++) {
+ const uint32_t level = start_level + l;
+ const uint32_t level_layers =
+ miptree_layer_range_length(res, level, start_layer, num_layers);
+ for (uint32_t a = 0; a < level_layers; a++) {
+ iris_resource_prepare_ccs_access(ice, res, level,
+ start_layer + a,
+ aux_usage, fast_clear_supported);
+ }
+ }
+ break;
+
+ case ISL_AUX_USAGE_HIZ:
+ for (uint32_t l = 0; l < num_levels; l++) {
+ const uint32_t level = start_level + l;
+ if (!iris_resource_level_has_hiz(res, level))
+ continue;
+
+ const uint32_t level_layers =
+ miptree_layer_range_length(res, level, start_layer, num_layers);
+ for (uint32_t a = 0; a < level_layers; a++) {
+ iris_resource_prepare_hiz_access(ice, res, level, start_layer + a,
+ aux_usage, fast_clear_supported);
+ }
+ }
+ break;
+
+ default:
+ unreachable("Invalid aux usage");
+ }
+}
+
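+/**
+ * Update the tracked aux state after the given range of the resource has
+ * been written with the given aux usage.
+ */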
+void
+iris_resource_finish_write(struct iris_context *ice,
+ struct iris_resource *res, uint32_t level,
+ uint32_t start_layer, uint32_t num_layers,
+ enum isl_aux_usage aux_usage)
+{
+ num_layers = miptree_layer_range_length(res, level, start_layer, num_layers);
+
+ switch (res->aux.usage) {
+ case ISL_AUX_USAGE_NONE:
+ break;
+
+ case ISL_AUX_USAGE_MCS:
+ for (uint32_t a = 0; a < num_layers; a++) {
+ iris_resource_finish_mcs_write(ice, res, start_layer + a,
+ aux_usage);
+ }
+ break;
+
+ case ISL_AUX_USAGE_CCS_D:
+ case ISL_AUX_USAGE_CCS_E:
+ for (uint32_t a = 0; a < num_layers; a++) {
+ iris_resource_finish_ccs_write(ice, res, level, start_layer + a,
+ aux_usage);
+ }
+ break;
+
+ case ISL_AUX_USAGE_HIZ:
+ if (!iris_resource_level_has_hiz(res, level))
+ return;
+
+ for (uint32_t a = 0; a < num_layers; a++) {
+ iris_resource_finish_hiz_write(ice, res, level, start_layer + a,
+ aux_usage);
+ }
+ break;
+
+ default:
+ unreachable("Invavlid aux usage");
+ }
+}
+
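+/**
+ * Return the tracked aux state of a single slice (level/layer) of the
+ * resource.
+ */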
+enum isl_aux_state
+iris_resource_get_aux_state(const struct iris_resource *res,
+ uint32_t level, uint32_t layer)
+{
+ iris_resource_check_level_layer(res, level, layer);
+
+ if (res->surf.usage & ISL_SURF_USAGE_DEPTH_BIT) {
+ assert(iris_resource_level_has_hiz(res, level));
+ } else if (res->surf.usage & ISL_SURF_USAGE_STENCIL_BIT) {
+ unreachable("Cannot get aux state for stencil");
+ } else {
+ assert(res->surf.samples == 1 ||
+ res->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
+ }
+
+ return res->aux.state[level][layer];
+}
+
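+/**
+ * Set the tracked aux state for a range of layers of a single miplevel.
+ */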
+void
+iris_resource_set_aux_state(struct iris_resource *res, uint32_t level,
+ uint32_t start_layer, uint32_t num_layers,
+ enum isl_aux_state aux_state)
+{
+ num_layers = miptree_layer_range_length(res, level, start_layer, num_layers);
+
+ if (res->surf.usage & ISL_SURF_USAGE_DEPTH_BIT) {
+ assert(iris_resource_level_has_hiz(res, level));
+ } else if (res->surf.usage & ISL_SURF_USAGE_STENCIL_BIT) {
+ unreachable("Cannot set aux state for stencil");
+ } else {
+ assert(res->surf.samples == 1 ||
+ res->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
+ }
+
+ for (unsigned a = 0; a < num_layers; a++) {
+ if (res->aux.state[level][start_layer + a] != aux_state) {
+ res->aux.state[level][start_layer + a] = aux_state;
+ // XXX: dirty works differently
+ // brw->ctx.NewDriverState |= BRW_NEW_AUX_STATE;
+ }
+ }
+}
+
+/* On Gen9 color buffers may be compressed by the hardware (lossless
+ * compression). There are, however, format restrictions and care needs to be
+ * taken that the sampler engine is capable of re-interpreting a buffer with
+ * a format different from the one the buffer was originally written with.
+ *
+ * For example, SRGB formats are not compressible and the sampler engine isn't
+ * capable of treating RGBA_UNORM as SRGB_ALPHA. In such a case the underlying
+ * color buffer needs to be resolved so that the sampling surface can be
+ * sampled as non-compressed (i.e., without the auxiliary CCS buffer being
+ * set).
+ */
+static bool
+can_texture_with_ccs(const struct gen_device_info *devinfo,
+ struct pipe_debug_callback *dbg,
+ const struct iris_resource *res,
+ enum isl_format view_format)
+{
+ if (res->aux.usage != ISL_AUX_USAGE_CCS_E)
+ return false;
+
+ if (!format_ccs_e_compat_with_resource(devinfo, res, view_format)) {
+ const struct isl_format_layout *res_fmtl =
+ isl_format_get_layout(res->surf.format);
+ const struct isl_format_layout *view_fmtl =
+ isl_format_get_layout(view_format);
+
+ perf_debug(dbg, "Incompatible sampling format (%s) for CCS (%s)\n",
+ view_fmtl->name, res_fmtl->name);
+
+ return false;
+ }
+
+ return true;
+}
+
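+/**
+ * Determine which aux usage to use when sampling the resource with the
+ * given view format, accounting for format compatibility and the gen9
+ * ASTC 5x5 sampler workaround.
+ */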
+enum isl_aux_usage
+iris_resource_texture_aux_usage(struct iris_context *ice,
+ const struct iris_resource *res,
+ enum isl_format view_format,
+ enum gen9_astc5x5_wa_tex_type astc5x5_wa_bits)
+{
+ struct iris_screen *screen = (void *) ice->ctx.screen;
+ struct gen_device_info *devinfo = &screen->devinfo;
+
+ assert(devinfo->gen == 9 || astc5x5_wa_bits == 0);
+
+ /* On gen9, ASTC 5x5 textures cannot live in the sampler cache alongside
+ * CCS or HiZ compressed textures. See gen9_apply_astc5x5_wa_flush() for
+ * details.
+ */
+ if ((astc5x5_wa_bits & GEN9_ASTC5X5_WA_TEX_TYPE_ASTC5x5) &&
+ res->aux.usage != ISL_AUX_USAGE_MCS)
+ return ISL_AUX_USAGE_NONE;
+
+ switch (res->aux.usage) {
+ case ISL_AUX_USAGE_HIZ:
+ if (sample_with_hiz(devinfo, res))
+ return ISL_AUX_USAGE_HIZ;
+ break;
+
+ case ISL_AUX_USAGE_MCS:
+ return ISL_AUX_USAGE_MCS;
+
+ case ISL_AUX_USAGE_CCS_D:
+ case ISL_AUX_USAGE_CCS_E:
+ /* If we don't have any unresolved color, report an aux usage of
+ * ISL_AUX_USAGE_NONE. This way, texturing won't even look at the
+ * aux surface and we can save some bandwidth.
+ */
+ if (!has_color_unresolved(res, 0, INTEL_REMAINING_LEVELS,
+ 0, INTEL_REMAINING_LAYERS))
+ return ISL_AUX_USAGE_NONE;
+
+ if (can_texture_with_ccs(devinfo, &ice->dbg, res, view_format))
+ return ISL_AUX_USAGE_CCS_E;
+ break;
+
+ default:
+ break;
+ }
+
+ return ISL_AUX_USAGE_NONE;
+}
+
+static bool
+isl_formats_are_fast_clear_compatible(enum isl_format a, enum isl_format b)
+{
+ /* On gen8 and earlier, the hardware was only capable of handling 0/1 clear
+ * values so sRGB curve application was a no-op for all fast-clearable
+ * formats.
+ *
+ * On gen9+, the hardware supports arbitrary clear values. For sRGB clear
+ * values, the hardware interprets the floats, not as what would be
+ * returned from the sampler (or written by the shader), but as being
+ * between format conversion and sRGB curve application. This means that
+ * we can switch between sRGB and UNORM without having to whack the clear
+ * color.
+ */
+ return isl_format_srgb_to_linear(a) == isl_format_srgb_to_linear(b);
+}
+
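+/**
+ * Resolve the given level/layer range as needed so it can be sampled with
+ * the given view format.
+ */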
+void
+iris_resource_prepare_texture(struct iris_context *ice,
+ struct iris_resource *res,
+ enum isl_format view_format,
+ uint32_t start_level, uint32_t num_levels,
+ uint32_t start_layer, uint32_t num_layers,
+ enum gen9_astc5x5_wa_tex_type astc5x5_wa_bits)
+{
+ enum isl_aux_usage aux_usage =
+ iris_resource_texture_aux_usage(ice, res, view_format, astc5x5_wa_bits);
+
+ bool clear_supported = aux_usage != ISL_AUX_USAGE_NONE;
+
+ /* Clear color is specified as ints or floats and the conversion is done by
+ * the sampler. If we have a texture view, we would have to perform the
+ * clear color conversion manually. Just disable clear color.
+ */
+ if (!isl_formats_are_fast_clear_compatible(res->surf.format, view_format))
+ clear_supported = false;
+
+ iris_resource_prepare_access(ice, res, start_level, num_levels,
+ start_layer, num_layers,
+ aux_usage, clear_supported);
+}
+
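+/**
+ * Fully resolve the resource so it can be accessed as a shader image.
+ */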
+void
+iris_resource_prepare_image(struct iris_context *ice,
+ struct iris_resource *res)
+{
+ /* The data port doesn't understand any compression */
+ iris_resource_prepare_access(ice, res, 0, INTEL_REMAINING_LEVELS,
+ 0, INTEL_REMAINING_LAYERS,
+ ISL_AUX_USAGE_NONE, false);
+}
+
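+/**
+ * Determine which aux usage to use when rendering to the resource with the
+ * given format.
+ */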
+enum isl_aux_usage
+iris_resource_render_aux_usage(struct iris_context *ice,
+ struct iris_resource *res,
+ enum isl_format render_format,
+ bool blend_enabled,
+ bool draw_aux_disabled)
+{
+ struct iris_screen *screen = (void *) ice->ctx.screen;
+ struct gen_device_info *devinfo = &screen->devinfo;
+
+ if (draw_aux_disabled)
+ return ISL_AUX_USAGE_NONE;
+
+ switch (res->aux.usage) {
+ case ISL_AUX_USAGE_MCS:
+ return ISL_AUX_USAGE_MCS;
+
+ case ISL_AUX_USAGE_CCS_D:
+ case ISL_AUX_USAGE_CCS_E:
+ /* Gen9+ hardware technically supports non-0/1 clear colors with sRGB
+ * formats. However, there are issues with blending where it doesn't
+ * properly apply the sRGB curve to the clear color when blending.
+ */
+ /* XXX:
+ if (devinfo->gen >= 9 && blend_enabled &&
+ isl_format_is_srgb(render_format) &&
+ !isl_color_value_is_zero_one(res->fast_clear_color, render_format))
+ return ISL_AUX_USAGE_NONE;
+ */
+
+ if (res->aux.usage == ISL_AUX_USAGE_CCS_E &&
+ format_ccs_e_compat_with_resource(devinfo, res, render_format))
+ return ISL_AUX_USAGE_CCS_E;
+
+ /* Otherwise, we have to fall back to CCS_D */
+ return ISL_AUX_USAGE_CCS_D;
+
+ default:
+ return ISL_AUX_USAGE_NONE;
+ }
+}
+
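+/**
+ * Resolve the given level/layer range as needed before rendering to it with
+ * the given aux usage.
+ */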
+void
+iris_resource_prepare_render(struct iris_context *ice,
+ struct iris_resource *res, uint32_t level,
+ uint32_t start_layer, uint32_t layer_count,
+ enum isl_aux_usage aux_usage)
+{
+ iris_resource_prepare_access(ice, res, level, 1, start_layer, layer_count,
+ aux_usage, aux_usage != ISL_AUX_USAGE_NONE);
+}
+
+void
+iris_resource_finish_render(struct iris_context *ice,
+ struct iris_resource *res, uint32_t level,
+ uint32_t start_layer, uint32_t layer_count,
+ enum isl_aux_usage aux_usage)
+{
+ iris_resource_finish_write(ice, res, level, start_layer, layer_count,
+ aux_usage);
+}
+
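+/**
+ * Resolve the given level/layer range as needed before a depth access.
+ */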
+void
+iris_resource_prepare_depth(struct iris_context *ice,
+ struct iris_resource *res, uint32_t level,
+ uint32_t start_layer, uint32_t layer_count)
+{
+ iris_resource_prepare_access(ice, res, level, 1, start_layer, layer_count,
+ res->aux.usage, res->aux.bo != NULL);
+}
+
+void
+iris_resource_finish_depth(struct iris_context *ice,
+ struct iris_resource *res, uint32_t level,
+ uint32_t start_layer, uint32_t layer_count,
+ bool depth_written)
+{
+ if (depth_written) {
+ iris_resource_finish_write(ice, res, level, start_layer, layer_count,
+ res->aux.usage);
+ }
+}