summaryrefslogtreecommitdiffstats
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/gallium/drivers/ilo/ilo_3d.c76
-rw-r--r--src/gallium/drivers/ilo/ilo_3d.h6
-rw-r--r--src/gallium/drivers/ilo/ilo_3d_pipeline_gen6.c17
-rw-r--r--src/gallium/drivers/ilo/ilo_3d_pipeline_gen6.h2
-rw-r--r--src/gallium/drivers/ilo/ilo_3d_pipeline_gen7.c2
-rw-r--r--src/gallium/drivers/ilo/ilo_context.c2
-rw-r--r--src/gallium/drivers/ilo/ilo_shader.c317
-rw-r--r--src/gallium/drivers/ilo/ilo_shader.h50
-rw-r--r--src/gallium/drivers/ilo/ilo_state.c70
9 files changed, 356 insertions, 186 deletions
diff --git a/src/gallium/drivers/ilo/ilo_3d.c b/src/gallium/drivers/ilo/ilo_3d.c
index 17dd5b125e9..6223de98c0f 100644
--- a/src/gallium/drivers/ilo/ilo_3d.c
+++ b/src/gallium/drivers/ilo/ilo_3d.c
@@ -363,6 +363,10 @@ void
ilo_3d_destroy(struct ilo_3d *hw3d)
{
ilo_3d_pipeline_destroy(hw3d->pipeline);
+
+ if (hw3d->kernel.bo)
+ intel_bo_unreference(hw3d->kernel.bo);
+
FREE(hw3d);
}
@@ -627,6 +631,66 @@ ilo_draw_vbo_with_sw_restart(struct pipe_context *pipe,
FREE(restart_info);
}
+static bool
+upload_shaders(struct ilo_3d *hw3d, struct ilo_shader_cache *shc)
+{
+ bool incremental = true;
+ int upload;
+
+ upload = ilo_shader_cache_upload(shc,
+ NULL, hw3d->kernel.used, incremental);
+ if (!upload)
+ return true;
+
+ /*
+ * Allocate a new bo. When this is a new batch, assume the bo is still in
+ * use by the previous batch and force allocation.
+ *
+    * Would it help to make the shader cache upload use unsynchronized
+    * mapping, and to remove the new-batch check here?
+ */
+ if (hw3d->kernel.used + upload > hw3d->kernel.size || hw3d->new_batch) {
+ unsigned new_size = (hw3d->kernel.size) ?
+ hw3d->kernel.size : (8 * 1024);
+
+ while (hw3d->kernel.used + upload > new_size)
+ new_size *= 2;
+
+ if (hw3d->kernel.bo)
+ intel_bo_unreference(hw3d->kernel.bo);
+
+ hw3d->kernel.bo = intel_winsys_alloc_buffer(hw3d->cp->winsys,
+ "kernel bo", new_size, 0);
+ if (!hw3d->kernel.bo) {
+ ilo_err("failed to allocate kernel bo\n");
+ return false;
+ }
+
+ hw3d->kernel.used = 0;
+ hw3d->kernel.size = new_size;
+ incremental = false;
+
+ assert(new_size >= ilo_shader_cache_upload(shc,
+ NULL, hw3d->kernel.used, incremental));
+
+ ilo_3d_pipeline_invalidate(hw3d->pipeline,
+ ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO);
+ }
+
+ upload = ilo_shader_cache_upload(shc,
+ hw3d->kernel.bo, hw3d->kernel.used, incremental);
+ if (upload < 0) {
+ ilo_err("failed to upload shaders\n");
+ return false;
+ }
+
+ hw3d->kernel.used += upload;
+
+ assert(hw3d->kernel.used <= hw3d->kernel.size);
+
+ return true;
+}
+
static void
ilo_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
@@ -649,17 +713,10 @@ ilo_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
}
}
- /* assume the cache is still in use by the previous batch */
- if (hw3d->new_batch)
- ilo_shader_cache_mark_busy(ilo->shader_cache);
-
ilo_finalize_states(ilo);
- /* the shaders may be uploaded to a new shader cache */
- if (hw3d->shader_cache_seqno != ilo->shader_cache->seqno) {
- ilo_3d_pipeline_invalidate(hw3d->pipeline,
- ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO);
- }
+ if (!upload_shaders(hw3d, ilo->shader_cache))
+ return;
/* If draw_vbo ever fails, return immediately. */
if (!draw_vbo(hw3d, ilo, info, &prim_generated, &prim_emitted))
@@ -668,7 +725,6 @@ ilo_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
/* clear dirty status */
ilo->dirty = 0x0;
hw3d->new_batch = false;
- hw3d->shader_cache_seqno = ilo->shader_cache->seqno;
update_prim_count(hw3d, prim_generated, prim_emitted);
diff --git a/src/gallium/drivers/ilo/ilo_3d.h b/src/gallium/drivers/ilo/ilo_3d.h
index a1a0efc5e96..e9888e1b3ec 100644
--- a/src/gallium/drivers/ilo/ilo_3d.h
+++ b/src/gallium/drivers/ilo/ilo_3d.h
@@ -44,7 +44,11 @@ struct ilo_3d {
int owner_reserve;
bool new_batch;
- uint32_t shader_cache_seqno;
+
+ struct {
+ struct intel_bo *bo;
+ unsigned used, size;
+ } kernel;
struct {
struct pipe_query *query;
diff --git a/src/gallium/drivers/ilo/ilo_3d_pipeline_gen6.c b/src/gallium/drivers/ilo/ilo_3d_pipeline_gen6.c
index e5c9db1be02..1f855dc34d0 100644
--- a/src/gallium/drivers/ilo/ilo_3d_pipeline_gen6.c
+++ b/src/gallium/drivers/ilo/ilo_3d_pipeline_gen6.c
@@ -29,6 +29,7 @@
#include "util/u_prim.h"
#include "intel_reg.h"
+#include "ilo_3d.h"
#include "ilo_context.h"
#include "ilo_cp.h"
#include "ilo_gpe_gen6.h"
@@ -210,13 +211,13 @@ gen6_pipeline_common_base_address(struct ilo_3d_pipeline *p,
struct gen6_pipeline_session *session)
{
/* STATE_BASE_ADDRESS */
- if (session->state_bo_changed || session->instruction_bo_changed ||
+ if (session->state_bo_changed || session->kernel_bo_changed ||
session->batch_bo_changed) {
if (p->dev->gen == ILO_GEN(6))
gen6_wa_pipe_control_post_sync(p, false);
p->gen6_STATE_BASE_ADDRESS(p->dev,
- NULL, p->cp->bo, p->cp->bo, NULL, ilo->shader_cache->bo,
+ NULL, p->cp->bo, p->cp->bo, NULL, ilo->hw3d->kernel.bo,
0, 0, 0, 0, p->cp);
/*
@@ -457,7 +458,8 @@ gen6_pipeline_vs(struct ilo_3d_pipeline *p,
const struct ilo_context *ilo,
struct gen6_pipeline_session *session)
{
- const bool emit_3dstate_vs = (DIRTY(VS) || DIRTY(VERTEX_SAMPLERS));
+ const bool emit_3dstate_vs = (DIRTY(VS) || DIRTY(VERTEX_SAMPLERS) ||
+ session->kernel_bo_changed);
const bool emit_3dstate_constant_vs = session->pcb_state_vs_changed;
/*
@@ -497,7 +499,8 @@ gen6_pipeline_gs(struct ilo_3d_pipeline *p,
p->gen6_3DSTATE_CONSTANT_GS(p->dev, NULL, NULL, 0, p->cp);
/* 3DSTATE_GS */
- if (DIRTY(GS) || DIRTY(VS) || session->prim_changed) {
+ if (DIRTY(GS) || DIRTY(VS) ||
+ session->prim_changed || session->kernel_bo_changed) {
const struct ilo_shader *gs = (ilo->gs)? ilo->gs->shader : NULL;
const struct ilo_shader *vs = (ilo->vs)? ilo->vs->shader : NULL;
const int num_vertices = u_vertices_per_prim(session->reduced_prim);
@@ -666,7 +669,7 @@ gen6_pipeline_wm(struct ilo_3d_pipeline *p,
/* 3DSTATE_WM */
if (DIRTY(FS) || DIRTY(FRAGMENT_SAMPLERS) ||
DIRTY(BLEND) || DIRTY(DEPTH_STENCIL_ALPHA) ||
- DIRTY(RASTERIZER)) {
+ DIRTY(RASTERIZER) || session->kernel_bo_changed) {
const struct ilo_shader *fs = (ilo->fs)? ilo->fs->shader : NULL;
const int num_samplers = ilo->sampler[PIPE_SHADER_FRAGMENT].count;
const bool dual_blend = ilo->blend->dual_blend;
@@ -1315,7 +1318,7 @@ gen6_pipeline_prepare(const struct ilo_3d_pipeline *p,
/* these should be enough to make everything uploaded */
session->batch_bo_changed = true;
session->state_bo_changed = true;
- session->instruction_bo_changed = true;
+ session->kernel_bo_changed = true;
session->prim_changed = true;
}
else {
@@ -1333,7 +1336,7 @@ gen6_pipeline_prepare(const struct ilo_3d_pipeline *p,
session->state_bo_changed =
(p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_STATE_BO);
- session->instruction_bo_changed =
+ session->kernel_bo_changed =
(p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO);
session->prim_changed = (p->state.reduced_prim != session->reduced_prim);
}
diff --git a/src/gallium/drivers/ilo/ilo_3d_pipeline_gen6.h b/src/gallium/drivers/ilo/ilo_3d_pipeline_gen6.h
index 18d9309953d..6ba1f2a87b9 100644
--- a/src/gallium/drivers/ilo/ilo_3d_pipeline_gen6.h
+++ b/src/gallium/drivers/ilo/ilo_3d_pipeline_gen6.h
@@ -44,7 +44,7 @@ struct gen6_pipeline_session {
bool hw_ctx_changed;
bool batch_bo_changed;
bool state_bo_changed;
- bool instruction_bo_changed;
+ bool kernel_bo_changed;
bool prim_changed;
void (*emit_draw_states)(struct ilo_3d_pipeline *p,
diff --git a/src/gallium/drivers/ilo/ilo_3d_pipeline_gen7.c b/src/gallium/drivers/ilo/ilo_3d_pipeline_gen7.c
index 878f6d0434b..1c7260f7fd1 100644
--- a/src/gallium/drivers/ilo/ilo_3d_pipeline_gen7.c
+++ b/src/gallium/drivers/ilo/ilo_3d_pipeline_gen7.c
@@ -492,7 +492,7 @@ gen7_pipeline_wm(struct ilo_3d_pipeline *p,
/* 3DSTATE_PS */
if (DIRTY(FS) || DIRTY(FRAGMENT_SAMPLERS) ||
- DIRTY(BLEND)) {
+ DIRTY(BLEND) || session->kernel_bo_changed) {
const struct ilo_shader *fs = (ilo->fs)? ilo->fs->shader : NULL;
const int num_samplers = ilo->sampler[PIPE_SHADER_FRAGMENT].count;
const bool dual_blend = ilo->blend->dual_blend;
diff --git a/src/gallium/drivers/ilo/ilo_context.c b/src/gallium/drivers/ilo/ilo_context.c
index 394081092ec..d3e900661ee 100644
--- a/src/gallium/drivers/ilo/ilo_context.c
+++ b/src/gallium/drivers/ilo/ilo_context.c
@@ -124,7 +124,7 @@ ilo_context_create(struct pipe_screen *screen, void *priv)
ilo->dev = &is->dev;
ilo->cp = ilo_cp_create(ilo->winsys, is->dev.has_llc);
- ilo->shader_cache = ilo_shader_cache_create(ilo->winsys);
+ ilo->shader_cache = ilo_shader_cache_create();
if (ilo->cp)
ilo->hw3d = ilo_3d_create(ilo->cp, ilo->dev);
diff --git a/src/gallium/drivers/ilo/ilo_shader.c b/src/gallium/drivers/ilo/ilo_shader.c
index 6d534d5a27e..90682d291a2 100644
--- a/src/gallium/drivers/ilo/ilo_shader.c
+++ b/src/gallium/drivers/ilo/ilo_shader.c
@@ -30,6 +30,204 @@
#include "ilo_shader.h"
+struct ilo_shader_cache {
+ struct list_head shaders;
+ struct list_head changed;
+};
+
+/**
+ * Create a shader cache. A shader cache can manage shaders and upload them
+ * to a bo as a whole.
+ */
+struct ilo_shader_cache *
+ilo_shader_cache_create(void)
+{
+ struct ilo_shader_cache *shc;
+
+ shc = CALLOC_STRUCT(ilo_shader_cache);
+ if (!shc)
+ return NULL;
+
+ list_inithead(&shc->shaders);
+ list_inithead(&shc->changed);
+
+ return shc;
+}
+
+/**
+ * Destroy a shader cache.
+ */
+void
+ilo_shader_cache_destroy(struct ilo_shader_cache *shc)
+{
+ FREE(shc);
+}
+
+/**
+ * Add a shader to the cache.
+ */
+void
+ilo_shader_cache_add(struct ilo_shader_cache *shc,
+ struct ilo_shader_state *shader)
+{
+ struct ilo_shader *sh;
+
+ shader->cache = shc;
+ LIST_FOR_EACH_ENTRY(sh, &shader->variants, list)
+ sh->cache_seqno = false;
+
+ list_add(&shader->list, &shc->changed);
+}
+
+/**
+ * Remove a shader from the cache.
+ */
+void
+ilo_shader_cache_remove(struct ilo_shader_cache *shc,
+ struct ilo_shader_state *shader)
+{
+ list_del(&shader->list);
+ shader->cache = NULL;
+}
+
+/**
+ * Notify the cache that a managed shader has changed.
+ */
+static void
+ilo_shader_cache_notify_change(struct ilo_shader_cache *shc,
+ struct ilo_shader_state *shader)
+{
+ if (shader->cache == shc) {
+ list_del(&shader->list);
+ list_add(&shader->list, &shc->changed);
+ }
+}
+
+/**
+ * Upload a managed shader to the bo.
+ */
+static int
+ilo_shader_cache_upload_shader(struct ilo_shader_cache *shc,
+ struct ilo_shader_state *shader,
+ struct intel_bo *bo, unsigned offset,
+ bool incremental)
+{
+ const unsigned base = offset;
+ struct ilo_shader *sh;
+
+ LIST_FOR_EACH_ENTRY(sh, &shader->variants, list) {
+ int err;
+
+ if (incremental && sh->cache_seqno)
+ continue;
+
+ /* kernels must be aligned to 64-byte */
+ offset = align(offset, 64);
+
+ err = intel_bo_pwrite(bo, offset, sh->kernel_size, sh->kernel);
+ if (unlikely(err))
+ return -1;
+
+ sh->cache_seqno = true;
+ sh->cache_offset = offset;
+
+ offset += sh->kernel_size;
+ }
+
+ return (int) (offset - base);
+}
+
+/**
+ * Similar to ilo_shader_cache_upload(), except that nothing is actually
+ * uploaded; only the required size is computed.
+ */
+static int
+ilo_shader_cache_get_upload_size(struct ilo_shader_cache *shc,
+ unsigned offset,
+ bool incremental)
+{
+ const unsigned base = offset;
+ struct ilo_shader_state *shader;
+
+ if (!incremental) {
+ LIST_FOR_EACH_ENTRY(shader, &shc->shaders, list) {
+ struct ilo_shader *sh;
+
+ /* see ilo_shader_cache_upload_shader() */
+ LIST_FOR_EACH_ENTRY(sh, &shader->variants, list) {
+ if (!incremental || !sh->cache_seqno)
+ offset = align(offset, 64) + sh->kernel_size;
+ }
+ }
+ }
+
+ LIST_FOR_EACH_ENTRY(shader, &shc->changed, list) {
+ struct ilo_shader *sh;
+
+ /* see ilo_shader_cache_upload_shader() */
+ LIST_FOR_EACH_ENTRY(sh, &shader->variants, list) {
+ if (!incremental || !sh->cache_seqno)
+ offset = align(offset, 64) + sh->kernel_size;
+ }
+ }
+
+ /*
+ * From the Sandy Bridge PRM, volume 4 part 2, page 112:
+ *
+ * "Due to prefetch of the instruction stream, the EUs may attempt to
+ * access up to 8 instructions (128 bytes) beyond the end of the
+ * kernel program - possibly into the next memory page. Although
+ * these instructions will not be executed, software must account for
+ * the prefetch in order to avoid invalid page access faults."
+ */
+ if (offset > base)
+ offset += 128;
+
+ return (int) (offset - base);
+}
+
+/**
+ * Upload managed shaders to the bo. When incremental is true, only shaders
+ * that are changed or added after the last upload are uploaded.
+ */
+int
+ilo_shader_cache_upload(struct ilo_shader_cache *shc,
+ struct intel_bo *bo, unsigned offset,
+ bool incremental)
+{
+ struct ilo_shader_state *shader, *next;
+ int size = 0, s;
+
+ if (!bo)
+ return ilo_shader_cache_get_upload_size(shc, offset, incremental);
+
+ if (!incremental) {
+ LIST_FOR_EACH_ENTRY(shader, &shc->shaders, list) {
+ s = ilo_shader_cache_upload_shader(shc, shader,
+ bo, offset, incremental);
+ if (unlikely(s < 0))
+ return s;
+
+ size += s;
+ offset += s;
+ }
+ }
+
+ LIST_FOR_EACH_ENTRY_SAFE(shader, next, &shc->changed, list) {
+ s = ilo_shader_cache_upload_shader(shc, shader,
+ bo, offset, incremental);
+ if (unlikely(s < 0))
+ return s;
+
+ size += s;
+ offset += s;
+
+ list_del(&shader->list);
+ list_add(&shader->list, &shc->shaders);
+ }
+
+ return size;
+}
+
/**
* Initialize a shader variant.
*/
@@ -366,6 +564,9 @@ ilo_shader_state_add_shader(struct ilo_shader_state *state,
list_add(&sh->list, &state->variants);
state->num_variants++;
state->total_size += sh->kernel_size;
+
+ if (state->cache)
+ ilo_shader_cache_notify_change(state->cache, state);
}
/**
@@ -490,119 +691,3 @@ ilo_shader_state_use_variant(struct ilo_shader_state *state,
return true;
}
-
-/**
- * Reset the shader cache.
- */
-static void
-ilo_shader_cache_reset(struct ilo_shader_cache *shc)
-{
- if (shc->bo)
- intel_bo_unreference(shc->bo);
-
- shc->bo = intel_winsys_alloc_buffer(shc->winsys,
- "shader cache", shc->size, 0);
- shc->busy = false;
- shc->cur = 0;
- shc->seqno++;
- if (!shc->seqno)
- shc->seqno = 1;
-}
-
-/**
- * Create a shader cache. A shader cache is a bo holding all compiled shaders.
- * When the bo is full, a larger bo is allocated and all cached shaders are
- * invalidated. This is how outdated shaders get dropped. Active shaders
- * will be added to the new bo when used.
- */
-struct ilo_shader_cache *
-ilo_shader_cache_create(struct intel_winsys *winsys)
-{
- struct ilo_shader_cache *shc;
-
- shc = CALLOC_STRUCT(ilo_shader_cache);
- if (!shc)
- return NULL;
-
- shc->winsys = winsys;
- /* initial cache size */
- shc->size = 4096;
-
- ilo_shader_cache_reset(shc);
-
- return shc;
-}
-
-/**
- * Destroy a shader cache.
- */
-void
-ilo_shader_cache_destroy(struct ilo_shader_cache *shc)
-{
- if (shc->bo)
- intel_bo_unreference(shc->bo);
-
- FREE(shc);
-}
-
-/**
- * Add shaders to the cache. This may invalidate all other shaders in the
- * cache.
- */
-void
-ilo_shader_cache_set(struct ilo_shader_cache *shc,
- struct ilo_shader **shaders,
- int num_shaders)
-{
- int new_cur, i;
-
- /* calculate the space needed */
- new_cur = shc->cur;
- for (i = 0; i < num_shaders; i++) {
- if (shaders[i]->cache_seqno != shc->seqno)
- new_cur = align(new_cur, 64) + shaders[i]->kernel_size;
- }
-
- /* all shaders are already in the cache */
- if (new_cur == shc->cur)
- return;
-
- /*
- * From the Sandy Bridge PRM, volume 4 part 2, page 112:
- *
- * "Due to prefetch of the instruction stream, the EUs may attempt to
- * access up to 8 instructions (128 bytes) beyond the end of the kernel
- * program - possibly into the next memory page. Although these
- * instructions will not be executed, software must account for the
- * prefetch in order to avoid invalid page access faults."
- */
- new_cur += 128;
-
- /*
- * we should be able to append data without being blocked even the bo
- * is busy...
- */
-
- /* reallocate when the cache is full or busy */
- if (new_cur > shc->size || shc->busy) {
- while (new_cur > shc->size)
- shc->size <<= 1;
-
- ilo_shader_cache_reset(shc);
- }
-
- /* upload now */
- for (i = 0; i < num_shaders; i++) {
- if (shaders[i]->cache_seqno != shc->seqno) {
- /* kernels must be aligned to 64-byte */
- shc->cur = align(shc->cur, 64);
- intel_bo_pwrite(shc->bo, shc->cur,
- shaders[i]->kernel_size, shaders[i]->kernel);
-
- shaders[i]->cache_seqno = shc->seqno;
- shaders[i]->cache_offset = shc->cur;
-
- shc->cur += shaders[i]->kernel_size;
- }
- }
-}
diff --git a/src/gallium/drivers/ilo/ilo_shader.h b/src/gallium/drivers/ilo/ilo_shader.h
index 1ea0b4a0d1d..a19f85e4b35 100644
--- a/src/gallium/drivers/ilo/ilo_shader.h
+++ b/src/gallium/drivers/ilo/ilo_shader.h
@@ -31,6 +31,8 @@
#include "ilo_common.h"
#include "ilo_context.h"
+struct ilo_shader_cache;
+
/* XXX The interface needs to be reworked */
/**
@@ -117,6 +119,7 @@ struct ilo_shader {
struct list_head list;
+ /* managed by shader cache */
uint32_t cache_seqno;
uint32_t cache_offset;
};
@@ -160,17 +163,30 @@ struct ilo_shader_state {
int num_variants, total_size;
struct ilo_shader *shader;
+
+ /* managed by shader cache */
+ struct ilo_shader_cache *cache;
+ struct list_head list;
};
-struct ilo_shader_cache {
- struct intel_winsys *winsys;
- struct intel_bo *bo;
- int cur, size;
- bool busy;
+struct ilo_shader_cache *
+ilo_shader_cache_create(void);
- /* starting from 1, incremented whenever a new bo is allocated */
- uint32_t seqno;
-};
+void
+ilo_shader_cache_destroy(struct ilo_shader_cache *shc);
+
+void
+ilo_shader_cache_add(struct ilo_shader_cache *shc,
+ struct ilo_shader_state *shader);
+
+void
+ilo_shader_cache_remove(struct ilo_shader_cache *shc,
+ struct ilo_shader_state *shader);
+
+int
+ilo_shader_cache_upload(struct ilo_shader_cache *shc,
+ struct intel_bo *bo, unsigned offset,
+ bool incremental);
void
ilo_shader_variant_init(struct ilo_shader_variant *variant,
@@ -192,24 +208,6 @@ bool
ilo_shader_state_use_variant(struct ilo_shader_state *state,
const struct ilo_shader_variant *variant);
-struct ilo_shader_cache *
-ilo_shader_cache_create(struct intel_winsys *winsys);
-
-void
-ilo_shader_cache_destroy(struct ilo_shader_cache *shc);
-
-void
-ilo_shader_cache_set(struct ilo_shader_cache *shc,
- struct ilo_shader **shaders,
- int num_shaders);
-
-static inline void
-ilo_shader_cache_mark_busy(struct ilo_shader_cache *shc)
-{
- if (shc->cur)
- shc->busy = true;
-}
-
struct ilo_shader *
ilo_shader_compile_vs(const struct ilo_shader_state *state,
const struct ilo_shader_variant *variant);
diff --git a/src/gallium/drivers/ilo/ilo_state.c b/src/gallium/drivers/ilo/ilo_state.c
index ba0efdd09ce..b284e7c112b 100644
--- a/src/gallium/drivers/ilo/ilo_state.c
+++ b/src/gallium/drivers/ilo/ilo_state.c
@@ -46,14 +46,12 @@ finalize_shader_states(struct ilo_context *ilo)
const struct {
struct ilo_shader_state *state;
struct ilo_shader *prev_shader;
- uint32_t prev_cache_seqno;
uint32_t dirty;
uint32_t deps;
} sh[PIPE_SHADER_TYPES] = {
[PIPE_SHADER_VERTEX] = {
.state = ilo->vs,
.prev_shader = (ilo->vs) ? ilo->vs->shader : NULL,
- .prev_cache_seqno = (ilo->vs) ? ilo->vs->shader->cache_seqno : 0,
.dirty = ILO_DIRTY_VS,
.deps = ILO_DIRTY_VERTEX_SAMPLER_VIEWS |
ILO_DIRTY_RASTERIZER,
@@ -61,7 +59,6 @@ finalize_shader_states(struct ilo_context *ilo)
[PIPE_SHADER_FRAGMENT] = {
.state = ilo->fs,
.prev_shader = (ilo->fs) ? ilo->fs->shader : NULL,
- .prev_cache_seqno = (ilo->fs) ? ilo->fs->shader->cache_seqno : 0,
.dirty = ILO_DIRTY_FS,
.deps = ILO_DIRTY_FRAGMENT_SAMPLER_VIEWS |
ILO_DIRTY_RASTERIZER |
@@ -70,7 +67,6 @@ finalize_shader_states(struct ilo_context *ilo)
[PIPE_SHADER_GEOMETRY] = {
.state = ilo->gs,
.prev_shader = (ilo->gs) ? ilo->gs->shader : NULL,
- .prev_cache_seqno = (ilo->gs) ? ilo->gs->shader->cache_seqno : 0,
.dirty = ILO_DIRTY_GS,
.deps = ILO_DIRTY_GEOMETRY_SAMPLER_VIEWS |
ILO_DIRTY_VS |
@@ -79,13 +75,11 @@ finalize_shader_states(struct ilo_context *ilo)
[PIPE_SHADER_COMPUTE] = {
.state = NULL,
.prev_shader = NULL,
- .prev_cache_seqno = 0,
.dirty = 0,
.deps = 0,
},
};
- struct ilo_shader *shaders[PIPE_SHADER_TYPES];
- int num_shaders = 0, i;
+ int i;
for (i = 0; i < PIPE_SHADER_TYPES; i++) {
/* no state bound */
@@ -99,25 +93,15 @@ finalize_shader_states(struct ilo_context *ilo)
ilo_shader_variant_init(&variant, &sh[i].state->info, ilo);
ilo_shader_state_use_variant(sh[i].state, &variant);
}
-
- shaders[num_shaders++] = sh[i].state->shader;
}
- ilo_shader_cache_set(ilo->shader_cache, shaders, num_shaders);
-
for (i = 0; i < PIPE_SHADER_TYPES; i++) {
/* no state bound */
if (!sh[i].state)
continue;
- /*
- * mark the shader state dirty if
- *
- * - a new variant is selected, or
- * - the kernel is uploaded to a different bo
- */
- if (sh[i].state->shader != sh[i].prev_shader ||
- sh[i].state->shader->cache_seqno != sh[i].prev_cache_seqno)
+ /* mark the shader state dirty if new variant is selected */
+ if (sh[i].state->shader != sh[i].prev_shader)
ilo->dirty |= sh[i].dirty;
}
}
@@ -367,7 +351,14 @@ ilo_create_fs_state(struct pipe_context *pipe,
const struct pipe_shader_state *state)
{
struct ilo_context *ilo = ilo_context(pipe);
- return ilo_shader_state_create(ilo, PIPE_SHADER_FRAGMENT, state);
+ struct ilo_shader_state *shader;
+
+ shader = ilo_shader_state_create(ilo, PIPE_SHADER_FRAGMENT, state);
+ assert(shader);
+
+ ilo_shader_cache_add(ilo->shader_cache, shader);
+
+ return shader;
}
static void
@@ -383,7 +374,10 @@ ilo_bind_fs_state(struct pipe_context *pipe, void *state)
static void
ilo_delete_fs_state(struct pipe_context *pipe, void *state)
{
+ struct ilo_context *ilo = ilo_context(pipe);
struct ilo_shader_state *fs = (struct ilo_shader_state *) state;
+
+ ilo_shader_cache_remove(ilo->shader_cache, fs);
ilo_shader_state_destroy(fs);
}
@@ -392,7 +386,14 @@ ilo_create_vs_state(struct pipe_context *pipe,
const struct pipe_shader_state *state)
{
struct ilo_context *ilo = ilo_context(pipe);
- return ilo_shader_state_create(ilo, PIPE_SHADER_VERTEX, state);
+ struct ilo_shader_state *shader;
+
+ shader = ilo_shader_state_create(ilo, PIPE_SHADER_VERTEX, state);
+ assert(shader);
+
+ ilo_shader_cache_add(ilo->shader_cache, shader);
+
+ return shader;
}
static void
@@ -408,7 +409,10 @@ ilo_bind_vs_state(struct pipe_context *pipe, void *state)
static void
ilo_delete_vs_state(struct pipe_context *pipe, void *state)
{
+ struct ilo_context *ilo = ilo_context(pipe);
struct ilo_shader_state *vs = (struct ilo_shader_state *) state;
+
+ ilo_shader_cache_remove(ilo->shader_cache, vs);
ilo_shader_state_destroy(vs);
}
@@ -417,7 +421,14 @@ ilo_create_gs_state(struct pipe_context *pipe,
const struct pipe_shader_state *state)
{
struct ilo_context *ilo = ilo_context(pipe);
- return ilo_shader_state_create(ilo, PIPE_SHADER_GEOMETRY, state);
+ struct ilo_shader_state *shader;
+
+ shader = ilo_shader_state_create(ilo, PIPE_SHADER_GEOMETRY, state);
+ assert(shader);
+
+ ilo_shader_cache_add(ilo->shader_cache, shader);
+
+ return shader;
}
static void
@@ -433,7 +444,10 @@ ilo_bind_gs_state(struct pipe_context *pipe, void *state)
static void
ilo_delete_gs_state(struct pipe_context *pipe, void *state)
{
+ struct ilo_context *ilo = ilo_context(pipe);
struct ilo_shader_state *gs = (struct ilo_shader_state *) state;
+
+ ilo_shader_cache_remove(ilo->shader_cache, gs);
ilo_shader_state_destroy(gs);
}
@@ -975,7 +989,14 @@ ilo_create_compute_state(struct pipe_context *pipe,
const struct pipe_compute_state *state)
{
struct ilo_context *ilo = ilo_context(pipe);
- return ilo_shader_state_create(ilo, PIPE_SHADER_COMPUTE, state);
+ struct ilo_shader_state *shader;
+
+ shader = ilo_shader_state_create(ilo, PIPE_SHADER_COMPUTE, state);
+ assert(shader);
+
+ ilo_shader_cache_add(ilo->shader_cache, shader);
+
+ return shader;
}
static void
@@ -991,7 +1012,10 @@ ilo_bind_compute_state(struct pipe_context *pipe, void *state)
static void
ilo_delete_compute_state(struct pipe_context *pipe, void *state)
{
+ struct ilo_context *ilo = ilo_context(pipe);
struct ilo_shader_state *cs = (struct ilo_shader_state *) state;
+
+ ilo_shader_cache_remove(ilo->shader_cache, cs);
ilo_shader_state_destroy(cs);
}