author     Brian Paul <[email protected]>    2009-10-01 13:35:42 -0600
committer  Brian Paul <[email protected]>    2009-10-01 13:35:42 -0600
commit     5d2413fca4c252ec5c7880fa7f983b5df3d762ba (patch)
tree       28d9fb9fc5724b65579f06fc9258461f9c7fc47f /src/mesa/drivers/dri
parent     15c57648cd87d344777e3aafa79a9be970b83979 (diff)
parent     18883cdf2334511005973155fc517eb678dc0043 (diff)
Merge branch 'mesa_7_6_branch'
Diffstat (limited to 'src/mesa/drivers/dri')
-rw-r--r--   src/mesa/drivers/dri/i965/brw_state.h            1
-rw-r--r--   src/mesa/drivers/dri/i965/brw_state_cache.c     49
-rw-r--r--   src/mesa/drivers/dri/intel/intel_mipmap_tree.c  16
3 files changed, 66 insertions, 0 deletions
diff --git a/src/mesa/drivers/dri/i965/brw_state.h b/src/mesa/drivers/dri/i965/brw_state.h
index 5335eac8951..d639656b9d4 100644
--- a/src/mesa/drivers/dri/i965/brw_state.h
+++ b/src/mesa/drivers/dri/i965/brw_state.h
@@ -151,6 +151,7 @@ void brw_state_cache_check_size( struct brw_context *brw );
void brw_init_caches( struct brw_context *brw );
void brw_destroy_caches( struct brw_context *brw );
+void brw_state_cache_bo_delete(struct brw_cache *cache, dri_bo *bo);
/***********************************************************************
* brw_state_batch.c
diff --git a/src/mesa/drivers/dri/i965/brw_state_cache.c b/src/mesa/drivers/dri/i965/brw_state_cache.c
index e40d7a04164..f8e46aacf71 100644
--- a/src/mesa/drivers/dri/i965/brw_state_cache.c
+++ b/src/mesa/drivers/dri/i965/brw_state_cache.c
@@ -517,6 +517,55 @@ brw_clear_cache(struct brw_context *brw, struct brw_cache *cache)
brw->state.dirty.cache |= ~0;
}
+/* Clear all entries from the cache that point to the given bo.
+ *
+ * This lets us release memory for reuse earlier for known-dead buffers,
+ * at the cost of walking the entire hash table.
+ */
+void
+brw_state_cache_bo_delete(struct brw_cache *cache, dri_bo *bo)
+{
+   struct brw_cache_item **prev;
+   GLuint i;
+
+   if (INTEL_DEBUG & DEBUG_STATE)
+      _mesa_printf("%s\n", __FUNCTION__);
+
+   for (i = 0; i < cache->size; i++) {
+      for (prev = &cache->items[i]; *prev;) {
+         struct brw_cache_item *c = *prev;
+         int j;
+
+         for (j = 0; j < c->nr_reloc_bufs; j++) {
+            if (c->reloc_bufs[j] == bo)
+               break;
+         }
+
+         if (j != c->nr_reloc_bufs) {
+
+            *prev = c->next;
+
+            for (j = 0; j < c->nr_reloc_bufs; j++)
+               dri_bo_unreference(c->reloc_bufs[j]);
+            dri_bo_unreference(c->bo);
+            free((void *)c->key);
+            free(c);
+            cache->n_items--;
+
+            /* Delete up the tree.  Notably we're trying to get from a
+             * request to delete the surface, to deleting the surface state
+             * object, to deleting the binding table.  We keep it simple and
+             * restart the walk of this bucket afterwards, since the
+             * recursive delete may have freed the entry *prev points at.
+             */
+            brw_state_cache_bo_delete(cache, c->bo);
+            prev = &cache->items[i];
+         } else {
+            prev = &(*prev)->next;
+         }
+      }
+   }
+}
void
brw_state_cache_check_size(struct brw_context *brw)
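
The new brw_state_cache_bo_delete() above walks every hash bucket and unlinks any entry whose relocation list references the dead buffer, using a pointer-to-pointer cursor (prev) so the bucket head needs no special case; it then restarts the bucket walk after the recursive delete, because that recursion can free the entry prev points at. A minimal, self-contained sketch of the same unlink-while-iterating idiom on a plain singly linked list (the names here are illustrative only, not part of the patch):

#include <stdio.h>
#include <stdlib.h>

struct node {
   int key;
   struct node *next;
};

/* Remove every node whose key matches, walking the chain through a
 * pointer-to-pointer so the list head needs no special handling. */
static void
remove_matching(struct node **head, int key)
{
   struct node **prev;

   for (prev = head; *prev;) {
      struct node *n = *prev;

      if (n->key == key) {
         *prev = n->next;     /* unlink: *prev is the head or a ->next */
         free(n);
      } else {
         prev = &n->next;     /* advance only when nothing was removed */
      }
   }
}

static struct node *
push(struct node *head, int key)
{
   struct node *n = malloc(sizeof(*n));
   n->key = key;
   n->next = head;
   return n;
}

int
main(void)
{
   struct node *list = NULL, *n;
   int keys[] = { 1, 2, 2, 3, 2 };
   unsigned i;

   for (i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
      list = push(list, keys[i]);

   remove_matching(&list, 2);

   for (n = list; n; n = n->next)
      printf("%d ", n->key);   /* prints: 3 1 */
   printf("\n");
   return 0;
}

Unlike this sketch, the real function also recurses on c->bo after freeing an entry, so that deleting a surface buffer cascades to the surface state object and then to the binding table that references it.
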
diff --git a/src/mesa/drivers/dri/intel/intel_mipmap_tree.c b/src/mesa/drivers/dri/intel/intel_mipmap_tree.c
index c985da5aa25..4f5101a3128 100644
--- a/src/mesa/drivers/dri/intel/intel_mipmap_tree.c
+++ b/src/mesa/drivers/dri/intel/intel_mipmap_tree.c
@@ -29,6 +29,9 @@
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_chipset.h"
+#ifndef I915
+#include "brw_state.h"
+#endif
#include "main/enums.h"
#define FILE_DEBUG_FLAG DEBUG_MIPTREE
@@ -269,6 +272,19 @@ intel_miptree_release(struct intel_context *intel,
DBG("%s deleting %p\n", __FUNCTION__, *mt);
+#ifndef I915
+   /* Free up cached binding tables holding a reference on our buffer, to
+    * avoid excessive memory consumption.
+    *
+    * This isn't as aggressive as we could be: ideally we would do it any
+    * time the last reference on a region is released.  But intel_region.c
+    * is context-agnostic.  Perhaps our constant state cache should be,
+    * as well.
+    */
+   brw_state_cache_bo_delete(&brw_context(&intel->ctx)->surface_cache,
+                             (*mt)->region->buffer);
+#endif
+
intel_region_release(&((*mt)->region));
for (i = 0; i < MAX_TEXTURE_LEVELS; i++)
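
To make the motivation in the intel_miptree_release() hunk concrete: the surface cache holds a dri_bo reference for each entry's reloc_bufs[], so dropping the miptree's own reference does not release the buffer storage until the cached entry is also deleted. A minimal refcounting sketch of that effect, assuming a hypothetical fake_bo type that stands in for dri_bo (none of these names are Mesa API):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a dri_bo: storage is freed only when the
 * last reference goes away. */
struct fake_bo {
   int refcount;
   void *storage;
   size_t size;
};

static struct fake_bo *
fake_bo_alloc(size_t size)
{
   struct fake_bo *bo = malloc(sizeof(*bo));
   bo->refcount = 1;
   bo->size = size;
   bo->storage = malloc(size);
   return bo;
}

static void
fake_bo_reference(struct fake_bo *bo)
{
   bo->refcount++;
}

static void
fake_bo_unreference(struct fake_bo *bo)
{
   if (--bo->refcount == 0) {
      printf("freeing %zu bytes\n", bo->size);
      free(bo->storage);
      free(bo);
   } else {
      printf("still %d reference(s), %zu bytes kept alive\n",
             bo->refcount, bo->size);
   }
}

int
main(void)
{
   struct fake_bo *texture = fake_bo_alloc(4 * 1024 * 1024);

   /* A cached state entry takes its own reference, as the state cache
    * does for each reloc_bufs[] buffer. */
   fake_bo_reference(texture);

   /* The texture owner drops its reference: nothing is freed yet. */
   fake_bo_unreference(texture);

   /* Only once the cached entry is deleted does the memory go away. */
   fake_bo_unreference(texture);

   return 0;
}

Forcing that last unreference to happen as soon as the buffer is known dead, rather than whenever the cache entry happens to be evicted, is what the brw_state_cache_bo_delete() call added here does when a mipmap tree is released.
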