author:    Eric Anholt <[email protected]>    2009-02-12 03:54:58 -0800
committer: Eric Anholt <[email protected]>    2009-09-30 11:27:27 -0700
commit:    49fbdd18ed738feaf73b7faba4d3577cd9cc3e59
tree:      65a5cca320d3f4ad3baadefe975a3628982587e4
parent:    b77469871a57240b33b61f12cde0da078470237b
i965: Fix massive memory allocation for streaming texture usage.
Once we've freed a miptree, we won't see any more state cache requests
that would hit the things that pointed at it until we've let the miptree
get released back into the BO cache to be reused. By leaving those
surface state and binding table pointers that pointed at it around, we
would end up with up to (500 * texture size) in memory uselessly consumed
by the state cache.
Bug #20057
Bug #23530
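
Editor's note: a minimal sketch of the failure mode described above, using hypothetical types rather than the driver's actual dri_bo API. The point is only that a cached surface-state or binding-table entry holds its own reference on the texture's buffer object, so freeing the miptree alone releases nothing; with hundreds of cached entries for streaming textures, that is where the "(500 * texture size)" figure comes from.

```c
/* Illustration only: hypothetical refcounted buffer, not libdrm's dri_bo. */
#include <stdio.h>
#include <stdlib.h>

struct bo {
   int refcount;
   size_t size;
};

static void bo_unreference(struct bo *b)
{
   if (b && --b->refcount == 0) {
      printf("releasing %zu bytes\n", b->size);
      free(b);                /* only now does the memory come back */
   }
}

int main(void)
{
   struct bo *tex = calloc(1, sizeof(*tex));
   tex->refcount = 1;
   tex->size = 4 * 1024 * 1024;

   /* A cached state entry takes its own reference on the buffer. */
   tex->refcount++;

   /* The application frees the texture... */
   bo_unreference(tex);       /* refcount 2 -> 1: nothing is freed */

   /* ...but the 4 MB stay pinned until the cache entry is evicted and
    * drops its reference too.  Multiply by hundreds of entries and by
    * the texture size, and the state cache eats memory uselessly. */
   bo_unreference(tex);       /* cache eviction: refcount 1 -> 0 */
   return 0;
}
```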
-rw-r--r-- | src/mesa/drivers/dri/i965/brw_state.h          |  1
-rw-r--r-- | src/mesa/drivers/dri/i965/brw_state_cache.c    | 49
-rw-r--r-- | src/mesa/drivers/dri/intel/intel_mipmap_tree.c | 16
3 files changed, 66 insertions, 0 deletions
```diff
diff --git a/src/mesa/drivers/dri/i965/brw_state.h b/src/mesa/drivers/dri/i965/brw_state.h
index 5335eac8951..d639656b9d4 100644
--- a/src/mesa/drivers/dri/i965/brw_state.h
+++ b/src/mesa/drivers/dri/i965/brw_state.h
@@ -151,6 +151,7 @@
 void brw_state_cache_check_size( struct brw_context *brw );
 void brw_init_caches( struct brw_context *brw );
 void brw_destroy_caches( struct brw_context *brw );
+void brw_state_cache_bo_delete(struct brw_cache *cache, dri_bo *bo);
 
 /***********************************************************************
  * brw_state_batch.c
diff --git a/src/mesa/drivers/dri/i965/brw_state_cache.c b/src/mesa/drivers/dri/i965/brw_state_cache.c
index e40d7a04164..f8e46aacf71 100644
--- a/src/mesa/drivers/dri/i965/brw_state_cache.c
+++ b/src/mesa/drivers/dri/i965/brw_state_cache.c
@@ -517,6 +517,55 @@ brw_clear_cache(struct brw_context *brw, struct brw_cache *cache)
    brw->state.dirty.cache |= ~0;
 }
 
+/* Clear all entries from the cache that point to the given bo.
+ *
+ * This lets us release memory for reuse earlier for known-dead buffers,
+ * at the cost of walking the entire hash table.
+ */
+void
+brw_state_cache_bo_delete(struct brw_cache *cache, dri_bo *bo)
+{
+   struct brw_cache_item **prev;
+   GLuint i;
+
+   if (INTEL_DEBUG & DEBUG_STATE)
+      _mesa_printf("%s\n", __FUNCTION__);
+
+   for (i = 0; i < cache->size; i++) {
+      for (prev = &cache->items[i]; *prev;) {
+         struct brw_cache_item *c = *prev;
+         int j;
+
+         for (j = 0; j < c->nr_reloc_bufs; j++) {
+            if (c->reloc_bufs[j] == bo)
+               break;
+         }
+
+         if (j != c->nr_reloc_bufs) {
+
+            *prev = c->next;
+
+            for (j = 0; j < c->nr_reloc_bufs; j++)
+               dri_bo_unreference(c->reloc_bufs[j]);
+            dri_bo_unreference(c->bo);
+            free((void *)c->key);
+            free(c);
+            cache->n_items--;
+
+            /* Delete up the tree.  Notably we're trying to get from
+             * a request to delete the surface, to deleting the surface state
+             * object, to deleting the binding table.  We're slack and restart
+             * the deletion process when we do this because the other delete
+             * may kill our *prev.
+             */
+            brw_state_cache_bo_delete(cache, c->bo);
+            prev = &cache->items[i];
+         } else {
+            prev = &(*prev)->next;
+         }
+      }
+   }
+}
 
 void
 brw_state_cache_check_size(struct brw_context *brw)
diff --git a/src/mesa/drivers/dri/intel/intel_mipmap_tree.c b/src/mesa/drivers/dri/intel/intel_mipmap_tree.c
index c985da5aa25..4f5101a3128 100644
--- a/src/mesa/drivers/dri/intel/intel_mipmap_tree.c
+++ b/src/mesa/drivers/dri/intel/intel_mipmap_tree.c
@@ -29,6 +29,9 @@
 #include "intel_mipmap_tree.h"
 #include "intel_regions.h"
 #include "intel_chipset.h"
+#ifndef I915
+#include "brw_state.h"
+#endif
 #include "main/enums.h"
 
 #define FILE_DEBUG_FLAG DEBUG_MIPTREE
@@ -269,6 +272,19 @@ intel_miptree_release(struct intel_context *intel,
 
       DBG("%s deleting %p\n", __FUNCTION__, *mt);
 
+#ifndef I915
+      /* Free up cached binding tables holding a reference on our buffer, to
+       * avoid excessive memory consumption.
+       *
+       * This isn't as aggressive as we could be, as we'd like to do
+       * it from any time we free the last ref on a region.  But intel_region.c
+       * is context-agnostic.  Perhaps our constant state cache should be, as
+       * well.
+       */
+      brw_state_cache_bo_delete(&brw_context(&intel->ctx)->surface_cache,
+                                (*mt)->region->buffer);
+#endif
+
       intel_region_release(&((*mt)->region));
 
       for (i = 0; i < MAX_TEXTURE_LEVELS; i++)
```
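
Editor's note: a condensed, self-contained sketch of the same eviction pattern that brw_state_cache_bo_delete implements, with simplified invented structures rather than the real brw_cache types (it also omits the key free and debug logging). Entries whose relocation list references the dead buffer are unlinked and freed, and the delete then cascades "up the tree" to entries that referenced the freed entry's own BO, restarting the bucket walk because the recursion may have unlinked *prev.

```c
#include <stdlib.h>

struct bo {
   int refcount;
};

struct cache_item {
   struct cache_item *next;
   struct bo *bo;               /* BO holding this state object itself   */
   struct bo **reloc_bufs;      /* BOs this state object points at       */
   int nr_reloc_bufs;
};

struct cache {
   struct cache_item **items;   /* hash buckets                          */
   unsigned size;               /* number of buckets                     */
   unsigned n_items;
};

void bo_unreference(struct bo *b)
{
   if (b && --b->refcount == 0)
      free(b);
}

/* Drop every cache entry whose relocation list references 'dead',
 * then cascade to entries that referenced the dropped entry's own BO. */
void cache_bo_delete(struct cache *cache, struct bo *dead)
{
   for (unsigned i = 0; i < cache->size; i++) {
      struct cache_item **prev = &cache->items[i];

      while (*prev) {
         struct cache_item *c = *prev;
         int j;

         for (j = 0; j < c->nr_reloc_bufs; j++)
            if (c->reloc_bufs[j] == dead)
               break;

         if (j == c->nr_reloc_bufs) {          /* no match: keep walking */
            prev = &c->next;
            continue;
         }

         *prev = c->next;                      /* unlink the stale entry */
         for (j = 0; j < c->nr_reloc_bufs; j++)
            bo_unreference(c->reloc_bufs[j]);  /* drop its buffer refs   */

         struct bo *own = c->bo;               /* hold our ref on the
                                                  entry's own BO for now */
         free(c->reloc_bufs);
         free(c);
         cache->n_items--;

         /* "Delete up the tree": a binding table that pointed at this
          * entry's surface-state BO is stale now too.  The recursion can
          * unlink entries ahead of us, so restart this bucket's walk
          * rather than trusting *prev. */
         cache_bo_delete(cache, own);
         bo_unreference(own);
         prev = &cache->items[i];
      }
   }
}
```

One deliberate difference from the committed code: the sketch keeps the entry's own BO referenced until after the recursive pass and only then unreferences it, so the pointer passed to the recursive call is guaranteed to stay valid in this standalone version.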