summaryrefslogtreecommitdiffstats
path: root/src/gallium/auxiliary
diff options
context:
space:
mode:
authorMarek Olšák <[email protected]>2014-08-20 23:53:40 +0200
committerMarek Olšák <[email protected]>2014-09-01 20:17:48 +0200
commitb419c651fbbf3660d7b53623dfa2e5a4c9bd3b98 (patch)
treec20732bea135011f51aafda7ed0dec1db630d934 /src/gallium/auxiliary
parentbba7d29a86862df89f43b682be674c33326fa248 (diff)
gallium/pb_bufmgr_cache: limit the size of cache
This should make a machine running piglit more responsive at times. For example, streaming-texture-leak can easily eat 600 MB because of how fast it creates new textures.
Diffstat (limited to 'src/gallium/auxiliary')
-rw-r--r--src/gallium/auxiliary/pipebuffer/pb_bufmgr.h3
-rw-r--r--src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c20
2 files changed, 20 insertions, 3 deletions
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h b/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h
index d5b0ee2ac96..147ce39041c 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h
@@ -163,7 +163,8 @@ struct pb_manager *
pb_cache_manager_create(struct pb_manager *provider,
unsigned usecs,
float size_factor,
- unsigned bypass_usage);
+ unsigned bypass_usage,
+ uint64_t maximum_cache_size);
struct pb_fence_ops;
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c
index 32a88754042..5eb8d06a091 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c
@@ -84,6 +84,7 @@ struct pb_cache_manager
pb_size numDelayed;
float size_factor;
unsigned bypass_usage;
+ uint64_t cache_size, max_cache_size;
};
@@ -114,6 +115,7 @@ _pb_cache_buffer_destroy(struct pb_cache_buffer *buf)
LIST_DEL(&buf->head);
assert(mgr->numDelayed);
--mgr->numDelayed;
+ mgr->cache_size -= buf->base.size;
assert(!pipe_is_referenced(&buf->base.reference));
pb_reference(&buf->buffer, NULL);
FREE(buf);
@@ -158,11 +160,20 @@ pb_cache_buffer_destroy(struct pb_buffer *_buf)
assert(!pipe_is_referenced(&buf->base.reference));
_pb_cache_buffer_list_check_free(mgr);
-
+
+ /* Directly release any buffer that exceeds the limit. */
+ if (mgr->cache_size + buf->base.size > mgr->max_cache_size) {
+ pb_reference(&buf->buffer, NULL);
+ FREE(buf);
+ pipe_mutex_unlock(mgr->mutex);
+ return;
+ }
+
buf->start = os_time_get();
buf->end = buf->start + mgr->usecs;
LIST_ADDTAIL(&buf->head, &mgr->delayed);
++mgr->numDelayed;
+ mgr->cache_size += buf->base.size;
pipe_mutex_unlock(mgr->mutex);
}
@@ -314,6 +325,7 @@ pb_cache_manager_create_buffer(struct pb_manager *_mgr,
}
if(buf) {
+ mgr->cache_size -= buf->base.size;
LIST_DEL(&buf->head);
--mgr->numDelayed;
pipe_mutex_unlock(mgr->mutex);
@@ -400,12 +412,15 @@ pb_cache_manager_destroy(struct pb_manager *mgr)
* the requested size as cache hits.
* @param bypass_usage Bitmask. If (requested usage & bypass_usage) != 0,
* buffer allocation requests are redirected to the provider.
+ * @param maximum_cache_size Maximum size of all unused buffers the cache can
+ * hold.
*/
struct pb_manager *
pb_cache_manager_create(struct pb_manager *provider,
unsigned usecs,
float size_factor,
- unsigned bypass_usage)
+ unsigned bypass_usage,
+ uint64_t maximum_cache_size)
{
struct pb_cache_manager *mgr;
@@ -425,6 +440,7 @@ pb_cache_manager_create(struct pb_manager *provider,
mgr->bypass_usage = bypass_usage;
LIST_INITHEAD(&mgr->delayed);
mgr->numDelayed = 0;
+ mgr->max_cache_size = maximum_cache_size;
pipe_mutex_init(mgr->mutex);
return &mgr->base;