about summary refs log tree commit diff stats
path: root/src
diff options
context:
space:
mode:
authorKenneth Graunke <[email protected]>2018-03-26 16:14:29 -0700
committerKenneth Graunke <[email protected]>2019-02-21 10:26:05 -0800
commitf6be3d4f3aeaa2dfca34ff32610929561adb16a1 (patch)
tree2075809bfb56d9510d26ede5ab10b72cd380eddf /src
parent902a122404ddb7664d2ee9320f1de9fc58544c1c (diff)
iris: bufmgr updates.
Drop BO_ALLOC_BUSY (best not to hand people a loaded gun...). Drop vestiges of alignment.
Diffstat (limited to 'src')
-rw-r--r--src/gallium/drivers/iris/iris_batch.c5
-rw-r--r--src/gallium/drivers/iris/iris_bufmgr.c55
-rw-r--r--src/gallium/drivers/iris/iris_bufmgr.h15
3 files changed, 20 insertions, 55 deletions
diff --git a/src/gallium/drivers/iris/iris_batch.c b/src/gallium/drivers/iris/iris_batch.c
index 0734a2a3d41..7a5c3df3a2e 100644
--- a/src/gallium/drivers/iris/iris_batch.c
+++ b/src/gallium/drivers/iris/iris_batch.c
@@ -95,7 +95,7 @@ create_batch_buffer(struct iris_bufmgr *bufmgr,
struct iris_batch_buffer *buf,
const char *name, unsigned size)
{
- buf->bo = iris_bo_alloc(bufmgr, name, size, 4096);
+ buf->bo = iris_bo_alloc(bufmgr, name, size);
buf->bo->kflags |= EXEC_OBJECT_CAPTURE;
buf->map = iris_bo_map(NULL, buf->bo, MAP_READ | MAP_WRITE);
buf->map_next = buf->map;
@@ -164,7 +164,6 @@ add_exec_bo(struct iris_batch *batch, struct iris_bo *bo)
batch->validation_list[batch->exec_count] =
(struct drm_i915_gem_exec_object2) {
.handle = bo->gem_handle,
- .alignment = bo->align,
.offset = bo->gtt_offset,
.flags = bo->kflags,
};
@@ -300,7 +299,7 @@ grow_buffer(struct iris_batch *batch,
const unsigned existing_bytes = buffer_bytes_used(buf);
struct iris_bo *new_bo =
- iris_bo_alloc(bufmgr, bo->name, new_size, bo->align);
+ iris_bo_alloc(bufmgr, bo->name, new_size);
buf->map = iris_bo_map(NULL, new_bo, MAP_READ | MAP_WRITE);
buf->map_next = buf->map + existing_bytes;
diff --git a/src/gallium/drivers/iris/iris_bufmgr.c b/src/gallium/drivers/iris/iris_bufmgr.c
index ce1066beaf0..9bc101ee5ab 100644
--- a/src/gallium/drivers/iris/iris_bufmgr.c
+++ b/src/gallium/drivers/iris/iris_bufmgr.c
@@ -239,7 +239,7 @@ bo_alloc_internal(struct iris_bufmgr *bufmgr,
uint64_t size,
unsigned flags,
uint32_t tiling_mode,
- uint32_t stride, uint64_t alignment)
+ uint32_t stride)
{
struct iris_bo *bo;
unsigned int page_size = getpagesize();
@@ -247,20 +247,11 @@ bo_alloc_internal(struct iris_bufmgr *bufmgr,
struct bo_cache_bucket *bucket;
bool alloc_from_cache;
uint64_t bo_size;
- bool busy = false;
bool zeroed = false;
- if (flags & BO_ALLOC_BUSY)
- busy = true;
-
if (flags & BO_ALLOC_ZEROED)
zeroed = true;
- /* BUSY does doesn't really jive with ZEROED as we have to wait for it to
- * be idle before we can memset. Just disallow that combination.
- */
- assert(!(busy && zeroed));
-
/* Round the allocated size up to a power of two number of pages. */
bucket = bucket_for_size(bufmgr, size);
@@ -280,31 +271,13 @@ bo_alloc_internal(struct iris_bufmgr *bufmgr,
retry:
alloc_from_cache = false;
if (bucket != NULL && !list_empty(&bucket->head)) {
- if (busy && !zeroed) {
- /* Allocate new render-target BOs from the tail (MRU)
- * of the list, as it will likely be hot in the GPU
- * cache and in the aperture for us. If the caller
- * asked us to zero the buffer, we don't want this
- * because we are going to mmap it.
- */
- bo = LIST_ENTRY(struct iris_bo, bucket->head.prev, head);
- list_del(&bo->head);
+ /* If the last BO in the cache is idle, then reuse it. Otherwise,
+ * allocate a fresh buffer to avoid stalling.
+ */
+ bo = LIST_ENTRY(struct iris_bo, bucket->head.next, head);
+ if (!iris_bo_busy(bo)) {
alloc_from_cache = true;
- bo->align = alignment;
- } else {
- assert(alignment == 0);
- /* For non-render-target BOs (where we're probably
- * going to map it first thing in order to fill it
- * with data), check if the last BO in the cache is
- * unbusy, and only reuse in that case. Otherwise,
- * allocating a new buffer is probably faster than
- * waiting for the GPU to finish.
- */
- bo = LIST_ENTRY(struct iris_bo, bucket->head.next, head);
- if (!iris_bo_busy(bo)) {
- alloc_from_cache = true;
- list_del(&bo->head);
- }
+ list_del(&bo->head);
}
if (alloc_from_cache) {
@@ -352,7 +325,6 @@ retry:
bo->gem_handle = create.handle;
bo->bufmgr = bufmgr;
- bo->align = alignment;
bo->tiling_mode = I915_TILING_NONE;
bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
@@ -397,17 +369,18 @@ err:
struct iris_bo *
iris_bo_alloc(struct iris_bufmgr *bufmgr,
- const char *name, uint64_t size, uint64_t alignment)
+ const char *name,
+ uint64_t size)
{
- return bo_alloc_internal(bufmgr, name, size, 0, I915_TILING_NONE, 0, 0);
+ return bo_alloc_internal(bufmgr, name, size, 0, I915_TILING_NONE, 0);
}
struct iris_bo *
iris_bo_alloc_tiled(struct iris_bufmgr *bufmgr, const char *name,
- uint64_t size, uint32_t tiling_mode, uint32_t pitch,
- unsigned flags)
+ uint64_t size, uint32_t tiling_mode, uint32_t pitch,
+ unsigned flags)
{
- return bo_alloc_internal(bufmgr, name, size, flags, tiling_mode, pitch, 0);
+ return bo_alloc_internal(bufmgr, name, size, flags, tiling_mode, pitch);
}
/**
@@ -418,7 +391,7 @@ iris_bo_alloc_tiled(struct iris_bufmgr *bufmgr, const char *name,
*/
struct iris_bo *
iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
- const char *name, unsigned int handle)
+ const char *name, unsigned int handle)
{
struct iris_bo *bo;
diff --git a/src/gallium/drivers/iris/iris_bufmgr.h b/src/gallium/drivers/iris/iris_bufmgr.h
index 035c908f3d9..fa4df2a53df 100644
--- a/src/gallium/drivers/iris/iris_bufmgr.h
+++ b/src/gallium/drivers/iris/iris_bufmgr.h
@@ -44,13 +44,6 @@ struct iris_bo {
*/
uint64_t size;
- /**
- * Alignment requirement for object
- *
- * Used for GTT mapping & pinning the object.
- */
- uint64_t align;
-
/** Buffer manager context associated with this buffer object */
struct iris_bufmgr *bufmgr;
@@ -152,8 +145,7 @@ struct iris_bo {
bool cache_coherent;
};
-#define BO_ALLOC_BUSY (1<<0)
-#define BO_ALLOC_ZEROED (1<<1)
+#define BO_ALLOC_ZEROED (1<<0)
/**
* Allocate a buffer object.
@@ -162,8 +154,9 @@ struct iris_bo {
* address space or graphics device aperture. They must be mapped
* using iris_bo_map() to be used by the CPU.
*/
-struct iris_bo *iris_bo_alloc(struct iris_bufmgr *bufmgr, const char *name,
- uint64_t size, uint64_t alignment);
+struct iris_bo *iris_bo_alloc(struct iris_bufmgr *bufmgr,
+ const char *name,
+ uint64_t size);
/**
* Allocate a tiled buffer object.