about summary refs log tree commit diff stats
path: root/src/gallium/drivers/panfrost/pan_bo.c
diff options
context:
space:
mode:
author: Boris Brezillon <[email protected]> 2019-08-31 18:51:20 +0200
committer: Boris Brezillon <[email protected]> 2019-10-03 16:55:38 -0400
commit2225383af8b848a59e1a21335ee19b3845292671 (patch)
tree5d16da6df86f722bb4e9ba12d0a868895a45a3df /src/gallium/drivers/panfrost/pan_bo.c
parent22190bc27b93b8314db481dbf6002eeae6e39f62 (diff)
panfrost: Make sure the BO is 'ready' when picked from the cache
This is needed if we want to free the panfrost_batch object at submit time in order to not have to GC the batch on the next job submission.

Signed-off-by: Boris Brezillon <[email protected]>
Reviewed-by: Alyssa Rosenzweig <[email protected]>
Diffstat (limited to 'src/gallium/drivers/panfrost/pan_bo.c')
-rw-r--r-- src/gallium/drivers/panfrost/pan_bo.c | 114
1 file changed, 90 insertions(+), 24 deletions(-)
diff --git a/src/gallium/drivers/panfrost/pan_bo.c b/src/gallium/drivers/panfrost/pan_bo.c
index 9daddf9d0cc..37602688d63 100644
--- a/src/gallium/drivers/panfrost/pan_bo.c
+++ b/src/gallium/drivers/panfrost/pan_bo.c
@@ -23,6 +23,7 @@
* Authors (Collabora):
* Alyssa Rosenzweig <[email protected]>
*/
+#include <errno.h>
#include <stdio.h>
#include <fcntl.h>
#include <xf86drm.h>
@@ -101,6 +102,63 @@ panfrost_bo_free(struct panfrost_bo *bo)
ralloc_free(bo);
}
+/* Returns true if the BO is ready, false otherwise.
+ * access_type is encoding the type of access one wants to ensure is done.
+ * Say you want to make sure all writers are done writing, you should pass
+ * PAN_BO_ACCESS_WRITE.
+ * If you want to wait for all users, you should pass PAN_BO_ACCESS_RW.
+ * PAN_BO_ACCESS_READ would work too as waiting for readers implies
+ * waiting for writers as well, but we want to make things explicit and waiting
+ * only for readers is impossible.
+ */
+bool
+panfrost_bo_wait(struct panfrost_bo *bo, int64_t timeout_ns,
+ uint32_t access_type)
+{
+ struct drm_panfrost_wait_bo req = {
+ .handle = bo->gem_handle,
+ .timeout_ns = timeout_ns,
+ };
+ int ret;
+
+ assert(access_type == PAN_BO_ACCESS_WRITE ||
+ access_type == PAN_BO_ACCESS_RW);
+
+ /* If the BO has been exported or imported we can't rely on the cached
+ * state, we need to call the WAIT_BO ioctl.
+ */
+ if (!(bo->flags & (PAN_BO_IMPORTED | PAN_BO_EXPORTED))) {
+ /* If ->gpu_access is 0, the BO is idle, no need to wait. */
+ if (!bo->gpu_access)
+ return true;
+
+ /* If the caller only wants to wait for writers and no
+ * writes are pending, we don't have to wait.
+ */
+ if (access_type == PAN_BO_ACCESS_WRITE &&
+ !(bo->gpu_access & PAN_BO_ACCESS_WRITE))
+ return true;
+ }
+
+ /* The ioctl returns >= 0 value when the BO we are waiting for is ready
+ * -1 otherwise.
+ */
+ ret = drmIoctl(bo->screen->fd, DRM_IOCTL_PANFROST_WAIT_BO, &req);
+ if (ret != -1) {
+ /* Set gpu_access to 0 so that the next call to bo_wait()
+ * doesn't have to call the WAIT_BO ioctl.
+ */
+ bo->gpu_access = 0;
+ return true;
+ }
+
+ /* If errno is not ETIMEDOUT or EBUSY that means the handle we passed
+ * is invalid, which shouldn't happen here.
+ */
+ assert(errno == ETIMEDOUT || errno == EBUSY);
+ return false;
+}
+
/* Helper to calculate the bucket index of a BO */
static unsigned
@@ -137,9 +195,8 @@ pan_bucket(struct panfrost_screen *screen, unsigned size)
* BO. */
static struct panfrost_bo *
-panfrost_bo_cache_fetch(
- struct panfrost_screen *screen,
- size_t size, uint32_t flags)
+panfrost_bo_cache_fetch(struct panfrost_screen *screen,
+ size_t size, uint32_t flags, bool dontwait)
{
pthread_mutex_lock(&screen->bo_cache_lock);
struct list_head *bucket = pan_bucket(screen, size);
@@ -147,27 +204,30 @@ panfrost_bo_cache_fetch(
/* Iterate the bucket looking for something suitable */
list_for_each_entry_safe(struct panfrost_bo, entry, bucket, link) {
- if (entry->size >= size &&
- entry->flags == flags) {
- int ret;
- struct drm_panfrost_madvise madv;
+ if (entry->size < size || entry->flags != flags)
+ continue;
- /* This one works, splice it out of the cache */
- list_del(&entry->link);
+ if (!panfrost_bo_wait(entry, dontwait ? 0 : INT64_MAX,
+ PAN_BO_ACCESS_RW))
+ continue;
+
+ struct drm_panfrost_madvise madv = {
+ .handle = entry->gem_handle,
+ .madv = PANFROST_MADV_WILLNEED,
+ };
+ int ret;
+
+ /* This one works, splice it out of the cache */
+ list_del(&entry->link);
- madv.handle = entry->gem_handle;
- madv.madv = PANFROST_MADV_WILLNEED;
- madv.retained = 0;
-
- ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_MADVISE, &madv);
- if (!ret && !madv.retained) {
- panfrost_bo_free(entry);
- continue;
- }
- /* Let's go! */
- bo = entry;
- break;
+ ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_MADVISE, &madv);
+ if (!ret && !madv.retained) {
+ panfrost_bo_free(entry);
+ continue;
}
+ /* Let's go! */
+ bo = entry;
+ break;
}
pthread_mutex_unlock(&screen->bo_cache_lock);
@@ -281,12 +341,18 @@ panfrost_bo_create(struct panfrost_screen *screen, size_t size,
if (flags & PAN_BO_GROWABLE)
assert(flags & PAN_BO_INVISIBLE);
- /* Before creating a BO, we first want to check the cache, otherwise,
- * the cache misses and we need to allocate a BO fresh from the kernel
+ /* Before creating a BO, we first want to check the cache but without
+ * waiting for BO readiness (BOs in the cache can still be referenced
+ * by jobs that are not finished yet).
+ * If the cached allocation fails we fall back on fresh BO allocation,
+ * and if that fails too, we try one more time to allocate from the
+ * cache, but this time we accept to wait.
*/
- bo = panfrost_bo_cache_fetch(screen, size, flags);
+ bo = panfrost_bo_cache_fetch(screen, size, flags, true);
if (!bo)
bo = panfrost_bo_alloc(screen, size, flags);
+ if (!bo)
+ bo = panfrost_bo_cache_fetch(screen, size, flags, false);
if (!bo)
fprintf(stderr, "BO creation failed\n");