about | summary | refs | log | tree | commit | diff | stats
path: root/src/gallium/drivers/swr
diff options
context:
space:
mode:
authorTomasz Pyra <[email protected]>2020-03-10 13:00:03 +0100
committerMarge Bot <[email protected]>2020-03-19 11:11:26 +0000
commit36ec3cbcf88e9dc4898bbe2319cc4a5a71ba72e1 (patch)
treec768a91d3a3f15fc4d3931d438dc474b882c3fc0 /src/gallium/drivers/swr
parentdb5cc6a7ddeddbeb1e360156db520f55a5852b99 (diff)
gallium/swr: spin-lock performance improvement
Currently, the worker threads are very aggressively polling for new tasks. If the work is not constantly fed into the pipeline (which is the case for most interactive applications), this creates unnecessary memory pressure and uses CPU cycles that could otherwise be used by the applications. The change implements a simple back-off mechanism to help with this problem. Change by Tomasz Pyra ([email protected]) Reviewed-by: Alok Hota <[email protected]> Reviewed-by: Jan Zielinski <[email protected]> Tested-by: Marge Bot <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4226> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4226>
Diffstat (limited to 'src/gallium/drivers/swr')
-rw-r--r--src/gallium/drivers/swr/rasterizer/core/threads.cpp15
1 files changed, 14 insertions, 1 deletions
diff --git a/src/gallium/drivers/swr/rasterizer/core/threads.cpp b/src/gallium/drivers/swr/rasterizer/core/threads.cpp
index c75fb568431..556e02e99ef 100644
--- a/src/gallium/drivers/swr/rasterizer/core/threads.cpp
+++ b/src/gallium/drivers/swr/rasterizer/core/threads.cpp
@@ -592,17 +592,20 @@ bool WorkOnFifoBE(SWR_CONTEXT* pContext,
pDC->pTileMgr->getTileIndices(tileID, x, y);
if (((x ^ y) & numaMask) != numaNode)
{
+ _mm_pause();
continue;
}
if (!tile->getNumQueued())
{
+ _mm_pause();
continue;
}
// can only work on this draw if it's not in use by other threads
if (lockedTiles.get(tileID))
{
+ _mm_pause();
continue;
}
@@ -663,6 +666,7 @@ bool WorkOnFifoBE(SWR_CONTEXT* pContext,
// This tile is already locked. So let's add it to our locked tiles set. This way we
// don't try locking this one again.
lockedTiles.set(tileID);
+ _mm_pause();
}
}
}
@@ -750,7 +754,7 @@ void WorkOnFifoFE(SWR_CONTEXT* pContext, uint32_t workerId, uint32_t& curDrawFE)
uint32_t dcSlot = curDraw % pContext->MAX_DRAWS_IN_FLIGHT;
DRAW_CONTEXT* pDC = &pContext->dcRing[dcSlot];
- if (!pDC->isCompute && !pDC->FeLock)
+ if (!pDC->FeLock && !pDC->isCompute)
{
if (CheckDependencyFE(pContext, pDC, lastRetiredFE))
{
@@ -765,7 +769,16 @@ void WorkOnFifoFE(SWR_CONTEXT* pContext, uint32_t workerId, uint32_t& curDrawFE)
CompleteDrawFE(pContext, workerId, pDC);
}
+ else
+ {
+ _mm_pause();
+ }
+ }
+ else
+ {
+ _mm_pause();
}
+
curDraw++;
}
}