summaryrefslogtreecommitdiffstats
path: root/src/amd
diff options
context:
space:
mode:
authorDave Airlie <[email protected]>2017-02-10 00:58:59 +0000
committerDave Airlie <[email protected]>2017-02-12 19:00:19 +0000
commitf466d4dd6af76c3e3edef403c542180094009fc3 (patch)
tree76ad1aa01e89135548774ea213fae08f234ad454 /src/amd
parent48f04862c1d74844db9534b32ef73e5a2bc0ae74 (diff)
radv: reduce CPU overhead merging bo lists.
Just noticed we do a fair bit of unneeded searching here. Since we know that the buffers in a CS are already unique, the first time we get any buffers we can simply memcpy them into place. When searching for subsequent CSes, we then only have to search up to where the previous unique buffers ended.

Reviewed-by: Bas Nieuwenhuizen <[email protected]>
Signed-off-by: Dave Airlie <[email protected]>
Diffstat (limited to 'src/amd')
-rw-r--r--src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c12
1 file changed, 11 insertions, 1 deletion
diff --git a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
index b58f5db0622..9e468bd4ca8 100644
--- a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
+++ b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
@@ -486,9 +486,19 @@ static int radv_amdgpu_create_bo_list(struct radv_amdgpu_winsys *ws,
else
cs = (struct radv_amdgpu_cs*)cs_array[i];
+ if (!cs->num_buffers)
+ continue;
+
+ if (unique_bo_count == 0) {
+ memcpy(handles, cs->handles, cs->num_buffers * sizeof(amdgpu_bo_handle));
+ memcpy(priorities, cs->priorities, cs->num_buffers * sizeof(uint8_t));
+ unique_bo_count = cs->num_buffers;
+ continue;
+ }
+ int unique_bo_so_far = unique_bo_count;
for (unsigned j = 0; j < cs->num_buffers; ++j) {
bool found = false;
- for (unsigned k = 0; k < unique_bo_count; ++k) {
+ for (unsigned k = 0; k < unique_bo_so_far; ++k) {
if (handles[k] == cs->handles[j]) {
found = true;
priorities[k] = MAX2(priorities[k],