author     Marek Olšák <[email protected]>   2016-01-19 17:24:57 +0100
committer  Marek Olšák <[email protected]>   2016-01-22 15:02:40 +0100
commit     0d8e4f958f83e0b67f07030c661a30b4e7c19425 (patch)
tree       1d468251f8b1e316b114e9782d97589a84f1e436 /src/gallium
parent     99dfeb01bd5c3e059968e934f3ec88b2fe43e3f4 (diff)
gallium/radeon: rename max_compute_units -> num_good_compute_units
radeon sets this correctly, but not amdgpu
Reviewed-by: Nicolai Hähnle <[email protected]>
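The rename is about meaning rather than behaviour: the field should describe how many compute units are actually enabled ("good"), not a theoretical maximum. In the amdgpu hunk further down, the winsys derives the value from max_se * max_sh_per_se * util_last_bit(cu_bitmap), which can over-count on GPUs with fused-off CUs; this is presumably what the commit message refers to. The following standalone sketch, which is not part of this commit, shows what counting only the enabled CUs from a CU mask could look like; the bitmap values are made up and __builtin_popcount stands in for Mesa's real data and helpers.

/*
 * Hypothetical illustration: count the CUs that are actually enabled
 * instead of assuming every shader array is fully populated.
 */
#include <stdio.h>

#define MAX_SE         4   /* assumed number of shader engines */
#define MAX_SH_PER_SE  1   /* assumed shader arrays per engine */

int main(void)
{
   /* Made-up CU enable mask: 10 of 11 CUs enabled per shader array. */
   unsigned cu_bitmap[MAX_SE][MAX_SH_PER_SE] = {
      { 0x7fe }, { 0x7fe }, { 0x7fe }, { 0x7fe }
   };
   unsigned num_good_compute_units = 0;

   for (int se = 0; se < MAX_SE; se++)
      for (int sh = 0; sh < MAX_SH_PER_SE; sh++)
         num_good_compute_units += __builtin_popcount(cu_bitmap[se][sh]);

   /* Prints 40, whereas max_se * max_sh_per_se * util_last_bit() would give 44. */
   printf("enabled (good) compute units: %u\n", num_good_compute_units);
   return 0;
}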
Diffstat (limited to 'src/gallium')
-rw-r--r--  src/gallium/drivers/radeon/r600_pipe_common.c       | 4
-rw-r--r--  src/gallium/drivers/radeon/radeon_winsys.h          | 2
-rw-r--r--  src/gallium/drivers/radeonsi/si_compute.c           | 4
-rw-r--r--  src/gallium/drivers/radeonsi/si_pipe.c              | 2
-rw-r--r--  src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c       | 6
-rw-r--r--  src/gallium/winsys/radeon/drm/radeon_drm_winsys.c   | 4
6 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/src/gallium/drivers/radeon/r600_pipe_common.c b/src/gallium/drivers/radeon/r600_pipe_common.c
index e926f56023f..4c066c14cd8 100644
--- a/src/gallium/drivers/radeon/r600_pipe_common.c
+++ b/src/gallium/drivers/radeon/r600_pipe_common.c
@@ -705,7 +705,7 @@ static int r600_get_compute_param(struct pipe_screen *screen,
         case PIPE_COMPUTE_CAP_MAX_COMPUTE_UNITS:
                 if (ret) {
                         uint32_t *max_compute_units = ret;
-                        *max_compute_units = rscreen->info.max_compute_units;
+                        *max_compute_units = rscreen->info.num_good_compute_units;
                 }
                 return sizeof(uint32_t);
@@ -973,7 +973,7 @@ bool r600_common_screen_init(struct r600_common_screen *rscreen,
                 printf("gart_size = %i MB\n", (int)(rscreen->info.gart_size >> 20));
                 printf("vram_size = %i MB\n", (int)(rscreen->info.vram_size >> 20));
                 printf("max_sclk = %i\n", rscreen->info.max_sclk);
-                printf("max_compute_units = %i\n", rscreen->info.max_compute_units);
+                printf("num_good_compute_units = %i\n", rscreen->info.num_good_compute_units);
                 printf("max_se = %i\n", rscreen->info.max_se);
                 printf("max_sh_per_se = %i\n", rscreen->info.max_sh_per_se);
                 printf("drm = %i.%i.%i\n", rscreen->info.drm_major,
diff --git a/src/gallium/drivers/radeon/radeon_winsys.h b/src/gallium/drivers/radeon/radeon_winsys.h
index ad304747eab..2e5caa67d10 100644
--- a/src/gallium/drivers/radeon/radeon_winsys.h
+++ b/src/gallium/drivers/radeon/radeon_winsys.h
@@ -251,7 +251,7 @@ struct radeon_info {
         uint64_t gart_size;
         uint64_t vram_size;
         uint32_t max_sclk;
-        uint32_t max_compute_units;
+        uint32_t num_good_compute_units;
         uint32_t max_se;
         uint32_t max_sh_per_se;
diff --git a/src/gallium/drivers/radeonsi/si_compute.c b/src/gallium/drivers/radeonsi/si_compute.c
index 5a08cbfb198..6ef6eeec178 100644
--- a/src/gallium/drivers/radeonsi/si_compute.c
+++ b/src/gallium/drivers/radeonsi/si_compute.c
@@ -61,7 +61,7 @@ static void init_scratch_buffer(struct si_context *sctx, struct si_compute *prog
         /* Compute the scratch buffer size using the maximum number of waves.
          * This way we don't need to recompute it for each kernel launch. */
-        unsigned scratch_waves = 32 * sctx->screen->b.info.max_compute_units;
+        unsigned scratch_waves = 32 * sctx->screen->b.info.num_good_compute_units;
         for (i = 0; i < program->shader.binary.global_symbol_count; i++) {
                 unsigned offset = program->shader.binary.global_symbol_offsets[i];
@@ -402,7 +402,7 @@ static void si_launch_grid(
                 num_waves_for_scratch =
                         MIN2(num_waves_for_scratch,
-                             32 * sctx->screen->b.info.max_compute_units);
+                             32 * sctx->screen->b.info.num_good_compute_units);
                 si_pm4_set_reg(pm4, R_00B860_COMPUTE_TMPRING_SIZE,
                 /* The maximum value for WAVES is 32 * num CU.
                  * If you program this value incorrectly, the GPU will hang if
diff --git a/src/gallium/drivers/radeonsi/si_pipe.c b/src/gallium/drivers/radeonsi/si_pipe.c
index 3e20c3b81fa..0c1ae90f9da 100644
--- a/src/gallium/drivers/radeonsi/si_pipe.c
+++ b/src/gallium/drivers/radeonsi/si_pipe.c
@@ -208,7 +208,7 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen,
          * this for non-cs shaders. Using the wrong value here can result in
          * GPU lockups, but the maximum value seems to always work.
          */
-        sctx->scratch_waves = 32 * sscreen->b.info.max_compute_units;
+        sctx->scratch_waves = 32 * sscreen->b.info.num_good_compute_units;
 #if HAVE_LLVM >= 0x0306
         /* Initialize LLVM TargetMachine */
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c b/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
index 39d3aa4f783..9835024036b 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
@@ -273,10 +273,10 @@ static boolean do_winsys_init(struct amdgpu_winsys *ws)
       for (j = 0; j < ws->info.max_sh_per_se; j++) {
          unsigned max = util_last_bit(ws->amdinfo.cu_bitmap[i][j]);
-         if (ws->info.max_compute_units < max)
-            ws->info.max_compute_units = max;
+         if (ws->info.num_good_compute_units < max)
+            ws->info.num_good_compute_units = max;
       }
-   ws->info.max_compute_units *= ws->info.max_se * ws->info.max_sh_per_se;
+   ws->info.num_good_compute_units *= ws->info.max_se * ws->info.max_sh_per_se;
    memcpy(ws->info.si_tile_mode_array, ws->amdinfo.gb_tile_mode,
           sizeof(ws->amdinfo.gb_tile_mode));
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
index c7e058bf3da..8a1ed3ae08c 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
@@ -419,9 +419,9 @@ static boolean do_winsys_init(struct radeon_drm_winsys *ws)
                         &ws->info.r600_max_pipes);
    /* All GPUs have at least one compute unit */
-   ws->info.max_compute_units = 1;
+   ws->info.num_good_compute_units = 1;
    radeon_get_drm_value(ws->fd, RADEON_INFO_ACTIVE_CU_COUNT, NULL,
-                        &ws->info.max_compute_units);
+                        &ws->info.num_good_compute_units);
    radeon_get_drm_value(ws->fd, RADEON_INFO_MAX_SE, NULL,
                         &ws->info.max_se);
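The radeonsi hunks above cap the scratch wave count at 32 waves per compute unit, so the renamed field directly bounds how much scratch memory the driver has to budget for. A minimal standalone sketch with made-up numbers (the CU count and per-wave scratch size are assumptions, not values taken from Mesa) shows how that cap would translate into a buffer size:

#include <stdio.h>

int main(void)
{
   unsigned num_good_compute_units = 40;                   /* hypothetical GPU */
   unsigned scratch_waves = 32 * num_good_compute_units;   /* same 32-waves-per-CU cap as above */
   unsigned scratch_bytes_per_wave = 8192;                 /* assumed per-wave allocation */

   printf("max scratch waves: %u\n", scratch_waves);
   printf("scratch buffer size: %u KiB\n",
          scratch_waves * scratch_bytes_per_wave / 1024);
   return 0;
}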