author     Marek Olšák <[email protected]>    2018-04-01 18:32:54 -0400
committer  Marek Olšák <[email protected]>    2018-04-05 15:34:58 -0400
commit     eced536ed6ab44e183561138b28dff56119a8609 (patch)
tree       6b1a6251c8b36c19b670a0d22a4465907bc2ed58 /src/gallium/drivers/radeon
parent     72e9e98076d0ee0281aa3982602a6e85cd14bf2b (diff)
radeonsi: rename query definitions R600_ -> SI_
Acked-by: Timothy Arceri <[email protected]>
Diffstat (limited to 'src/gallium/drivers/radeon')
-rw-r--r--  src/gallium/drivers/radeon/r600_gpu_load.c     |  42
-rw-r--r--  src/gallium/drivers/radeon/r600_perfcounter.c  |  64
-rw-r--r--  src/gallium/drivers/radeon/r600_query.c        | 386
-rw-r--r--  src/gallium/drivers/radeon/r600_query.h        | 170
-rw-r--r--  src/gallium/drivers/radeon/r600_texture.c      |   2
5 files changed, 332 insertions, 332 deletions
diff --git a/src/gallium/drivers/radeon/r600_gpu_load.c b/src/gallium/drivers/radeon/r600_gpu_load.c
index 8828626e092..660ac1d0725 100644
--- a/src/gallium/drivers/radeon/r600_gpu_load.c
+++ b/src/gallium/drivers/radeon/r600_gpu_load.c
@@ -220,47 +220,47 @@ static unsigned busy_index_from_type(struct si_screen *sscreen,
unsigned type)
{
switch (type) {
- case R600_QUERY_GPU_LOAD:
+ case SI_QUERY_GPU_LOAD:
return BUSY_INDEX(sscreen, gpu);
- case R600_QUERY_GPU_SHADERS_BUSY:
+ case SI_QUERY_GPU_SHADERS_BUSY:
return BUSY_INDEX(sscreen, spi);
- case R600_QUERY_GPU_TA_BUSY:
+ case SI_QUERY_GPU_TA_BUSY:
return BUSY_INDEX(sscreen, ta);
- case R600_QUERY_GPU_GDS_BUSY:
+ case SI_QUERY_GPU_GDS_BUSY:
return BUSY_INDEX(sscreen, gds);
- case R600_QUERY_GPU_VGT_BUSY:
+ case SI_QUERY_GPU_VGT_BUSY:
return BUSY_INDEX(sscreen, vgt);
- case R600_QUERY_GPU_IA_BUSY:
+ case SI_QUERY_GPU_IA_BUSY:
return BUSY_INDEX(sscreen, ia);
- case R600_QUERY_GPU_SX_BUSY:
+ case SI_QUERY_GPU_SX_BUSY:
return BUSY_INDEX(sscreen, sx);
- case R600_QUERY_GPU_WD_BUSY:
+ case SI_QUERY_GPU_WD_BUSY:
return BUSY_INDEX(sscreen, wd);
- case R600_QUERY_GPU_BCI_BUSY:
+ case SI_QUERY_GPU_BCI_BUSY:
return BUSY_INDEX(sscreen, bci);
- case R600_QUERY_GPU_SC_BUSY:
+ case SI_QUERY_GPU_SC_BUSY:
return BUSY_INDEX(sscreen, sc);
- case R600_QUERY_GPU_PA_BUSY:
+ case SI_QUERY_GPU_PA_BUSY:
return BUSY_INDEX(sscreen, pa);
- case R600_QUERY_GPU_DB_BUSY:
+ case SI_QUERY_GPU_DB_BUSY:
return BUSY_INDEX(sscreen, db);
- case R600_QUERY_GPU_CP_BUSY:
+ case SI_QUERY_GPU_CP_BUSY:
return BUSY_INDEX(sscreen, cp);
- case R600_QUERY_GPU_CB_BUSY:
+ case SI_QUERY_GPU_CB_BUSY:
return BUSY_INDEX(sscreen, cb);
- case R600_QUERY_GPU_SDMA_BUSY:
+ case SI_QUERY_GPU_SDMA_BUSY:
return BUSY_INDEX(sscreen, sdma);
- case R600_QUERY_GPU_PFP_BUSY:
+ case SI_QUERY_GPU_PFP_BUSY:
return BUSY_INDEX(sscreen, pfp);
- case R600_QUERY_GPU_MEQ_BUSY:
+ case SI_QUERY_GPU_MEQ_BUSY:
return BUSY_INDEX(sscreen, meq);
- case R600_QUERY_GPU_ME_BUSY:
+ case SI_QUERY_GPU_ME_BUSY:
return BUSY_INDEX(sscreen, me);
- case R600_QUERY_GPU_SURF_SYNC_BUSY:
+ case SI_QUERY_GPU_SURF_SYNC_BUSY:
return BUSY_INDEX(sscreen, surf_sync);
- case R600_QUERY_GPU_CP_DMA_BUSY:
+ case SI_QUERY_GPU_CP_DMA_BUSY:
return BUSY_INDEX(sscreen, cp_dma);
- case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
+ case SI_QUERY_GPU_SCRATCH_RAM_BUSY:
return BUSY_INDEX(sscreen, scratch_ram);
default:
unreachable("invalid query type");
diff --git a/src/gallium/drivers/radeon/r600_perfcounter.c b/src/gallium/drivers/radeon/r600_perfcounter.c
index 29b3c97d360..c5b55cdcc3b 100644
--- a/src/gallium/drivers/radeon/r600_perfcounter.c
+++ b/src/gallium/drivers/radeon/r600_perfcounter.c
@@ -28,7 +28,7 @@
#include "amd/common/sid.h"
/* Max counters per HW block */
-#define R600_QUERY_MAX_COUNTERS 16
+#define SI_QUERY_MAX_COUNTERS 16
static struct si_perfcounter_block *
lookup_counter(struct si_perfcounters *pc, unsigned index,
@@ -76,7 +76,7 @@ struct si_pc_group {
int se;
int instance;
unsigned num_counters;
- unsigned selectors[R600_QUERY_MAX_COUNTERS];
+ unsigned selectors[SI_QUERY_MAX_COUNTERS];
};
struct si_pc_counter {
@@ -166,7 +166,7 @@ static void si_pc_query_emit_stop(struct si_context *sctx,
unsigned se = group->se >= 0 ? group->se : 0;
unsigned se_end = se + 1;
- if ((block->flags & R600_PC_BLOCK_SE) && (group->se < 0))
+ if ((block->flags & SI_PC_BLOCK_SE) && (group->se < 0))
se_end = sctx->screen->info.max_se;
do {
@@ -247,13 +247,13 @@ static struct si_pc_group *get_group_state(struct si_screen *screen,
group->block = block;
group->sub_gid = sub_gid;
- if (block->flags & R600_PC_BLOCK_SHADER) {
+ if (block->flags & SI_PC_BLOCK_SHADER) {
unsigned sub_gids = block->num_instances;
unsigned shader_id;
unsigned shaders;
unsigned query_shaders;
- if (block->flags & R600_PC_BLOCK_SE_GROUPS)
+ if (block->flags & SI_PC_BLOCK_SE_GROUPS)
sub_gids = sub_gids * screen->info.max_se;
shader_id = sub_gid / sub_gids;
sub_gid = sub_gid % sub_gids;
@@ -269,20 +269,20 @@ static struct si_pc_group *get_group_state(struct si_screen *screen,
query->shaders = shaders;
}
- if (block->flags & R600_PC_BLOCK_SHADER_WINDOWED && !query->shaders) {
+ if (block->flags & SI_PC_BLOCK_SHADER_WINDOWED && !query->shaders) {
// A non-zero value in query->shaders ensures that the shader
// masking is reset unless the user explicitly requests one.
query->shaders = SI_PC_SHADERS_WINDOWING;
}
- if (block->flags & R600_PC_BLOCK_SE_GROUPS) {
+ if (block->flags & SI_PC_BLOCK_SE_GROUPS) {
group->se = sub_gid / block->num_instances;
sub_gid = sub_gid % block->num_instances;
} else {
group->se = -1;
}
- if (block->flags & R600_PC_BLOCK_INSTANCE_GROUPS) {
+ if (block->flags & SI_PC_BLOCK_INSTANCE_GROUPS) {
group->instance = sub_gid;
} else {
group->instance = -1;
@@ -323,10 +323,10 @@ struct pipe_query *si_create_batch_query(struct pipe_context *ctx,
for (i = 0; i < num_queries; ++i) {
unsigned sub_gid;
- if (query_types[i] < R600_QUERY_FIRST_PERFCOUNTER)
+ if (query_types[i] < SI_QUERY_FIRST_PERFCOUNTER)
goto error;
- block = lookup_counter(pc, query_types[i] - R600_QUERY_FIRST_PERFCOUNTER,
+ block = lookup_counter(pc, query_types[i] - SI_QUERY_FIRST_PERFCOUNTER,
&base_gid, &sub_index);
if (!block)
goto error;
@@ -358,7 +358,7 @@ struct pipe_query *si_create_batch_query(struct pipe_context *ctx,
unsigned read_dw;
unsigned instances = 1;
- if ((block->flags & R600_PC_BLOCK_SE) && group->se < 0)
+ if ((block->flags & SI_PC_BLOCK_SE) && group->se < 0)
instances = screen->info.max_se;
if (group->instance < 0)
instances *= block->num_instances;
@@ -383,7 +383,7 @@ struct pipe_query *si_create_batch_query(struct pipe_context *ctx,
struct si_pc_counter *counter = &query->counters[i];
struct si_perfcounter_block *block;
- block = lookup_counter(pc, query_types[i] - R600_QUERY_FIRST_PERFCOUNTER,
+ block = lookup_counter(pc, query_types[i] - SI_QUERY_FIRST_PERFCOUNTER,
&base_gid, &sub_index);
sub_gid = sub_index / block->num_selectors;
@@ -401,7 +401,7 @@ struct pipe_query *si_create_batch_query(struct pipe_context *ctx,
counter->stride = group->num_counters;
counter->qwords = 1;
- if ((block->flags & R600_PC_BLOCK_SE) && group->se < 0)
+ if ((block->flags & SI_PC_BLOCK_SE) && group->se < 0)
counter->qwords = screen->info.max_se;
if (group->instance < 0)
counter->qwords *= block->num_instances;
@@ -426,25 +426,25 @@ static bool si_init_block_names(struct si_screen *screen,
char *groupname;
char *p;
- if (block->flags & R600_PC_BLOCK_INSTANCE_GROUPS)
+ if (block->flags & SI_PC_BLOCK_INSTANCE_GROUPS)
groups_instance = block->num_instances;
- if (block->flags & R600_PC_BLOCK_SE_GROUPS)
+ if (block->flags & SI_PC_BLOCK_SE_GROUPS)
groups_se = screen->info.max_se;
- if (block->flags & R600_PC_BLOCK_SHADER)
+ if (block->flags & SI_PC_BLOCK_SHADER)
groups_shader = screen->perfcounters->num_shader_types;
namelen = strlen(block->basename);
block->group_name_stride = namelen + 1;
- if (block->flags & R600_PC_BLOCK_SHADER)
+ if (block->flags & SI_PC_BLOCK_SHADER)
block->group_name_stride += 3;
- if (block->flags & R600_PC_BLOCK_SE_GROUPS) {
+ if (block->flags & SI_PC_BLOCK_SE_GROUPS) {
assert(groups_se <= 10);
block->group_name_stride += 1;
- if (block->flags & R600_PC_BLOCK_INSTANCE_GROUPS)
+ if (block->flags & SI_PC_BLOCK_INSTANCE_GROUPS)
block->group_name_stride += 1;
}
- if (block->flags & R600_PC_BLOCK_INSTANCE_GROUPS) {
+ if (block->flags & SI_PC_BLOCK_INSTANCE_GROUPS) {
assert(groups_instance <= 100);
block->group_name_stride += 2;
}
@@ -462,18 +462,18 @@ static bool si_init_block_names(struct si_screen *screen,
strcpy(groupname, block->basename);
p = groupname + namelen;
- if (block->flags & R600_PC_BLOCK_SHADER) {
+ if (block->flags & SI_PC_BLOCK_SHADER) {
strcpy(p, shader_suffix);
p += shaderlen;
}
- if (block->flags & R600_PC_BLOCK_SE_GROUPS) {
+ if (block->flags & SI_PC_BLOCK_SE_GROUPS) {
p += sprintf(p, "%d", j);
- if (block->flags & R600_PC_BLOCK_INSTANCE_GROUPS)
+ if (block->flags & SI_PC_BLOCK_INSTANCE_GROUPS)
*p++ = '_';
}
- if (block->flags & R600_PC_BLOCK_INSTANCE_GROUPS)
+ if (block->flags & SI_PC_BLOCK_INSTANCE_GROUPS)
p += sprintf(p, "%d", k);
groupname += block->group_name_stride;
@@ -532,7 +532,7 @@ int si_get_perfcounter_info(struct si_screen *screen,
return 0;
}
info->name = block->selector_names + sub * block->selector_name_stride;
- info->query_type = R600_QUERY_FIRST_PERFCOUNTER + index;
+ info->query_type = SI_QUERY_FIRST_PERFCOUNTER + index;
info->max_value.u64 = 0;
info->type = PIPE_DRIVER_QUERY_TYPE_UINT64;
info->result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_AVERAGE;
@@ -597,7 +597,7 @@ void si_perfcounters_add_block(struct si_screen *sscreen,
{
struct si_perfcounter_block *block = &pc->blocks[pc->num_blocks];
- assert(counters <= R600_QUERY_MAX_COUNTERS);
+ assert(counters <= SI_QUERY_MAX_COUNTERS);
block->basename = name;
block->flags = flags;
@@ -606,20 +606,20 @@ void si_perfcounters_add_block(struct si_screen *sscreen,
block->num_instances = MAX2(instances, 1);
block->data = data;
- if (pc->separate_se && (block->flags & R600_PC_BLOCK_SE))
- block->flags |= R600_PC_BLOCK_SE_GROUPS;
+ if (pc->separate_se && (block->flags & SI_PC_BLOCK_SE))
+ block->flags |= SI_PC_BLOCK_SE_GROUPS;
if (pc->separate_instance && block->num_instances > 1)
- block->flags |= R600_PC_BLOCK_INSTANCE_GROUPS;
+ block->flags |= SI_PC_BLOCK_INSTANCE_GROUPS;
- if (block->flags & R600_PC_BLOCK_INSTANCE_GROUPS) {
+ if (block->flags & SI_PC_BLOCK_INSTANCE_GROUPS) {
block->num_groups = block->num_instances;
} else {
block->num_groups = 1;
}
- if (block->flags & R600_PC_BLOCK_SE_GROUPS)
+ if (block->flags & SI_PC_BLOCK_SE_GROUPS)
block->num_groups *= sscreen->info.max_se;
- if (block->flags & R600_PC_BLOCK_SHADER)
+ if (block->flags & SI_PC_BLOCK_SHADER)
block->num_groups *= pc->num_shader_types;
++pc->num_blocks;
diff --git a/src/gallium/drivers/radeon/r600_query.c b/src/gallium/drivers/radeon/r600_query.c
index 1f8f6f39cbf..a2553d17b1d 100644
--- a/src/gallium/drivers/radeon/r600_query.c
+++ b/src/gallium/drivers/radeon/r600_query.c
@@ -69,26 +69,26 @@ static void si_query_sw_destroy(struct si_screen *sscreen,
static enum radeon_value_id winsys_id_from_type(unsigned type)
{
switch (type) {
- case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
- case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
- case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
- case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
- case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
- case R600_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
- case R600_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
- case R600_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
- case R600_QUERY_GFX_BO_LIST_SIZE: return RADEON_GFX_BO_LIST_COUNTER;
- case R600_QUERY_GFX_IB_SIZE: return RADEON_GFX_IB_SIZE_COUNTER;
- case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
- case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
- case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: return RADEON_NUM_VRAM_CPU_PAGE_FAULTS;
- case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
- case R600_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
- case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
- case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
- case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
- case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
- case R600_QUERY_CS_THREAD_BUSY: return RADEON_CS_THREAD_TIME;
+ case SI_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
+ case SI_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
+ case SI_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
+ case SI_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
+ case SI_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
+ case SI_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
+ case SI_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
+ case SI_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
+ case SI_QUERY_GFX_BO_LIST_SIZE: return RADEON_GFX_BO_LIST_COUNTER;
+ case SI_QUERY_GFX_IB_SIZE: return RADEON_GFX_IB_SIZE_COUNTER;
+ case SI_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
+ case SI_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
+ case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: return RADEON_NUM_VRAM_CPU_PAGE_FAULTS;
+ case SI_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
+ case SI_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
+ case SI_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
+ case SI_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
+ case SI_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
+ case SI_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
+ case SI_QUERY_CS_THREAD_BUSY: return RADEON_CS_THREAD_TIME;
default: unreachable("query type does not correspond to winsys id");
}
}
@@ -103,146 +103,146 @@ static bool si_query_sw_begin(struct si_context *sctx,
case PIPE_QUERY_TIMESTAMP_DISJOINT:
case PIPE_QUERY_GPU_FINISHED:
break;
- case R600_QUERY_DRAW_CALLS:
+ case SI_QUERY_DRAW_CALLS:
query->begin_result = sctx->b.num_draw_calls;
break;
- case R600_QUERY_DECOMPRESS_CALLS:
+ case SI_QUERY_DECOMPRESS_CALLS:
query->begin_result = sctx->b.num_decompress_calls;
break;
- case R600_QUERY_MRT_DRAW_CALLS:
+ case SI_QUERY_MRT_DRAW_CALLS:
query->begin_result = sctx->b.num_mrt_draw_calls;
break;
- case R600_QUERY_PRIM_RESTART_CALLS:
+ case SI_QUERY_PRIM_RESTART_CALLS:
query->begin_result = sctx->b.num_prim_restart_calls;
break;
- case R600_QUERY_SPILL_DRAW_CALLS:
+ case SI_QUERY_SPILL_DRAW_CALLS:
query->begin_result = sctx->b.num_spill_draw_calls;
break;
- case R600_QUERY_COMPUTE_CALLS:
+ case SI_QUERY_COMPUTE_CALLS:
query->begin_result = sctx->b.num_compute_calls;
break;
- case R600_QUERY_SPILL_COMPUTE_CALLS:
+ case SI_QUERY_SPILL_COMPUTE_CALLS:
query->begin_result = sctx->b.num_spill_compute_calls;
break;
- case R600_QUERY_DMA_CALLS:
+ case SI_QUERY_DMA_CALLS:
query->begin_result = sctx->b.num_dma_calls;
break;
- case R600_QUERY_CP_DMA_CALLS:
+ case SI_QUERY_CP_DMA_CALLS:
query->begin_result = sctx->b.num_cp_dma_calls;
break;
- case R600_QUERY_NUM_VS_FLUSHES:
+ case SI_QUERY_NUM_VS_FLUSHES:
query->begin_result = sctx->b.num_vs_flushes;
break;
- case R600_QUERY_NUM_PS_FLUSHES:
+ case SI_QUERY_NUM_PS_FLUSHES:
query->begin_result = sctx->b.num_ps_flushes;
break;
- case R600_QUERY_NUM_CS_FLUSHES:
+ case SI_QUERY_NUM_CS_FLUSHES:
query->begin_result = sctx->b.num_cs_flushes;
break;
- case R600_QUERY_NUM_CB_CACHE_FLUSHES:
+ case SI_QUERY_NUM_CB_CACHE_FLUSHES:
query->begin_result = sctx->b.num_cb_cache_flushes;
break;
- case R600_QUERY_NUM_DB_CACHE_FLUSHES:
+ case SI_QUERY_NUM_DB_CACHE_FLUSHES:
query->begin_result = sctx->b.num_db_cache_flushes;
break;
- case R600_QUERY_NUM_L2_INVALIDATES:
+ case SI_QUERY_NUM_L2_INVALIDATES:
query->begin_result = sctx->b.num_L2_invalidates;
break;
- case R600_QUERY_NUM_L2_WRITEBACKS:
+ case SI_QUERY_NUM_L2_WRITEBACKS:
query->begin_result = sctx->b.num_L2_writebacks;
break;
- case R600_QUERY_NUM_RESIDENT_HANDLES:
+ case SI_QUERY_NUM_RESIDENT_HANDLES:
query->begin_result = sctx->b.num_resident_handles;
break;
- case R600_QUERY_TC_OFFLOADED_SLOTS:
+ case SI_QUERY_TC_OFFLOADED_SLOTS:
query->begin_result = sctx->b.tc ? sctx->b.tc->num_offloaded_slots : 0;
break;
- case R600_QUERY_TC_DIRECT_SLOTS:
+ case SI_QUERY_TC_DIRECT_SLOTS:
query->begin_result = sctx->b.tc ? sctx->b.tc->num_direct_slots : 0;
break;
- case R600_QUERY_TC_NUM_SYNCS:
+ case SI_QUERY_TC_NUM_SYNCS:
query->begin_result = sctx->b.tc ? sctx->b.tc->num_syncs : 0;
break;
- case R600_QUERY_REQUESTED_VRAM:
- case R600_QUERY_REQUESTED_GTT:
- case R600_QUERY_MAPPED_VRAM:
- case R600_QUERY_MAPPED_GTT:
- case R600_QUERY_VRAM_USAGE:
- case R600_QUERY_VRAM_VIS_USAGE:
- case R600_QUERY_GTT_USAGE:
- case R600_QUERY_GPU_TEMPERATURE:
- case R600_QUERY_CURRENT_GPU_SCLK:
- case R600_QUERY_CURRENT_GPU_MCLK:
- case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
- case R600_QUERY_NUM_MAPPED_BUFFERS:
+ case SI_QUERY_REQUESTED_VRAM:
+ case SI_QUERY_REQUESTED_GTT:
+ case SI_QUERY_MAPPED_VRAM:
+ case SI_QUERY_MAPPED_GTT:
+ case SI_QUERY_VRAM_USAGE:
+ case SI_QUERY_VRAM_VIS_USAGE:
+ case SI_QUERY_GTT_USAGE:
+ case SI_QUERY_GPU_TEMPERATURE:
+ case SI_QUERY_CURRENT_GPU_SCLK:
+ case SI_QUERY_CURRENT_GPU_MCLK:
+ case SI_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
+ case SI_QUERY_NUM_MAPPED_BUFFERS:
query->begin_result = 0;
break;
- case R600_QUERY_BUFFER_WAIT_TIME:
- case R600_QUERY_GFX_IB_SIZE:
- case R600_QUERY_NUM_GFX_IBS:
- case R600_QUERY_NUM_SDMA_IBS:
- case R600_QUERY_NUM_BYTES_MOVED:
- case R600_QUERY_NUM_EVICTIONS:
- case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
+ case SI_QUERY_BUFFER_WAIT_TIME:
+ case SI_QUERY_GFX_IB_SIZE:
+ case SI_QUERY_NUM_GFX_IBS:
+ case SI_QUERY_NUM_SDMA_IBS:
+ case SI_QUERY_NUM_BYTES_MOVED:
+ case SI_QUERY_NUM_EVICTIONS:
+ case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
query->begin_result = sctx->b.ws->query_value(sctx->b.ws, ws_id);
break;
}
- case R600_QUERY_GFX_BO_LIST_SIZE:
+ case SI_QUERY_GFX_BO_LIST_SIZE:
ws_id = winsys_id_from_type(query->b.type);
query->begin_result = sctx->b.ws->query_value(sctx->b.ws, ws_id);
query->begin_time = sctx->b.ws->query_value(sctx->b.ws,
RADEON_NUM_GFX_IBS);
break;
- case R600_QUERY_CS_THREAD_BUSY:
+ case SI_QUERY_CS_THREAD_BUSY:
ws_id = winsys_id_from_type(query->b.type);
query->begin_result = sctx->b.ws->query_value(sctx->b.ws, ws_id);
query->begin_time = os_time_get_nano();
break;
- case R600_QUERY_GALLIUM_THREAD_BUSY:
+ case SI_QUERY_GALLIUM_THREAD_BUSY:
query->begin_result =
sctx->b.tc ? util_queue_get_thread_time_nano(&sctx->b.tc->queue, 0) : 0;
query->begin_time = os_time_get_nano();
break;
- case R600_QUERY_GPU_LOAD:
- case R600_QUERY_GPU_SHADERS_BUSY:
- case R600_QUERY_GPU_TA_BUSY:
- case R600_QUERY_GPU_GDS_BUSY:
- case R600_QUERY_GPU_VGT_BUSY:
- case R600_QUERY_GPU_IA_BUSY:
- case R600_QUERY_GPU_SX_BUSY:
- case R600_QUERY_GPU_WD_BUSY:
- case R600_QUERY_GPU_BCI_BUSY:
- case R600_QUERY_GPU_SC_BUSY:
- case R600_QUERY_GPU_PA_BUSY:
- case R600_QUERY_GPU_DB_BUSY:
- case R600_QUERY_GPU_CP_BUSY:
- case R600_QUERY_GPU_CB_BUSY:
- case R600_QUERY_GPU_SDMA_BUSY:
- case R600_QUERY_GPU_PFP_BUSY:
- case R600_QUERY_GPU_MEQ_BUSY:
- case R600_QUERY_GPU_ME_BUSY:
- case R600_QUERY_GPU_SURF_SYNC_BUSY:
- case R600_QUERY_GPU_CP_DMA_BUSY:
- case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
+ case SI_QUERY_GPU_LOAD:
+ case SI_QUERY_GPU_SHADERS_BUSY:
+ case SI_QUERY_GPU_TA_BUSY:
+ case SI_QUERY_GPU_GDS_BUSY:
+ case SI_QUERY_GPU_VGT_BUSY:
+ case SI_QUERY_GPU_IA_BUSY:
+ case SI_QUERY_GPU_SX_BUSY:
+ case SI_QUERY_GPU_WD_BUSY:
+ case SI_QUERY_GPU_BCI_BUSY:
+ case SI_QUERY_GPU_SC_BUSY:
+ case SI_QUERY_GPU_PA_BUSY:
+ case SI_QUERY_GPU_DB_BUSY:
+ case SI_QUERY_GPU_CP_BUSY:
+ case SI_QUERY_GPU_CB_BUSY:
+ case SI_QUERY_GPU_SDMA_BUSY:
+ case SI_QUERY_GPU_PFP_BUSY:
+ case SI_QUERY_GPU_MEQ_BUSY:
+ case SI_QUERY_GPU_ME_BUSY:
+ case SI_QUERY_GPU_SURF_SYNC_BUSY:
+ case SI_QUERY_GPU_CP_DMA_BUSY:
+ case SI_QUERY_GPU_SCRATCH_RAM_BUSY:
query->begin_result = si_begin_counter(sctx->screen,
query->b.type);
break;
- case R600_QUERY_NUM_COMPILATIONS:
+ case SI_QUERY_NUM_COMPILATIONS:
query->begin_result = p_atomic_read(&sctx->screen->num_compilations);
break;
- case R600_QUERY_NUM_SHADERS_CREATED:
+ case SI_QUERY_NUM_SHADERS_CREATED:
query->begin_result = p_atomic_read(&sctx->screen->num_shaders_created);
break;
- case R600_QUERY_NUM_SHADER_CACHE_HITS:
+ case SI_QUERY_NUM_SHADER_CACHE_HITS:
query->begin_result =
p_atomic_read(&sctx->screen->num_shader_cache_hits);
break;
- case R600_QUERY_GPIN_ASIC_ID:
- case R600_QUERY_GPIN_NUM_SIMD:
- case R600_QUERY_GPIN_NUM_RB:
- case R600_QUERY_GPIN_NUM_SPI:
- case R600_QUERY_GPIN_NUM_SE:
+ case SI_QUERY_GPIN_ASIC_ID:
+ case SI_QUERY_GPIN_NUM_SIMD:
+ case SI_QUERY_GPIN_NUM_RB:
+ case SI_QUERY_GPIN_NUM_SPI:
+ case SI_QUERY_GPIN_NUM_SE:
break;
default:
unreachable("si_query_sw_begin: bad query type");
@@ -263,148 +263,148 @@ static bool si_query_sw_end(struct si_context *sctx,
case PIPE_QUERY_GPU_FINISHED:
sctx->b.b.flush(&sctx->b.b, &query->fence, PIPE_FLUSH_DEFERRED);
break;
- case R600_QUERY_DRAW_CALLS:
+ case SI_QUERY_DRAW_CALLS:
query->end_result = sctx->b.num_draw_calls;
break;
- case R600_QUERY_DECOMPRESS_CALLS:
+ case SI_QUERY_DECOMPRESS_CALLS:
query->end_result = sctx->b.num_decompress_calls;
break;
- case R600_QUERY_MRT_DRAW_CALLS:
+ case SI_QUERY_MRT_DRAW_CALLS:
query->end_result = sctx->b.num_mrt_draw_calls;
break;
- case R600_QUERY_PRIM_RESTART_CALLS:
+ case SI_QUERY_PRIM_RESTART_CALLS:
query->end_result = sctx->b.num_prim_restart_calls;
break;
- case R600_QUERY_SPILL_DRAW_CALLS:
+ case SI_QUERY_SPILL_DRAW_CALLS:
query->end_result = sctx->b.num_spill_draw_calls;
break;
- case R600_QUERY_COMPUTE_CALLS:
+ case SI_QUERY_COMPUTE_CALLS:
query->end_result = sctx->b.num_compute_calls;
break;
- case R600_QUERY_SPILL_COMPUTE_CALLS:
+ case SI_QUERY_SPILL_COMPUTE_CALLS:
query->end_result = sctx->b.num_spill_compute_calls;
break;
- case R600_QUERY_DMA_CALLS:
+ case SI_QUERY_DMA_CALLS:
query->end_result = sctx->b.num_dma_calls;
break;
- case R600_QUERY_CP_DMA_CALLS:
+ case SI_QUERY_CP_DMA_CALLS:
query->end_result = sctx->b.num_cp_dma_calls;
break;
- case R600_QUERY_NUM_VS_FLUSHES:
+ case SI_QUERY_NUM_VS_FLUSHES:
query->end_result = sctx->b.num_vs_flushes;
break;
- case R600_QUERY_NUM_PS_FLUSHES:
+ case SI_QUERY_NUM_PS_FLUSHES:
query->end_result = sctx->b.num_ps_flushes;
break;
- case R600_QUERY_NUM_CS_FLUSHES:
+ case SI_QUERY_NUM_CS_FLUSHES:
query->end_result = sctx->b.num_cs_flushes;
break;
- case R600_QUERY_NUM_CB_CACHE_FLUSHES:
+ case SI_QUERY_NUM_CB_CACHE_FLUSHES:
query->end_result = sctx->b.num_cb_cache_flushes;
break;
- case R600_QUERY_NUM_DB_CACHE_FLUSHES:
+ case SI_QUERY_NUM_DB_CACHE_FLUSHES:
query->end_result = sctx->b.num_db_cache_flushes;
break;
- case R600_QUERY_NUM_L2_INVALIDATES:
+ case SI_QUERY_NUM_L2_INVALIDATES:
query->end_result = sctx->b.num_L2_invalidates;
break;
- case R600_QUERY_NUM_L2_WRITEBACKS:
+ case SI_QUERY_NUM_L2_WRITEBACKS:
query->end_result = sctx->b.num_L2_writebacks;
break;
- case R600_QUERY_NUM_RESIDENT_HANDLES:
+ case SI_QUERY_NUM_RESIDENT_HANDLES:
query->end_result = sctx->b.num_resident_handles;
break;
- case R600_QUERY_TC_OFFLOADED_SLOTS:
+ case SI_QUERY_TC_OFFLOADED_SLOTS:
query->end_result = sctx->b.tc ? sctx->b.tc->num_offloaded_slots : 0;
break;
- case R600_QUERY_TC_DIRECT_SLOTS:
+ case SI_QUERY_TC_DIRECT_SLOTS:
query->end_result = sctx->b.tc ? sctx->b.tc->num_direct_slots : 0;
break;
- case R600_QUERY_TC_NUM_SYNCS:
+ case SI_QUERY_TC_NUM_SYNCS:
query->end_result = sctx->b.tc ? sctx->b.tc->num_syncs : 0;
break;
- case R600_QUERY_REQUESTED_VRAM:
- case R600_QUERY_REQUESTED_GTT:
- case R600_QUERY_MAPPED_VRAM:
- case R600_QUERY_MAPPED_GTT:
- case R600_QUERY_VRAM_USAGE:
- case R600_QUERY_VRAM_VIS_USAGE:
- case R600_QUERY_GTT_USAGE:
- case R600_QUERY_GPU_TEMPERATURE:
- case R600_QUERY_CURRENT_GPU_SCLK:
- case R600_QUERY_CURRENT_GPU_MCLK:
- case R600_QUERY_BUFFER_WAIT_TIME:
- case R600_QUERY_GFX_IB_SIZE:
- case R600_QUERY_NUM_MAPPED_BUFFERS:
- case R600_QUERY_NUM_GFX_IBS:
- case R600_QUERY_NUM_SDMA_IBS:
- case R600_QUERY_NUM_BYTES_MOVED:
- case R600_QUERY_NUM_EVICTIONS:
- case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
+ case SI_QUERY_REQUESTED_VRAM:
+ case SI_QUERY_REQUESTED_GTT:
+ case SI_QUERY_MAPPED_VRAM:
+ case SI_QUERY_MAPPED_GTT:
+ case SI_QUERY_VRAM_USAGE:
+ case SI_QUERY_VRAM_VIS_USAGE:
+ case SI_QUERY_GTT_USAGE:
+ case SI_QUERY_GPU_TEMPERATURE:
+ case SI_QUERY_CURRENT_GPU_SCLK:
+ case SI_QUERY_CURRENT_GPU_MCLK:
+ case SI_QUERY_BUFFER_WAIT_TIME:
+ case SI_QUERY_GFX_IB_SIZE:
+ case SI_QUERY_NUM_MAPPED_BUFFERS:
+ case SI_QUERY_NUM_GFX_IBS:
+ case SI_QUERY_NUM_SDMA_IBS:
+ case SI_QUERY_NUM_BYTES_MOVED:
+ case SI_QUERY_NUM_EVICTIONS:
+ case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
query->end_result = sctx->b.ws->query_value(sctx->b.ws, ws_id);
break;
}
- case R600_QUERY_GFX_BO_LIST_SIZE:
+ case SI_QUERY_GFX_BO_LIST_SIZE:
ws_id = winsys_id_from_type(query->b.type);
query->end_result = sctx->b.ws->query_value(sctx->b.ws, ws_id);
query->end_time = sctx->b.ws->query_value(sctx->b.ws,
RADEON_NUM_GFX_IBS);
break;
- case R600_QUERY_CS_THREAD_BUSY:
+ case SI_QUERY_CS_THREAD_BUSY:
ws_id = winsys_id_from_type(query->b.type);
query->end_result = sctx->b.ws->query_value(sctx->b.ws, ws_id);
query->end_time = os_time_get_nano();
break;
- case R600_QUERY_GALLIUM_THREAD_BUSY:
+ case SI_QUERY_GALLIUM_THREAD_BUSY:
query->end_result =
sctx->b.tc ? util_queue_get_thread_time_nano(&sctx->b.tc->queue, 0) : 0;
query->end_time = os_time_get_nano();
break;
- case R600_QUERY_GPU_LOAD:
- case R600_QUERY_GPU_SHADERS_BUSY:
- case R600_QUERY_GPU_TA_BUSY:
- case R600_QUERY_GPU_GDS_BUSY:
- case R600_QUERY_GPU_VGT_BUSY:
- case R600_QUERY_GPU_IA_BUSY:
- case R600_QUERY_GPU_SX_BUSY:
- case R600_QUERY_GPU_WD_BUSY:
- case R600_QUERY_GPU_BCI_BUSY:
- case R600_QUERY_GPU_SC_BUSY:
- case R600_QUERY_GPU_PA_BUSY:
- case R600_QUERY_GPU_DB_BUSY:
- case R600_QUERY_GPU_CP_BUSY:
- case R600_QUERY_GPU_CB_BUSY:
- case R600_QUERY_GPU_SDMA_BUSY:
- case R600_QUERY_GPU_PFP_BUSY:
- case R600_QUERY_GPU_MEQ_BUSY:
- case R600_QUERY_GPU_ME_BUSY:
- case R600_QUERY_GPU_SURF_SYNC_BUSY:
- case R600_QUERY_GPU_CP_DMA_BUSY:
- case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
+ case SI_QUERY_GPU_LOAD:
+ case SI_QUERY_GPU_SHADERS_BUSY:
+ case SI_QUERY_GPU_TA_BUSY:
+ case SI_QUERY_GPU_GDS_BUSY:
+ case SI_QUERY_GPU_VGT_BUSY:
+ case SI_QUERY_GPU_IA_BUSY:
+ case SI_QUERY_GPU_SX_BUSY:
+ case SI_QUERY_GPU_WD_BUSY:
+ case SI_QUERY_GPU_BCI_BUSY:
+ case SI_QUERY_GPU_SC_BUSY:
+ case SI_QUERY_GPU_PA_BUSY:
+ case SI_QUERY_GPU_DB_BUSY:
+ case SI_QUERY_GPU_CP_BUSY:
+ case SI_QUERY_GPU_CB_BUSY:
+ case SI_QUERY_GPU_SDMA_BUSY:
+ case SI_QUERY_GPU_PFP_BUSY:
+ case SI_QUERY_GPU_MEQ_BUSY:
+ case SI_QUERY_GPU_ME_BUSY:
+ case SI_QUERY_GPU_SURF_SYNC_BUSY:
+ case SI_QUERY_GPU_CP_DMA_BUSY:
+ case SI_QUERY_GPU_SCRATCH_RAM_BUSY:
query->end_result = si_end_counter(sctx->screen,
query->b.type,
query->begin_result);
query->begin_result = 0;
break;
- case R600_QUERY_NUM_COMPILATIONS:
+ case SI_QUERY_NUM_COMPILATIONS:
query->end_result = p_atomic_read(&sctx->screen->num_compilations);
break;
- case R600_QUERY_NUM_SHADERS_CREATED:
+ case SI_QUERY_NUM_SHADERS_CREATED:
query->end_result = p_atomic_read(&sctx->screen->num_shaders_created);
break;
- case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
+ case SI_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
query->end_result = sctx->b.last_tex_ps_draw_ratio;
break;
- case R600_QUERY_NUM_SHADER_CACHE_HITS:
+ case SI_QUERY_NUM_SHADER_CACHE_HITS:
query->end_result =
p_atomic_read(&sctx->screen->num_shader_cache_hits);
break;
- case R600_QUERY_GPIN_ASIC_ID:
- case R600_QUERY_GPIN_NUM_SIMD:
- case R600_QUERY_GPIN_NUM_RB:
- case R600_QUERY_GPIN_NUM_SPI:
- case R600_QUERY_GPIN_NUM_SE:
+ case SI_QUERY_GPIN_ASIC_ID:
+ case SI_QUERY_GPIN_NUM_SIMD:
+ case SI_QUERY_GPIN_NUM_RB:
+ case SI_QUERY_GPIN_NUM_SPI:
+ case SI_QUERY_GPIN_NUM_SE:
break;
default:
unreachable("si_query_sw_end: bad query type");
@@ -436,28 +436,28 @@ static bool si_query_sw_get_result(struct si_context *sctx,
return result->b;
}
- case R600_QUERY_GFX_BO_LIST_SIZE:
+ case SI_QUERY_GFX_BO_LIST_SIZE:
result->u64 = (query->end_result - query->begin_result) /
(query->end_time - query->begin_time);
return true;
- case R600_QUERY_CS_THREAD_BUSY:
- case R600_QUERY_GALLIUM_THREAD_BUSY:
+ case SI_QUERY_CS_THREAD_BUSY:
+ case SI_QUERY_GALLIUM_THREAD_BUSY:
result->u64 = (query->end_result - query->begin_result) * 100 /
(query->end_time - query->begin_time);
return true;
- case R600_QUERY_GPIN_ASIC_ID:
+ case SI_QUERY_GPIN_ASIC_ID:
result->u32 = 0;
return true;
- case R600_QUERY_GPIN_NUM_SIMD:
+ case SI_QUERY_GPIN_NUM_SIMD:
result->u32 = sctx->screen->info.num_good_compute_units;
return true;
- case R600_QUERY_GPIN_NUM_RB:
+ case SI_QUERY_GPIN_NUM_RB:
result->u32 = sctx->screen->info.num_render_backends;
return true;
- case R600_QUERY_GPIN_NUM_SPI:
+ case SI_QUERY_GPIN_NUM_SPI:
result->u32 = 1; /* all supported chips have one SPI per SE */
return true;
- case R600_QUERY_GPIN_NUM_SE:
+ case SI_QUERY_GPIN_NUM_SE:
result->u32 = sctx->screen->info.max_se;
return true;
}
@@ -465,12 +465,12 @@ static bool si_query_sw_get_result(struct si_context *sctx,
result->u64 = query->end_result - query->begin_result;
switch (query->b.type) {
- case R600_QUERY_BUFFER_WAIT_TIME:
- case R600_QUERY_GPU_TEMPERATURE:
+ case SI_QUERY_BUFFER_WAIT_TIME:
+ case SI_QUERY_GPU_TEMPERATURE:
result->u64 /= 1000;
break;
- case R600_QUERY_CURRENT_GPU_SCLK:
- case R600_QUERY_CURRENT_GPU_MCLK:
+ case SI_QUERY_CURRENT_GPU_SCLK:
+ case SI_QUERY_CURRENT_GPU_MCLK:
result->u64 *= 1000000;
break;
}
@@ -656,7 +656,7 @@ static struct pipe_query *si_query_hw_create(struct si_screen *sscreen,
case PIPE_QUERY_TIMESTAMP:
query->result_size = 16;
query->num_cs_dw_end = 8 + si_gfx_write_fence_dwords(sscreen);
- query->flags = R600_QUERY_HW_FLAG_NO_START;
+ query->flags = SI_QUERY_HW_FLAG_NO_START;
break;
case PIPE_QUERY_PRIMITIVES_EMITTED:
case PIPE_QUERY_PRIMITIVES_GENERATED:
@@ -899,7 +899,7 @@ static void si_query_hw_emit_stop(struct si_context *sctx,
return; // previous buffer allocation failure
/* The queries which need begin already called this in begin_query. */
- if (query->flags & R600_QUERY_HW_FLAG_NO_START)
+ if (query->flags & SI_QUERY_HW_FLAG_NO_START)
si_need_gfx_cs_space(sctx);
/* emit end query */
@@ -909,7 +909,7 @@ static void si_query_hw_emit_stop(struct si_context *sctx,
query->buffer.results_end += query->result_size;
- if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
+ if (!(query->flags & SI_QUERY_HW_FLAG_NO_START))
sctx->b.num_cs_dw_queries_suspend -= query->num_cs_dw_end;
si_update_occlusion_query_state(sctx, query->b.type, -1);
@@ -1079,12 +1079,12 @@ bool si_query_hw_begin(struct si_context *sctx,
{
struct si_query_hw *query = (struct si_query_hw *)rquery;
- if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
+ if (query->flags & SI_QUERY_HW_FLAG_NO_START) {
assert(0);
return false;
}
- if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
+ if (!(query->flags & SI_QUERY_HW_FLAG_BEGIN_RESUMES))
si_query_hw_reset_buffers(sctx, query);
r600_resource_reference(&query->workaround_buf, NULL);
@@ -1110,12 +1110,12 @@ bool si_query_hw_end(struct si_context *sctx,
{
struct si_query_hw *query = (struct si_query_hw *)rquery;
- if (query->flags & R600_QUERY_HW_FLAG_NO_START)
+ if (query->flags & SI_QUERY_HW_FLAG_NO_START)
si_query_hw_reset_buffers(sctx, query);
si_query_hw_emit_stop(sctx, query);
- if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
+ if (!(query->flags & SI_QUERY_HW_FLAG_NO_START))
LIST_DELINIT(&query->list);
if (!query->buffer.buf)
@@ -1856,7 +1856,7 @@ void si_resume_queries(struct si_context *sctx)
#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
{ \
.name = name_, \
- .query_type = R600_QUERY_##query_type_, \
+ .query_type = SI_QUERY_##query_type_, \
.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
.group_id = group_id_ \
@@ -1866,7 +1866,7 @@ void si_resume_queries(struct si_context *sctx)
XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)
#define XG(group_, name_, query_type_, type_, result_type_) \
- XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)
+ XFULL(name_, query_type_, type_, result_type_, SI_QUERY_GROUP_##group_)
static struct pipe_driver_query_info si_driver_query_list[] = {
X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
@@ -1990,20 +1990,20 @@ static int si_get_driver_query_info(struct pipe_screen *screen,
*info = si_driver_query_list[index];
switch (info->query_type) {
- case R600_QUERY_REQUESTED_VRAM:
- case R600_QUERY_VRAM_USAGE:
- case R600_QUERY_MAPPED_VRAM:
+ case SI_QUERY_REQUESTED_VRAM:
+ case SI_QUERY_VRAM_USAGE:
+ case SI_QUERY_MAPPED_VRAM:
info->max_value.u64 = sscreen->info.vram_size;
break;
- case R600_QUERY_REQUESTED_GTT:
- case R600_QUERY_GTT_USAGE:
- case R600_QUERY_MAPPED_GTT:
+ case SI_QUERY_REQUESTED_GTT:
+ case SI_QUERY_GTT_USAGE:
+ case SI_QUERY_MAPPED_GTT:
info->max_value.u64 = sscreen->info.gart_size;
break;
- case R600_QUERY_GPU_TEMPERATURE:
+ case SI_QUERY_GPU_TEMPERATURE:
info->max_value.u64 = 125;
break;
- case R600_QUERY_VRAM_VIS_USAGE:
+ case SI_QUERY_VRAM_VIS_USAGE:
info->max_value.u64 = sscreen->info.vram_vis_size;
break;
}
@@ -2029,13 +2029,13 @@ static int si_get_driver_query_group_info(struct pipe_screen *screen,
num_pc_groups = sscreen->perfcounters->num_groups;
if (!info)
- return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;
+ return num_pc_groups + SI_NUM_SW_QUERY_GROUPS;
if (index < num_pc_groups)
return si_get_perfcounter_group_info(sscreen, index, info);
index -= num_pc_groups;
- if (index >= R600_NUM_SW_QUERY_GROUPS)
+ if (index >= SI_NUM_SW_QUERY_GROUPS)
return 0;
info->name = "GPIN";
diff --git a/src/gallium/drivers/radeon/r600_query.h b/src/gallium/drivers/radeon/r600_query.h
index d862bed0624..3f60208e2f8 100644
--- a/src/gallium/drivers/radeon/r600_query.h
+++ b/src/gallium/drivers/radeon/r600_query.h
@@ -22,8 +22,8 @@
* SOFTWARE.
*/
-#ifndef R600_QUERY_H
-#define R600_QUERY_H
+#ifndef SI_QUERY_H
+#define SI_QUERY_H
#include "util/u_threaded_context.h"
@@ -38,84 +38,84 @@ struct si_query_hw;
struct r600_resource;
enum {
- R600_QUERY_DRAW_CALLS = PIPE_QUERY_DRIVER_SPECIFIC,
- R600_QUERY_DECOMPRESS_CALLS,
- R600_QUERY_MRT_DRAW_CALLS,
- R600_QUERY_PRIM_RESTART_CALLS,
- R600_QUERY_SPILL_DRAW_CALLS,
- R600_QUERY_COMPUTE_CALLS,
- R600_QUERY_SPILL_COMPUTE_CALLS,
- R600_QUERY_DMA_CALLS,
- R600_QUERY_CP_DMA_CALLS,
- R600_QUERY_NUM_VS_FLUSHES,
- R600_QUERY_NUM_PS_FLUSHES,
- R600_QUERY_NUM_CS_FLUSHES,
- R600_QUERY_NUM_CB_CACHE_FLUSHES,
- R600_QUERY_NUM_DB_CACHE_FLUSHES,
- R600_QUERY_NUM_L2_INVALIDATES,
- R600_QUERY_NUM_L2_WRITEBACKS,
- R600_QUERY_NUM_RESIDENT_HANDLES,
- R600_QUERY_TC_OFFLOADED_SLOTS,
- R600_QUERY_TC_DIRECT_SLOTS,
- R600_QUERY_TC_NUM_SYNCS,
- R600_QUERY_CS_THREAD_BUSY,
- R600_QUERY_GALLIUM_THREAD_BUSY,
- R600_QUERY_REQUESTED_VRAM,
- R600_QUERY_REQUESTED_GTT,
- R600_QUERY_MAPPED_VRAM,
- R600_QUERY_MAPPED_GTT,
- R600_QUERY_BUFFER_WAIT_TIME,
- R600_QUERY_NUM_MAPPED_BUFFERS,
- R600_QUERY_NUM_GFX_IBS,
- R600_QUERY_NUM_SDMA_IBS,
- R600_QUERY_GFX_BO_LIST_SIZE,
- R600_QUERY_GFX_IB_SIZE,
- R600_QUERY_NUM_BYTES_MOVED,
- R600_QUERY_NUM_EVICTIONS,
- R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS,
- R600_QUERY_VRAM_USAGE,
- R600_QUERY_VRAM_VIS_USAGE,
- R600_QUERY_GTT_USAGE,
- R600_QUERY_GPU_TEMPERATURE,
- R600_QUERY_CURRENT_GPU_SCLK,
- R600_QUERY_CURRENT_GPU_MCLK,
- R600_QUERY_GPU_LOAD,
- R600_QUERY_GPU_SHADERS_BUSY,
- R600_QUERY_GPU_TA_BUSY,
- R600_QUERY_GPU_GDS_BUSY,
- R600_QUERY_GPU_VGT_BUSY,
- R600_QUERY_GPU_IA_BUSY,
- R600_QUERY_GPU_SX_BUSY,
- R600_QUERY_GPU_WD_BUSY,
- R600_QUERY_GPU_BCI_BUSY,
- R600_QUERY_GPU_SC_BUSY,
- R600_QUERY_GPU_PA_BUSY,
- R600_QUERY_GPU_DB_BUSY,
- R600_QUERY_GPU_CP_BUSY,
- R600_QUERY_GPU_CB_BUSY,
- R600_QUERY_GPU_SDMA_BUSY,
- R600_QUERY_GPU_PFP_BUSY,
- R600_QUERY_GPU_MEQ_BUSY,
- R600_QUERY_GPU_ME_BUSY,
- R600_QUERY_GPU_SURF_SYNC_BUSY,
- R600_QUERY_GPU_CP_DMA_BUSY,
- R600_QUERY_GPU_SCRATCH_RAM_BUSY,
- R600_QUERY_NUM_COMPILATIONS,
- R600_QUERY_NUM_SHADERS_CREATED,
- R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO,
- R600_QUERY_NUM_SHADER_CACHE_HITS,
- R600_QUERY_GPIN_ASIC_ID,
- R600_QUERY_GPIN_NUM_SIMD,
- R600_QUERY_GPIN_NUM_RB,
- R600_QUERY_GPIN_NUM_SPI,
- R600_QUERY_GPIN_NUM_SE,
-
- R600_QUERY_FIRST_PERFCOUNTER = PIPE_QUERY_DRIVER_SPECIFIC + 100,
+ SI_QUERY_DRAW_CALLS = PIPE_QUERY_DRIVER_SPECIFIC,
+ SI_QUERY_DECOMPRESS_CALLS,
+ SI_QUERY_MRT_DRAW_CALLS,
+ SI_QUERY_PRIM_RESTART_CALLS,
+ SI_QUERY_SPILL_DRAW_CALLS,
+ SI_QUERY_COMPUTE_CALLS,
+ SI_QUERY_SPILL_COMPUTE_CALLS,
+ SI_QUERY_DMA_CALLS,
+ SI_QUERY_CP_DMA_CALLS,
+ SI_QUERY_NUM_VS_FLUSHES,
+ SI_QUERY_NUM_PS_FLUSHES,
+ SI_QUERY_NUM_CS_FLUSHES,
+ SI_QUERY_NUM_CB_CACHE_FLUSHES,
+ SI_QUERY_NUM_DB_CACHE_FLUSHES,
+ SI_QUERY_NUM_L2_INVALIDATES,
+ SI_QUERY_NUM_L2_WRITEBACKS,
+ SI_QUERY_NUM_RESIDENT_HANDLES,
+ SI_QUERY_TC_OFFLOADED_SLOTS,
+ SI_QUERY_TC_DIRECT_SLOTS,
+ SI_QUERY_TC_NUM_SYNCS,
+ SI_QUERY_CS_THREAD_BUSY,
+ SI_QUERY_GALLIUM_THREAD_BUSY,
+ SI_QUERY_REQUESTED_VRAM,
+ SI_QUERY_REQUESTED_GTT,
+ SI_QUERY_MAPPED_VRAM,
+ SI_QUERY_MAPPED_GTT,
+ SI_QUERY_BUFFER_WAIT_TIME,
+ SI_QUERY_NUM_MAPPED_BUFFERS,
+ SI_QUERY_NUM_GFX_IBS,
+ SI_QUERY_NUM_SDMA_IBS,
+ SI_QUERY_GFX_BO_LIST_SIZE,
+ SI_QUERY_GFX_IB_SIZE,
+ SI_QUERY_NUM_BYTES_MOVED,
+ SI_QUERY_NUM_EVICTIONS,
+ SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS,
+ SI_QUERY_VRAM_USAGE,
+ SI_QUERY_VRAM_VIS_USAGE,
+ SI_QUERY_GTT_USAGE,
+ SI_QUERY_GPU_TEMPERATURE,
+ SI_QUERY_CURRENT_GPU_SCLK,
+ SI_QUERY_CURRENT_GPU_MCLK,
+ SI_QUERY_GPU_LOAD,
+ SI_QUERY_GPU_SHADERS_BUSY,
+ SI_QUERY_GPU_TA_BUSY,
+ SI_QUERY_GPU_GDS_BUSY,
+ SI_QUERY_GPU_VGT_BUSY,
+ SI_QUERY_GPU_IA_BUSY,
+ SI_QUERY_GPU_SX_BUSY,
+ SI_QUERY_GPU_WD_BUSY,
+ SI_QUERY_GPU_BCI_BUSY,
+ SI_QUERY_GPU_SC_BUSY,
+ SI_QUERY_GPU_PA_BUSY,
+ SI_QUERY_GPU_DB_BUSY,
+ SI_QUERY_GPU_CP_BUSY,
+ SI_QUERY_GPU_CB_BUSY,
+ SI_QUERY_GPU_SDMA_BUSY,
+ SI_QUERY_GPU_PFP_BUSY,
+ SI_QUERY_GPU_MEQ_BUSY,
+ SI_QUERY_GPU_ME_BUSY,
+ SI_QUERY_GPU_SURF_SYNC_BUSY,
+ SI_QUERY_GPU_CP_DMA_BUSY,
+ SI_QUERY_GPU_SCRATCH_RAM_BUSY,
+ SI_QUERY_NUM_COMPILATIONS,
+ SI_QUERY_NUM_SHADERS_CREATED,
+ SI_QUERY_BACK_BUFFER_PS_DRAW_RATIO,
+ SI_QUERY_NUM_SHADER_CACHE_HITS,
+ SI_QUERY_GPIN_ASIC_ID,
+ SI_QUERY_GPIN_NUM_SIMD,
+ SI_QUERY_GPIN_NUM_RB,
+ SI_QUERY_GPIN_NUM_SPI,
+ SI_QUERY_GPIN_NUM_SE,
+
+ SI_QUERY_FIRST_PERFCOUNTER = PIPE_QUERY_DRIVER_SPECIFIC + 100,
};
enum {
- R600_QUERY_GROUP_GPIN = 0,
- R600_NUM_SW_QUERY_GROUPS
+ SI_QUERY_GROUP_GPIN = 0,
+ SI_NUM_SW_QUERY_GROUPS
};
struct si_query_ops {
@@ -142,10 +142,10 @@ struct si_query {
};
enum {
- R600_QUERY_HW_FLAG_NO_START = (1 << 0),
+ SI_QUERY_HW_FLAG_NO_START = (1 << 0),
/* gap */
/* whether begin_query doesn't clear the result */
- R600_QUERY_HW_FLAG_BEGIN_RESUMES = (1 << 2),
+ SI_QUERY_HW_FLAG_BEGIN_RESUMES = (1 << 2),
};
struct si_query_hw_ops {
@@ -213,20 +213,20 @@ bool si_query_hw_get_result(struct si_context *sctx,
/* Performance counters */
enum {
/* This block is part of the shader engine */
- R600_PC_BLOCK_SE = (1 << 0),
+ SI_PC_BLOCK_SE = (1 << 0),
/* Expose per-instance groups instead of summing all instances (within
* an SE). */
- R600_PC_BLOCK_INSTANCE_GROUPS = (1 << 1),
+ SI_PC_BLOCK_INSTANCE_GROUPS = (1 << 1),
/* Expose per-SE groups instead of summing instances across SEs. */
- R600_PC_BLOCK_SE_GROUPS = (1 << 2),
+ SI_PC_BLOCK_SE_GROUPS = (1 << 2),
/* Shader block */
- R600_PC_BLOCK_SHADER = (1 << 3),
+ SI_PC_BLOCK_SHADER = (1 << 3),
/* Non-shader block with perfcounters windowed by shaders. */
- R600_PC_BLOCK_SHADER_WINDOWED = (1 << 4),
+ SI_PC_BLOCK_SHADER_WINDOWED = (1 << 4),
};
/* Describes a hardware block with performance counters. Multiple instances of
@@ -315,4 +315,4 @@ struct si_qbo_state {
struct pipe_shader_buffer saved_ssbo[3];
};
-#endif /* R600_QUERY_H */
+#endif /* SI_QUERY_H */
diff --git a/src/gallium/drivers/radeon/r600_texture.c b/src/gallium/drivers/radeon/r600_texture.c
index 163847eefcd..af226877e8f 100644
--- a/src/gallium/drivers/radeon/r600_texture.c
+++ b/src/gallium/drivers/radeon/r600_texture.c
@@ -2191,7 +2191,7 @@ vi_create_resuming_pipestats_query(struct si_context *sctx)
struct si_query_hw *query = (struct si_query_hw*)
sctx->b.b.create_query(&sctx->b.b, PIPE_QUERY_PIPELINE_STATISTICS, 0);
- query->flags |= R600_QUERY_HW_FLAG_BEGIN_RESUMES;
+ query->flags |= SI_QUERY_HW_FLAG_BEGIN_RESUMES;
return (struct pipe_query*)query;
}