path: root/src/mesa/drivers/dri/i965/brw_performance_query.c
author     Mark Janes <[email protected]>   2019-06-28 18:16:07 -0700
committer  Mark Janes <[email protected]>   2019-08-07 21:33:56 -0700
commit     9f84efb452f810494e8ba78a68b56444e343e5f6 (patch)
tree       4b47af7c22a763863b46cc4948e93e2d73ffba74 /src/mesa/drivers/dri/i965/brw_performance_query.c
parent     73eccdc4a5d04196f5d437b285dabd10043b01f4 (diff)
intel/perf: move get_query_data into gen_perf
This refactor moves several helper functions for get_query_data as well:

 - accumulate_oa_reports
 - read_gt_frequency
 - get_pipeline_stats_data
 - get_oa_counter_data

Functions which are no longer referenced in brw_performance_query.c have
been removed.

Reviewed-by: Kenneth Graunke <[email protected]>
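The net effect of the move is that the per-query-kind handling removed from brw_get_perf_query_data() below now sits behind a single gen_perf entry point. As a rough illustration, the following sketch reassembles that dispatch from the removed hunks; the exact parameter types, the perf_ctx->devinfo field, and the gen_perf-side names of the moved static helpers are assumptions inferred from this diff rather than the verbatim upstream implementation.

/* Sketch (not verbatim upstream code): roughly what the consolidated
 * gen_perf_get_query_data() entry point looks like after this move,
 * reassembled from the hunks removed below. The signature, the
 * perf_ctx->devinfo field, and the gen_perf-side names of the moved
 * static helpers are assumptions inferred from this diff.
 */
void
gen_perf_get_query_data(struct gen_perf_context *perf_ctx,
                        struct gen_perf_query_object *query,
                        int data_size,
                        unsigned *data,
                        unsigned *bytes_written)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   int written = 0;

   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      if (!query->oa.results_accumulated) {
         /* The helpers moved by this commit now run here instead of in
          * the i965 driver.
          */
         read_gt_frequency(perf_ctx, query);
         read_slice_unslice_frequencies(perf_ctx, query);
         accumulate_oa_reports(perf_ctx, query);
         assert(query->oa.results_accumulated);

         perf_cfg->vtbl.bo_unmap(query->oa.bo);
         query->oa.map = NULL;
      }
      if (query->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA) {
         written = get_oa_counter_data(perf_ctx, query, data_size,
                                       (uint8_t *)data);
      } else {
         const struct gen_device_info *devinfo = perf_ctx->devinfo;

         written = gen_perf_query_result_write_mdapi((uint8_t *)data, data_size,
                                                     devinfo, &query->oa.result,
                                                     query->oa.gt_frequency[0],
                                                     query->oa.gt_frequency[1]);
      }
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      written = get_pipeline_stats_data(perf_ctx, query, data_size,
                                        (uint8_t *)data);
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   if (bytes_written)
      *bytes_written = written;
}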
Diffstat (limited to 'src/mesa/drivers/dri/i965/brw_performance_query.c')
-rw-r--r--   src/mesa/drivers/dri/i965/brw_performance_query.c   389
1 file changed, 2 insertions(+), 387 deletions(-)
diff --git a/src/mesa/drivers/dri/i965/brw_performance_query.c b/src/mesa/drivers/dri/i965/brw_performance_query.c
index 45298e79112..dc4a833f563 100644
--- a/src/mesa/drivers/dri/i965/brw_performance_query.c
+++ b/src/mesa/drivers/dri/i965/brw_performance_query.c
@@ -244,246 +244,12 @@ brw_get_perf_counter_info(struct gl_context *ctx,
*raw_max = counter->raw_max;
}
-/**
- * Remove a query from the global list of unaccumulated queries, either
- * after successfully accumulating the OA reports associated with the
- * query in accumulate_oa_reports() or when discarding unwanted query
- * results.
- */
-static void
-drop_from_unaccumulated_query_list(struct brw_context *brw,
- struct gen_perf_query_object *obj)
-{
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
- for (int i = 0; i < perf_ctx->unaccumulated_elements; i++) {
- if (perf_ctx->unaccumulated[i] == obj) {
- int last_elt = --perf_ctx->unaccumulated_elements;
-
- if (i == last_elt)
- perf_ctx->unaccumulated[i] = NULL;
- else {
- perf_ctx->unaccumulated[i] =
- perf_ctx->unaccumulated[last_elt];
- }
-
- break;
- }
- }
-
- /* Drop our samples_head reference so that associated periodic
- * sample data buffers can potentially be reaped if they aren't
- * referenced by any other queries...
- */
-
- struct oa_sample_buf *buf =
- exec_node_data(struct oa_sample_buf, obj->oa.samples_head, link);
-
- assert(buf->refcount > 0);
- buf->refcount--;
-
- obj->oa.samples_head = NULL;
-
- gen_perf_reap_old_sample_buffers(&brw->perf_ctx);
-}
-
-/* In general, if we see anything spurious while accumulating results,
- * we don't try to continue accumulating the current query hoping for
- * the best; we scrap anything outstanding, and then hope for the best
- * with new queries.
- */
-static void
-discard_all_queries(struct brw_context *brw)
-{
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
- while (perf_ctx->unaccumulated_elements) {
- struct gen_perf_query_object *obj = perf_ctx->unaccumulated[0];
-
- obj->oa.results_accumulated = true;
- drop_from_unaccumulated_query_list(brw, perf_ctx->unaccumulated[0]);
-
- gen_perf_dec_n_users(perf_ctx);
- }
-}
-
enum OaReadStatus {
OA_READ_STATUS_ERROR,
OA_READ_STATUS_UNFINISHED,
OA_READ_STATUS_FINISHED,
};
-/**
- * Accumulate raw OA counter values based on deltas between pairs of
- * OA reports.
- *
- * Accumulation starts from the first report captured via
- * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
- * last MI_RPC report requested by brw_end_perf_query(). Between these
- * two reports there may also be some number of periodically sampled OA
- * reports collected via the i915 perf interface - depending on the
- * duration of the query.
- *
- * These periodic snapshots help to ensure we handle counter overflow
- * correctly by being frequent enough to ensure we don't miss multiple
- * overflows of a counter between snapshots. For Gen8+ the i915 perf
- * snapshots provide the extra context-switch reports that let us
- * subtract out the progress of counters associated with other
- * contexts running on the system.
- */
-static void
-accumulate_oa_reports(struct brw_context *brw,
- struct brw_perf_query_object *brw_query)
-{
- const struct gen_device_info *devinfo = &brw->screen->devinfo;
- struct gen_perf_query_object *obj = brw_query->query;
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
- uint32_t *start;
- uint32_t *last;
- uint32_t *end;
- struct exec_node *first_samples_node;
- bool in_ctx = true;
- int out_duration = 0;
-
- assert(brw_query->base.Ready);
- assert(obj->oa.map != NULL);
-
- start = last = obj->oa.map;
- end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
-
- if (start[0] != obj->oa.begin_report_id) {
- DBG("Spurious start report id=%"PRIu32"\n", start[0]);
- goto error;
- }
- if (end[0] != (obj->oa.begin_report_id + 1)) {
- DBG("Spurious end report id=%"PRIu32"\n", end[0]);
- goto error;
- }
-
- /* See if we have any periodic reports to accumulate too... */
-
- /* N.B. The oa.samples_head was set when the query began and
- * pointed to the tail of the perf_ctx->sample_buffers list at
- * the time the query started. Since the buffer existed before the
- * first MI_REPORT_PERF_COUNT command was emitted we therefore know
- * that no data in this particular node's buffer can possibly be
- * associated with the query - so skip ahead one...
- */
- first_samples_node = obj->oa.samples_head->next;
-
- foreach_list_typed_from(struct oa_sample_buf, buf, link,
- &brw->perf_ctx.sample_buffers,
- first_samples_node)
- {
- int offset = 0;
-
- while (offset < buf->len) {
- const struct drm_i915_perf_record_header *header =
- (const struct drm_i915_perf_record_header *)(buf->buf + offset);
-
- assert(header->size != 0);
- assert(header->size <= buf->len);
-
- offset += header->size;
-
- switch (header->type) {
- case DRM_I915_PERF_RECORD_SAMPLE: {
- uint32_t *report = (uint32_t *)(header + 1);
- bool add = true;
-
- /* Ignore reports that come before the start marker.
- * (Note: takes care to allow overflow of 32bit timestamps)
- */
- if (gen_device_info_timebase_scale(devinfo,
- report[1] - start[1]) > 5000000000) {
- continue;
- }
-
- /* Ignore reports that come after the end marker.
- * (Note: takes care to allow overflow of 32bit timestamps)
- */
- if (gen_device_info_timebase_scale(devinfo,
- report[1] - end[1]) <= 5000000000) {
- goto end;
- }
-
- /* For Gen8+ since the counters continue while other
- * contexts are running we need to discount any unrelated
- * deltas. The hardware automatically generates a report
- * on context switch which gives us a new reference point
- * to continue adding deltas from.
- *
- * For Haswell we can rely on the HW to stop the progress
- * of OA counters while any other context is active.
- */
- if (devinfo->gen >= 8) {
- if (in_ctx && report[2] != obj->oa.result.hw_id) {
- DBG("i915 perf: Switch AWAY (observed by ID change)\n");
- in_ctx = false;
- out_duration = 0;
- } else if (in_ctx == false && report[2] == obj->oa.result.hw_id) {
- DBG("i915 perf: Switch TO\n");
- in_ctx = true;
-
- /* From experimentation in IGT, we found that the OA unit
- * might label some report as "idle" (using an invalid
- * context ID), right after a report for a given context.
- * Deltas generated by those reports actually belong to the
- * previous context, even though they're not labelled as
- * such.
- *
- * We didn't *really* Switch AWAY in the case that we e.g.
- * saw a single periodic report while idle...
- */
- if (out_duration >= 1)
- add = false;
- } else if (in_ctx) {
- assert(report[2] == obj->oa.result.hw_id);
- DBG("i915 perf: Continuation IN\n");
- } else {
- assert(report[2] != obj->oa.result.hw_id);
- DBG("i915 perf: Continuation OUT\n");
- add = false;
- out_duration++;
- }
- }
-
- if (add) {
- gen_perf_query_result_accumulate(&obj->oa.result, obj->queryinfo,
- last, report);
- }
-
- last = report;
-
- break;
- }
-
- case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
- DBG("i915 perf: OA error: all reports lost\n");
- goto error;
- case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
- DBG("i915 perf: OA report lost\n");
- break;
- }
- }
- }
-
-end:
-
- gen_perf_query_result_accumulate(&obj->oa.result, obj->queryinfo,
- last, end);
-
- DBG("Marking %d accumulated - results gathered\n", brw_query->base.Id);
-
- obj->oa.results_accumulated = true;
- drop_from_unaccumulated_query_list(brw, obj);
- gen_perf_dec_n_users(perf_ctx);
-
- return;
-
-error:
-
- discard_all_queries(brw);
-}
-
/******************************************************************************/
static void
@@ -574,123 +340,6 @@ brw_is_perf_query_ready(struct gl_context *ctx,
return gen_perf_is_query_ready(&brw->perf_ctx, obj, &brw->batch);
}
-static void
-read_slice_unslice_frequencies(struct brw_context *brw,
- struct gen_perf_query_object *obj)
-{
- const struct gen_device_info *devinfo = &brw->screen->devinfo;
- uint32_t *begin_report = obj->oa.map, *end_report = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
-
- gen_perf_query_result_read_frequencies(&obj->oa.result,
- devinfo, begin_report, end_report);
-}
-
-static void
-read_gt_frequency(struct brw_context *brw,
- struct gen_perf_query_object *obj)
-{
- const struct gen_device_info *devinfo = &brw->screen->devinfo;
- uint32_t start = *((uint32_t *)(obj->oa.map + MI_FREQ_START_OFFSET_BYTES)),
- end = *((uint32_t *)(obj->oa.map + MI_FREQ_END_OFFSET_BYTES));
-
- switch (devinfo->gen) {
- case 7:
- case 8:
- obj->oa.gt_frequency[0] = GET_FIELD(start, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
- obj->oa.gt_frequency[1] = GET_FIELD(end, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
- break;
- case 9:
- case 10:
- case 11:
- obj->oa.gt_frequency[0] = GET_FIELD(start, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
- obj->oa.gt_frequency[1] = GET_FIELD(end, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
- break;
- default:
- unreachable("unexpected gen");
- }
-
- /* Put the numbers into Hz. */
- obj->oa.gt_frequency[0] *= 1000000ULL;
- obj->oa.gt_frequency[1] *= 1000000ULL;
-}
-
-static int
-get_oa_counter_data(struct brw_context *brw,
- struct gen_perf_query_object *obj,
- size_t data_size,
- uint8_t *data)
-{
- struct gen_perf_config *perf = brw->perf_ctx.perf;
- const struct gen_perf_query_info *query = obj->queryinfo;
- int n_counters = query->n_counters;
- int written = 0;
-
- for (int i = 0; i < n_counters; i++) {
- const struct gen_perf_query_counter *counter = &query->counters[i];
- uint64_t *out_uint64;
- float *out_float;
- size_t counter_size = gen_perf_query_counter_get_size(counter);
-
- if (counter_size) {
- switch (counter->data_type) {
- case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
- out_uint64 = (uint64_t *)(data + counter->offset);
- *out_uint64 =
- counter->oa_counter_read_uint64(perf, query,
- obj->oa.result.accumulator);
- break;
- case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
- out_float = (float *)(data + counter->offset);
- *out_float =
- counter->oa_counter_read_float(perf, query,
- obj->oa.result.accumulator);
- break;
- default:
- /* So far we aren't using uint32, double or bool32... */
- unreachable("unexpected counter data type");
- }
- written = counter->offset + counter_size;
- }
- }
-
- return written;
-}
-
-static int
-get_pipeline_stats_data(struct brw_context *brw,
- struct gen_perf_query_object *obj,
- size_t data_size,
- uint8_t *data)
-
-{
- const struct gen_perf_query_info *query = obj->queryinfo;
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
- struct gen_perf_config *perf_cfg = perf_ctx->perf;
- int n_counters = obj->queryinfo->n_counters;
- uint8_t *p = data;
-
- uint64_t *start = perf_cfg->vtbl.bo_map(perf_ctx->ctx, obj->pipeline_stats.bo, MAP_READ);
- uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));
-
- for (int i = 0; i < n_counters; i++) {
- const struct gen_perf_query_counter *counter = &query->counters[i];
- uint64_t value = end[i] - start[i];
-
- if (counter->pipeline_stat.numerator !=
- counter->pipeline_stat.denominator) {
- value *= counter->pipeline_stat.numerator;
- value /= counter->pipeline_stat.denominator;
- }
-
- *((uint64_t *)p) = value;
- p += 8;
- }
-
- perf_cfg->vtbl.bo_unmap(obj->pipeline_stats.bo);
-
- return p - data;
-}
-
/**
* Driver hook for glGetPerfQueryDataINTEL().
*/
@@ -704,7 +353,6 @@ brw_get_perf_query_data(struct gl_context *ctx,
struct brw_context *brw = brw_context(ctx);
struct brw_perf_query_object *brw_query = brw_perf_query(o);
struct gen_perf_query_object *obj = brw_query->query;
- int written = 0;
assert(brw_is_perf_query_ready(ctx, o));
@@ -718,41 +366,8 @@ brw_get_perf_query_data(struct gl_context *ctx,
*/
assert(o->Ready);
- switch (obj->queryinfo->kind) {
- case GEN_PERF_QUERY_TYPE_OA:
- case GEN_PERF_QUERY_TYPE_RAW:
- if (!obj->oa.results_accumulated) {
- read_gt_frequency(brw, obj);
- read_slice_unslice_frequencies(brw, obj);
- accumulate_oa_reports(brw, brw_query);
- assert(obj->oa.results_accumulated);
-
- brw->perf_ctx.perf->vtbl.bo_unmap(obj->oa.bo);
- obj->oa.map = NULL;
- }
- if (obj->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA) {
- written = get_oa_counter_data(brw, obj, data_size, (uint8_t *)data);
- } else {
- const struct gen_device_info *devinfo = &brw->screen->devinfo;
-
- written = gen_perf_query_result_write_mdapi((uint8_t *)data, data_size,
- devinfo, &obj->oa.result,
- obj->oa.gt_frequency[0],
- obj->oa.gt_frequency[1]);
- }
- break;
-
- case GEN_PERF_QUERY_TYPE_PIPELINE:
- written = get_pipeline_stats_data(brw, obj, data_size, (uint8_t *)data);
- break;
-
- default:
- unreachable("Unknown query type");
- break;
- }
-
- if (bytes_written)
- *bytes_written = written;
+ gen_perf_get_query_data(&brw->perf_ctx, obj,
+ data_size, data, bytes_written);
}
static struct gl_perf_query_object *