author	Chia-I Wu <[email protected]>	2013-05-20 12:13:34 +0800
committer	Chia-I Wu <[email protected]>	2013-05-21 11:47:20 +0800
commit	0d42a9e9414a1c015e1ceced4d773a455e5b39d1 (patch)
tree	14557a6a80f12047f813d2e7d610d9e70733a7bc /src/gallium/drivers/ilo/ilo_3d.c
parent	a04d8574c61f286fd9ec51f667648f73e332462f (diff)
ilo: replace cp hooks by cp owner and flush callback
The problem with cp hooks is that when we switch from the 3D ring to the 2D ring while there are active queries, we end up emitting 3D commands to the 2D ring, because the new-batch hook is called.

This commit introduces the idea of a cp owner. When the cp is flushed, or when another owner takes over, the current owner is notified, giving it a chance to emit whatever commands it needs. With this mechanism, we can resume queries when the 3D pipeline owns the cp and pause them when it loses the cp. Ring switches just work.

As we still need to know when the cp bo is reallocated, a flush callback is added.
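To make the mechanism concrete, below is a minimal sketch of what the owner interface could look like. The real definitions live in ilo_cp.h/ilo_cp.c, which are not part of this diff; only the call sites (ilo_cp_set_owner(), hw3d->owner.release_callback, hw3d->owner_reserve, and the flush callback set up in ilo_3d_create()) appear in the hunks below, so the struct layout and function bodies here are illustrative assumptions, not the driver's actual code.

```c
#include <stdbool.h>
#include <stddef.h>

struct ilo_cp;

struct ilo_cp_owner {
   /* called when the owner is about to lose the cp (flush or owner change) */
   void (*release_callback)(struct ilo_cp *cp, void *data);
   void *release_data;
};

struct ilo_cp {
   const struct ilo_cp_owner *owner;
   int owner_reserve;   /* space kept free so the owner can emit its final commands */

   void (*flush_callback)(struct ilo_cp *cp, void *data);
   void *flush_callback_data;

   /* ring selection, batch bo, command writer, etc. omitted */
};

/* notify and drop the current owner */
static void
ilo_cp_release_owner(struct ilo_cp *cp)
{
   if (cp->owner) {
      const struct ilo_cp_owner *owner = cp->owner;

      cp->owner = NULL;
      cp->owner_reserve = 0;

      owner->release_callback(cp, owner->release_data);
   }
}

/* returns true when ownership actually changed, so the new owner knows to resume its queries */
static bool
ilo_cp_set_owner(struct ilo_cp *cp, const struct ilo_cp_owner *owner, int reserve)
{
   const bool changed = (cp->owner != owner);

   if (changed)
      ilo_cp_release_owner(cp);

   cp->owner = owner;
   cp->owner_reserve = reserve;

   return changed;
}

static void
ilo_cp_flush(struct ilo_cp *cp)
{
   /* give the owner a chance to pause its queries before the batch is submitted */
   ilo_cp_release_owner(cp);

   /* submit the batch and reallocate the cp bo (omitted) */

   /* the flush callback tells e.g. ilo_3d that the cp bo has been reallocated */
   if (cp->flush_callback)
      cp->flush_callback(cp, cp->flush_callback_data);
}
```

This matches how the diff below uses the interface: ilo_3d_own_render_ring() only resumes queries when ilo_cp_set_owner() reports an ownership change, and ilo_3d_release_render_ring() is installed as the release callback in ilo_3d_create() so queries are paused whenever the cp is flushed or taken over.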
Diffstat (limited to 'src/gallium/drivers/ilo/ilo_3d.c')
-rw-r--r--  src/gallium/drivers/ilo/ilo_3d.c | 276
1 file changed, 147 insertions(+), 129 deletions(-)
diff --git a/src/gallium/drivers/ilo/ilo_3d.c b/src/gallium/drivers/ilo/ilo_3d.c
index e03e530f90c..817cf89adfe 100644
--- a/src/gallium/drivers/ilo/ilo_3d.c
+++ b/src/gallium/drivers/ilo/ilo_3d.c
@@ -35,6 +35,135 @@
#include "ilo_state.h"
#include "ilo_3d.h"
+static void
+process_query_for_occlusion_counter(struct ilo_3d *hw3d,
+ struct ilo_query *q)
+{
+ uint64_t *vals, depth_count = 0;
+ int i;
+
+ /* in pairs */
+ assert(q->reg_read % 2 == 0);
+
+ q->bo->map(q->bo, false);
+ vals = q->bo->get_virtual(q->bo);
+ for (i = 1; i < q->reg_read; i += 2)
+ depth_count += vals[i] - vals[i - 1];
+ q->bo->unmap(q->bo);
+
+ /* accumulate so that the query can be resumed if wanted */
+ q->data.u64 += depth_count;
+ q->reg_read = 0;
+}
+
+static uint64_t
+timestamp_to_ns(uint64_t timestamp)
+{
+ /* see ilo_get_timestamp() */
+ return (timestamp & 0xffffffff) * 80;
+}
+
+static void
+process_query_for_timestamp(struct ilo_3d *hw3d, struct ilo_query *q)
+{
+ uint64_t *vals, timestamp;
+
+ assert(q->reg_read == 1);
+
+ q->bo->map(q->bo, false);
+ vals = q->bo->get_virtual(q->bo);
+ timestamp = vals[0];
+ q->bo->unmap(q->bo);
+
+ q->data.u64 = timestamp_to_ns(timestamp);
+ q->reg_read = 0;
+}
+
+static void
+process_query_for_time_elapsed(struct ilo_3d *hw3d, struct ilo_query *q)
+{
+ uint64_t *vals, elapsed = 0;
+ int i;
+
+ /* in pairs */
+ assert(q->reg_read % 2 == 0);
+
+ q->bo->map(q->bo, false);
+ vals = q->bo->get_virtual(q->bo);
+
+ for (i = 1; i < q->reg_read; i += 2)
+ elapsed += vals[i] - vals[i - 1];
+
+ q->bo->unmap(q->bo);
+
+ /* accumulate so that the query can be resumed if wanted */
+ q->data.u64 += timestamp_to_ns(elapsed);
+ q->reg_read = 0;
+}
+
+static void
+ilo_3d_resume_queries(struct ilo_3d *hw3d)
+{
+ struct ilo_query *q;
+
+ /* resume occlusion queries */
+ LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
+ /* accumulate the result if the bo is already full */
+ if (q->reg_read >= q->reg_total)
+ process_query_for_occlusion_counter(hw3d, q);
+
+ ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
+ q->bo, q->reg_read++);
+ }
+
+ /* resume timer queries */
+ LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
+ /* accumulate the result if the bo is already full */
+ if (q->reg_read >= q->reg_total)
+ process_query_for_time_elapsed(hw3d, q);
+
+ ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
+ q->bo, q->reg_read++);
+ }
+}
+
+static void
+ilo_3d_pause_queries(struct ilo_3d *hw3d)
+{
+ struct ilo_query *q;
+
+ /* pause occlusion queries */
+ LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
+ assert(q->reg_read < q->reg_total);
+ ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
+ q->bo, q->reg_read++);
+ }
+
+ /* pause timer queries */
+ LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
+ assert(q->reg_read < q->reg_total);
+ ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
+ q->bo, q->reg_read++);
+ }
+}
+
+static void
+ilo_3d_release_render_ring(struct ilo_cp *cp, void *data)
+{
+ struct ilo_3d *hw3d = data;
+
+ ilo_3d_pause_queries(hw3d);
+}
+
+static void
+ilo_3d_own_render_ring(struct ilo_3d *hw3d)
+{
+ ilo_cp_set_ring(hw3d->cp, ILO_CP_RING_RENDER);
+
+ if (ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve))
+ ilo_3d_resume_queries(hw3d);
+}
+
/**
* Begin a query.
*/
@@ -43,14 +172,15 @@ ilo_3d_begin_query(struct ilo_context *ilo, struct ilo_query *q)
{
struct ilo_3d *hw3d = ilo->hw3d;
- ilo_cp_set_ring(hw3d->cp, ILO_CP_RING_RENDER);
+ ilo_3d_own_render_ring(hw3d);
switch (q->type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
/* reserve some space for pausing the query */
q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
ILO_3D_PIPELINE_WRITE_DEPTH_COUNT, NULL);
- ilo_cp_reserve_for_pre_flush(hw3d->cp, q->reg_cmd_size);
+ hw3d->owner_reserve += q->reg_cmd_size;
+ ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
q->data.u64 = 0;
@@ -69,7 +199,8 @@ ilo_3d_begin_query(struct ilo_context *ilo, struct ilo_query *q)
/* reserve some space for pausing the query */
q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
ILO_3D_PIPELINE_WRITE_TIMESTAMP, NULL);
- ilo_cp_reserve_for_pre_flush(hw3d->cp, q->reg_cmd_size);
+ hw3d->owner_reserve += q->reg_cmd_size;
+ ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
q->data.u64 = 0;
@@ -103,14 +234,15 @@ ilo_3d_end_query(struct ilo_context *ilo, struct ilo_query *q)
{
struct ilo_3d *hw3d = ilo->hw3d;
- ilo_cp_set_ring(hw3d->cp, ILO_CP_RING_RENDER);
+ ilo_3d_own_render_ring(hw3d);
switch (q->type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
list_del(&q->list);
assert(q->reg_read < q->reg_total);
- ilo_cp_reserve_for_pre_flush(hw3d->cp, -q->reg_cmd_size);
+ hw3d->owner_reserve -= q->reg_cmd_size;
+ ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
q->bo, q->reg_read++);
break;
@@ -126,7 +258,8 @@ ilo_3d_end_query(struct ilo_context *ilo, struct ilo_query *q)
list_del(&q->list);
assert(q->reg_read < q->reg_total);
- ilo_cp_reserve_for_pre_flush(hw3d->cp, -q->reg_cmd_size);
+ hw3d->owner_reserve -= q->reg_cmd_size;
+ ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
q->bo, q->reg_read++);
break;
@@ -140,72 +273,6 @@ ilo_3d_end_query(struct ilo_context *ilo, struct ilo_query *q)
}
}
-static void
-process_query_for_occlusion_counter(struct ilo_3d *hw3d,
- struct ilo_query *q)
-{
- uint64_t *vals, depth_count = 0;
- int i;
-
- /* in pairs */
- assert(q->reg_read % 2 == 0);
-
- q->bo->map(q->bo, false);
- vals = q->bo->get_virtual(q->bo);
- for (i = 1; i < q->reg_read; i += 2)
- depth_count += vals[i] - vals[i - 1];
- q->bo->unmap(q->bo);
-
- /* accumulate so that the query can be resumed if wanted */
- q->data.u64 += depth_count;
- q->reg_read = 0;
-}
-
-static uint64_t
-timestamp_to_ns(uint64_t timestamp)
-{
- /* see ilo_get_timestamp() */
- return (timestamp & 0xffffffff) * 80;
-}
-
-static void
-process_query_for_timestamp(struct ilo_3d *hw3d, struct ilo_query *q)
-{
- uint64_t *vals, timestamp;
-
- assert(q->reg_read == 1);
-
- q->bo->map(q->bo, false);
- vals = q->bo->get_virtual(q->bo);
- timestamp = vals[0];
- q->bo->unmap(q->bo);
-
- q->data.u64 = timestamp_to_ns(timestamp);
- q->reg_read = 0;
-}
-
-static void
-process_query_for_time_elapsed(struct ilo_3d *hw3d, struct ilo_query *q)
-{
- uint64_t *vals, elapsed = 0;
- int i;
-
- /* in pairs */
- assert(q->reg_read % 2 == 0);
-
- q->bo->map(q->bo, false);
- vals = q->bo->get_virtual(q->bo);
-
- for (i = 1; i < q->reg_read; i += 2)
- elapsed += vals[i] - vals[i - 1];
-
- q->bo->unmap(q->bo);
-
- /* accumulate so that the query can be resumed if wanted */
- q->data.u64 += timestamp_to_ns(elapsed);
- q->reg_read = 0;
-}
-
/**
* Process the raw query data.
*/
@@ -240,11 +307,10 @@ ilo_3d_process_query(struct ilo_context *ilo, struct ilo_query *q)
* Hook for CP new-batch.
*/
void
-ilo_3d_new_cp_batch(struct ilo_3d *hw3d)
+ilo_3d_cp_flushed(struct ilo_3d *hw3d)
{
- struct ilo_query *q;
-
- hw3d->new_batch = true;
+ if (ilo_debug & ILO_DEBUG_3D)
+ ilo_3d_pipeline_dump(hw3d->pipeline);
/* invalidate the pipeline */
ilo_3d_pipeline_invalidate(hw3d->pipeline,
@@ -255,58 +321,7 @@ ilo_3d_new_cp_batch(struct ilo_3d *hw3d)
ILO_3D_PIPELINE_INVALIDATE_HW);
}
- /* resume occlusion queries */
- LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
- /* accumulate the result if the bo is alreay full */
- if (q->reg_read >= q->reg_total)
- process_query_for_occlusion_counter(hw3d, q);
-
- ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
- q->bo, q->reg_read++);
- }
-
- /* resume timer queries */
- LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
- /* accumulate the result if the bo is alreay full */
- if (q->reg_read >= q->reg_total)
- process_query_for_time_elapsed(hw3d, q);
-
- ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
- q->bo, q->reg_read++);
- }
-}
-
-/**
- * Hook for CP pre-flush.
- */
-void
-ilo_3d_pre_cp_flush(struct ilo_3d *hw3d)
-{
- struct ilo_query *q;
-
- /* pause occlusion queries */
- LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
- assert(q->reg_read < q->reg_total);
- ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
- q->bo, q->reg_read++);
- }
-
- /* pause timer queries */
- LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
- assert(q->reg_read < q->reg_total);
- ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
- q->bo, q->reg_read++);
- }
-}
-
-/**
- * Hook for CP post-flush
- */
-void
-ilo_3d_post_cp_flush(struct ilo_3d *hw3d)
-{
- if (ilo_debug & ILO_DEBUG_3D)
- ilo_3d_pipeline_dump(hw3d->pipeline);
+ hw3d->new_batch = true;
}
/**
@@ -322,6 +337,9 @@ ilo_3d_create(struct ilo_cp *cp, const struct ilo_dev_info *dev)
return NULL;
hw3d->cp = cp;
+ hw3d->owner.release_callback = ilo_3d_release_render_ring;
+ hw3d->owner.release_data = hw3d;
+
hw3d->new_batch = true;
list_inithead(&hw3d->occlusion_queries);
@@ -356,7 +374,7 @@ draw_vbo(struct ilo_3d *hw3d, const struct ilo_context *ilo,
bool need_flush;
int max_len;
- ilo_cp_set_ring(hw3d->cp, ILO_CP_RING_RENDER);
+ ilo_3d_own_render_ring(hw3d);
/*
* Without a better tracking mechanism, when the framebuffer changes, we