author | Rob Clark <[email protected]> | 2016-05-20 15:36:10 -0400
committer | Rob Clark <[email protected]> | 2016-07-30 09:23:42 -0400
commit | 9bbd239a4039522d7c1023ecb21764679447bb2d (patch)
tree | de6633fb1936d05c929e34a88987c69884ed45f9 /src/gallium/drivers/freedreno/freedreno_context.h
parent | 12aec78993edface7f530eede9e018b5fa1897b7 (diff)
freedreno: introduce fd_batch
Introduce the batch object, to track a batch/submit's worth of
ringbuffers and other bookkeeping. In this first step, just move
the ringbuffers into batch, since that is mostly uninteresting
churn.
For now there is just a single batch at a time. Note that one
outcome of this change is that ringbuffers are allocated/freed on
each use. But the expectation is that the bo pool in libdrm_freedreno
will save us the GEM bo alloc/free, which was the initial reason
to implement a ringbuffer pool in gallium.
The purpose of the batch is to eventually facilitate out-of-order
rendering, with batches associated with framebuffer state and
tracking their dependencies on other batches.
Signed-off-by: Rob Clark <[email protected]>
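To make the shape of the new object concrete, here is a minimal sketch of a refcounted batch that owns a submit's worth of ringbuffers. All names here (`sketch_*`) are hypothetical stand-ins inferred from the commit message and the fd_context diff below; the real definition lives in the freedreno_batch.h this commit adds and is not reproduced on this page.

```c
/*
 * Minimal sketch of the batch concept, assuming hypothetical names
 * (sketch_*); the real struct fd_batch lives in freedreno_batch.h.
 */
#include <assert.h>
#include <stdlib.h>

struct sketch_ringbuffer;	/* stand-in for struct fd_ringbuffer */

struct sketch_batch {
	/* refcounted, so that once rendering goes out-of-order, other
	 * batches (and the context) can safely hold references:
	 */
	int refcount;

	/* a batch/submit's worth of ringbuffers, owned by the batch
	 * instead of hanging directly off fd_context (per the TODO in
	 * the diff below, a gmem/tiling ring stays on the context for
	 * now):
	 */
	struct sketch_ringbuffer *draw;		/* draw/clear cmds */
	struct sketch_ringbuffer *binning;	/* binning pass cmds */
};

static void
sketch_batch_unref(struct sketch_batch *batch)
{
	assert(batch->refcount > 0);
	if (--batch->refcount == 0)
		free(batch);	/* the rings would be freed here too */
}
```

Allocating and freeing the rings with each batch is what makes the libdrm_freedreno bo pool, mentioned above, matter.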
Diffstat (limited to 'src/gallium/drivers/freedreno/freedreno_context.h')
-rw-r--r-- | src/gallium/drivers/freedreno/freedreno_context.h | 38
1 file changed, 12 insertions, 26 deletions
```diff
diff --git a/src/gallium/drivers/freedreno/freedreno_context.h b/src/gallium/drivers/freedreno/freedreno_context.h
index 53b4e1dbaa7..cdf40146881 100644
--- a/src/gallium/drivers/freedreno/freedreno_context.h
+++ b/src/gallium/drivers/freedreno/freedreno_context.h
@@ -36,6 +36,7 @@
 #include "util/u_slab.h"
 #include "util/u_string.h"
 
+#include "freedreno_batch.h"
 #include "freedreno_screen.h"
 #include "freedreno_gmem.h"
 #include "freedreno_util.h"
@@ -246,33 +247,19 @@ struct fd_context {
 		uint64_t batch_total, batch_sysmem, batch_gmem, batch_restore;
 	} stats;
 
-	/* we can't really sanely deal with wraparound point in ringbuffer
-	 * and because of the way tiling works we can't really flush at
-	 * arbitrary points (without a big performance hit).  When we get
-	 * too close to the end of the current ringbuffer, cycle to the next
-	 * one (and wait for pending rendering from next rb to complete).
-	 * We want the # of ringbuffers to be high enough that we don't
-	 * normally have to wait before resetting to the start of the next
-	 * rb.
+	/* TODO get rid of this.. only used in gmem/tiling code paths (and
+	 * NULL the rest of the time).  Just leaving for now to reduce some
+	 * churn..
 	 */
-	struct fd_ringbuffer *rings[8];
-	unsigned rings_idx;
-
-	/* NOTE: currently using a single ringbuffer for both draw and
-	 * tiling commands, we need to make sure we need to leave enough
-	 * room at the end to append the tiling commands when we flush.
-	 * 0x7000 dwords should be a couple times more than we ever need
-	 * so should be a nice conservative threshold.
-	 */
-#define FD_TILING_COMMANDS_DWORDS 0x7000
-
-	/* normal draw/clear cmds: */
 	struct fd_ringbuffer *ring;
-	struct fd_ringmarker *draw_start, *draw_end;
 
-	/* binning pass draw/clear cmds: */
-	struct fd_ringbuffer *binning_ring;
-	struct fd_ringmarker *binning_start, *binning_end;
+	/* Current batch.. the rule here is that you can deref ctx->batch
+	 * in codepaths from pipe_context entrypoints.  But not in code-
+	 * paths from fd_batch_flush() (basically, the stuff that gets
+	 * called from GMEM code), since in those code-paths the batch
+	 * you care about is not necessarily the same as ctx->batch.
+	 */
+	struct fd_batch *batch;
 
 	/* Keep track if WAIT_FOR_IDLE is needed for registers we need
 	 * to update via RMW:
@@ -400,8 +387,7 @@ struct fd_context {
 			uint32_t regid, uint32_t num, struct pipe_resource **prscs, uint32_t *offsets);
 
 	/* indirect-branch emit: */
-	void (*emit_ib)(struct fd_ringbuffer *ring, struct fd_ringmarker *start,
-			struct fd_ringmarker *end);
+	void (*emit_ib)(struct fd_ringbuffer *ring, struct fd_ringbuffer *target);
 };
 
 static inline struct fd_context *
```
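The emit_ib() change at the end of the diff follows from the batch ownership model: previously a region carved out of a shared ringbuffer had to be described by an fd_ringmarker (start, end) pair, whereas a per-batch ringbuffer can be branched into as a whole. A rough sketch of the idea follows; all names and the packet layout are invented for illustration (the real per-generation hooks emit the adreno CP_INDIRECT_BUFFER family of packets):

```c
/*
 * Sketch of the new-style emit_ib() hook: branch from `ring` into a
 * whole `target` ring.  Names and packet encoding are invented for
 * illustration; real hooks emit CP_INDIRECT_BUFFER* packets.
 */
#include <stdint.h>

struct sketch_ring {
	uint32_t *start, *cur;	/* cpu pointers */
	uint64_t iova;		/* gpu address of start */
};

static void
emit(struct sketch_ring *ring, uint32_t dword)
{
	*ring->cur++ = dword;
}

static void
sketch_emit_ib(struct sketch_ring *ring, struct sketch_ring *target)
{
	/* With per-batch rings, the IB target is the whole ring, so a
	 * gpu address plus size replaces the old (start, end)
	 * fd_ringmarker pair:
	 */
	uint32_t sizedwords = (uint32_t)(target->cur - target->start);

	emit(ring, 0xc0de0000);				/* fake IB opcode */
	emit(ring, (uint32_t)target->iova);		/* target address lo */
	emit(ring, (uint32_t)(target->iova >> 32));	/* target address hi */
	emit(ring, sizedwords);				/* target size */
}
```

Dropping fd_ringmarker from the vtable signature is also what lets later commits retire the marker machinery entirely once every caller works in terms of whole per-batch rings.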