author    Kenneth Graunke <[email protected]>  2017-08-02 23:40:50 -0700
committer Kenneth Graunke <[email protected]>  2017-08-04 10:26:37 -0700
commit    29ba502a4e28471f67e4e904ae503157087efd20 (patch)
tree      c77f61ecc0c241d1d14e78902d8a978705c91ff3 /src/mesa/drivers/dri/i965/intel_batchbuffer.c
parent    e24f3fb7c84a6fa9445300347dbfa7da8a0dade8 (diff)
i965: Use I915_EXEC_BATCH_FIRST when available.
This will make it easier to use I915_EXEC_HANDLE_LUT. Based on a patch by Chris Wilson.
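As a rough illustration of what "when available" means here: the KERNEL_ALLOWS_EXEC_BATCH_FIRST bit consulted in intel_batchbuffer_init() is presumably derived from a kernel query done during screen setup. The sketch below is not the driver's actual code; the helper names are hypothetical and libdrm headers are assumed. It only shows how such a probe and the corresponding execbuf flag could look:

#include <stdbool.h>
#include <xf86drm.h>      /* drmIoctl() */
#include <i915_drm.h>     /* i915 uAPI definitions; include path may differ per build */

static bool
kernel_has_exec_batch_first(int fd)
{
   int value = 0;
   struct drm_i915_getparam gp = {
      .param = I915_PARAM_HAS_EXEC_BATCH_FIRST,
      .value = &value,
   };

   /* Kernels that predate the feature reject the parameter; treat any
    * error as "not supported". */
   if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
      return false;

   return value > 0;
}

static void
set_batch_position_flag(struct drm_i915_gem_execbuffer2 *execbuf,
                        bool use_batch_first)
{
   if (use_batch_first) {
      /* Kernel reads the batch from the first validation-list entry. */
      execbuf->flags |= I915_EXEC_BATCH_FIRST;
   }
   /* Otherwise the batch must be the last entry, which is what the swap
    * added to do_flush_locked() in the diff below arranges. */
}

With the flag set, the kernel takes the batch from index 0 of the validation list, so the driver can add the batch buffer first and keep it there; without it, do_flush_locked() swaps the batch entry to the end of the list.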
Diffstat (limited to 'src/mesa/drivers/dri/i965/intel_batchbuffer.c')
-rw-r--r--  src/mesa/drivers/dri/i965/intel_batchbuffer.c  37
1 file changed, 27 insertions(+), 10 deletions(-)
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.c b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
index 19a6c0edb7c..e9a30ef34ab 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
@@ -62,7 +62,7 @@ intel_batchbuffer_init(struct intel_batchbuffer *batch,
struct brw_bufmgr *bufmgr,
bool has_llc)
{
- intel_batchbuffer_reset(batch, bufmgr, has_llc);
+ struct brw_context *brw = container_of(batch, brw, batch);
if (!has_llc) {
batch->cpu_map = malloc(BATCH_SZ);
@@ -85,6 +85,11 @@ intel_batchbuffer_init(struct intel_batchbuffer *batch,
batch->state_batch_sizes =
_mesa_hash_table_create(NULL, uint_key_hash, uint_key_compare);
}
+
+ batch->use_batch_first =
+ brw->screen->kernel_features & KERNEL_ALLOWS_EXEC_BATCH_FIRST;
+
+ intel_batchbuffer_reset(batch, bufmgr, has_llc);
}
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
@@ -120,13 +125,8 @@ add_exec_bo(struct intel_batchbuffer *batch, struct brw_bo *bo)
struct drm_i915_gem_exec_object2 *validation_entry =
&batch->validation_list[batch->exec_count];
validation_entry->handle = bo->gem_handle;
- if (bo == batch->bo) {
- validation_entry->relocation_count = batch->reloc_count;
- validation_entry->relocs_ptr = (uintptr_t) batch->relocs;
- } else {
- validation_entry->relocation_count = 0;
- validation_entry->relocs_ptr = 0;
- }
+ validation_entry->relocation_count = 0;
+ validation_entry->relocs_ptr = 0;
validation_entry->alignment = bo->align;
validation_entry->offset = bo->offset64;
validation_entry->flags = bo->kflags;
@@ -157,6 +157,9 @@ intel_batchbuffer_reset(struct intel_batchbuffer *batch,
}
batch->map_next = batch->map;
+ add_exec_bo(batch, batch->bo);
+ assert(batch->bo->index == 0);
+
batch->reserved_space = BATCH_RESERVED;
batch->state_batch_offset = batch->bo->size;
batch->needs_sol_reset = false;
@@ -662,8 +665,22 @@ do_flush_locked(struct brw_context *brw, int in_fence_fd, int *out_fence_fd)
if (ret == 0) {
uint32_t hw_ctx = batch->ring == RENDER_RING ? brw->hw_ctx : 0;
- /* Add the batch itself to the end of the validation list */
- add_exec_bo(batch, batch->bo);
+ struct drm_i915_gem_exec_object2 *entry = &batch->validation_list[0];
+ assert(entry->handle == batch->bo->gem_handle);
+ entry->relocation_count = batch->reloc_count;
+ entry->relocs_ptr = (uintptr_t) batch->relocs;
+
+ if (batch->use_batch_first) {
+ flags |= I915_EXEC_BATCH_FIRST;
+ } else {
+ /* Move the batch to the end of the validation list */
+ struct drm_i915_gem_exec_object2 tmp;
+ const unsigned index = batch->exec_count - 1;
+
+ tmp = *entry;
+ *entry = batch->validation_list[index];
+ batch->validation_list[index] = tmp;
+ }
ret = execbuffer(dri_screen->fd, batch, hw_ctx,
4 * USED_BATCH(*batch),