author      Kenneth Graunke <[email protected]>    2017-08-02 23:58:07 -0700
committer   Kenneth Graunke <[email protected]>    2017-08-04 10:26:37 -0700
commit      68d611ed8e62fd3b166c52bbfcb96730ae1459c0
tree        7c563faf74e46c79dcdf153b03717cec8eebce54
parent      29ba502a4e28471f67e4e904ae503157087efd20
i965: Simplify some bo != batch->bo special cases.
Extracted from a patch by Chris Wilson.

Now that the batch is always at the front of the validation list, we don't
need to special case it - the usual "go find an existing BO" code will work
just fine.
-rw-r--r--   src/mesa/drivers/dri/i965/intel_batchbuffer.c   46
1 file changed, 19 insertions(+), 27 deletions(-)
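The "go find an existing BO" lookup the commit message refers to is a cached-index check with a linear-scan fallback. Below is a self-contained sketch of that pattern, using simplified illustrative types rather than the driver's brw_bo/intel_batchbuffer structures:

#include <assert.h>

struct bo {
   unsigned index;               /* cached validation-list slot; may be stale */
};

struct batch {
   struct bo *exec_bos[64];      /* BOs referenced by the current batch */
   unsigned exec_count;
};

static unsigned
find_or_add_bo(struct batch *batch, struct bo *bo)
{
   /* Fast path: trust the cached index if it still points at this BO. */
   unsigned index = bo->index;
   if (index < batch->exec_count && batch->exec_bos[index] == bo)
      return index;

   /* The BO may be shared between several active batches, so the cached
    * index can belong to another batch: fall back to a linear scan.
    */
   for (index = 0; index < batch->exec_count; index++) {
      if (batch->exec_bos[index] == bo)
         return index;
   }

   /* Not present yet: append it and refresh the cached index. */
   assert(batch->exec_count < 64);
   index = batch->exec_count++;
   batch->exec_bos[index] = bo;
   bo->index = index;
   return index;
}

With the batch buffer always sitting at the front of the list, this path handles it like any other BO, which is what lets the bo != batch->bo special cases below go away.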
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.c b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
index e9a30ef34ab..6f152e9b264 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
@@ -97,20 +97,19 @@ intel_batchbuffer_init(struct intel_batchbuffer *batch,
 static unsigned
 add_exec_bo(struct intel_batchbuffer *batch, struct brw_bo *bo)
 {
-   if (bo != batch->bo) {
-      unsigned index = READ_ONCE(bo->index);
+   unsigned index = READ_ONCE(bo->index);
 
-      if (index < batch->exec_count && batch->exec_bos[index] == bo)
-         return index;
+   if (index < batch->exec_count && batch->exec_bos[index] == bo)
+      return index;
 
-      /* May have been shared between multiple active batches */
-      for (index = 0; index < batch->exec_count; index++) {
-         if (batch->exec_bos[index] == bo)
-            return index;
-      }
+   /* May have been shared between multiple active batches */
+   for (index = 0; index < batch->exec_count; index++) {
+      if (batch->exec_bos[index] == bo)
+         return index;
+   }
 
+   if (bo != batch->bo)
       brw_bo_reference(bo);
-   }
 
    if (batch->exec_count == batch->exec_array_size) {
       batch->exec_array_size *= 2;
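The precondition the commit message relies on, that the batch buffer is always present at the front of the validation list, is established outside this diff when a batch is started. A hypothetical sketch of that setup (the reset path is not part of this patch, and the function name here is illustrative):

/* Hypothetical sketch: add the batch bo itself as the first validation-list
 * entry when a new batch begins, so add_exec_bo() finds it via the normal
 * lookup and no special case is needed in the reloc path.
 */
static void
reset_batch_validation_list(struct intel_batchbuffer *batch)
{
   batch->exec_count = 0;
   add_exec_bo(batch, batch->bo);   /* batch->bo ends up at index 0 */
}

Note that the hunk above still guards brw_bo_reference() with bo != batch->bo, so the batch's own buffer is not given an extra reference when it is looked up again.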
@@ -807,25 +806,18 @@ brw_emit_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
    assert(batch_offset <= BATCH_SZ - sizeof(uint32_t));
    assert(_mesa_bitcount(write_domain) <= 1);
 
-   uint64_t offset64;
-   if (target != batch->bo) {
-      unsigned int index = add_exec_bo(batch, target);
-      struct drm_i915_gem_exec_object2 *entry = &batch->validation_list[index];
+   unsigned int index = add_exec_bo(batch, target);
+   struct drm_i915_gem_exec_object2 *entry = &batch->validation_list[index];
 
-      if (write_domain) {
-         entry->flags |= EXEC_OBJECT_WRITE;
+   if (write_domain) {
+      entry->flags |= EXEC_OBJECT_WRITE;
 
-         /* PIPECONTROL needs a w/a on gen6 */
-         if (write_domain == I915_GEM_DOMAIN_INSTRUCTION) {
-            struct brw_context *brw = container_of(batch, brw, batch);
-            if (brw->gen == 6)
-               entry->flags |= EXEC_OBJECT_NEEDS_GTT;
-         }
+      /* PIPECONTROL needs a w/a on gen6 */
+      if (write_domain == I915_GEM_DOMAIN_INSTRUCTION) {
+         struct brw_context *brw = container_of(batch, brw, batch);
+         if (brw->gen == 6)
+            entry->flags |= EXEC_OBJECT_NEEDS_GTT;
       }
-
-      offset64 = entry->offset;
-   } else {
-      offset64 = target->offset64;
    }
 
    batch->relocs[batch->reloc_count++] =
@@ -833,14 +825,14 @@ brw_emit_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
          .offset = batch_offset,
          .delta = target_offset,
          .target_handle = target->gem_handle,
-         .presumed_offset = offset64,
+         .presumed_offset = entry->offset,
       };
 
    /* Using the old buffer offset, write in what the right data would be, in
    * case the buffer doesn't move and we can short-circuit the relocation
    * processing in the kernel
    */
-   return offset64 + target_offset;
+   return entry->offset + target_offset;
 }
 
 void
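As the comment in the last hunk says, the value returned is the presumed address of the target (entry->offset + target_offset); the caller writes it into the batch at batch_offset so the kernel can skip relocation processing when the buffer does not move. A hedged sketch of what a caller might do with that return value; the helper name and the batch->map access are illustrative, not the driver's exact OUT_RELOC path:

/* Illustrative caller: record a relocation for `target` at the current batch
 * write position and emit the presumed 32-bit address in place.
 */
static void
emit_bo_pointer(struct intel_batchbuffer *batch, uint32_t *out,
                struct brw_bo *target, uint32_t target_offset,
                uint32_t read_domains, uint32_t write_domain)
{
   uint32_t batch_offset = (char *)out - (char *)batch->map;   /* assumed layout */

   uint64_t presumed = brw_emit_reloc(batch, batch_offset, target,
                                      target_offset, read_domains, write_domain);

   /* e.g. entry->offset 0x100000 + target_offset 0x40 -> 0x100040 */
   *out = (uint32_t) presumed;
}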