| author | Brian <[email protected]> | 2007-11-06 10:14:53 -0700 |
|---|---|---|
| committer | Brian <[email protected]> | 2007-11-06 15:27:14 -0700 |
| commit | 5e24e3c4266779704fc30737ac5e005ba71fd797 (patch) | |
| tree | 8e3addffed2119eeea9ae1bf727c8809bd7fdc4f | |
| parent | ea286d4df270897ca2a8f9e5e41b82cea419bdae (diff) | |
code clean-up
| -rw-r--r-- | src/mesa/drivers/dri/intel_winsys/intel_batchbuffer.c | 39 |

1 file changed, 12 insertions, 27 deletions
diff --git a/src/mesa/drivers/dri/intel_winsys/intel_batchbuffer.c b/src/mesa/drivers/dri/intel_winsys/intel_batchbuffer.c
index fc9010f58a1..89d72c94483 100644
--- a/src/mesa/drivers/dri/intel_winsys/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/intel_winsys/intel_batchbuffer.c
@@ -80,10 +80,10 @@ intel_dump_batchbuffer(uint offset, uint * ptr, uint count)
    printf("END BATCH\n\n\n");
 }
+
 void
 intel_batchbuffer_reset(struct intel_batchbuffer *batch)
 {
-   int i;
    if (batch->map) {
@@ -91,11 +91,9 @@ intel_batchbuffer_reset(struct intel_batchbuffer *batch)
       batch->map = NULL;
    }
-
    /*
     * Get a new, free batchbuffer.
     */
-
    batch->size = BATCH_SZ;
    driBOData(batch->buffer, batch->size, NULL, 0);
@@ -104,7 +102,6 @@ intel_batchbuffer_reset(struct intel_batchbuffer *batch)
    /*
     * Unreference buffers previously on the relocation list.
     */
-
    for (i = 0; i < batch->nr_relocs; i++) {
       struct buffer_reloc *r = &batch->reloc[i];
       driBOUnReference(r->buf);
@@ -119,7 +116,6 @@ intel_batchbuffer_reset(struct intel_batchbuffer *batch)
     * while it's on the list.
     */
-
    driBOAddListItem(&batch->list, batch->buffer,
                     DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE,
                     DRM_BO_MASK_MEM | DRM_BO_FLAG_EXE);
@@ -129,6 +125,7 @@ intel_batchbuffer_reset(struct intel_batchbuffer *batch)
    batch->ptr = batch->map;
 }
+
 /*======================================================================
  * Public functions
  */
@@ -148,6 +145,7 @@ intel_batchbuffer_alloc(struct intel_context *intel)
    return batch;
 }
+
 void
 intel_batchbuffer_free(struct intel_batchbuffer *batch)
 {
@@ -167,13 +165,9 @@ intel_batchbuffer_free(struct intel_batchbuffer *batch)
 }
-
-
 static void
 intel_batch_ioctl(struct intel_context *intel,
-                  uint start_offset,
-                  uint used,
-                  boolean allow_unlock)
+                  uint start_offset, uint used, boolean allow_unlock)
 {
    drmI830BatchBuffer batch;
@@ -198,18 +192,14 @@ intel_batch_ioctl(struct intel_context *intel,
 }
-
-
 /* TODO: Push this whole function into bufmgr.
  */
 static void
 do_flush_locked(struct intel_batchbuffer *batch,
-                uint used,
-                boolean allow_unlock)
+                uint used, boolean allow_unlock)
 {
    uint *ptr;
-   uint i;
-   unsigned fenceFlags;
+   uint i, fenceFlags;
    struct _DriFenceObject *fo;
    driBOValidateList(batch->intel->driFd, &batch->list);
@@ -221,7 +211,6 @@ do_flush_locked(struct intel_batchbuffer *batch,
    ptr = (uint *) driBOMap(batch->buffer,
                            DRM_BO_FLAG_WRITE, DRM_BO_HINT_ALLOW_UNFENCED_MAP);
-
    for (i = 0; i < batch->nr_relocs; i++) {
       struct buffer_reloc *r = &batch->reloc[i];
@@ -242,26 +231,22 @@ do_flush_locked(struct intel_batchbuffer *batch,
     * Kernel fencing. The flags tells the kernel that we've
     * programmed an MI_FLUSH.
     */
-
    fenceFlags = DRM_I915_FENCE_FLAG_FLUSHED;
-   fo = driFenceBuffers(batch->intel->driFd,
-                        "Batch fence", fenceFlags);
+   fo = driFenceBuffers(batch->intel->driFd, "Batch fence", fenceFlags);
    /*
     * User space fencing.
     */
-
    driBOFence(batch->buffer, fo);
    if (driFenceType(fo) == DRM_FENCE_TYPE_EXE) {
-
       /*
       * Oops. We only validated a batch buffer. This means we
       * didn't do any proper rendering. Discard this fence object.
       */
-
       driFenceUnReference(fo);
-   } else {
+   }
+   else {
      driFenceUnReference(batch->last_fence);
      batch->last_fence = fo;
      for (i = 0; i < batch->nr_relocs; i++) {
@@ -277,12 +262,12 @@ intel_batchbuffer_flush(struct intel_batchbuffer *batch)
 {
    struct intel_context *intel = batch->intel;
    uint used = batch->ptr - batch->map;
-   boolean was_locked = intel->locked;
+   const boolean was_locked = intel->locked;
    if (used == 0)
       return batch->last_fence;
-#define MI_FLUSH ((0<<29)|(4<<23))
+#define MI_FLUSH ((0 << 29) | (4 << 23))
    /* Add the MI_BATCH_BUFFER_END. Always add an MI_FLUSH - this is a
    * performance drain that we would like to avoid.
@@ -320,6 +305,7 @@ intel_batchbuffer_flush(struct intel_batchbuffer *batch)
    return batch->last_fence;
 }
+
 void
 intel_batchbuffer_finish(struct intel_batchbuffer *batch)
 {
@@ -355,7 +341,6 @@ intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
 }
-
 void
 intel_batchbuffer_data(struct intel_batchbuffer *batch,
                        const void *data, uint bytes, uint flags)