Diffstat (limited to 'src/mesa/drivers/dri')
-rw-r--r--  src/mesa/drivers/dri/Makefile.template           |    1
-rw-r--r--  src/mesa/drivers/dri/common/dri_bufmgr.c         |   28
-rw-r--r--  src/mesa/drivers/dri/common/dri_bufmgr.h         |   40
-rw-r--r--  src/mesa/drivers/dri/common/dri_bufmgr_fake.c    |  136
-rw-r--r--  src/mesa/drivers/dri/common/dri_bufmgr_ttm.c     |  469
-rw-r--r--  src/mesa/drivers/dri/i915/Makefile               |    3
-rw-r--r--  src/mesa/drivers/dri/i915/intel_batchbuffer.c    |  128
-rw-r--r--  src/mesa/drivers/dri/i915/intel_batchbuffer.h    |   15
-rw-r--r--  src/mesa/drivers/dri/i915/intel_bufmgr_ttm.c     |  853
-rw-r--r--  src/mesa/drivers/dri/i915/intel_bufmgr_ttm.h     |   17
-rw-r--r--  src/mesa/drivers/dri/i915/intel_ioctl.c          |   58
-rw-r--r--  src/mesa/drivers/dri/i915/intel_ioctl.h          |    4
-rw-r--r--  src/mesa/drivers/dri/i915/intel_regions.c        |    4
-rw-r--r--  src/mesa/drivers/dri/i915/intel_screen.c         |   12
-rw-r--r--  src/mesa/drivers/dri/i965/intel_context.c        |    2
-rw-r--r--  src/mesa/drivers/dri/intel/intel_tex_layout.c    |    1
-rw-r--r--  src/mesa/drivers/dri/nouveau/nouveau_context.c   |   36
-rw-r--r--  src/mesa/drivers/dri/nouveau/nouveau_context.h   |    6
-rw-r--r--  src/mesa/drivers/dri/nouveau/nouveau_driver.c    |   70
-rw-r--r--  src/mesa/drivers/dri/nouveau/nv10_state.c        |   86
-rw-r--r--  src/mesa/drivers/dri/r300/r300_fragprog.c        |    6
21 files changed, 1249 insertions(+), 726 deletions(-)
diff --git a/src/mesa/drivers/dri/Makefile.template b/src/mesa/drivers/dri/Makefile.template
index 3abce004c92..6ed6fc15b54 100644
--- a/src/mesa/drivers/dri/Makefile.template
+++ b/src/mesa/drivers/dri/Makefile.template
@@ -13,7 +13,6 @@ COMMON_SOURCES = \
COMMON_BM_SOURCES = \
../common/dri_bufmgr.c \
- ../common/dri_bufmgr_ttm.c \
../common/dri_bufmgr_fake.c
diff --git a/src/mesa/drivers/dri/common/dri_bufmgr.c b/src/mesa/drivers/dri/common/dri_bufmgr.c
index 407409bf06d..83886480dd3 100644
--- a/src/mesa/drivers/dri/common/dri_bufmgr.c
+++ b/src/mesa/drivers/dri/common/dri_bufmgr.c
@@ -88,18 +88,6 @@ dri_bo_unmap(dri_bo *buf)
return buf->bufmgr->bo_unmap(buf);
}
-int
-dri_bo_validate(dri_bo *buf, unsigned int flags)
-{
- return buf->bufmgr->bo_validate(buf, flags);
-}
-
-dri_fence *
-dri_fence_validated(dri_bufmgr *bufmgr, const char *name, GLboolean flushed)
-{
- return bufmgr->fence_validated(bufmgr, name, flushed);
-}
-
void
dri_fence_wait(dri_fence *fence)
{
@@ -150,3 +138,19 @@ dri_bufmgr_destroy(dri_bufmgr *bufmgr)
{
bufmgr->destroy(bufmgr);
}
+
+
+void dri_emit_reloc(dri_bo *batch_buf, GLuint flags, GLuint delta, GLuint offset, dri_bo *relocatee)
+{
+ batch_buf->bufmgr->emit_reloc(batch_buf, flags, delta, offset, relocatee);
+}
+
+void *dri_process_relocs(dri_bo *batch_buf, GLuint *count)
+{
+ return batch_buf->bufmgr->process_relocs(batch_buf, count);
+}
+
+void dri_post_submit(dri_bo *batch_buf, dri_fence **last_fence)
+{
+ batch_buf->bufmgr->post_submit(batch_buf, last_fence);
+}
diff --git a/src/mesa/drivers/dri/common/dri_bufmgr.h b/src/mesa/drivers/dri/common/dri_bufmgr.h
index 3be342926f7..7dbb558949a 100644
--- a/src/mesa/drivers/dri/common/dri_bufmgr.h
+++ b/src/mesa/drivers/dri/common/dri_bufmgr.h
@@ -116,30 +116,6 @@ struct _dri_bufmgr {
/** Reduces the refcount on the userspace mapping of the buffer object. */
int (*bo_unmap)(dri_bo *buf);
- /**
- * Makes the buffer accessible to the graphics chip.
- *
- * The resulting offset of the buffer within the graphics aperture is then
- * available at buf->offset until the buffer is fenced.
- *
- * Flags should consist of the memory types that the buffer may be validated
- * into and the read/write/exe flags appropriate to the use of the buffer.
- */
- int (*bo_validate)(dri_bo *buf, unsigned int flags);
-
- /**
- * Associates the current set of validated buffers with a fence.
- *
- * Once fenced, the buffer manager will allow the validated buffers to be
- * evicted when the graphics device's execution has passed the fence
- * command.
- *
- * The fence object will have flags for the sum of the read/write/exe flags
- * of the validated buffers associated with it.
- */
- dri_fence * (*fence_validated)(dri_bufmgr *bufmgr, const char *name,
- GLboolean flushed);
-
/** Takes a reference on a fence object */
void (*fence_reference)(dri_fence *fence);
@@ -158,6 +134,15 @@ struct _dri_bufmgr {
* Tears down the buffer manager instance.
*/
void (*destroy)(dri_bufmgr *bufmgr);
+
+ /**
+ * Add relocation
+ */
+ void (*emit_reloc)(dri_bo *batch_buf, GLuint flags, GLuint delta, GLuint offset, dri_bo *relocatee);
+
+ void *(*process_relocs)(dri_bo *batch_buf, GLuint *count);
+
+ void (*post_submit)(dri_bo *batch_buf, dri_fence **fence);
};
dri_bo *dri_bo_alloc(dri_bufmgr *bufmgr, const char *name, unsigned long size,
@@ -169,9 +154,6 @@ void dri_bo_reference(dri_bo *bo);
void dri_bo_unreference(dri_bo *bo);
int dri_bo_map(dri_bo *buf, GLboolean write_enable);
int dri_bo_unmap(dri_bo *buf);
-int dri_bo_validate(dri_bo *buf, unsigned int flags);
-dri_fence *dri_fence_validated(dri_bufmgr *bufmgr, const char *name,
- GLboolean flushed);
void dri_fence_wait(dri_fence *fence);
void dri_fence_reference(dri_fence *fence);
void dri_fence_unreference(dri_fence *fence);
@@ -195,4 +177,8 @@ void dri_bufmgr_destroy(dri_bufmgr *bufmgr);
dri_bo *dri_ttm_bo_create_from_handle(dri_bufmgr *bufmgr, const char *name,
unsigned int handle);
+void dri_emit_reloc(dri_bo *batch_buf, GLuint flags, GLuint delta, GLuint offset, dri_bo *relocatee);
+void *dri_process_relocs(dri_bo *batch_buf, uint32_t *count);
+void dri_post_process_relocs(dri_bo *batch_buf);
+void dri_post_submit(dri_bo *batch_buf, dri_fence **last_fence);
#endif
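
For reference, the three new entry points declared above replace the removed dri_bo_validate()/dri_fence_validated() pair: relocations are recorded while the batch is built, resolved in one pass before submission, and fenced afterwards. A minimal usage sketch, assuming only the dri_bufmgr.h interface in this patch (the flag choices, offsets, and the submit step are illustrative, not part of the patch):

#include "dri_bufmgr.h"

static void
submit_batch_sketch(dri_bo *batch, dri_bo *target, dri_fence **last_fence)
{
   GLuint count = 0;
   void *validate_list;

   /* While filling the batch, record each buffer reference as a
    * relocation: offset is where the pointer lives inside the batch,
    * delta is added to the target buffer's final offset.
    */
   dri_emit_reloc(batch, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
                  0 /* delta */, 16 /* byte offset in batch */, target);

   /* Resolve relocations and validate the referenced buffers.  The TTM
    * backend returns a kernel validate list for the execbuffer ioctl;
    * the fake backend patches the batch in place and returns NULL.
    */
   validate_list = dri_process_relocs(batch, &count);
   (void)validate_list;

   /* ... driver-specific batch/execbuffer ioctl happens here ... */

   /* Fence the validated buffers and keep the fence for glFinish(). */
   dri_post_submit(batch, last_fence);
}
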
diff --git a/src/mesa/drivers/dri/common/dri_bufmgr_fake.c b/src/mesa/drivers/dri/common/dri_bufmgr_fake.c
index e0d23a36477..bda45d921c9 100644
--- a/src/mesa/drivers/dri/common/dri_bufmgr_fake.c
+++ b/src/mesa/drivers/dri/common/dri_bufmgr_fake.c
@@ -59,6 +59,16 @@
* processed through the command queue wouldn't need to care about
* fences.
*/
+#define MAX_RELOCS 4096
+
+struct fake_buffer_reloc
+{
+ dri_bo *buf;
+ GLuint offset;
+ GLuint delta; /* not needed? */
+ GLuint validate_flags;
+};
+
struct block {
struct block *next, *prev;
struct mem_block *mem; /* BM_MEM_AGP */
@@ -107,6 +117,12 @@ typedef struct _bufmgr_fake {
int (*fence_wait)(void *private, unsigned int fence_cookie);
/** Driver-supplied argument to driver callbacks */
void *driver_priv;
+
+
+ /** fake relocation list */
+ struct fake_buffer_reloc reloc[MAX_RELOCS];
+ GLuint nr_relocs;
+ GLboolean performed_rendering;
} dri_bufmgr_fake;
typedef struct _dri_bo_fake {
@@ -837,6 +853,120 @@ dri_fake_destroy(dri_bufmgr *bufmgr)
free(bufmgr);
}
+static void
+dri_fake_emit_reloc(dri_bo *batch_buf, GLuint flags, GLuint delta, GLuint offset,
+ dri_bo *relocatee)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)batch_buf->bufmgr;
+ struct fake_buffer_reloc *r = &bufmgr_fake->reloc[bufmgr_fake->nr_relocs++];
+
+ assert(bufmgr_fake->nr_relocs <= MAX_RELOCS);
+
+ dri_bo_reference(relocatee);
+
+ r->buf = relocatee;
+ r->offset = offset;
+ r->delta = delta;
+ r->validate_flags = flags;
+
+ return;
+}
+
+
+static int
+relocation_sort(const void *a_in, const void *b_in) {
+ const struct fake_buffer_reloc *a = a_in, *b = b_in;
+
+ return (intptr_t)a->buf < (intptr_t)b->buf ? -1 : 1;
+}
+
+static void *
+dri_fake_process_relocs(dri_bo *batch_buf, GLuint *count_p)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)batch_buf->bufmgr;
+ GLuint i;
+ GLuint *ptr;
+ GLuint count = 0;
+
+ assert(batch_buf->virtual != NULL);
+ ptr = batch_buf->virtual;
+
+ bufmgr_fake->performed_rendering = GL_FALSE;
+
+ /* Sort our relocation list in terms of referenced buffer pointer.
+ * This lets us uniquely validate the buffers with the sum of all the flags,
+ * while avoiding O(n^2) on number of relocations.
+ */
+ qsort(bufmgr_fake->reloc, bufmgr_fake->nr_relocs, sizeof(bufmgr_fake->reloc[0]),
+ relocation_sort);
+
+ /* Perform the necessary validations of buffers, and enter the relocations
+ * in the batchbuffer.
+ */
+ for (i = 0; i < bufmgr_fake->nr_relocs; i++) {
+ struct fake_buffer_reloc *r = &bufmgr_fake->reloc[i];
+
+ if (r->validate_flags & DRM_BO_FLAG_WRITE)
+ bufmgr_fake->performed_rendering = GL_TRUE;
+
+ /* If this is the first time we've seen this buffer in the relocation
+ * list, figure out our flags and validate it.
+ */
+ if (i == 0 || bufmgr_fake->reloc[i - 1].buf != r->buf) {
+ uint32_t validate_flags;
+ int j, ret;
+
+ /* Accumulate the flags we need for validating this buffer. */
+ validate_flags = r->validate_flags;
+ for (j = i + 1; j < bufmgr_fake->nr_relocs; j++) {
+ if (bufmgr_fake->reloc[j].buf != r->buf)
+ break;
+ validate_flags |= bufmgr_fake->reloc[j].validate_flags;
+ }
+
+ /* Validate. If we fail, fence to clear the unfenced list and bail
+ * out.
+ */
+ ret = dri_fake_bo_validate(r->buf, validate_flags);
+ if (ret != 0) {
+ dri_fence *fo;
+ dri_bo_unmap(batch_buf);
+ fo = dri_fake_fence_validated(batch_buf->bufmgr,
+ "batchbuffer failure fence", GL_TRUE);
+ dri_fence_unreference(fo);
+ goto done;
+ }
+ count++;
+ }
+ ptr[r->offset / 4] = r->buf->offset + r->delta;
+ dri_bo_unreference(r->buf);
+ }
+ dri_bo_unmap(batch_buf);
+
+ dri_fake_bo_validate(batch_buf, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE);
+
+ *count_p = count;
+ bufmgr_fake->nr_relocs = 0;
+ done:
+ return NULL;
+}
+
+static void
+dri_fake_post_submit(dri_bo *batch_buf, dri_fence **last_fence)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)batch_buf->bufmgr;
+ dri_fence *fo;
+
+ fo = dri_fake_fence_validated(batch_buf->bufmgr, "Batch fence", GL_TRUE);
+
+ if (bufmgr_fake->performed_rendering) {
+ dri_fence_unreference(*last_fence);
+ *last_fence = fo;
+ } else {
+ dri_fence_unreference(fo);
+ }
+}
+
dri_bufmgr *
dri_bufmgr_fake_init(unsigned long low_offset, void *low_virtual,
unsigned long size,
@@ -867,13 +997,13 @@ dri_bufmgr_fake_init(unsigned long low_offset, void *low_virtual,
bufmgr_fake->bufmgr.bo_unreference = dri_fake_bo_unreference;
bufmgr_fake->bufmgr.bo_map = dri_fake_bo_map;
bufmgr_fake->bufmgr.bo_unmap = dri_fake_bo_unmap;
- bufmgr_fake->bufmgr.bo_validate = dri_fake_bo_validate;
- bufmgr_fake->bufmgr.fence_validated = dri_fake_fence_validated;
bufmgr_fake->bufmgr.fence_wait = dri_fake_fence_wait;
bufmgr_fake->bufmgr.fence_reference = dri_fake_fence_reference;
bufmgr_fake->bufmgr.fence_unreference = dri_fake_fence_unreference;
bufmgr_fake->bufmgr.destroy = dri_fake_destroy;
-
+ bufmgr_fake->bufmgr.emit_reloc = dri_fake_emit_reloc;
+ bufmgr_fake->bufmgr.process_relocs = dri_fake_process_relocs;
+ bufmgr_fake->bufmgr.post_submit = dri_fake_post_submit;
bufmgr_fake->fence_emit = fence_emit;
bufmgr_fake->fence_wait = fence_wait;
bufmgr_fake->driver_priv = driver_priv;
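
The fake backend's dri_fake_process_relocs() keeps the behaviour that previously lived in intel_batchbuffer.c: the relocation list is sorted by target buffer so each buffer is validated exactly once with the union of every flag requested for it, avoiding an O(n^2) scan. A standalone toy version of that sort-and-accumulate step (handles and flag values are made up for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Toy stand-ins for dri_bo pointers and DRM_BO_FLAG_* values. */
struct toy_reloc { uintptr_t buf; uint32_t flags; };

static int cmp(const void *a_in, const void *b_in)
{
   const struct toy_reloc *a = a_in, *b = b_in;
   return a->buf < b->buf ? -1 : 1;
}

int main(void)
{
   struct toy_reloc r[] = {
      { 0x2000, 0x1 /* "READ" */ },
      { 0x1000, 0x2 /* "WRITE" */ },
      { 0x2000, 0x2 /* "WRITE" */ },
   };
   int n = sizeof(r) / sizeof(r[0]), i, j;

   qsort(r, n, sizeof(r[0]), cmp);

   for (i = 0; i < n; i++) {
      /* First relocation against this buffer: union the flags of the
       * whole run and validate once; each relocation is then patched.
       */
      if (i == 0 || r[i - 1].buf != r[i].buf) {
         uint32_t flags = r[i].flags;
         for (j = i + 1; j < n && r[j].buf == r[i].buf; j++)
            flags |= r[j].flags;
         printf("validate buffer %#lx with flags %#x\n",
                (unsigned long)r[i].buf, flags);
      }
   }
   return 0;
}
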
diff --git a/src/mesa/drivers/dri/common/dri_bufmgr_ttm.c b/src/mesa/drivers/dri/common/dri_bufmgr_ttm.c
deleted file mode 100644
index 235398eb872..00000000000
--- a/src/mesa/drivers/dri/common/dri_bufmgr_ttm.c
+++ /dev/null
@@ -1,469 +0,0 @@
-/**************************************************************************
- *
- * Copyright © 2007 Intel Corporation
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
- * Eric Anholt <[email protected]>
- */
-
-#include <xf86drm.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include "glthread.h"
-#include "errno.h"
-#include "mtypes.h"
-#include "dri_bufmgr.h"
-#include "string.h"
-#include "imports.h"
-
-#define BUFMGR_DEBUG 0
-
-typedef struct _dri_bufmgr_ttm {
- dri_bufmgr bufmgr;
-
- int fd;
- _glthread_Mutex mutex;
- unsigned int fence_type;
- unsigned int fence_type_flush;
-} dri_bufmgr_ttm;
-
-typedef struct _dri_bo_ttm {
- dri_bo bo;
-
- int refcount; /* Protected by bufmgr->mutex */
- drmBO drm_bo;
- const char *name;
- /**
- * Note whether we are the owner of the buffer, to determine if we must
- * drmBODestroy or drmBOUnreference to unreference the buffer.
- */
- GLboolean owner;
-} dri_bo_ttm;
-
-typedef struct _dri_fence_ttm
-{
- dri_fence fence;
-
- int refcount; /* Protected by bufmgr->mutex */
- const char *name;
- drmFence drm_fence;
-} dri_fence_ttm;
-
-#if 0
-int
-driFenceSignaled(DriFenceObject * fence, unsigned type)
-{
- int signaled;
- int ret;
-
- if (fence == NULL)
- return GL_TRUE;
-
- _glthread_LOCK_MUTEX(fence->mutex);
- ret = drmFenceSignaled(bufmgr_ttm->fd, &fence->fence, type, &signaled);
- _glthread_UNLOCK_MUTEX(fence->mutex);
- BM_CKFATAL(ret);
- return signaled;
-}
-#endif
-
-static dri_bo *
-dri_ttm_alloc(dri_bufmgr *bufmgr, const char *name,
- unsigned long size, unsigned int alignment,
- unsigned int location_mask)
-{
- dri_bufmgr_ttm *ttm_bufmgr;
- dri_bo_ttm *ttm_buf;
- unsigned int pageSize = getpagesize();
- int ret;
- unsigned int flags, hint;
-
- ttm_bufmgr = (dri_bufmgr_ttm *)bufmgr;
-
- ttm_buf = malloc(sizeof(*ttm_buf));
- if (!ttm_buf)
- return NULL;
-
- /* The mask argument doesn't do anything for us that we want other than
- * determine which pool (TTM or local) the buffer is allocated into, so just
- * pass all of the allocation class flags.
- */
- flags = location_mask | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
- DRM_BO_FLAG_EXE;
- /* No hints we want to use. */
- hint = 0;
-
- ret = drmBOCreate(ttm_bufmgr->fd, 0, size, alignment / pageSize,
- NULL, drm_bo_type_dc,
- flags, hint, &ttm_buf->drm_bo);
- if (ret != 0) {
- free(ttm_buf);
- return NULL;
- }
- ttm_buf->bo.size = ttm_buf->drm_bo.size;
- ttm_buf->bo.offset = ttm_buf->drm_bo.offset;
- ttm_buf->bo.virtual = NULL;
- ttm_buf->bo.bufmgr = bufmgr;
- ttm_buf->name = name;
- ttm_buf->refcount = 1;
- ttm_buf->owner = GL_TRUE;
-
-#if BUFMGR_DEBUG
- fprintf(stderr, "bo_create: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
-#endif
-
- return &ttm_buf->bo;
-}
-
-/* Our TTM backend doesn't allow creation of static buffers, as that requires
- * privelege for the non-fake case, and the lock in the fake case where we were
- * working around the X Server not creating buffers and passing handles to us.
- */
-static dri_bo *
-dri_ttm_alloc_static(dri_bufmgr *bufmgr, const char *name,
- unsigned long offset, unsigned long size, void *virtual,
- unsigned int location_mask)
-{
- return NULL;
-}
-
-/** Returns a dri_bo wrapping the given buffer object handle.
- *
- * This can be used when one application needs to pass a buffer object
- * to another.
- */
-dri_bo *
-dri_ttm_bo_create_from_handle(dri_bufmgr *bufmgr, const char *name,
- unsigned int handle)
-{
- dri_bufmgr_ttm *ttm_bufmgr;
- dri_bo_ttm *ttm_buf;
- int ret;
-
- ttm_bufmgr = (dri_bufmgr_ttm *)bufmgr;
-
- ttm_buf = malloc(sizeof(*ttm_buf));
- if (!ttm_buf)
- return NULL;
-
- ret = drmBOReference(ttm_bufmgr->fd, handle, &ttm_buf->drm_bo);
- if (ret != 0) {
- free(ttm_buf);
- return NULL;
- }
- ttm_buf->bo.size = ttm_buf->drm_bo.size;
- ttm_buf->bo.offset = ttm_buf->drm_bo.offset;
- ttm_buf->bo.virtual = NULL;
- ttm_buf->bo.bufmgr = bufmgr;
- ttm_buf->name = name;
- ttm_buf->refcount = 1;
- ttm_buf->owner = GL_FALSE;
-
-#if BUFMGR_DEBUG
- fprintf(stderr, "bo_create_from_handle: %p (%s)\n", &ttm_buf->bo,
- ttm_buf->name);
-#endif
-
- return &ttm_buf->bo;
-}
-
-static void
-dri_ttm_bo_reference(dri_bo *buf)
-{
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
- dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
-
- _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
- ttm_buf->refcount++;
- _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
-}
-
-static void
-dri_ttm_bo_unreference(dri_bo *buf)
-{
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
- dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
-
- if (!buf)
- return;
-
- _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
- if (--ttm_buf->refcount == 0) {
- int ret;
-
- /* XXX Having to use drmBODestroy as the opposite of drmBOCreate instead
- * of simply unreferencing is madness, and leads to behaviors we may not
- * want (making the buffer unsharable).
- */
- if (ttm_buf->owner)
- ret = drmBODestroy(bufmgr_ttm->fd, &ttm_buf->drm_bo);
- else
- ret = drmBOUnReference(bufmgr_ttm->fd, &ttm_buf->drm_bo);
- if (ret != 0) {
- fprintf(stderr, "drmBOUnReference failed (%s): %s\n", ttm_buf->name,
- strerror(-ret));
- }
-#if BUFMGR_DEBUG
- fprintf(stderr, "bo_unreference final: %p (%s)\n",
- &ttm_buf->bo, ttm_buf->name);
-#endif
- _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
- free(buf);
- return;
- }
- _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
-}
-
-static int
-dri_ttm_bo_map(dri_bo *buf, GLboolean write_enable)
-{
- dri_bufmgr_ttm *bufmgr_ttm;
- dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
- unsigned int flags;
-
- bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
-
- flags = DRM_BO_FLAG_READ;
- if (write_enable)
- flags |= DRM_BO_FLAG_WRITE;
-
- assert(buf->virtual == NULL);
-
-#if BUFMGR_DEBUG
- fprintf(stderr, "bo_map: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
-#endif
-
- return drmBOMap(bufmgr_ttm->fd, &ttm_buf->drm_bo, flags, 0, &buf->virtual);
-}
-
-static int
-dri_ttm_bo_unmap(dri_bo *buf)
-{
- dri_bufmgr_ttm *bufmgr_ttm;
- dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
-
- if (buf == NULL)
- return 0;
-
- bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
-
- assert(buf->virtual != NULL);
-
- buf->virtual = NULL;
-
-#if BUFMGR_DEBUG
- fprintf(stderr, "bo_unmap: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
-#endif
-
- return drmBOUnmap(bufmgr_ttm->fd, &ttm_buf->drm_bo);
-}
-
-static int
-dri_ttm_validate(dri_bo *buf, unsigned int flags)
-{
- dri_bufmgr_ttm *bufmgr_ttm;
- dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
- unsigned int mask;
- int err;
-
- /* XXX: Sanity-check whether we've already validated this one under
- * different flags. See drmAddValidateItem().
- */
-
- bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
-
- /* Calculate the appropriate mask to pass to the DRM. There appears to be
- * be a direct relationship to flags, so it's unnecessary to have it passed
- * in as an argument.
- */
- mask = DRM_BO_MASK_MEM;
- mask |= flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE);
-
- err = drmBOValidate(bufmgr_ttm->fd, &ttm_buf->drm_bo, 0, flags, mask, 0);
-
- if (err == 0) {
- /* XXX: add to fence list for sanity checking */
- } else {
- fprintf(stderr, "failed to validate buffer (%s): %s\n",
- ttm_buf->name, strerror(-err));
- }
-
- buf->offset = ttm_buf->drm_bo.offset;
-
-#if BUFMGR_DEBUG
- fprintf(stderr, "bo_validate: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
-#endif
-
- return err;
-}
-
-static dri_fence *
-dri_ttm_fence_validated(dri_bufmgr *bufmgr, const char *name,
- GLboolean flushed)
-{
- dri_fence_ttm *fence_ttm = malloc(sizeof(*fence_ttm));
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)bufmgr;
- int ret;
- unsigned int type;
-
- if (!fence_ttm)
- return NULL;
-
- if (flushed)
- type = bufmgr_ttm->fence_type_flush;
- else
- type = bufmgr_ttm->fence_type;
-
- fence_ttm->refcount = 1;
- fence_ttm->name = name;
- fence_ttm->fence.bufmgr = bufmgr;
- ret = drmFenceBuffers(bufmgr_ttm->fd, type, 0, &fence_ttm->drm_fence);
- if (ret) {
- fprintf(stderr, "failed to fence (%s): %s\n", name, strerror(-ret));
- free(fence_ttm);
- return NULL;
- }
-
-#if BUFMGR_DEBUG
- fprintf(stderr, "fence_validated: %p (%s)\n", &fence_ttm->fence,
- fence_ttm->name);
-#endif
-
- return &fence_ttm->fence;
-}
-
-static void
-dri_ttm_fence_reference(dri_fence *fence)
-{
- dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;
-
- _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
- ++fence_ttm->refcount;
- _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
-}
-
-static void
-dri_ttm_fence_unreference(dri_fence *fence)
-{
- dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;
-
- if (!fence)
- return;
-
- _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
- if (--fence_ttm->refcount == 0) {
- int ret;
-
- /* XXX Having to use drmFenceDestroy as the opposite of drmFenceBuffers
- * instead of simply unreferencing is madness, and leads to behaviors we
- * may not want (making the fence unsharable). This behavior by the DRM
- * ioctls should be fixed, and drmFenceDestroy eliminated.
- */
- ret = drmFenceDestroy(bufmgr_ttm->fd, &fence_ttm->drm_fence);
- if (ret != 0) {
- fprintf(stderr, "drmFenceDestroy failed (%s): %s\n",
- fence_ttm->name, strerror(-ret));
- }
-
- _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
- free(fence);
- return;
- }
- _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
-}
-
-static void
-dri_ttm_fence_wait(dri_fence *fence)
-{
- dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;
- int ret;
-
- _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
- ret = drmFenceWait(bufmgr_ttm->fd, 0, &fence_ttm->drm_fence, 0);
- _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
- if (ret != 0) {
- _mesa_printf("%s:%d: Error %d waiting for fence %s.\n",
- __FILE__, __LINE__, ret, fence_ttm->name);
- abort();
- }
-
-#if BUFMGR_DEBUG
- fprintf(stderr, "fence_wait: %p (%s)\n", &fence_ttm->fence,
- fence_ttm->name);
-#endif
-}
-
-static void
-dri_bufmgr_ttm_destroy(dri_bufmgr *bufmgr)
-{
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)bufmgr;
-
- _glthread_DESTROY_MUTEX(bufmgr_ttm->mutex);
- free(bufmgr);
-}
-
-/**
- * Initializes the TTM buffer manager, which uses the kernel to allocate, map,
- * and manage map buffer objections.
- *
- * \param fd File descriptor of the opened DRM device.
- * \param fence_type Driver-specific fence type used for fences with no flush.
- * \param fence_type_flush Driver-specific fence type used for fences with a
- * flush.
- */
-dri_bufmgr *
-dri_bufmgr_ttm_init(int fd, unsigned int fence_type,
- unsigned int fence_type_flush)
-{
- dri_bufmgr_ttm *bufmgr_ttm;
-
- bufmgr_ttm = malloc(sizeof(*bufmgr_ttm));
- bufmgr_ttm->fd = fd;
- bufmgr_ttm->fence_type = fence_type;
- bufmgr_ttm->fence_type_flush = fence_type_flush;
- _glthread_INIT_MUTEX(bufmgr_ttm->mutex);
-
- bufmgr_ttm->bufmgr.bo_alloc = dri_ttm_alloc;
- bufmgr_ttm->bufmgr.bo_alloc_static = dri_ttm_alloc_static;
- bufmgr_ttm->bufmgr.bo_reference = dri_ttm_bo_reference;
- bufmgr_ttm->bufmgr.bo_unreference = dri_ttm_bo_unreference;
- bufmgr_ttm->bufmgr.bo_map = dri_ttm_bo_map;
- bufmgr_ttm->bufmgr.bo_unmap = dri_ttm_bo_unmap;
- bufmgr_ttm->bufmgr.bo_validate = dri_ttm_validate;
- bufmgr_ttm->bufmgr.fence_validated = dri_ttm_fence_validated;
- bufmgr_ttm->bufmgr.fence_reference = dri_ttm_fence_reference;
- bufmgr_ttm->bufmgr.fence_unreference = dri_ttm_fence_unreference;
- bufmgr_ttm->bufmgr.fence_wait = dri_ttm_fence_wait;
- bufmgr_ttm->bufmgr.destroy = dri_bufmgr_ttm_destroy;
-
- return &bufmgr_ttm->bufmgr;
-}
diff --git a/src/mesa/drivers/dri/i915/Makefile b/src/mesa/drivers/dri/i915/Makefile
index 38e40902111..b9328a48f3d 100644
--- a/src/mesa/drivers/dri/i915/Makefile
+++ b/src/mesa/drivers/dri/i915/Makefile
@@ -52,7 +52,8 @@ DRIVER_SOURCES = \
intel_state.c \
intel_tris.c \
intel_fbo.c \
- intel_depthstencil.c
+ intel_depthstencil.c \
+ intel_bufmgr_ttm.c
C_SOURCES = \
$(COMMON_SOURCES) \
diff --git a/src/mesa/drivers/dri/i915/intel_batchbuffer.c b/src/mesa/drivers/dri/i915/intel_batchbuffer.c
index 639457d44ab..74c75a3769b 100644
--- a/src/mesa/drivers/dri/i915/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/i915/intel_batchbuffer.c
@@ -116,85 +116,23 @@ intel_batchbuffer_free(struct intel_batchbuffer *batch)
free(batch);
}
-static int
-relocation_sort(const void *a_in, const void *b_in) {
- const struct buffer_reloc *a = a_in, *b = b_in;
-
- return (intptr_t)a->buf < (intptr_t)b->buf ? -1 : 1;
-}
/* TODO: Push this whole function into bufmgr.
*/
static void
do_flush_locked(struct intel_batchbuffer *batch,
- GLuint used,
- GLboolean ignore_cliprects, GLboolean allow_unlock)
+ GLuint used,
+ GLboolean ignore_cliprects, GLboolean allow_unlock)
{
- GLuint *ptr;
- GLuint i;
struct intel_context *intel = batch->intel;
- dri_fence *fo;
- GLboolean performed_rendering = GL_FALSE;
-
- assert(batch->buf->virtual != NULL);
- ptr = batch->buf->virtual;
-
- /* Sort our relocation list in terms of referenced buffer pointer.
- * This lets us uniquely validate the buffers with the sum of all the flags,
- * while avoiding O(n^2) on number of relocations.
- */
- qsort(batch->reloc, batch->nr_relocs, sizeof(batch->reloc[0]),
- relocation_sort);
-
- /* Perform the necessary validations of buffers, and enter the relocations
- * in the batchbuffer.
- */
- for (i = 0; i < batch->nr_relocs; i++) {
- struct buffer_reloc *r = &batch->reloc[i];
-
- if (r->validate_flags & DRM_BO_FLAG_WRITE)
- performed_rendering = GL_TRUE;
+ void *start;
+ GLuint count;
- /* If this is the first time we've seen this buffer in the relocation
- * list, figure out our flags and validate it.
- */
- if (i == 0 || batch->reloc[i - 1].buf != r->buf) {
- uint32_t validate_flags;
- int j, ret;
+ start = dri_process_relocs(batch->buf, &count);
- /* Accumulate the flags we need for validating this buffer. */
- validate_flags = r->validate_flags;
- for (j = i + 1; j < batch->nr_relocs; j++) {
- if (batch->reloc[j].buf != r->buf)
- break;
- validate_flags |= batch->reloc[j].validate_flags;
- }
-
- /* Validate. If we fail, fence to clear the unfenced list and bail
- * out.
- */
- ret = dri_bo_validate(r->buf, validate_flags);
- if (ret != 0) {
- dri_bo_unmap(batch->buf);
- fo = dri_fence_validated(intel->intelScreen->bufmgr,
- "batchbuffer failure fence", GL_TRUE);
- dri_fence_unreference(fo);
- goto done;
- }
- }
- ptr[r->offset / 4] = r->buf->offset + r->delta;
- dri_bo_unreference(r->buf);
- }
-
- dri_bo_unmap(batch->buf);
batch->map = NULL;
batch->ptr = NULL;
-
- dri_bo_validate(batch->buf, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE);
-
- batch->list_count = 0;
- batch->nr_relocs = 0;
batch->flags = 0;
/* Throw away non-effective packets. Won't work once we have
@@ -203,26 +141,18 @@ do_flush_locked(struct intel_batchbuffer *batch,
*/
if (!(intel->numClipRects == 0 && !ignore_cliprects)) {
- intel_batch_ioctl(batch->intel,
- batch->buf->offset,
- used, ignore_cliprects, allow_unlock);
- }
-
- /* Associate a fence with the validated buffers, and note that we included
- * a flush at the end.
- */
- fo = dri_fence_validated(intel->intelScreen->bufmgr,
- "Batch fence", GL_TRUE);
-
- if (performed_rendering) {
- dri_fence_unreference(batch->last_fence);
- batch->last_fence = fo;
- } else {
- /* If we didn't validate any buffers for writing by the card, we don't
- * need to track the fence for glFinish().
- */
- dri_fence_unreference(fo);
+ if (intel->intelScreen->ttm == GL_TRUE) {
+ intel_exec_ioctl(batch->intel,
+ used, ignore_cliprects, allow_unlock,
+ start, count, &batch->last_fence);
+ } else {
+ intel_batch_ioctl(batch->intel,
+ batch->buf->offset,
+ used, ignore_cliprects, allow_unlock);
+ }
}
+
+ dri_post_submit(batch->buf, &batch->last_fence);
if (intel->numClipRects == 0 && !ignore_cliprects) {
if (allow_unlock) {
@@ -237,16 +167,14 @@ do_flush_locked(struct intel_batchbuffer *batch,
intel->vtbl.lost_hardware(intel);
}
-done:
if (INTEL_DEBUG & DEBUG_BATCH) {
- dri_bo_map(batch->buf, GL_FALSE);
- intel_decode(ptr, used / 4, batch->buf->offset,
- intel->intelScreen->deviceID);
- dri_bo_unmap(batch->buf);
+ // dri_bo_map(batch->buf, GL_FALSE);
+ // intel_decode(ptr, used / 4, batch->buf->offset,
+ // intel->intelScreen->deviceID);
+ // dri_bo_unmap(batch->buf);
}
}
-
void
intel_batchbuffer_flush(struct intel_batchbuffer *batch)
{
@@ -280,7 +208,7 @@ intel_batchbuffer_flush(struct intel_batchbuffer *batch)
do_flush_locked(batch, used, !(batch->flags & INTEL_BATCH_CLIPRECTS),
GL_FALSE);
-
+
if (!was_locked)
UNLOCK_HARDWARE(intel);
@@ -305,22 +233,12 @@ intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
dri_bo *buffer,
GLuint flags, GLuint delta)
{
- struct buffer_reloc *r = &batch->reloc[batch->nr_relocs++];
-
- assert(batch->nr_relocs <= MAX_RELOCS);
-
- dri_bo_reference(buffer);
- r->buf = buffer;
- r->offset = batch->ptr - batch->map;
- r->delta = delta;
- r->validate_flags = flags;
-
+ dri_emit_reloc(batch->buf, flags, delta, batch->ptr - batch->map, buffer);
batch->ptr += 4;
+
return GL_TRUE;
}
-
-
void
intel_batchbuffer_data(struct intel_batchbuffer *batch,
const void *data, GLuint bytes, GLuint flags)
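
With the relocation bookkeeping moved into the buffer manager, intel_batchbuffer_emit_reloc() is now a thin wrapper around dri_emit_reloc(), and do_flush_locked() only has to choose between intel_exec_ioctl() (TTM path) and intel_batch_ioctl() (fake path). A hedged sketch of emitting a buffer address from driver code; intel_batchbuffer_emit_dword() is assumed from the unchanged part of intel_batchbuffer.h, and the opcode and flag values are purely illustrative:

#include "intel_batchbuffer.h"

static void
emit_dst_pointer_sketch(struct intel_batchbuffer *batch, dri_bo *dst)
{
   /* Ordinary command dwords still go straight into the batch map. */
   intel_batchbuffer_emit_dword(batch, 0x02000001 /* hypothetical opcode */);

   /* A buffer address becomes a relocation: the bufmgr records
    * (flags, delta, offset-in-batch, target) and the real offset is
    * filled in at dri_process_relocs()/execbuffer time.
    */
   intel_batchbuffer_emit_reloc(batch, dst,
                                DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
                                0 /* delta */);
}
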
diff --git a/src/mesa/drivers/dri/i915/intel_batchbuffer.h b/src/mesa/drivers/dri/i915/intel_batchbuffer.h
index 850a91e1c91..b5c7a783a72 100644
--- a/src/mesa/drivers/dri/i915/intel_batchbuffer.h
+++ b/src/mesa/drivers/dri/i915/intel_batchbuffer.h
@@ -2,6 +2,7 @@
#define INTEL_BATCHBUFFER_H
#include "mtypes.h"
+
#include "dri_bufmgr.h"
struct intel_context;
@@ -9,19 +10,9 @@ struct intel_context;
#define BATCH_SZ 16384
#define BATCH_RESERVED 16
-#define MAX_RELOCS 4096
-
#define INTEL_BATCH_NO_CLIPRECTS 0x1
#define INTEL_BATCH_CLIPRECTS 0x2
-struct buffer_reloc
-{
- dri_bo *buf;
- GLuint offset;
- GLuint delta; /* not needed? */
- GLuint validate_flags;
-};
-
struct intel_batchbuffer
{
struct intel_context *intel;
@@ -30,13 +21,9 @@ struct intel_batchbuffer
dri_fence *last_fence;
GLuint flags;
- drmBOList list;
- GLuint list_count;
GLubyte *map;
GLubyte *ptr;
- struct buffer_reloc reloc[MAX_RELOCS];
- GLuint nr_relocs;
GLuint size;
};
diff --git a/src/mesa/drivers/dri/i915/intel_bufmgr_ttm.c b/src/mesa/drivers/dri/i915/intel_bufmgr_ttm.c
new file mode 100644
index 00000000000..9ed6e3696f4
--- /dev/null
+++ b/src/mesa/drivers/dri/i915/intel_bufmgr_ttm.c
@@ -0,0 +1,853 @@
+/**************************************************************************
+ *
+ * Copyright © 2007 Red Hat Inc.
+ * Copyright © 2007 Intel Corporation
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
+ * Eric Anholt <[email protected]>
+ * Dave Airlie <[email protected]>
+ */
+
+#include <xf86drm.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include "glthread.h"
+#include "errno.h"
+#include "mtypes.h"
+#include "dri_bufmgr.h"
+#include "string.h"
+#include "imports.h"
+
+#include "i915_drm.h"
+
+#include "intel_bufmgr_ttm.h"
+
+#define BUFMGR_DEBUG 0
+
+struct intel_reloc_info
+{
+ GLuint type;
+ GLuint reloc;
+ GLuint delta; /* not needed? */
+ GLuint index;
+ drm_handle_t handle;
+};
+
+struct intel_bo_node
+{
+ drmMMListHead head;
+ drmBO *buf;
+ struct drm_i915_op_arg bo_arg;
+ unsigned long arg0;
+ unsigned long arg1;
+ void (*destroy)(void *);
+ void *priv;
+};
+
+struct intel_bo_reloc_list
+{
+ drmMMListHead head;
+ drmBO buf;
+ uint32_t *relocs;
+};
+
+struct intel_bo_reloc_node
+{
+ drmMMListHead head;
+ drm_handle_t handle;
+ uint32_t nr_reloc_types;
+ struct intel_bo_reloc_list type_list;
+};
+
+struct intel_bo_list {
+ unsigned numCurrent;
+ drmMMListHead list;
+ void (*destroy)(void *node);
+};
+
+typedef struct _dri_bufmgr_ttm {
+ dri_bufmgr bufmgr;
+
+ int fd;
+ _glthread_Mutex mutex;
+ unsigned int fence_type;
+ unsigned int fence_type_flush;
+
+ uint32_t max_relocs;
+ /** ttm relocation list */
+ struct intel_bo_list list;
+ struct intel_bo_list reloc_list;
+
+} dri_bufmgr_ttm;
+
+typedef struct _dri_bo_ttm {
+ dri_bo bo;
+
+ int refcount; /* Protected by bufmgr->mutex */
+ drmBO drm_bo;
+ const char *name;
+ /**
+ * Note whether we are the owner of the buffer, to determine if we must
+ * drmBODestroy or drmBOUnreference to unreference the buffer.
+ */
+ GLboolean owner;
+} dri_bo_ttm;
+
+typedef struct _dri_fence_ttm
+{
+ dri_fence fence;
+
+ int refcount; /* Protected by bufmgr->mutex */
+ const char *name;
+ drmFence drm_fence;
+} dri_fence_ttm;
+
+
+static void intel_bo_free_list(struct intel_bo_list *list)
+{
+ struct intel_bo_node *node;
+ drmMMListHead *l;
+
+ l = list->list.next;
+ while(l != &list->list) {
+ DRMLISTDEL(l);
+ node = DRMLISTENTRY(struct intel_bo_node, l, head);
+ list->destroy(node);
+ l = list->list.next;
+ list->numCurrent--;
+ }
+}
+
+static void generic_destroy(void *nodep)
+{
+ free(nodep);
+}
+
+static int intel_create_bo_list(int numTarget, struct intel_bo_list *list, void (*destroy)(void *))
+{
+ DRMINITLISTHEAD(&list->list);
+ list->numCurrent = 0;
+ if (destroy)
+ list->destroy = destroy;
+ else
+ list->destroy = generic_destroy;
+ return 0;
+}
+
+
+static struct drm_i915_op_arg *
+intel_setup_validate_list(int fd, struct intel_bo_list *list, struct intel_bo_list *reloc_list, GLuint *count_p)
+{
+ struct intel_bo_node *node;
+ struct intel_bo_reloc_node *rl_node;
+ drmMMListHead *l, *rl;
+ struct drm_i915_op_arg *arg, *first;
+ struct drm_bo_op_req *req;
+ uint64_t *prevNext = NULL;
+ GLuint count = 0;
+
+ first = NULL;
+
+ for (l = list->list.next; l != &list->list; l = l->next) {
+ node = DRMLISTENTRY(struct intel_bo_node, l, head);
+
+ arg = &node->bo_arg;
+ req = &arg->d.req;
+
+ if (!first)
+ first = arg;
+
+ if (prevNext)
+ *prevNext = (unsigned long) arg;
+
+ memset(arg, 0, sizeof(*arg));
+ prevNext = &arg->next;
+ req->bo_req.handle = node->buf->handle;
+ req->op = drm_bo_validate;
+ req->bo_req.flags = node->arg0;
+ req->bo_req.hint = 0;
+ req->bo_req.mask = node->arg1;
+ req->bo_req.fence_class = 0; /* Backwards compat. */
+ arg->reloc_handle = 0;
+
+ for (rl = reloc_list->list.next; rl != &reloc_list->list; rl = rl->next) {
+ rl_node = DRMLISTENTRY(struct intel_bo_reloc_node, rl, head);
+
+ if (rl_node->handle == node->buf->handle) {
+ arg->reloc_handle = rl_node->type_list.buf.handle;
+ }
+ }
+ count++;
+ }
+
+ if (!first)
+ return 0;
+
+ *count_p = count;
+ return first;
+}
+
+static void intel_free_validate_list(int fd, struct intel_bo_list *list)
+{
+ struct intel_bo_node *node;
+ drmMMListHead *l;
+
+ for (l = list->list.next; l != &list->list; l = l->next) {
+ node = DRMLISTENTRY(struct intel_bo_node, l, head);
+
+ if (node->destroy)
+ (*node->destroy)(node->priv);
+
+ }
+}
+
+static void intel_free_reloc_list(int fd, struct intel_bo_list *reloc_list)
+{
+ struct intel_bo_reloc_node *reloc_node;
+ drmMMListHead *rl, *tmp;
+
+ for (rl = reloc_list->list.next, tmp = rl->next; rl != &reloc_list->list; rl = tmp, tmp = rl->next) {
+ reloc_node = DRMLISTENTRY(struct intel_bo_reloc_node, rl, head);
+
+ DRMLISTDEL(rl);
+
+ if (reloc_node->nr_reloc_types > 1) {
+
+ /* TODO */
+ }
+
+ drmBOUnmap(fd, &reloc_node->type_list.buf);
+ drmBODestroy(fd, &reloc_node->type_list.buf);
+ free(reloc_node);
+ }
+}
+
+static int intel_add_validate_buffer(struct intel_bo_list *list, dri_bo *buf, unsigned flags,
+ unsigned mask, int *itemLoc, void (*destroy_cb)(void *))
+{
+ struct intel_bo_node *node, *cur;
+ drmMMListHead *l;
+ int count = 0;
+ int ret = 0;
+ drmBO *buf_bo = &((dri_bo_ttm *)buf)->drm_bo;
+ cur = NULL;
+
+ for (l = list->list.next; l != &list->list; l = l->next) {
+ node = DRMLISTENTRY(struct intel_bo_node, l, head);
+ if (node->buf->handle == buf_bo->handle) {
+ cur = node;
+ break;
+ }
+ count++;
+ }
+
+ if (!cur) {
+ cur = drmMalloc(sizeof(*cur));
+ if (!cur) {
+ return -ENOMEM;
+ }
+ cur->buf = buf_bo;
+ cur->priv = buf;
+ cur->arg0 = flags;
+ cur->arg1 = mask;
+ cur->destroy = destroy_cb;
+ ret = 1;
+
+ DRMLISTADDTAIL(&cur->head, &list->list);
+
+ } else {
+ unsigned memMask = (cur->arg1 | mask) & DRM_BO_MASK_MEM;
+ unsigned memFlags = cur->arg0 & flags & memMask;
+
+ if (!memFlags) {
+ return -EINVAL;
+ }
+ if (mask & cur->arg1 & ~DRM_BO_MASK_MEM & (cur->arg0 ^ flags)) {
+ return -EINVAL;
+ }
+ cur->arg1 |= mask;
+ cur->arg0 = memFlags | ((cur->arg0 | flags) &
+ cur->arg1 & ~DRM_BO_MASK_MEM);
+ }
+ *itemLoc = count;
+ return ret;
+}
+
+
+#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * sizeof(uint32_t))
+
+static int intel_create_new_reloc_type_list(int fd, struct intel_bo_reloc_list *cur_type, int max_relocs)
+{
+ int ret;
+
+ /* should allocate a drmBO here */
+ ret = drmBOCreate(fd, 0, RELOC_BUF_SIZE(max_relocs), 0,
+ NULL, drm_bo_type_dc,
+ DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_CACHED,
+ 0, &cur_type->buf);
+ if (ret)
+ return ret;
+
+ ret = drmBOMap(fd, &cur_type->buf, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0, (void **)&cur_type->relocs);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+
+static int intel_add_validate_reloc(int fd, struct intel_bo_list *reloc_list, struct intel_reloc_info *reloc_info, uint32_t max_relocs)
+{
+ struct intel_bo_reloc_node *rl_node, *cur;
+ drmMMListHead *rl, *l;
+ int ret = 0;
+ uint32_t *reloc_start;
+ int num_relocs;
+ struct intel_bo_reloc_list *cur_type;
+
+ cur = NULL;
+
+ for (rl = reloc_list->list.next; rl != &reloc_list->list; rl = rl->next) {
+ rl_node = DRMLISTENTRY(struct intel_bo_reloc_node, rl, head);
+ if (rl_node->handle == reloc_info->handle) {
+ cur = rl_node;
+ break;
+ }
+ }
+
+ if (!cur) {
+
+ cur = malloc(sizeof(*cur));
+ if (!cur)
+ return -ENOMEM;
+
+ cur->nr_reloc_types = 1;
+ cur->handle = reloc_info->handle;
+ cur_type = &cur->type_list;
+
+ DRMINITLISTHEAD(&cur->type_list.head);
+ ret = intel_create_new_reloc_type_list(fd, cur_type, max_relocs);
+ if (ret) {
+ return -1;
+ }
+ DRMLISTADDTAIL(&cur->head, &reloc_list->list);
+
+ cur_type->relocs[0] = 0 | (reloc_info->type << 16);
+ cur_type->relocs[1] = 0; // next reloc buffer handle is 0
+
+ } else {
+ int found = 0;
+ if ((cur->type_list.relocs[0] >> 16) == reloc_info->type) {
+ cur_type = &cur->type_list;
+ found = 1;
+ } else {
+ for (l = cur->type_list.head.next; l != &cur->type_list.head; l = l->next) {
+ cur_type = DRMLISTENTRY(struct intel_bo_reloc_list, l, head);
+ if (((cur_type->relocs[0] >> 16) & 0xffff) == reloc_info->type)
+ found = 1;
+ break;
+ }
+ }
+
+ /* didn't find the relocation type */
+ if (!found) {
+ cur_type = malloc(sizeof(*cur_type));
+ if (!cur_type) {
+ return -ENOMEM;
+ }
+
+ ret = intel_create_new_reloc_type_list(fd, cur_type, max_relocs);
+ DRMLISTADDTAIL(&cur_type->head, &cur->type_list.head);
+
+ cur_type->relocs[0] = (reloc_info->type << 16);
+ cur_type->relocs[1] = 0;
+
+ cur->nr_reloc_types++;
+ }
+ }
+
+ reloc_start = cur_type->relocs;
+
+ num_relocs = (reloc_start[0] & 0xffff);
+
+ reloc_start[num_relocs*I915_RELOC0_STRIDE + I915_RELOC_HEADER] = reloc_info->reloc;
+ reloc_start[num_relocs*I915_RELOC0_STRIDE + I915_RELOC_HEADER+1] = reloc_info->delta;
+ reloc_start[num_relocs*I915_RELOC0_STRIDE + I915_RELOC_HEADER+2] = reloc_info->index;
+ reloc_start[0]++;
+ if (((reloc_start[0] & 0xffff)) > (max_relocs)) {
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+
+#if 0
+int
+driFenceSignaled(DriFenceObject * fence, unsigned type)
+{
+ int signaled;
+ int ret;
+
+ if (fence == NULL)
+ return GL_TRUE;
+
+ _glthread_LOCK_MUTEX(fence->mutex);
+ ret = drmFenceSignaled(bufmgr_ttm->fd, &fence->fence, type, &signaled);
+ _glthread_UNLOCK_MUTEX(fence->mutex);
+ BM_CKFATAL(ret);
+ return signaled;
+}
+#endif
+
+static dri_bo *
+dri_ttm_alloc(dri_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment,
+ unsigned int location_mask)
+{
+ dri_bufmgr_ttm *ttm_bufmgr;
+ dri_bo_ttm *ttm_buf;
+ unsigned int pageSize = getpagesize();
+ int ret;
+ unsigned int flags, hint;
+
+ ttm_bufmgr = (dri_bufmgr_ttm *)bufmgr;
+
+ ttm_buf = malloc(sizeof(*ttm_buf));
+ if (!ttm_buf)
+ return NULL;
+
+ /* The mask argument doesn't do anything for us that we want other than
+ * determine which pool (TTM or local) the buffer is allocated into, so just
+ * pass all of the allocation class flags.
+ */
+ flags = location_mask | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
+ DRM_BO_FLAG_EXE;
+ /* No hints we want to use. */
+ hint = 0;
+
+ ret = drmBOCreate(ttm_bufmgr->fd, 0, size, alignment / pageSize,
+ NULL, drm_bo_type_dc,
+ flags, hint, &ttm_buf->drm_bo);
+ if (ret != 0) {
+ free(ttm_buf);
+ return NULL;
+ }
+ ttm_buf->bo.size = ttm_buf->drm_bo.size;
+ ttm_buf->bo.offset = ttm_buf->drm_bo.offset;
+ ttm_buf->bo.virtual = NULL;
+ ttm_buf->bo.bufmgr = bufmgr;
+ ttm_buf->name = name;
+ ttm_buf->refcount = 1;
+ ttm_buf->owner = GL_TRUE;
+
+#if BUFMGR_DEBUG
+ fprintf(stderr, "bo_create: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
+#endif
+
+ return &ttm_buf->bo;
+}
+
+/* Our TTM backend doesn't allow creation of static buffers, as that requires
+ * privelege for the non-fake case, and the lock in the fake case where we were
+ * working around the X Server not creating buffers and passing handles to us.
+ */
+static dri_bo *
+dri_ttm_alloc_static(dri_bufmgr *bufmgr, const char *name,
+ unsigned long offset, unsigned long size, void *virtual,
+ unsigned int location_mask)
+{
+ return NULL;
+}
+
+/** Returns a dri_bo wrapping the given buffer object handle.
+ *
+ * This can be used when one application needs to pass a buffer object
+ * to another.
+ */
+dri_bo *
+intel_ttm_bo_create_from_handle(dri_bufmgr *bufmgr, const char *name,
+ unsigned int handle)
+{
+ dri_bufmgr_ttm *ttm_bufmgr;
+ dri_bo_ttm *ttm_buf;
+ int ret;
+
+ ttm_bufmgr = (dri_bufmgr_ttm *)bufmgr;
+
+ ttm_buf = malloc(sizeof(*ttm_buf));
+ if (!ttm_buf)
+ return NULL;
+
+ ret = drmBOReference(ttm_bufmgr->fd, handle, &ttm_buf->drm_bo);
+ if (ret != 0) {
+ free(ttm_buf);
+ return NULL;
+ }
+ ttm_buf->bo.size = ttm_buf->drm_bo.size;
+ ttm_buf->bo.offset = ttm_buf->drm_bo.offset;
+ ttm_buf->bo.virtual = NULL;
+ ttm_buf->bo.bufmgr = bufmgr;
+ ttm_buf->name = name;
+ ttm_buf->refcount = 1;
+ ttm_buf->owner = GL_FALSE;
+
+#if BUFMGR_DEBUG
+ fprintf(stderr, "bo_create_from_handle: %p %08x (%s)\n", &ttm_buf->bo, handle,
+ ttm_buf->name);
+#endif
+
+ return &ttm_buf->bo;
+}
+
+static void
+dri_ttm_bo_reference(dri_bo *buf)
+{
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
+ dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
+
+ _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
+ ttm_buf->refcount++;
+ _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
+}
+
+static void
+dri_ttm_bo_unreference(dri_bo *buf)
+{
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
+ dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
+
+ if (!buf)
+ return;
+
+ _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
+ if (--ttm_buf->refcount == 0) {
+ int ret;
+
+ /* XXX Having to use drmBODestroy as the opposite of drmBOCreate instead
+ * of simply unreferencing is madness, and leads to behaviors we may not
+ * want (making the buffer unsharable).
+ */
+ if (ttm_buf->owner)
+ ret = drmBODestroy(bufmgr_ttm->fd, &ttm_buf->drm_bo);
+ else
+ ret = drmBOUnReference(bufmgr_ttm->fd, &ttm_buf->drm_bo);
+ if (ret != 0) {
+ fprintf(stderr, "drmBOUnReference failed (%s): %s\n", ttm_buf->name,
+ strerror(-ret));
+ }
+#if BUFMGR_DEBUG
+ fprintf(stderr, "bo_unreference final: %p (%s)\n",
+ &ttm_buf->bo, ttm_buf->name);
+#endif
+ _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
+ free(buf);
+ return;
+ }
+ _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
+}
+
+static int
+dri_ttm_bo_map(dri_bo *buf, GLboolean write_enable)
+{
+ dri_bufmgr_ttm *bufmgr_ttm;
+ dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
+ unsigned int flags;
+
+ bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
+
+ flags = DRM_BO_FLAG_READ;
+ if (write_enable)
+ flags |= DRM_BO_FLAG_WRITE;
+
+ assert(buf->virtual == NULL);
+
+#if BUFMGR_DEBUG
+ fprintf(stderr, "bo_map: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
+#endif
+
+ return drmBOMap(bufmgr_ttm->fd, &ttm_buf->drm_bo, flags, 0, &buf->virtual);
+}
+
+static int
+dri_ttm_bo_unmap(dri_bo *buf)
+{
+ dri_bufmgr_ttm *bufmgr_ttm;
+ dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
+
+ if (buf == NULL)
+ return 0;
+
+ bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
+
+ assert(buf->virtual != NULL);
+
+ buf->virtual = NULL;
+
+#if BUFMGR_DEBUG
+ fprintf(stderr, "bo_unmap: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
+#endif
+
+ return drmBOUnmap(bufmgr_ttm->fd, &ttm_buf->drm_bo);
+}
+
+/* Returns a dri_bo wrapping the given buffer object handle.
+ *
+ * This can be used when one application needs to pass a buffer object
+ * to another.
+ */
+dri_fence *
+intel_ttm_fence_create_from_arg(dri_bufmgr *bufmgr, const char *name,
+ drm_fence_arg_t *arg)
+{
+ dri_bufmgr_ttm *ttm_bufmgr;
+ dri_fence_ttm *ttm_fence;
+
+ ttm_bufmgr = (dri_bufmgr_ttm *)bufmgr;
+
+ ttm_fence = malloc(sizeof(*ttm_fence));
+ if (!ttm_fence)
+ return NULL;
+
+ ttm_fence->drm_fence.handle = arg->handle;
+ ttm_fence->drm_fence.fence_class = arg->fence_class;
+ ttm_fence->drm_fence.type = arg->type;
+ ttm_fence->drm_fence.flags = arg->flags;
+ ttm_fence->drm_fence.signaled = 0;
+ ttm_fence->drm_fence.sequence = arg->sequence;
+
+ ttm_fence->fence.bufmgr = bufmgr;
+ ttm_fence->name = name;
+ ttm_fence->refcount = 1;
+
+#if BUFMGR_DEBUG
+ fprintf(stderr, "fence_create_from_handle: %p (%s)\n", &ttm_fence->fence,
+ ttm_fence->name);
+#endif
+
+ return &ttm_fence->fence;
+}
+
+
+static void
+dri_ttm_fence_reference(dri_fence *fence)
+{
+ dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;
+
+ _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
+ ++fence_ttm->refcount;
+ _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
+#if BUFMGR_DEBUG
+ fprintf(stderr, "fence_reference: %p (%s)\n", &fence_ttm->fence,
+ fence_ttm->name);
+#endif
+}
+
+static void
+dri_ttm_fence_unreference(dri_fence *fence)
+{
+ dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;
+
+ if (!fence)
+ return;
+
+#if BUFMGR_DEBUG
+ fprintf(stderr, "fence_unreference: %p (%s)\n", &fence_ttm->fence,
+ fence_ttm->name);
+#endif
+ _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
+ if (--fence_ttm->refcount == 0) {
+ int ret;
+
+ /* XXX Having to use drmFenceDestroy as the opposite of drmFenceBuffers
+ * instead of simply unreferencing is madness, and leads to behaviors we
+ * may not want (making the fence unsharable). This behavior by the DRM
+ * ioctls should be fixed, and drmFenceDestroy eliminated.
+ */
+ ret = drmFenceDestroy(bufmgr_ttm->fd, &fence_ttm->drm_fence);
+ if (ret != 0) {
+ fprintf(stderr, "drmFenceDestroy failed (%s): %s\n",
+ fence_ttm->name, strerror(-ret));
+ }
+
+ _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
+ free(fence);
+ return;
+ }
+ _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
+}
+
+static void
+dri_ttm_fence_wait(dri_fence *fence)
+{
+ dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;
+ int ret;
+
+ _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
+ ret = drmFenceWait(bufmgr_ttm->fd, 0, &fence_ttm->drm_fence, 0);
+ _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
+ if (ret != 0) {
+ _mesa_printf("%s:%d: Error %d waiting for fence %s.\n",
+ __FILE__, __LINE__, ret, fence_ttm->name);
+ abort();
+ }
+
+#if BUFMGR_DEBUG
+ fprintf(stderr, "fence_wait: %p (%s)\n", &fence_ttm->fence,
+ fence_ttm->name);
+#endif
+}
+
+static void
+dri_bufmgr_ttm_destroy(dri_bufmgr *bufmgr)
+{
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)bufmgr;
+
+ intel_bo_free_list(&bufmgr_ttm->list);
+ intel_bo_free_list(&bufmgr_ttm->reloc_list);
+
+ _glthread_DESTROY_MUTEX(bufmgr_ttm->mutex);
+ free(bufmgr);
+}
+
+
+static void intel_dribo_destroy_callback(void *priv)
+{
+ dri_bo *dribo = priv;
+
+ if (dribo) {
+ dri_bo_unreference(dribo);
+ }
+}
+
+static void
+dri_ttm_emit_reloc(dri_bo *batch_buf, GLuint flags, GLuint delta, GLuint offset,
+ dri_bo *relocatee)
+{
+ dri_bo_ttm *ttm_buf = (dri_bo_ttm *)batch_buf;
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)batch_buf->bufmgr;
+ int newItem;
+ struct intel_reloc_info reloc;
+ int mask;
+ int ret;
+
+ mask = DRM_BO_MASK_MEM;
+ mask |= flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE);
+
+ ret = intel_add_validate_buffer(&bufmgr_ttm->list, relocatee, flags, mask, &newItem, intel_dribo_destroy_callback);
+ if (ret < 0)
+ return;
+
+ if (ret == 1) {
+ dri_bo_reference(relocatee);
+ }
+
+ reloc.type = I915_RELOC_TYPE_0;
+ reloc.reloc = offset;
+ reloc.delta = delta;
+ reloc.index = newItem;
+ reloc.handle = ttm_buf->drm_bo.handle;
+
+ intel_add_validate_reloc(bufmgr_ttm->fd, &bufmgr_ttm->reloc_list, &reloc, bufmgr_ttm->max_relocs);
+ return;
+}
+
+
+static void *
+dri_ttm_process_reloc(dri_bo *batch_buf, GLuint *count)
+{
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)batch_buf->bufmgr;
+ void *ptr;
+ int itemLoc;
+
+ dri_bo_unmap(batch_buf);
+
+ intel_add_validate_buffer(&bufmgr_ttm->list, batch_buf, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE,
+ DRM_BO_MASK_MEM | DRM_BO_FLAG_EXE, &itemLoc, NULL);
+
+ ptr = intel_setup_validate_list(bufmgr_ttm->fd, &bufmgr_ttm->list, &bufmgr_ttm->reloc_list, count);
+
+ return ptr;
+}
+
+static void
+dri_ttm_post_submit(dri_bo *batch_buf, dri_fence **last_fence)
+{
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)batch_buf->bufmgr;
+
+ intel_free_validate_list(bufmgr_ttm->fd, &bufmgr_ttm->list);
+ intel_free_reloc_list(bufmgr_ttm->fd, &bufmgr_ttm->reloc_list);
+
+ intel_bo_free_list(&bufmgr_ttm->list);
+}
+
+/**
+ * Initializes the TTM buffer manager, which uses the kernel to allocate, map,
+ * and manage map buffer objections.
+ *
+ * \param fd File descriptor of the opened DRM device.
+ * \param fence_type Driver-specific fence type used for fences with no flush.
+ * \param fence_type_flush Driver-specific fence type used for fences with a
+ * flush.
+ */
+dri_bufmgr *
+intel_bufmgr_ttm_init(int fd, unsigned int fence_type,
+ unsigned int fence_type_flush, int batch_size)
+{
+ dri_bufmgr_ttm *bufmgr_ttm;
+
+ bufmgr_ttm = malloc(sizeof(*bufmgr_ttm));
+ bufmgr_ttm->fd = fd;
+ bufmgr_ttm->fence_type = fence_type;
+ bufmgr_ttm->fence_type_flush = fence_type_flush;
+ _glthread_INIT_MUTEX(bufmgr_ttm->mutex);
+
+ /* lets go with one relocation per every four dwords - purely heuristic */
+ bufmgr_ttm->max_relocs = batch_size / sizeof(uint32_t) / 4;
+
+ intel_create_bo_list(10, &bufmgr_ttm->list, NULL);
+ intel_create_bo_list(1, &bufmgr_ttm->reloc_list, NULL);
+
+ bufmgr_ttm->bufmgr.bo_alloc = dri_ttm_alloc;
+ bufmgr_ttm->bufmgr.bo_alloc_static = dri_ttm_alloc_static;
+ bufmgr_ttm->bufmgr.bo_reference = dri_ttm_bo_reference;
+ bufmgr_ttm->bufmgr.bo_unreference = dri_ttm_bo_unreference;
+ bufmgr_ttm->bufmgr.bo_map = dri_ttm_bo_map;
+ bufmgr_ttm->bufmgr.bo_unmap = dri_ttm_bo_unmap;
+ bufmgr_ttm->bufmgr.fence_reference = dri_ttm_fence_reference;
+ bufmgr_ttm->bufmgr.fence_unreference = dri_ttm_fence_unreference;
+ bufmgr_ttm->bufmgr.fence_wait = dri_ttm_fence_wait;
+ bufmgr_ttm->bufmgr.destroy = dri_bufmgr_ttm_destroy;
+ bufmgr_ttm->bufmgr.emit_reloc = dri_ttm_emit_reloc;
+ bufmgr_ttm->bufmgr.process_relocs = dri_ttm_process_reloc;
+ bufmgr_ttm->bufmgr.post_submit = dri_ttm_post_submit;
+ return &bufmgr_ttm->bufmgr;
+}
+
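
The per-buffer relocation lists built by intel_add_validate_reloc() live in their own drmBO: dword 0 packs the relocation type in its high 16 bits and the running entry count in the low 16, dword 1 is the handle of a chained relocation buffer (unused here), and each entry is a (batch offset, delta, validate-list index) triple. A standalone illustration of that packing; the 4-dword header and 3-dword stride are assumptions standing in for I915_RELOC_HEADER and I915_RELOC0_STRIDE from i915_drm.h:

#include <stdint.h>
#include <stdio.h>

/* Assumed stand-ins for the i915_drm.h constants used by the patch. */
#define RELOC_HEADER_DWORDS 4
#define RELOC_STRIDE_DWORDS 3

static void
add_reloc(uint32_t *relocs, uint32_t offset, uint32_t delta, uint32_t index)
{
   uint32_t n = relocs[0] & 0xffff;  /* current entry count */
   uint32_t *entry = &relocs[RELOC_HEADER_DWORDS + n * RELOC_STRIDE_DWORDS];

   entry[0] = offset;  /* dword offset of the pointer within the batch */
   entry[1] = delta;   /* value added to the buffer's final offset */
   entry[2] = index;   /* position of the buffer in the validate list */
   relocs[0]++;        /* bump the count kept in the low 16 bits */
}

int main(void)
{
   uint32_t relocs[RELOC_HEADER_DWORDS + 2 * RELOC_STRIDE_DWORDS] = {0};

   relocs[0] = 0 | (0 /* I915_RELOC_TYPE_0 */ << 16);
   relocs[1] = 0;  /* no chained relocation buffer */

   add_reloc(relocs, 16, 0, 1);
   add_reloc(relocs, 64, 4, 2);
   printf("%u relocations recorded\n", (unsigned)(relocs[0] & 0xffff));
   return 0;
}
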
diff --git a/src/mesa/drivers/dri/i915/intel_bufmgr_ttm.h b/src/mesa/drivers/dri/i915/intel_bufmgr_ttm.h
new file mode 100644
index 00000000000..0738839cefb
--- /dev/null
+++ b/src/mesa/drivers/dri/i915/intel_bufmgr_ttm.h
@@ -0,0 +1,17 @@
+
+#ifndef INTEL_BUFMGR_TTM_H
+#define INTEL_BUFMGR_TTM_H
+
+#include "dri_bufmgr.h"
+
+extern dri_bo *intel_ttm_bo_create_from_handle(dri_bufmgr *bufmgr, const char *name,
+ unsigned int handle);
+
+dri_fence *intel_ttm_fence_create_from_arg(dri_bufmgr *bufmgr, const char *name,
+ drm_fence_arg_t *arg);
+
+
+dri_bufmgr *intel_bufmgr_ttm_init(int fd, unsigned int fence_type,
+ unsigned int fence_type_flush, int batch_size);
+
+#endif
diff --git a/src/mesa/drivers/dri/i915/intel_ioctl.c b/src/mesa/drivers/dri/i915/intel_ioctl.c
index 6e76737c783..94f7e73ecfb 100644
--- a/src/mesa/drivers/dri/i915/intel_ioctl.c
+++ b/src/mesa/drivers/dri/i915/intel_ioctl.c
@@ -42,6 +42,8 @@
#include "intel_regions.h"
#include "drm.h"
+#include "intel_bufmgr_ttm.h"
+
#define FILE_DEBUG_FLAG DEBUG_IOCTL
int
@@ -105,9 +107,6 @@ intel_batch_ioctl(struct intel_context *intel,
* hardware contexts which would preserve statechanges beyond a
* single buffer.
*/
-
-
-
batch.start = start_offset;
batch.used = used;
batch.cliprects = intel->pClipRects;
@@ -133,3 +132,56 @@ intel_batch_ioctl(struct intel_context *intel,
*/
intel->vtbl.lost_hardware(intel);
}
+
+void
+intel_exec_ioctl(struct intel_context *intel,
+ GLuint used,
+ GLboolean ignore_cliprects, GLboolean allow_unlock,
+ void *start, GLuint count, dri_fence **fence)
+{
+ struct drm_i915_execbuffer execbuf;
+ dri_fence *fo;
+
+ assert(intel->locked);
+ assert(used);
+
+ if (*fence) {
+ dri_fence_unreference(*fence);
+ }
+
+ memset(&execbuf, 0, sizeof(execbuf));
+
+ execbuf.num_buffers = count;
+ execbuf.batch.used = used;
+ execbuf.batch.cliprects = intel->pClipRects;
+ execbuf.batch.num_cliprects = ignore_cliprects ? 0 : intel->numClipRects;
+ execbuf.batch.DR1 = 0;
+ execbuf.batch.DR4 = ((((GLuint) intel->drawX) & 0xffff) |
+ (((GLuint) intel->drawY) << 16));
+
+ execbuf.ops_list = (unsigned)start; // TODO
+ execbuf.fence_arg.flags = DRM_FENCE_FLAG_SHAREABLE | DRM_I915_FENCE_FLAG_FLUSHED;
+
+ if (drmCommandWriteRead(intel->driFd, DRM_I915_EXECBUFFER, &execbuf,
+ sizeof(execbuf))) {
+ fprintf(stderr, "DRM_I830_EXECBUFFER: %d\n", -errno);
+ UNLOCK_HARDWARE(intel);
+ exit(1);
+ }
+
+
+ fo = intel_ttm_fence_create_from_arg(intel->intelScreen->bufmgr, "fence buffers",
+ &execbuf.fence_arg);
+ if (!fo) {
+ fprintf(stderr, "failed to fence handle: %08x\n", execbuf.fence_arg.handle);
+ UNLOCK_HARDWARE(intel);
+ exit(1);
+ }
+ *fence = fo;
+
+ /* FIXME: use hardware contexts to avoid 'losing' hardware after
+ * each buffer flush.
+ */
+ intel->vtbl.lost_hardware(intel);
+
+}
diff --git a/src/mesa/drivers/dri/i915/intel_ioctl.h b/src/mesa/drivers/dri/i915/intel_ioctl.h
index 7a5b175ed1c..953fee92406 100644
--- a/src/mesa/drivers/dri/i915/intel_ioctl.h
+++ b/src/mesa/drivers/dri/i915/intel_ioctl.h
@@ -37,4 +37,8 @@ void intel_batch_ioctl(struct intel_context *intel,
GLuint start_offset,
GLuint used,
GLboolean ignore_cliprects, GLboolean allow_unlock);
+void intel_exec_ioctl(struct intel_context *intel,
+ GLuint used,
+ GLboolean ignore_cliprects, GLboolean allow_unlock,
+ void *start, GLuint count, dri_fence **fence);
#endif
diff --git a/src/mesa/drivers/dri/i915/intel_regions.c b/src/mesa/drivers/dri/i915/intel_regions.c
index 4eac859a133..187ccf17764 100644
--- a/src/mesa/drivers/dri/i915/intel_regions.c
+++ b/src/mesa/drivers/dri/i915/intel_regions.c
@@ -162,7 +162,7 @@ intel_region_create_static(intelScreenPrivate *intelScreen,
if (intelScreen->ttm) {
assert(bo_handle != -1);
- region->buffer = dri_ttm_bo_create_from_handle(intelScreen->bufmgr,
+ region->buffer = intel_ttm_bo_create_from_handle(intelScreen->bufmgr,
"static region",
bo_handle);
} else {
@@ -201,7 +201,7 @@ intel_region_update_static(intelScreenPrivate *intelScreen,
dri_bo_unreference(region->buffer);
if (intelScreen->ttm) {
assert(bo_handle != -1);
- region->buffer = dri_ttm_bo_create_from_handle(intelScreen->bufmgr,
+ region->buffer = intel_ttm_bo_create_from_handle(intelScreen->bufmgr,
"static region",
bo_handle);
} else {
diff --git a/src/mesa/drivers/dri/i915/intel_screen.c b/src/mesa/drivers/dri/i915/intel_screen.c
index df616f43b93..291441b3608 100644
--- a/src/mesa/drivers/dri/i915/intel_screen.c
+++ b/src/mesa/drivers/dri/i915/intel_screen.c
@@ -50,6 +50,8 @@
#include "intel_regions.h"
#include "intel_batchbuffer.h"
+#include "intel_bufmgr_ttm.h"
+
PUBLIC const char __driConfigOptions[] =
DRI_CONF_BEGIN DRI_CONF_SECTION_PERFORMANCE
DRI_CONF_FTHROTTLE_MODE(DRI_CONF_FTHROTTLE_IRQS)
@@ -534,11 +536,13 @@ static GLboolean intelInitDriver(__DRIscreenPrivate *sPriv)
intelScreen->ttm = GL_FALSE;
if (getenv("INTEL_NO_TTM") == NULL &&
intelScreen->driScrnPriv->ddx_version.minor >= 9 &&
+ intelScreen->drmMinor >= 11 &&
intelScreen->front.bo_handle != -1) {
- intelScreen->bufmgr = dri_bufmgr_ttm_init(sPriv->fd,
- DRM_FENCE_TYPE_EXE,
- DRM_FENCE_TYPE_EXE |
- DRM_I915_FENCE_TYPE_RW);
+ intelScreen->bufmgr = intel_bufmgr_ttm_init(sPriv->fd,
+ DRM_FENCE_TYPE_EXE,
+ DRM_FENCE_TYPE_EXE |
+ DRM_I915_FENCE_TYPE_RW,
+ BATCH_SZ);
if (intelScreen->bufmgr != NULL)
intelScreen->ttm = GL_TRUE;
}
diff --git a/src/mesa/drivers/dri/i965/intel_context.c b/src/mesa/drivers/dri/i965/intel_context.c
index 2cf311c7135..7ec316aa8ae 100644
--- a/src/mesa/drivers/dri/i965/intel_context.c
+++ b/src/mesa/drivers/dri/i965/intel_context.c
@@ -498,7 +498,7 @@ GLboolean intelInitContext( struct intel_context *intel,
_mesa_enable_extension( ctx, "GL_EXT_texture_compression_s3tc" );
_mesa_enable_extension( ctx, "GL_S3_s3tc" );
}
- else if (driQueryOptionb (&intelScreen->optionCache, "force_s3tc_enable")) {
+ else if (driQueryOptionb (&intel->optionCache, "force_s3tc_enable")) {
_mesa_enable_extension( ctx, "GL_EXT_texture_compression_s3tc" );
}
diff --git a/src/mesa/drivers/dri/intel/intel_tex_layout.c b/src/mesa/drivers/dri/intel/intel_tex_layout.c
index e3c6e1c17cf..4da636021ba 100644
--- a/src/mesa/drivers/dri/intel/intel_tex_layout.c
+++ b/src/mesa/drivers/dri/intel/intel_tex_layout.c
@@ -32,6 +32,7 @@
#include "intel_mipmap_tree.h"
#include "intel_tex_layout.h"
+#include "intel_context.h"
#include "macros.h"
GLuint intel_compressed_alignment(GLenum internalFormat)
diff --git a/src/mesa/drivers/dri/nouveau/nouveau_context.c b/src/mesa/drivers/dri/nouveau/nouveau_context.c
index f36483a3d49..a8569a9f153 100644
--- a/src/mesa/drivers/dri/nouveau/nouveau_context.c
+++ b/src/mesa/drivers/dri/nouveau/nouveau_context.c
@@ -378,3 +378,39 @@ void nouveauCopySubBuffer(__DRIdrawablePrivate *dPriv,
{
}
+void nouveauClearBuffer(GLcontext *ctx, nouveau_renderbuffer_t *buffer,
+ int fill, int mask)
+{
+ nouveauContextPtr nmesa = NOUVEAU_CONTEXT(ctx);
+ int dimensions;
+
+ if (!buffer) {
+ return;
+ }
+
+	/* FIXME: only 32 bpp surfaces are supported at the moment, and the mask argument is not yet honoured */
+
+ /* Surface that we will work on */
+ nouveauObjectOnSubchannel(nmesa, NvSubCtxSurf2D, NvCtxSurf2D);
+
+ BEGIN_RING_SIZE(NvSubCtxSurf2D, NV10_CONTEXT_SURFACES_2D_FORMAT, 4);
+ OUT_RING(0x0b); /* Y32 color format */
+ OUT_RING((buffer->pitch<<16)|buffer->pitch);
+ OUT_RING(buffer->offset);
+ OUT_RING(buffer->offset);
+
+ /* Now clear a rectangle */
+ dimensions = ((buffer->mesa.Height)<<16) | (buffer->mesa.Width);
+
+ nouveauObjectOnSubchannel(nmesa, NvSubGdiRectText, NvGdiRectText);
+
+ BEGIN_RING_SIZE(NvSubGdiRectText, NV04_GDI_RECTANGLE_TEXT_OPERATION, 1);
+ OUT_RING(3); /* SRCCOPY */
+
+ BEGIN_RING_SIZE(NvSubGdiRectText, NV04_GDI_RECTANGLE_TEXT_BLOCK_LEVEL1_TL, 5);
+ OUT_RING(0); /* top left */
+ OUT_RING(dimensions); /* bottom right */
+ OUT_RING(fill);
+ OUT_RING(0); /* top left */
+ OUT_RING(dimensions); /* bottom right */
+}
diff --git a/src/mesa/drivers/dri/nouveau/nouveau_context.h b/src/mesa/drivers/dri/nouveau/nouveau_context.h
index 77fe13a9cdc..9aff0ee668b 100644
--- a/src/mesa/drivers/dri/nouveau/nouveau_context.h
+++ b/src/mesa/drivers/dri/nouveau/nouveau_context.h
@@ -133,9 +133,6 @@ typedef struct nouveau_context {
nouveau_renderbuffer_t *color_buffer;
nouveau_renderbuffer_t *depth_buffer;
- /* Color buffer clear value */
- uint32_t clear_color_value;
-
/* Depth/stencil clear value */
uint32_t clear_value;
@@ -234,6 +231,9 @@ extern void nouveauSwapBuffers(__DRIdrawablePrivate *dPriv);
extern void nouveauCopySubBuffer(__DRIdrawablePrivate *dPriv,
int x, int y, int w, int h);
+extern void nouveauClearBuffer(GLcontext *ctx, nouveau_renderbuffer_t *buffer,
+ int fill, int mask);
+
/* Debugging utils: */
extern int NOUVEAU_DEBUG;
diff --git a/src/mesa/drivers/dri/nouveau/nouveau_driver.c b/src/mesa/drivers/dri/nouveau/nouveau_driver.c
index 4851c668356..8b76779002b 100644
--- a/src/mesa/drivers/dri/nouveau/nouveau_driver.c
+++ b/src/mesa/drivers/dri/nouveau/nouveau_driver.c
@@ -35,6 +35,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
#include "framebuffer.h"
#include "utils.h"
+#include "colormac.h"
/* Wrapper for DRM_NOUVEAU_GETPARAM ioctl */
GLboolean nouveauDRMGetParam(nouveauContextPtr nmesa,
@@ -135,7 +136,74 @@ static void nouveauFinish( GLcontext *ctx )
/* glClear */
static void nouveauClear( GLcontext *ctx, GLbitfield mask )
{
- // XXX we really should do something here...
+ uint32_t clear_value;
+ nouveauContextPtr nmesa = NOUVEAU_CONTEXT(ctx);
+
+	/* FIXME: should the front buffer be cleared at all, even when requested? */
+ if (mask & (BUFFER_BIT_FRONT_LEFT|BUFFER_BIT_BACK_LEFT)) {
+ GLubyte c[4];
+ int color_bits = 32;
+ int color_mask = 0xffffffff;
+
+ UNCLAMPED_FLOAT_TO_RGBA_CHAN(c,ctx->Color.ClearColor);
+ clear_value = PACK_COLOR_8888(c[3],c[0],c[1],c[2]);
+
+ if (ctx->DrawBuffer) {
+ /* FIXME: find correct color buffer, instead of [0][0] */
+ if (ctx->DrawBuffer->_ColorDrawBuffers[0][0]) {
+ color_bits = ctx->DrawBuffer->_ColorDrawBuffers[0][0]->RedBits;
+ color_bits += ctx->DrawBuffer->_ColorDrawBuffers[0][0]->GreenBits;
+ color_bits += ctx->DrawBuffer->_ColorDrawBuffers[0][0]->BlueBits;
+ color_bits += ctx->DrawBuffer->_ColorDrawBuffers[0][0]->AlphaBits;
+ }
+ }
+
+ if (color_bits<24) {
+ clear_value = PACK_COLOR_565(c[0],c[1],c[2]);
+ color_mask = 0xffff;
+ }
+
+ nouveauClearBuffer(ctx, nmesa->color_buffer,
+ clear_value, color_mask);
+ }
+
+ if (mask & (BUFFER_BIT_DEPTH)) {
+ int depth_bits = 24;
+ int depth_mask;
+ if (ctx->DrawBuffer) {
+ if (ctx->DrawBuffer->_DepthBuffer) {
+ depth_bits = ctx->DrawBuffer->_DepthBuffer->DepthBits;
+ }
+ }
+
+ switch(depth_bits) {
+ case 16:
+				clear_value = (uint32_t) (ctx->Depth.Clear * 65535.0);
+ depth_mask = 0xffff;
+ break;
+ default:
+ clear_value = ((uint32_t) (ctx->Depth.Clear * 16777215.0)) << 8;
+ depth_mask = 0xffffff00;
+ break;
+ }
+
+ nouveauClearBuffer(ctx, nmesa->depth_buffer,
+ clear_value, depth_mask);
+ }
+
+ if (mask & (BUFFER_BIT_STENCIL)) {
+ int stencil_bits = 0;
+ if (ctx->DrawBuffer) {
+ if (ctx->DrawBuffer->_StencilBuffer) {
+ stencil_bits = ctx->DrawBuffer->_StencilBuffer->StencilBits;
+ }
+ }
+
+ if (stencil_bits>0) {
+ nouveauClearBuffer(ctx, nmesa->depth_buffer,
+ ctx->Stencil.Clear, (1<<stencil_bits)-1);
+ }
+ }
}
void nouveauDriverInitFunctions( struct dd_function_table *functions )
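A standalone worked example of the packed values nouveauClear computes. The macro bodies below mirror what Mesa's colormac.h is expected to provide, and the color and depth values are purely illustrative, so treat this as a sketch rather than driver code.

#include <stdint.h>
#include <stdio.h>

/* Local copies of the packing macros, assumed to match colormac.h. */
#define PACK_COLOR_8888(X, Y, Z, W)  (((X) << 24) | ((Y) << 16) | ((Z) << 8) | (W))
#define PACK_COLOR_565(R, G, B)      ((((R) & 0xf8) << 8) | (((G) & 0xfc) << 3) | (((B) & 0xf8) >> 3))

int main(void)
{
   uint8_t c[4] = { 255, 128, 0, 255 };   /* clear color as R, G, B, A bytes */

   /* >= 24 bpp color buffer: A8R8G8B8 value, full 32-bit mask. */
   uint32_t argb = PACK_COLOR_8888((uint32_t)c[3], (uint32_t)c[0],
                                   (uint32_t)c[1], (uint32_t)c[2]);

   /* < 24 bpp color buffer: R5G6B5 value, mask limited to the low 16 bits. */
   uint32_t rgb565 = PACK_COLOR_565((uint32_t)c[0], (uint32_t)c[1], (uint32_t)c[2]);

   /* 24-bit depth shares a word with 8-bit stencil: depth lives in the high
    * 24 bits, so the 0xffffff00 mask leaves the stencil bits untouched. */
   uint32_t z24 = ((uint32_t)(1.0 * 16777215.0)) << 8;

   printf("%08x %04x %08x\n", (unsigned)argb, (unsigned)rgb565, (unsigned)z24);
   return 0;
}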
diff --git a/src/mesa/drivers/dri/nouveau/nv10_state.c b/src/mesa/drivers/dri/nouveau/nv10_state.c
index 8cbe72020fe..3e5bfe093f9 100644
--- a/src/mesa/drivers/dri/nouveau/nv10_state.c
+++ b/src/mesa/drivers/dri/nouveau/nv10_state.c
@@ -110,91 +110,19 @@ static void nv10BlendFuncSeparate(GLcontext *ctx, GLenum sfactorRGB, GLenum dfac
OUT_RING_CACHE(dfactorRGB);
}
-static void nv10ClearBuffer(GLcontext *ctx, nouveau_renderbuffer_t *buffer, int fill, int mask)
-{
- nouveauContextPtr nmesa = NOUVEAU_CONTEXT(ctx);
- int dimensions;
-
- if (!buffer) {
- return;
- }
-
- /* Surface that we will work on */
- nouveauObjectOnSubchannel(nmesa, NvSubCtxSurf2D, NvCtxSurf2D);
-
- BEGIN_RING_SIZE(NvSubCtxSurf2D, NV10_CONTEXT_SURFACES_2D_FORMAT, 4);
- OUT_RING(0x0b); /* Y32 color format */
- OUT_RING((buffer->pitch<<16)|buffer->pitch);
- OUT_RING(buffer->offset);
- OUT_RING(buffer->offset);
-
- /* Now clear a rectangle */
- dimensions = ((buffer->mesa.Height)<<16) | (buffer->mesa.Width);
-
- nouveauObjectOnSubchannel(nmesa, NvSubGdiRectText, NvGdiRectText);
-
- BEGIN_RING_SIZE(NvSubGdiRectText, NV04_GDI_RECTANGLE_TEXT_OPERATION, 1);
- OUT_RING(3); /* SRCCOPY */
-
- BEGIN_RING_SIZE(NvSubGdiRectText, NV04_GDI_RECTANGLE_TEXT_BLOCK_LEVEL1_TL, 5);
- OUT_RING(0); /* top left */
- OUT_RING(dimensions); /* bottom right */
- OUT_RING(fill);
- OUT_RING(0); /* top left */
- OUT_RING(dimensions); /* bottom right */
-}
-
-static void nv10Clear(GLcontext *ctx, GLbitfield mask)
-{
- nouveauContextPtr nmesa = NOUVEAU_CONTEXT(ctx);
-
- if (mask & (BUFFER_BIT_FRONT_LEFT|BUFFER_BIT_BACK_LEFT)) {
- nv10ClearBuffer(ctx, nmesa->color_buffer,
- nmesa->clear_color_value, 0xffffffff);
- }
- /* FIXME: check depth bits */
- if (mask & (BUFFER_BIT_DEPTH)) {
- nv10ClearBuffer(ctx, nmesa->depth_buffer,
- nmesa->clear_value, 0xffffff00);
- }
- /* FIXME: check about stencil? */
- if (mask & (BUFFER_BIT_STENCIL)) {
- nv10ClearBuffer(ctx, nmesa->depth_buffer,
- nmesa->clear_value, 0x000000ff);
- }
-}
-
static void nv10ClearColor(GLcontext *ctx, const GLfloat color[4])
{
- nouveauContextPtr nmesa = NOUVEAU_CONTEXT(ctx);
- GLubyte c[4];
- UNCLAMPED_FLOAT_TO_RGBA_CHAN(c,color);
- nmesa->clear_color_value = PACK_COLOR_8888(c[3],c[0],c[1],c[2]);
+	/* nothing to do: clears are handled by the generic nouveauClear() */
}
static void nv10ClearDepth(GLcontext *ctx, GLclampd d)
{
- nouveauContextPtr nmesa = NOUVEAU_CONTEXT(ctx);
-
-/* switch (ctx->DrawBuffer->_DepthBuffer->DepthBits) {
- case 16:
- nmesa->clear_value = (uint32_t)(d*0x7FFF);
- break;
- case 24:*/
- nmesa->clear_value = ((nmesa->clear_value&0x000000FF) |
- (((uint32_t)(d*0xFFFFFF))<<8));
-/* break;
- }*/
+	/* nothing to do: clears are handled by the generic nouveauClear() */
}
static void nv10ClearStencil(GLcontext *ctx, GLint s)
{
- nouveauContextPtr nmesa = NOUVEAU_CONTEXT(ctx);
-
-/* if (ctx->DrawBuffer->_DepthBuffer->DepthBits == 24) {*/
- nmesa->clear_value = ((nmesa->clear_value&0xFFFFFF00)|
- (s&0x000000FF));
-/* }*/
+	/* nothing to do: clears are handled by the generic nouveauClear() */
}
static void nv10ClipPlane(GLcontext *ctx, GLenum plane, const GLfloat *equation)
@@ -1037,10 +965,10 @@ void nv10InitStateFuncs(GLcontext *ctx, struct dd_function_table *func)
func->BlendColor = nv10BlendColor;
func->BlendEquationSeparate = nv10BlendEquationSeparate;
func->BlendFuncSeparate = nv10BlendFuncSeparate;
- func->Clear = nv10Clear;
- func->ClearColor = nv10ClearColor;
- func->ClearDepth = nv10ClearDepth;
- func->ClearStencil = nv10ClearStencil;
+/* func->Clear = nv10Clear;*/                /* handled by the generic nouveauClear() */
+   func->ClearColor = nv10ClearColor;        /* no-op: handled by nouveauClear() */
+   func->ClearDepth = nv10ClearDepth;        /* no-op: handled by nouveauClear() */
+   func->ClearStencil = nv10ClearStencil;    /* no-op: handled by nouveauClear() */
func->ClipPlane = nv10ClipPlane;
func->ColorMask = nv10ColorMask;
func->ColorMaterial = nv10ColorMaterial;
diff --git a/src/mesa/drivers/dri/r300/r300_fragprog.c b/src/mesa/drivers/dri/r300/r300_fragprog.c
index cce8e685865..78ed44b09c7 100644
--- a/src/mesa/drivers/dri/r300/r300_fragprog.c
+++ b/src/mesa/drivers/dri/r300/r300_fragprog.c
@@ -951,6 +951,10 @@ static void emit_tex(struct r300_fragment_program *fp,
if (REG_GET_TYPE(dest) == REG_TYPE_OUTPUT) {
rdest = dest;
dest = get_temp_reg_tex(fp);
+ } else if (fpi->DstReg.WriteMask != WRITEMASK_XYZW) {
+		/* TEX always writes all four components, so a partial write mask has to go through a temporary */
+ rdest = dest;
+ dest = get_temp_reg_tex(fp);
}
hwdest =
t_hw_dst(fp, dest, GL_TRUE,
@@ -1016,7 +1020,7 @@ static void emit_tex(struct r300_fragment_program *fp,
/* Copy from temp to output if needed */
if (REG_GET_VALID(rdest)) {
- emit_arith(fp, PFS_OP_MAD, rdest, WRITEMASK_XYZW, dest,
+ emit_arith(fp, PFS_OP_MAD, rdest, fpi->DstReg.WriteMask, dest,
pfs_one, pfs_zero, 0);
free_temp(fp, dest);
}
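The extra temporary is needed because the hardware TEX instruction always produces all four components; the follow-up MAD (dest = tmp * 1 + 0) then copies only the masked components back into the real destination. A toy, driver-independent model of that masked copy (names and values here are illustrative only):

#include <stdio.h>

/* Toy model of a per-component write mask: only the components selected by
 * the mask are copied from the freshly sampled value into the destination,
 * which is what emitting the MAD with the instruction's write mask achieves. */
typedef struct { float x, y, z, w; } vec4;

static vec4 writemask_copy(vec4 dst, vec4 tmp, unsigned mask /* bit0=X .. bit3=W */)
{
   if (mask & 1) dst.x = tmp.x;
   if (mask & 2) dst.y = tmp.y;
   if (mask & 4) dst.z = tmp.z;
   if (mask & 8) dst.w = tmp.w;
   return dst;
}

int main(void)
{
   vec4 dst = { 1, 2, 3, 4 };   /* previous contents of the output register */
   vec4 tex = { 9, 9, 9, 9 };   /* value produced by the TEX instruction */

   dst = writemask_copy(dst, tex, 0x3 /* .xy only */);
   printf("%g %g %g %g\n", dst.x, dst.y, dst.z, dst.w);  /* prints: 9 9 3 4 */
   return 0;
}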