Diffstat (limited to 'src/mesa')
 -rw-r--r--  src/mesa/drivers/dri/i915/Makefile | 1
 -rw-r--r--  src/mesa/drivers/dri/i915/intel_batchbuffer.c (renamed from src/mesa/drivers/dri/intel/intel_batchbuffer.c) | 0
 -rw-r--r--  src/mesa/drivers/dri/i915/intel_batchbuffer.h (renamed from src/mesa/drivers/dri/intel/intel_batchbuffer.h) | 0
 -rw-r--r--  src/mesa/drivers/dri/i965/Makefile | 2
 -rw-r--r--  src/mesa/drivers/dri/i965/brw_context.h | 4
 -rw-r--r--  src/mesa/drivers/dri/i965/brw_draw_upload.c | 6
 -rw-r--r--  src/mesa/drivers/dri/i965/brw_state_pool.c | 2
 -rw-r--r--  src/mesa/drivers/dri/i965/brw_state_upload.c | 2
 -rw-r--r--  src/mesa/drivers/dri/i965/brw_wm_state.c | 2
 -rw-r--r--  src/mesa/drivers/dri/i965/bufmgr.h | 191
 -rw-r--r--  src/mesa/drivers/dri/i965/bufmgr_fake.c | 1360
 -rw-r--r--  src/mesa/drivers/dri/i965/intel_batchbuffer.c | 13
 -rw-r--r--  src/mesa/drivers/dri/i965/intel_batchbuffer.h | 4
 -rw-r--r--  src/mesa/drivers/dri/i965/intel_blit.c | 16
 -rw-r--r--  src/mesa/drivers/dri/i965/intel_blit.h | 10
 -rw-r--r--  src/mesa/drivers/dri/i965/intel_buffer_objects.c | 4
 -rw-r--r--  src/mesa/drivers/dri/i965/intel_buffer_objects.h | 4
 -rw-r--r--  src/mesa/drivers/dri/i965/intel_context.c | 25
 -rw-r--r--  src/mesa/drivers/dri/i965/intel_ioctl.c | 2
 -rw-r--r--  src/mesa/drivers/dri/i965/intel_mipmap_tree.c | 2
 -rw-r--r--  src/mesa/drivers/dri/i965/intel_regions.c | 12
 -rw-r--r--  src/mesa/drivers/dri/i965/intel_regions.h | 4
 -rw-r--r--  src/mesa/drivers/dri/i965/intel_screen.c | 59
 -rw-r--r--  src/mesa/drivers/dri/i965/intel_screen.h | 7
 l--------- [-rw-r--r--]  src/mesa/drivers/dri/i965/intel_tex_layout.c | 103
 -rw-r--r--  src/mesa/drivers/dri/i965/intel_tex_validate.c | 2
 26 files changed, 1617 insertions(+), 220 deletions(-)
diff --git a/src/mesa/drivers/dri/i915/Makefile b/src/mesa/drivers/dri/i915/Makefile
index 0e2cdcb2400..d3d0bd570c1 100644
--- a/src/mesa/drivers/dri/i915/Makefile
+++ b/src/mesa/drivers/dri/i915/Makefile
@@ -66,7 +66,6 @@ DRIVER_DEFINES = -I../intel $(shell pkg-config libdrm --atleast-version=2.3.1 \
include ../Makefile.template
-intel_batchbuffer.o: ../intel/intel_batchbuffer.o
intel_tex_layout.o: ../intel/intel_tex_layout.c
symlinks:
diff --git a/src/mesa/drivers/dri/intel/intel_batchbuffer.c b/src/mesa/drivers/dri/i915/intel_batchbuffer.c
index 8ee48b5a689..8ee48b5a689 100644
--- a/src/mesa/drivers/dri/intel/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/i915/intel_batchbuffer.c
diff --git a/src/mesa/drivers/dri/intel/intel_batchbuffer.h b/src/mesa/drivers/dri/i915/intel_batchbuffer.h
index 850a91e1c91..850a91e1c91 100644
--- a/src/mesa/drivers/dri/intel/intel_batchbuffer.h
+++ b/src/mesa/drivers/dri/i915/intel_batchbuffer.h
diff --git a/src/mesa/drivers/dri/i965/Makefile b/src/mesa/drivers/dri/i965/Makefile
index 07256ce1e58..5748d7ff05e 100644
--- a/src/mesa/drivers/dri/i965/Makefile
+++ b/src/mesa/drivers/dri/i965/Makefile
@@ -5,6 +5,7 @@ include $(TOP)/configs/current
LIBNAME = i965_dri.so
DRIVER_SOURCES = \
+ bufmgr_fake.c \
intel_batchbuffer.c \
intel_blit.c \
intel_buffer_objects.c \
@@ -91,7 +92,6 @@ DRIVER_DEFINES = -I../intel
include ../Makefile.template
-intel_batchbuffer.o: ../intel/intel_batchbuffer.o
intel_tex_layout.o: ../intel/intel_tex_layout.c
server:
diff --git a/src/mesa/drivers/dri/i965/brw_context.h b/src/mesa/drivers/dri/i965/brw_context.h
index 279baf8c835..aa797b72ce6 100644
--- a/src/mesa/drivers/dri/i965/brw_context.h
+++ b/src/mesa/drivers/dri/i965/brw_context.h
@@ -242,7 +242,7 @@ struct brw_surface_binding_table {
struct brw_cache;
struct brw_mem_pool {
- dri_bo *buffer;
+ struct buffer *buffer;
GLuint size;
GLuint offset; /* offset of first free byte */
@@ -605,7 +605,7 @@ struct brw_context
GLuint nr_surfaces;
GLuint max_threads;
- dri_bo *scratch_buffer;
+ struct buffer *scratch_buffer;
GLuint scratch_buffer_size;
GLuint sampler_count;
diff --git a/src/mesa/drivers/dri/i965/brw_draw_upload.c b/src/mesa/drivers/dri/i965/brw_draw_upload.c
index 5f6f0ef5a62..fc2e3035af1 100644
--- a/src/mesa/drivers/dri/i965/brw_draw_upload.c
+++ b/src/mesa/drivers/dri/i965/brw_draw_upload.c
@@ -58,7 +58,7 @@ struct brw_array_state {
GLuint dword;
} vb0;
- dri_bo *buffer;
+ struct buffer *buffer;
GLuint offset;
GLuint max_index;
@@ -68,7 +68,7 @@ struct brw_array_state {
};
-static dri_bo *array_buffer( const struct gl_client_array *array )
+static struct buffer *array_buffer( const struct gl_client_array *array )
{
return intel_bufferobj_buffer(intel_buffer_object(array->BufferObj));
}
@@ -620,7 +620,7 @@ void brw_upload_indices( struct brw_context *brw,
*/
{
struct brw_indexbuffer ib;
- dri_bo *buffer = intel_bufferobj_buffer(intel_buffer_object(bufferobj));
+ struct buffer *buffer = intel_bufferobj_buffer(intel_buffer_object(bufferobj));
memset(&ib, 0, sizeof(ib));
diff --git a/src/mesa/drivers/dri/i965/brw_state_pool.c b/src/mesa/drivers/dri/i965/brw_state_pool.c
index 9677c73583c..708ae857ab5 100644
--- a/src/mesa/drivers/dri/i965/brw_state_pool.c
+++ b/src/mesa/drivers/dri/i965/brw_state_pool.c
@@ -34,7 +34,7 @@
#include "imports.h"
#include "intel_ioctl.h"
-#include "dri_bufmgr.h"
+#include "bufmgr.h"
GLboolean brw_pool_alloc( struct brw_mem_pool *pool,
GLuint size,
diff --git a/src/mesa/drivers/dri/i965/brw_state_upload.c b/src/mesa/drivers/dri/i965/brw_state_upload.c
index d5e575ef66a..92c07c29624 100644
--- a/src/mesa/drivers/dri/i965/brw_state_upload.c
+++ b/src/mesa/drivers/dri/i965/brw_state_upload.c
@@ -33,7 +33,7 @@
#include "brw_context.h"
#include "brw_state.h"
-#include "dri_bufmgr.h"
+#include "bufmgr.h"
#include "intel_batchbuffer.h"
/* This is used to initialize brw->state.atoms[]. We could use this
diff --git a/src/mesa/drivers/dri/i965/brw_wm_state.c b/src/mesa/drivers/dri/i965/brw_wm_state.c
index b1de66d9884..5b4f2abd0e2 100644
--- a/src/mesa/drivers/dri/i965/brw_wm_state.c
+++ b/src/mesa/drivers/dri/i965/brw_wm_state.c
@@ -34,7 +34,7 @@
#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
-#include "dri_bufmgr.h"
+#include "bufmgr.h"
/***********************************************************************
* WM unit - fragment programs and rasterization
diff --git a/src/mesa/drivers/dri/i965/bufmgr.h b/src/mesa/drivers/dri/i965/bufmgr.h
new file mode 100644
index 00000000000..b31c2e6d9bc
--- /dev/null
+++ b/src/mesa/drivers/dri/i965/bufmgr.h
@@ -0,0 +1,191 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef BUFMGR_H
+#define BUFMGR_H
+
+#include "intel_context.h"
+
+
+/* The buffer manager context. Opaque.
+ */
+struct bufmgr;
+struct buffer;
+
+
+struct bufmgr *bm_fake_intel_Attach( struct intel_context *intel );
+
+/* Flags for validate and other calls. If both NO_UPLOAD and NO_EVICT
+ * are specified, ValidateBuffers is essentially a query.
+ */
+#define BM_MEM_LOCAL 0x1
+#define BM_MEM_AGP 0x2
+#define BM_MEM_VRAM 0x4 /* not yet used */
+#define BM_WRITE 0x8 /* not yet used */
+#define BM_READ 0x10 /* not yet used */
+#define BM_NO_UPLOAD 0x20
+#define BM_NO_EVICT 0x40
+#define BM_NO_MOVE 0x80 /* not yet used */
+#define BM_NO_ALLOC 0x100 /* legacy "fixed" buffers only */
+#define BM_CLIENT 0x200 /* for map - pointer will be accessed
+ * without dri lock */
+
+#define BM_MEM_MASK (BM_MEM_LOCAL|BM_MEM_AGP|BM_MEM_VRAM)
+
+
+
+
+/* Create a pool of a given memory type, from a certain offset and a
+ * certain size.
+ *
+ * Also passed in is a virtual pointer to the start of the pool. This
+ * is useful in the faked-out version in i915 so that MapBuffer can
+ * return a pointer to a buffer residing in AGP space.
+ *
+ * Flags passed into a pool are inherited by all buffers allocated in
+ * that pool. So pools representing the static front, back, and depth
+ * buffer allocations should have BM_MEM_AGP|BM_NO_UPLOAD|BM_NO_EVICT|
+ * BM_NO_MOVE to match the behaviour of the legacy allocations.
+ *
+ * Returns -1 for failure, pool number for success.
+ */
+int bmInitPool( struct intel_context *,
+ unsigned long low_offset,
+ void *low_virtual,
+ unsigned long size,
+ unsigned flags);
+
+
+/* Stick closely to ARB_vbo semantics - they're well defined and
+ * understood, and drivers can just pass the calls through without too
+ * much thunking.
+ */
+void bmGenBuffers(struct intel_context *, const char *, unsigned n, struct buffer **buffers,
+ int align );
+void bmDeleteBuffers(struct intel_context *, unsigned n, struct buffer **buffers);
+
+
+/* Hook to inform the faked buffer manager about fixed-position
+ * front, depth, and back buffers. These may move to a fully
+ * memory-managed scheme, or they may continue to be managed as is.
+ */
+struct buffer *bmGenBufferStatic(struct intel_context *,
+ unsigned pool);
+
+/* On evict, buffer manager will call invalidate_cb() to note that the
+ * buffer needs to be reloaded.
+ *
+ * Buffer is uploaded by calling bmMapBuffer() and copying data into
+ * the returned pointer.
+ *
+ * This is basically a big hack to get some more performance by
+ * turning off backing store for buffers where we either have it
+ * already (textures) or don't need it (batch buffers, temporary
+ * vbo's).
+ */
+void bmBufferSetInvalidateCB(struct intel_context *,
+ struct buffer *buf,
+ void (*invalidate_cb)( struct intel_context *, void *ptr ),
+ void *ptr,
+ GLboolean dont_fence_subdata);
+
+
+/* The driver has more intimate knowledge of the hardware than a GL
+ * client would, so the flags here are more prescriptive than the
+ * usage values in the ARB_vbo interface:
+ */
+int bmBufferData(struct intel_context *,
+ struct buffer *buf,
+ unsigned size,
+ const void *data,
+ unsigned flags );
+
+int bmBufferSubData(struct intel_context *,
+ struct buffer *buf,
+ unsigned offset,
+ unsigned size,
+ const void *data );
+
+/* In this version, taking the offset will provoke an upload on
+ * buffers not already resident in AGP:
+ */
+unsigned bmBufferOffset(struct intel_context *,
+ struct buffer *buf);
+
+
+/* Extract data from the buffer:
+ */
+void bmBufferGetSubData(struct intel_context *,
+ struct buffer *buf,
+ unsigned offset,
+ unsigned size,
+ void *data );
+
+void *bmMapBuffer( struct intel_context *,
+ struct buffer *buf,
+ unsigned access );
+
+void bmUnmapBuffer( struct intel_context *,
+ struct buffer *buf );
+
+/* Pertains to all buffers whose offset has been taken since the last
+ * fence or release.
+ */
+int bmValidateBuffers( struct intel_context * );
+void bmReleaseBuffers( struct intel_context * );
+
+GLuint bmCtxId( struct intel_context *intel );
+
+
+GLboolean bmError( struct intel_context * );
+void bmEvictAll( struct intel_context * );
+
+void *bmFindVirtual( struct intel_context *intel,
+ unsigned int offset,
+ size_t sz );
+
+/* This functionality is used by the buffer manager, not really sure
+ * if we need to be exposing it in this way, probably libdrm will
+ * offer equivalent calls.
+ *
+ * For now they can stay, but will likely change/move before final:
+ */
+unsigned bmSetFence( struct intel_context * );
+unsigned bmSetFenceLock( struct intel_context * );
+unsigned bmLockAndFence( struct intel_context *intel );
+int bmTestFence( struct intel_context *, unsigned fence );
+void bmFinishFence( struct intel_context *, unsigned fence );
+void bmFinishFenceLock( struct intel_context *, unsigned fence );
+
+void bm_fake_NotifyContendedLockTake( struct intel_context * );
+
+extern int INTEL_DEBUG;
+#define DEBUG_BUFMGR 0x10000000
+
+#define DBG(...) do { if (INTEL_DEBUG & DEBUG_BUFMGR) _mesa_printf(__VA_ARGS__); } while(0)
+
+#endif
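
The interface above follows a simple per-frame lifecycle: allocate buffers against a pool, fill them, take their offsets (which marks them referenced and may trigger an upload), validate, fence, and release. Here is a minimal sketch of that flow, assuming an attached buffer manager and the hardware lock held; the pool offset and size are placeholder values, not real hardware constants:

    #include <assert.h>
    #include "bufmgr.h"

    static void example_frame(struct intel_context *intel, void *agp_virtual)
    {
       static char data[4096];
       struct buffer *vb;
       unsigned offset;
       int pool;

       /* One-time setup: a 1MB AGP pool at offset 0 (placeholder values). */
       pool = bmInitPool(intel, 0, agp_virtual, 1024 * 1024, BM_MEM_AGP);
       assert(pool >= 0);

       /* ARB_vbo-style lifecycle; alignment is log2, so 6 means 64 bytes. */
       bmGenBuffers(intel, "example", 1, &vb, 6);
       bmBufferData(intel, vb, sizeof(data), data, 0);

       /* Taking the offset adds the buffer to the referenced list. */
       offset = bmBufferOffset(intel, vb);
       (void)offset;

       /* After emitting commands that use the offset: validate, fence,
        * release. A nonzero return from validate means the manager is
        * wedged and the caller should evict and retry.
        */
       if (bmValidateBuffers(intel) == 0)
          bmSetFence(intel);
       bmReleaseBuffers(intel);

       bmDeleteBuffers(intel, 1, &vb);
    }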
diff --git a/src/mesa/drivers/dri/i965/bufmgr_fake.c b/src/mesa/drivers/dri/i965/bufmgr_fake.c
new file mode 100644
index 00000000000..a85121122fc
--- /dev/null
+++ b/src/mesa/drivers/dri/i965/bufmgr_fake.c
@@ -0,0 +1,1360 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Originally a fake version of the buffer manager, written so that we
+ * could prototype the changes in a driver fairly quickly; it has since
+ * been fleshed out into a fully functional interim solution.
+ *
+ * Basically wraps the old style memory management in the new
+ * programming interface, but is more expressive and avoids many of
+ * the bugs in the old texture manager.
+ */
+#include "bufmgr.h"
+
+#include "intel_context.h"
+#include "intel_ioctl.h"
+#include "intel_batchbuffer.h"
+
+#include "simple_list.h"
+#include "mm.h"
+#include "imports.h"
+
+#define BM_POOL_MAX 8
+
+/* Internal flags:
+ */
+#define BM_NO_BACKING_STORE 0x2000
+#define BM_NO_FENCE_SUBDATA 0x4000
+
+
+static int check_fenced( struct intel_context *intel );
+
+static int nr_attach = 0;
+
+/* Wrapper around mm.c's mem_block, which understands that you must
+ * wait for fences to expire before memory can be freed. This is
+ * specific to our use of memcpy for uploads - an upload that was
+ * processed through the command queue wouldn't need to care about
+ * fences.
+ */
+struct block {
+ struct block *next, *prev;
+ struct pool *pool; /* BM_MEM_AGP */
+ struct mem_block *mem; /* BM_MEM_AGP */
+
+ unsigned referenced:1;
+ unsigned on_hardware:1;
+ unsigned fenced:1;
+
+
+ unsigned fence; /* BM_MEM_AGP, Split to read_fence, write_fence */
+
+ struct buffer *buf;
+ void *virtual;
+};
+
+
+struct buffer {
+ unsigned id; /* debug only */
+ const char *name;
+ unsigned size;
+
+ unsigned mapped:1;
+ unsigned dirty:1;
+ unsigned alignment:13;
+ unsigned flags:16;
+
+ struct block *block;
+ void *backing_store;
+ void (*invalidate_cb)( struct intel_context *, void * );
+ void *invalidate_ptr;
+};
+
+struct pool {
+ unsigned size;
+ unsigned low_offset;
+ struct buffer *static_buffer;
+ unsigned flags;
+ struct mem_block *heap;
+ void *virtual;
+ struct block lru; /* only allocated, non-fence-pending blocks here */
+};
+
+struct bufmgr {
+ _glthread_Mutex mutex; /**< for thread safety */
+ struct pool pool[BM_POOL_MAX];
+ unsigned nr_pools;
+
+ unsigned buf_nr; /* for generating ids */
+
+ struct block referenced; /* after bmBufferOffset */
+ struct block on_hardware; /* after bmValidateBuffers */
+ struct block fenced; /* after bmFenceBuffers (mi_flush, emit irq, write dword) */
+ /* then to pool->lru or free() */
+
+ unsigned ctxId;
+ unsigned last_fence;
+ unsigned free_on_hardware;
+
+ unsigned fail:1;
+ unsigned need_fence:1;
+};
+
+#define MAXFENCE 0x7fffffff
+
+static GLboolean FENCE_LTE( unsigned a, unsigned b )
+{
+ if (a == b)
+ return GL_TRUE;
+
+ if (a < b && b - a < (1<<24))
+ return GL_TRUE;
+
+ if (a > b && MAXFENCE - a + b < (1<<24))
+ return GL_TRUE;
+
+ return GL_FALSE;
+}
+
+int bmTestFence( struct intel_context *intel, unsigned fence )
+{
+ /* Slight problem with wrap-around:
+ */
+ return fence == 0 || FENCE_LTE(fence, intel->sarea->last_dispatch);
+}
+
+#define LOCK(bm) \
+ int dolock = nr_attach > 1; \
+ if (dolock) _glthread_LOCK_MUTEX(bm->mutex)
+
+#define UNLOCK(bm) \
+ if (dolock) _glthread_UNLOCK_MUTEX(bm->mutex)
+
+
+
+static GLboolean alloc_from_pool( struct intel_context *intel,
+ unsigned pool_nr,
+ struct buffer *buf )
+{
+ struct bufmgr *bm = intel->bm;
+ struct pool *pool = &bm->pool[pool_nr];
+ struct block *block = (struct block *)calloc(sizeof *block, 1);
+ GLuint sz, align = (1<<buf->alignment);
+
+ if (!block)
+ return GL_FALSE;
+
+ sz = (buf->size + align-1) & ~(align-1);
+
+ block->mem = mmAllocMem(pool->heap,
+ sz,
+ buf->alignment, 0);
+ if (!block->mem) {
+ free(block);
+ return GL_FALSE;
+ }
+
+ make_empty_list(block);
+
+ /* Insert at head or at tail???
+ */
+ insert_at_tail(&pool->lru, block);
+
+ block->pool = pool;
+ block->virtual = pool->virtual + block->mem->ofs;
+ block->buf = buf;
+
+ buf->block = block;
+
+ return GL_TRUE;
+}
+
+
+
+
+
+
+
+
+/* Release the card storage associated with buf:
+ */
+static void free_block( struct intel_context *intel, struct block *block )
+{
+ DBG("free block %p\n", block);
+
+ if (!block)
+ return;
+
+ check_fenced(intel);
+
+ if (block->referenced) {
+ _mesa_printf("tried to free block on referenced list\n");
+ assert(0);
+ }
+ else if (block->on_hardware) {
+ block->buf = NULL;
+ intel->bm->free_on_hardware += block->mem->size;
+ }
+ else if (block->fenced) {
+ block->buf = NULL;
+ }
+ else {
+ DBG(" - free immediately\n");
+ remove_from_list(block);
+
+ mmFreeMem(block->mem);
+ free(block);
+ }
+}
+
+
+static void alloc_backing_store( struct intel_context *intel, struct buffer *buf )
+{
+ assert(!buf->backing_store);
+ assert(!(buf->flags & (BM_NO_EVICT|BM_NO_BACKING_STORE)));
+
+ buf->backing_store = ALIGN_MALLOC(buf->size, 64);
+}
+
+static void free_backing_store( struct intel_context *intel, struct buffer *buf )
+{
+ assert(!(buf->flags & (BM_NO_EVICT|BM_NO_BACKING_STORE)));
+
+ if (buf->backing_store) {
+ ALIGN_FREE(buf->backing_store);
+ buf->backing_store = NULL;
+ }
+}
+
+
+
+
+
+
+static void set_dirty( struct intel_context *intel,
+ struct buffer *buf )
+{
+ if (buf->flags & BM_NO_BACKING_STORE)
+ buf->invalidate_cb(intel, buf->invalidate_ptr);
+
+ assert(!(buf->flags & BM_NO_EVICT));
+
+ DBG("set_dirty - buf %d\n", buf->id);
+ buf->dirty = 1;
+}
+
+
+static int evict_lru( struct intel_context *intel, GLuint max_fence, GLuint *pool )
+{
+ struct bufmgr *bm = intel->bm;
+ struct block *block, *tmp;
+ int i;
+
+ DBG("%s\n", __FUNCTION__);
+
+ for (i = 0; i < bm->nr_pools; i++) {
+ if (!(bm->pool[i].flags & BM_NO_EVICT)) {
+ foreach_s(block, tmp, &bm->pool[i].lru) {
+
+ if (block->buf &&
+ (block->buf->flags & BM_NO_FENCE_SUBDATA))
+ continue;
+
+ if (block->fence && max_fence &&
+ !FENCE_LTE(block->fence, max_fence))
+ return 0;
+
+ set_dirty(intel, block->buf);
+ block->buf->block = NULL;
+
+ free_block(intel, block);
+ *pool = i;
+ return 1;
+ }
+ }
+ }
+
+
+ return 0;
+}
+
+
+#define foreach_s_rev(ptr, t, list) \
+ for(ptr=(list)->prev,t=(ptr)->prev; list != ptr; ptr=t, t=(t)->prev)
+
+static int evict_mru( struct intel_context *intel, GLuint *pool )
+{
+ struct bufmgr *bm = intel->bm;
+ struct block *block, *tmp;
+ int i;
+
+ DBG("%s\n", __FUNCTION__);
+
+ for (i = 0; i < bm->nr_pools; i++) {
+ if (!(bm->pool[i].flags & BM_NO_EVICT)) {
+ foreach_s_rev(block, tmp, &bm->pool[i].lru) {
+
+ if (block->buf &&
+ (block->buf->flags & BM_NO_FENCE_SUBDATA))
+ continue;
+
+ set_dirty(intel, block->buf);
+ block->buf->block = NULL;
+
+ free_block(intel, block);
+ *pool = i;
+ return 1;
+ }
+ }
+ }
+
+
+ return 0;
+}
+
+
+static int check_fenced( struct intel_context *intel )
+{
+ struct bufmgr *bm = intel->bm;
+ struct block *block, *tmp;
+ int ret = 0;
+
+ foreach_s(block, tmp, &bm->fenced ) {
+ assert(block->fenced);
+
+ if (bmTestFence(intel, block->fence)) {
+
+ block->fenced = 0;
+
+ if (!block->buf) {
+ DBG("delayed free: offset %x sz %x\n", block->mem->ofs, block->mem->size);
+ remove_from_list(block);
+ mmFreeMem(block->mem);
+ free(block);
+ }
+ else {
+ DBG("return to lru: offset %x sz %x\n", block->mem->ofs, block->mem->size);
+ move_to_tail(&block->pool->lru, block);
+ }
+
+ ret = 1;
+ }
+ else {
+ /* Blocks are ordered by fence, so if one fails, all from
+ * here will fail also:
+ */
+ break;
+ }
+ }
+
+ /* Also check the referenced list:
+ */
+ foreach_s(block, tmp, &bm->referenced ) {
+ if (block->fenced &&
+ bmTestFence(intel, block->fence)) {
+ block->fenced = 0;
+ }
+ }
+
+
+ DBG("%s: %d\n", __FUNCTION__, ret);
+ return ret;
+}
+
+
+
+static void fence_blocks( struct intel_context *intel,
+ unsigned fence )
+{
+ struct bufmgr *bm = intel->bm;
+ struct block *block, *tmp;
+
+ foreach_s (block, tmp, &bm->on_hardware) {
+ DBG("Fence block %p (sz 0x%x buf %p) with fence %d\n", block,
+ block->mem->size, block->buf, fence);
+ block->fence = fence;
+
+ block->on_hardware = 0;
+ block->fenced = 1;
+
+ /* Move to tail of pending list here
+ */
+ move_to_tail(&bm->fenced, block);
+ }
+
+ /* Also check the referenced list:
+ */
+ foreach_s (block, tmp, &bm->referenced) {
+ if (block->on_hardware) {
+ DBG("Fence block %p (sz 0x%x buf %p) with fence %d\n", block,
+ block->mem->size, block->buf, fence);
+
+ block->fence = fence;
+ block->on_hardware = 0;
+ block->fenced = 1;
+ }
+ }
+
+
+ bm->last_fence = fence;
+ assert(is_empty_list(&bm->on_hardware));
+}
+
+
+
+
+static GLboolean alloc_block( struct intel_context *intel,
+ struct buffer *buf )
+{
+ struct bufmgr *bm = intel->bm;
+ int i;
+
+ assert(intel->locked);
+
+ DBG("%s 0x%x bytes (%s)\n", __FUNCTION__, buf->size, buf->name);
+
+ for (i = 0; i < bm->nr_pools; i++) {
+ if (!(bm->pool[i].flags & BM_NO_ALLOC) &&
+ alloc_from_pool(intel, i, buf)) {
+
+ DBG("%s --> 0x%x (sz %x)\n", __FUNCTION__,
+ buf->block->mem->ofs, buf->block->mem->size);
+
+ return GL_TRUE;
+ }
+ }
+
+ DBG("%s --> fail\n", __FUNCTION__);
+ return GL_FALSE;
+}
+
+
+static GLboolean evict_and_alloc_block( struct intel_context *intel,
+ struct buffer *buf )
+{
+ GLuint pool;
+ struct bufmgr *bm = intel->bm;
+
+ assert(buf->block == NULL);
+
+ /* Put a cap on the amount of free memory we'll allow to accumulate
+ * before emitting a fence.
+ */
+ if (bm->free_on_hardware > 1 * 1024 * 1024) {
+ DBG("fence for free space: %x\n", bm->free_on_hardware);
+ bmSetFence(intel);
+ }
+
+ /* Search for already free memory:
+ */
+ if (alloc_block(intel, buf))
+ return GL_TRUE;
+
+ /* Look for memory that may have become free:
+ */
+ if (check_fenced(intel) &&
+ alloc_block(intel, buf))
+ return GL_TRUE;
+
+ /* Look for memory blocks not used for >1 frame:
+ */
+ while (evict_lru(intel, intel->second_last_swap_fence, &pool))
+ if (alloc_from_pool(intel, pool, buf))
+ return GL_TRUE;
+
+ /* If we're not thrashing, allow lru eviction to dig deeper into
+ * recently used textures. We'll probably be thrashing soon:
+ */
+ if (!intel->thrashing) {
+ while (evict_lru(intel, 0, &pool))
+ if (alloc_from_pool(intel, pool, buf))
+ return GL_TRUE;
+ }
+
+ /* Keep thrashing counter alive?
+ */
+ if (intel->thrashing)
+ intel->thrashing = 20;
+
+ /* Wait on any already pending fences - here we are waiting for any
+ * freed memory that has been submitted to hardware and fenced to
+ * become available:
+ */
+ while (!is_empty_list(&bm->fenced)) {
+ GLuint fence = bm->fenced.next->fence;
+ bmFinishFence(intel, fence);
+
+ if (alloc_block(intel, buf))
+ return GL_TRUE;
+ }
+
+
+ /*
+ */
+ if (!is_empty_list(&bm->on_hardware)) {
+ bmSetFence(intel);
+
+ while (!is_empty_list(&bm->fenced)) {
+ GLuint fence = bm->fenced.next->fence;
+ bmFinishFence(intel, fence);
+ }
+
+ if (!intel->thrashing) {
+ DBG("thrashing\n");
+ }
+ intel->thrashing = 20;
+
+ if (alloc_block(intel, buf))
+ return GL_TRUE;
+ }
+
+ while (evict_mru(intel, &pool))
+ if (alloc_from_pool(intel, pool, buf))
+ return GL_TRUE;
+
+ DBG("%s 0x%x bytes failed\n", __FUNCTION__, buf->size);
+
+ assert(is_empty_list(&bm->on_hardware));
+ assert(is_empty_list(&bm->fenced));
+
+ return GL_FALSE;
+}
+
+
+
+
+
+
+
+
+
+
+/***********************************************************************
+ * Public functions
+ */
+
+
+/* The initialization functions are skewed in the fake implementation.
+ * This call would be to attach to an existing manager, rather than to
+ * create a local one.
+ */
+struct bufmgr *bm_fake_intel_Attach( struct intel_context *intel )
+{
+ _glthread_DECLARE_STATIC_MUTEX(initMutex);
+ static struct bufmgr bm;
+
+ /* This function needs a mutex of its own...
+ */
+ _glthread_LOCK_MUTEX(initMutex);
+
+ if (nr_attach == 0) {
+ _glthread_INIT_MUTEX(bm.mutex);
+
+ make_empty_list(&bm.referenced);
+ make_empty_list(&bm.fenced);
+ make_empty_list(&bm.on_hardware);
+
+ /* The context id of any member of the share group. This won't be used
+ * in communication with the kernel, so it doesn't matter if
+ * this context is eventually deleted.
+ */
+ bm.ctxId = intel->hHWContext;
+ }
+
+ nr_attach++;
+
+ _glthread_UNLOCK_MUTEX(initMutex);
+
+ return &bm;
+}
+
+
+
+/* The virtual pointer would go away in a true implementation.
+ */
+int bmInitPool( struct intel_context *intel,
+ unsigned long low_offset,
+ void *low_virtual,
+ unsigned long size,
+ unsigned flags)
+{
+ struct bufmgr *bm = intel->bm;
+ int retval = 0;
+
+ LOCK(bm);
+ {
+ GLuint i;
+
+ for (i = 0; i < bm->nr_pools; i++) {
+ if (bm->pool[i].low_offset == low_offset &&
+ bm->pool[i].size == size) {
+ retval = i;
+ goto out;
+ }
+ }
+
+
+ if (bm->nr_pools >= BM_POOL_MAX)
+ retval = -1;
+ else {
+ i = bm->nr_pools++;
+
+ DBG("bmInitPool %d low_offset %x sz %x\n",
+ i, low_offset, size);
+
+ bm->pool[i].low_offset = low_offset;
+ bm->pool[i].size = size;
+ bm->pool[i].heap = mmInit( low_offset, size );
+ bm->pool[i].virtual = low_virtual - low_offset;
+ bm->pool[i].flags = flags;
+
+ make_empty_list(&bm->pool[i].lru);
+
+ retval = i;
+ }
+ }
+ out:
+ UNLOCK(bm);
+ return retval;
+}
+
+static struct buffer *do_GenBuffer(struct intel_context *intel, const char *name, int align)
+{
+ struct bufmgr *bm = intel->bm;
+ struct buffer *buf = calloc(sizeof(*buf), 1);
+
+ buf->id = ++bm->buf_nr;
+ buf->name = name;
+ buf->alignment = align;
+ buf->flags = BM_MEM_AGP|BM_MEM_VRAM|BM_MEM_LOCAL;
+
+ return buf;
+}
+
+
+void *bmFindVirtual( struct intel_context *intel,
+ unsigned int offset,
+ size_t sz )
+{
+ struct bufmgr *bm = intel->bm;
+ int i;
+
+ for (i = 0; i < bm->nr_pools; i++)
+ if (offset >= bm->pool[i].low_offset &&
+ offset + sz <= bm->pool[i].low_offset + bm->pool[i].size)
+ return bm->pool[i].virtual + offset;
+
+ return NULL;
+}
+
+
+void bmGenBuffers(struct intel_context *intel,
+ const char *name, unsigned n,
+ struct buffer **buffers,
+ int align )
+{
+ struct bufmgr *bm = intel->bm;
+ LOCK(bm);
+ {
+ int i;
+
+ for (i = 0; i < n; i++)
+ buffers[i] = do_GenBuffer(intel, name, align);
+ }
+ UNLOCK(bm);
+}
+
+
+void bmDeleteBuffers(struct intel_context *intel, unsigned n, struct buffer **buffers)
+{
+ struct bufmgr *bm = intel->bm;
+
+ LOCK(bm);
+ {
+ unsigned i;
+
+ for (i = 0; i < n; i++) {
+ struct buffer *buf = buffers[i];
+
+ if (buf && buf->block)
+ free_block(intel, buf->block);
+
+ if (buf)
+ free(buf);
+ }
+ }
+ UNLOCK(bm);
+}
+
+
+
+
+/* Hook to inform the faked buffer manager about fixed-position
+ * front, depth, and back buffers. These may move to a fully
+ * memory-managed scheme, or they may continue to be managed as is.
+ * It will probably be useful to pass a fixed offset here one day.
+ */
+struct buffer *bmGenBufferStatic(struct intel_context *intel,
+ unsigned pool )
+{
+ struct bufmgr *bm = intel->bm;
+ struct buffer *buf;
+ LOCK(bm);
+ {
+ assert(bm->pool[pool].flags & BM_NO_EVICT);
+ assert(bm->pool[pool].flags & BM_NO_MOVE);
+
+ if (bm->pool[pool].static_buffer)
+ buf = bm->pool[pool].static_buffer;
+ else {
+ buf = do_GenBuffer(intel, "static", 12);
+
+ bm->pool[pool].static_buffer = buf;
+ assert(!buf->block);
+
+ buf->size = bm->pool[pool].size;
+ buf->flags = bm->pool[pool].flags;
+ buf->alignment = 12;
+
+ if (!alloc_from_pool(intel, pool, buf))
+ assert(0);
+ }
+ }
+ UNLOCK(bm);
+ return buf;
+}
+
+
+static void wait_quiescent(struct intel_context *intel,
+ struct block *block)
+{
+ if (block->on_hardware) {
+ assert(intel->bm->need_fence);
+ bmSetFence(intel);
+ assert(!block->on_hardware);
+ }
+
+
+ if (block->fenced) {
+ bmFinishFence(intel, block->fence);
+ }
+
+ assert(!block->on_hardware);
+ assert(!block->fenced);
+}
+
+
+
+/* If buffer size changes, free and reallocate. Otherwise update in
+ * place.
+ */
+int bmBufferData(struct intel_context *intel,
+ struct buffer *buf,
+ unsigned size,
+ const void *data,
+ unsigned flags )
+{
+ struct bufmgr *bm = intel->bm;
+ int retval = 0;
+
+ LOCK(bm);
+ {
+ DBG("bmBufferData %d sz 0x%x data: %p\n", buf->id, size, data);
+
+ assert(!buf->mapped);
+
+ if (buf->block) {
+ struct block *block = buf->block;
+
+ /* Optimistic check to see if we can reuse the block -- not
+ * required for correctness:
+ */
+ if (block->fenced)
+ check_fenced(intel);
+
+ if (block->on_hardware ||
+ block->fenced ||
+ (buf->size && buf->size != size) ||
+ (data == NULL)) {
+
+ assert(!block->referenced);
+
+ free_block(intel, block);
+ buf->block = NULL;
+ buf->dirty = 1;
+ }
+ }
+
+ buf->size = size;
+ if (buf->block) {
+ assert (buf->block->mem->size >= size);
+ }
+
+ if (buf->flags & (BM_NO_BACKING_STORE|BM_NO_EVICT)) {
+
+ assert(intel->locked || data == NULL);
+
+ if (data != NULL) {
+ if (!buf->block && !evict_and_alloc_block(intel, buf)) {
+ bm->fail = 1;
+ retval = -1;
+ goto out;
+ }
+
+ wait_quiescent(intel, buf->block);
+
+ DBG("bmBufferData %d offset 0x%x sz 0x%x\n",
+ buf->id, buf->block->mem->ofs, size);
+
+ assert(buf->block->virtual == buf->block->pool->virtual + buf->block->mem->ofs);
+
+ do_memcpy(buf->block->virtual, data, size);
+ }
+ buf->dirty = 0;
+ }
+ else {
+ DBG("%s - set buf %d dirty\n", __FUNCTION__, buf->id);
+ set_dirty(intel, buf);
+ free_backing_store(intel, buf);
+
+ if (data != NULL) {
+ alloc_backing_store(intel, buf);
+ do_memcpy(buf->backing_store, data, size);
+ }
+ }
+ }
+ out:
+ UNLOCK(bm);
+ return retval;
+}
+
+
+/* Update the buffer in place, in whatever space it is currently resident:
+ */
+int bmBufferSubData(struct intel_context *intel,
+ struct buffer *buf,
+ unsigned offset,
+ unsigned size,
+ const void *data )
+{
+ struct bufmgr *bm = intel->bm;
+ int retval = 0;
+
+ if (size == 0)
+ return 0;
+
+ LOCK(bm);
+ {
+ DBG("bmBufferSubdata %d offset 0x%x sz 0x%x\n", buf->id, offset, size);
+
+ assert(offset+size <= buf->size);
+
+ if (buf->flags & (BM_NO_EVICT|BM_NO_BACKING_STORE)) {
+
+ assert(intel->locked);
+
+ if (!buf->block && !evict_and_alloc_block(intel, buf)) {
+ bm->fail = 1;
+ retval = -1;
+ goto out;
+ }
+
+ if (!(buf->flags & BM_NO_FENCE_SUBDATA))
+ wait_quiescent(intel, buf->block);
+
+ buf->dirty = 0;
+
+ do_memcpy(buf->block->virtual + offset, data, size);
+ }
+ else {
+ DBG("%s - set buf %d dirty\n", __FUNCTION__, buf->id);
+ set_dirty(intel, buf);
+
+ if (buf->backing_store == NULL)
+ alloc_backing_store(intel, buf);
+
+ do_memcpy(buf->backing_store + offset, data, size);
+ }
+ }
+ out:
+ UNLOCK(bm);
+ return retval;
+}
+
+unsigned bmBufferOffset(struct intel_context *intel,
+ struct buffer *buf)
+{
+ struct bufmgr *bm = intel->bm;
+ unsigned retval = 0;
+
+ LOCK(bm);
+ {
+ assert(intel->locked);
+
+ if (!buf->block &&
+ !evict_and_alloc_block(intel, buf)) {
+ bm->fail = 1;
+ retval = ~0;
+ }
+ else {
+ assert(buf->block);
+ assert(buf->block->buf == buf);
+
+ DBG("Add buf %d (block %p, dirty %d) to referenced list\n", buf->id, buf->block,
+ buf->dirty);
+
+ move_to_tail(&bm->referenced, buf->block);
+ buf->block->referenced = 1;
+
+ retval = buf->block->mem->ofs;
+ }
+ }
+ UNLOCK(bm);
+
+ return retval;
+}
+
+
+
+/* Extract data from the buffer:
+ */
+void bmBufferGetSubData(struct intel_context *intel,
+ struct buffer *buf,
+ unsigned offset,
+ unsigned size,
+ void *data )
+{
+ struct bufmgr *bm = intel->bm;
+
+ LOCK(bm);
+ {
+ DBG("bmBufferSubdata %d offset 0x%x sz 0x%x\n", buf->id, offset, size);
+
+ if (buf->flags & (BM_NO_EVICT|BM_NO_BACKING_STORE)) {
+ if (buf->block && size) {
+ wait_quiescent(intel, buf->block);
+ do_memcpy(data, buf->block->virtual + offset, size);
+ }
+ }
+ else {
+ if (buf->backing_store && size) {
+ do_memcpy(data, buf->backing_store + offset, size);
+ }
+ }
+ }
+ UNLOCK(bm);
+}
+
+
+/* Return a pointer to whatever space the buffer is currently resident in:
+ */
+void *bmMapBuffer( struct intel_context *intel,
+ struct buffer *buf,
+ unsigned flags )
+{
+ struct bufmgr *bm = intel->bm;
+ void *retval = NULL;
+
+ LOCK(bm);
+ {
+ DBG("bmMapBuffer %d\n", buf->id);
+
+ if (buf->mapped) {
+ _mesa_printf("%s: already mapped\n", __FUNCTION__);
+ retval = NULL;
+ }
+ else if (buf->flags & (BM_NO_BACKING_STORE|BM_NO_EVICT)) {
+
+ assert(intel->locked);
+
+ if (!buf->block && !evict_and_alloc_block(intel, buf)) {
+ DBG("%s: alloc failed\n", __FUNCTION__);
+ bm->fail = 1;
+ retval = NULL;
+ }
+ else {
+ assert(buf->block);
+ buf->dirty = 0;
+
+ if (!(buf->flags & BM_NO_FENCE_SUBDATA))
+ wait_quiescent(intel, buf->block);
+
+ buf->mapped = 1;
+ retval = buf->block->virtual;
+ }
+ }
+ else {
+ DBG("%s - set buf %d dirty\n", __FUNCTION__, buf->id);
+ set_dirty(intel, buf);
+
+ if (buf->backing_store == 0)
+ alloc_backing_store(intel, buf);
+
+ buf->mapped = 1;
+ retval = buf->backing_store;
+ }
+ }
+ UNLOCK(bm);
+ return retval;
+}
+
+void bmUnmapBuffer( struct intel_context *intel, struct buffer *buf )
+{
+ struct bufmgr *bm = intel->bm;
+
+ LOCK(bm);
+ {
+ DBG("bmUnmapBuffer %d\n", buf->id);
+ buf->mapped = 0;
+ }
+ UNLOCK(bm);
+}
+
+
+
+
+/* This is the big hack that turns on BM_NO_BACKING_STORE. Basically
+ * says that an external party will maintain the backing store, e.g.
+ * Mesa's local copy of texture data.
+ */
+void bmBufferSetInvalidateCB(struct intel_context *intel,
+ struct buffer *buf,
+ void (*invalidate_cb)( struct intel_context *, void *ptr ),
+ void *ptr,
+ GLboolean dont_fence_subdata)
+{
+ struct bufmgr *bm = intel->bm;
+
+ LOCK(bm);
+ {
+ if (buf->backing_store)
+ free_backing_store(intel, buf);
+
+ buf->flags |= BM_NO_BACKING_STORE;
+
+ if (dont_fence_subdata)
+ buf->flags |= BM_NO_FENCE_SUBDATA;
+
+ DBG("bmBufferSetInvalidateCB set buf %d dirty\n", buf->id);
+ buf->dirty = 1;
+ buf->invalidate_cb = invalidate_cb;
+ buf->invalidate_ptr = ptr;
+
+ /* Note that it is invalid right from the start. Also note
+ * invalidate_cb is called with the bufmgr locked, so cannot
+ * itself make bufmgr calls.
+ */
+ invalidate_cb( intel, ptr );
+ }
+ UNLOCK(bm);
+}
+
+
+
+
+
+
+
+/* This is only protected against thread interactions by the DRI lock
+ * and the policy of ensuring that all dma is flushed prior to
+ * releasing that lock. Otherwise you might have two threads building
+ * up a list of buffers to validate at once.
+ */
+int bmValidateBuffers( struct intel_context *intel )
+{
+ struct bufmgr *bm = intel->bm;
+ int retval = 0;
+
+ LOCK(bm);
+ {
+ DBG("%s fail %d\n", __FUNCTION__, bm->fail);
+ assert(intel->locked);
+
+ if (!bm->fail) {
+ struct block *block, *tmp;
+
+ foreach_s(block, tmp, &bm->referenced) {
+ struct buffer *buf = block->buf;
+
+ DBG("Validate buf %d / block %p / dirty %d\n", buf->id, block, buf->dirty);
+
+ /* Upload the buffer contents if necessary:
+ */
+ if (buf->dirty) {
+ DBG("Upload dirty buf %d (%s) sz %d offset 0x%x\n", buf->id,
+ buf->name, buf->size, block->mem->ofs);
+
+ assert(!(buf->flags & (BM_NO_BACKING_STORE|BM_NO_EVICT)));
+
+ wait_quiescent(intel, buf->block);
+
+ do_memcpy(buf->block->virtual,
+ buf->backing_store,
+ buf->size);
+
+ buf->dirty = 0;
+ }
+
+ block->referenced = 0;
+ block->on_hardware = 1;
+ move_to_tail(&bm->on_hardware, block);
+ }
+
+ bm->need_fence = 1;
+ }
+
+ retval = bm->fail ? -1 : 0;
+ }
+ UNLOCK(bm);
+
+
+ if (retval != 0)
+ DBG("%s failed\n", __FUNCTION__);
+
+ return retval;
+}
+
+
+
+
+void bmReleaseBuffers( struct intel_context *intel )
+{
+ struct bufmgr *bm = intel->bm;
+
+ LOCK(bm);
+ {
+ struct block *block, *tmp;
+
+ foreach_s (block, tmp, &bm->referenced) {
+
+ DBG("remove block %p from referenced list\n", block);
+
+ if (block->on_hardware) {
+ /* Return to the on-hardware list.
+ */
+ move_to_tail(&bm->on_hardware, block);
+ }
+ else if (block->fenced) {
+ struct block *s;
+
+ /* Hmm - have to scan the fenced list to insert the
+ * buffers in order. This is O(nm), but rare and the
+ * numbers are low.
+ */
+ foreach (s, &bm->fenced) {
+ if (FENCE_LTE(block->fence, s->fence))
+ break;
+ }
+
+ move_to_tail(s, block);
+ }
+ else {
+ /* Return to the lru list:
+ */
+ move_to_tail(&block->pool->lru, block);
+ }
+
+ block->referenced = 0;
+ }
+ }
+ UNLOCK(bm);
+}
+
+
+/* This functionality is used by the buffer manager, not really sure
+ * if we need to be exposing it in this way, probably libdrm will
+ * offer equivalent calls.
+ *
+ * For now they can stay, but will likely change/move before final:
+ */
+unsigned bmSetFence( struct intel_context *intel )
+{
+ assert(intel->locked);
+
+ /* Emit MI_FLUSH here:
+ */
+ if (intel->bm->need_fence) {
+
+ /* Emit a flush without using a batchbuffer. Can't rely on the
+ * batchbuffer at this level really. Would really prefer that
+ * the IRQ ioctl emitted the flush at the same time.
+ */
+ GLuint dword[2];
+ dword[0] = intel->vtbl.flush_cmd();
+ dword[1] = 0;
+ intel_cmd_ioctl(intel, (char *)&dword, sizeof(dword));
+
+ intel->bm->last_fence = intelEmitIrqLocked( intel );
+
+ fence_blocks(intel, intel->bm->last_fence);
+
+ intel->vtbl.note_fence(intel, intel->bm->last_fence);
+ intel->bm->need_fence = 0;
+
+ if (intel->thrashing) {
+ intel->thrashing--;
+ if (!intel->thrashing)
+ DBG("not thrashing\n");
+ }
+
+ intel->bm->free_on_hardware = 0;
+ }
+
+ return intel->bm->last_fence;
+}
+
+unsigned bmSetFenceLock( struct intel_context *intel )
+{
+ unsigned last;
+ LOCK(intel->bm);
+ last = bmSetFence(intel);
+ UNLOCK(intel->bm);
+ return last;
+}
+unsigned bmLockAndFence( struct intel_context *intel )
+{
+ if (intel->bm->need_fence) {
+ LOCK_HARDWARE(intel);
+ LOCK(intel->bm);
+ bmSetFence(intel);
+ UNLOCK(intel->bm);
+ UNLOCK_HARDWARE(intel);
+ }
+
+ return intel->bm->last_fence;
+}
+
+
+void bmFinishFence( struct intel_context *intel, unsigned fence )
+{
+ if (!bmTestFence(intel, fence)) {
+ DBG("...wait on fence %d\n", fence);
+ intelWaitIrq( intel, fence );
+ }
+ assert(bmTestFence(intel, fence));
+ check_fenced(intel);
+}
+
+void bmFinishFenceLock( struct intel_context *intel, unsigned fence )
+{
+ LOCK(intel->bm);
+ bmFinishFence(intel, fence);
+ UNLOCK(intel->bm);
+}
+
+
+/* Specifically ignore texture memory sharing.
+ * -- just evict everything
+ * -- and wait for idle
+ */
+void bm_fake_NotifyContendedLockTake( struct intel_context *intel )
+{
+ struct bufmgr *bm = intel->bm;
+
+ LOCK(bm);
+ {
+ struct block *block, *tmp;
+ GLuint i;
+
+ assert(is_empty_list(&bm->referenced));
+
+ bm->need_fence = 1;
+ bm->fail = 0;
+ bmFinishFence(intel, bmSetFence(intel));
+
+ assert(is_empty_list(&bm->fenced));
+ assert(is_empty_list(&bm->on_hardware));
+
+ for (i = 0; i < bm->nr_pools; i++) {
+ if (!(bm->pool[i].flags & BM_NO_EVICT)) {
+ foreach_s(block, tmp, &bm->pool[i].lru) {
+ assert(bmTestFence(intel, block->fence));
+ set_dirty(intel, block->buf);
+ }
+ }
+ }
+ }
+ UNLOCK(bm);
+}
+
+
+
+void bmEvictAll( struct intel_context *intel )
+{
+ struct bufmgr *bm = intel->bm;
+
+ LOCK(bm);
+ {
+ struct block *block, *tmp;
+ GLuint i;
+
+ DBG("%s\n", __FUNCTION__);
+
+ assert(is_empty_list(&bm->referenced));
+
+ bm->need_fence = 1;
+ bm->fail = 0;
+ bmFinishFence(intel, bmSetFence(intel));
+
+ assert(is_empty_list(&bm->fenced));
+ assert(is_empty_list(&bm->on_hardware));
+
+ for (i = 0; i < bm->nr_pools; i++) {
+ if (!(bm->pool[i].flags & BM_NO_EVICT)) {
+ foreach_s(block, tmp, &bm->pool[i].lru) {
+ assert(bmTestFence(intel, block->fence));
+ set_dirty(intel, block->buf);
+ block->buf->block = NULL;
+
+ free_block(intel, block);
+ }
+ }
+ }
+ }
+ UNLOCK(bm);
+}
+
+
+GLboolean bmError( struct intel_context *intel )
+{
+ struct bufmgr *bm = intel->bm;
+ GLboolean retval;
+
+ LOCK(bm);
+ {
+ retval = bm->fail;
+ }
+ UNLOCK(bm);
+
+ return retval;
+}
+
+
+GLuint bmCtxId( struct intel_context *intel )
+{
+ return intel->bm->ctxId;
+}
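
FENCE_LTE above is the heart of the fence bookkeeping: the 32-bit breadcrumb counter wraps at MAXFENCE, so ordering is decided by treating any difference under 2^24 as "recent". A standalone illustration of the same comparison, outside the driver:

    #include <assert.h>
    #include <stdio.h>

    #define MAXFENCE 0x7fffffff

    /* Same ordering test as bufmgr_fake.c: is fence 'a' <= fence 'b',
     * allowing the counter to wrap around at MAXFENCE?
     */
    static int fence_lte(unsigned a, unsigned b)
    {
       if (a == b)
          return 1;
       if (a < b && b - a < (1 << 24))
          return 1;
       if (a > b && MAXFENCE - a + b < (1 << 24))
          return 1;
       return 0;
    }

    int main(void)
    {
       assert(fence_lte(100, 100));          /* equal fences */
       assert(fence_lte(100, 200));          /* normal ordering */
       assert(!fence_lte(200, 100));         /* ...and its converse */
       assert(fence_lte(MAXFENCE - 5, 10));  /* b has wrapped past MAXFENCE */
       printf("fence ordering survives wrap-around\n");
       return 0;
    }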
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.c b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
index e4557a38471..2aaa10e1c21 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
@@ -28,24 +28,13 @@
#include "imports.h"
#include "intel_batchbuffer.h"
#include "intel_ioctl.h"
-#include "dri_bufmgr.h"
+#include "bufmgr.h"
static void intel_batchbuffer_reset( struct intel_batchbuffer *batch )
{
assert(batch->map == NULL);
- if (batch->buf != NULL) {
- dri_bo_unreference(batch->buf);
- batch->buf = NULL;
- }
-
- batch->buf = dri_bo_alloc(intel->intelScreen->bufmgr, "batchbuffer",
- intel->intelScreen->maxBatchSize, 4096,
- DRM_BO_FLAG_MEM_TT);
- dri_bo_map(batch->buf, GL_TRUE);
- batch->map = batch->buf->virtual;
-
batch->offset = (unsigned long)batch->ptr;
batch->offset = (batch->offset + 63) & ~63;
batch->ptr = (unsigned char *) batch->offset;
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.h b/src/mesa/drivers/dri/i965/intel_batchbuffer.h
index a824966d2e2..25e0a65e99f 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.h
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.h
@@ -29,7 +29,7 @@
#define INTEL_BATCHBUFFER_H
#include "mtypes.h"
-#include "dri_bufmgr.h"
+#include "bufmgr.h"
struct intel_context;
@@ -43,7 +43,7 @@ struct intel_context;
struct intel_batchbuffer {
struct intel_context *intel;
- dri_bo *buffer;
+ struct buffer *buffer;
GLuint flags;
unsigned long offset;
diff --git a/src/mesa/drivers/dri/i965/intel_blit.c b/src/mesa/drivers/dri/i965/intel_blit.c
index 4502c551a73..f88cbb2328d 100644
--- a/src/mesa/drivers/dri/i965/intel_blit.c
+++ b/src/mesa/drivers/dri/i965/intel_blit.c
@@ -41,7 +41,7 @@
#include "intel_regions.h"
#include "intel_structs.h"
-#include "dri_bufmgr.h"
+#include "bufmgr.h"
@@ -66,7 +66,7 @@ void intelCopyBuffer( const __DRIdrawablePrivate *dPriv,
intelFlush( &intel->ctx );
- dri_fence_wait(intel, intel->last_swap_fence);
+ bmFinishFenceLock(intel, intel->last_swap_fence);
/* The LOCK_HARDWARE is required for the cliprects. Buffer offsets
* should work regardless.
@@ -154,10 +154,8 @@ void intelCopyBuffer( const __DRIdrawablePrivate *dPriv,
}
intel_batchbuffer_flush( intel->batch );
-
- dri_fence_unreference(intel->second_last_swap_fence);
intel->second_last_swap_fence = intel->last_swap_fence;
- intel->last_swap_fence = dri_fence_reference(intel->batch->last_fence);
+ intel->last_swap_fence = bmSetFenceLock( intel );
UNLOCK_HARDWARE( intel );
if (!rect)
@@ -180,7 +178,7 @@ void intelCopyBuffer( const __DRIdrawablePrivate *dPriv,
void intelEmitFillBlit( struct intel_context *intel,
GLuint cpp,
GLshort dst_pitch,
- dri_bo *dst_buffer,
+ struct buffer *dst_buffer,
GLuint dst_offset,
GLboolean dst_tiled,
GLshort x, GLshort y,
@@ -252,11 +250,11 @@ static GLuint translate_raster_op(GLenum logicop)
void intelEmitCopyBlit( struct intel_context *intel,
GLuint cpp,
GLshort src_pitch,
- dri_bo *src_buffer,
+ struct buffer *src_buffer,
GLuint src_offset,
GLboolean src_tiled,
GLshort dst_pitch,
- dri_bo *dst_buffer,
+ struct buffer *dst_buffer,
GLuint dst_offset,
GLboolean dst_tiled,
GLshort src_x, GLshort src_y,
@@ -530,7 +528,7 @@ intelEmitImmediateColorExpandBlit(struct intel_context *intel,
GLubyte *src_bits, GLuint src_size,
GLuint fg_color,
GLshort dst_pitch,
- dri_bo *dst_buffer,
+ struct buffer *dst_buffer,
GLuint dst_offset,
GLboolean dst_tiled,
GLshort x, GLshort y,
diff --git a/src/mesa/drivers/dri/i965/intel_blit.h b/src/mesa/drivers/dri/i965/intel_blit.h
index 45dd33db664..e361545c8fa 100644
--- a/src/mesa/drivers/dri/i965/intel_blit.h
+++ b/src/mesa/drivers/dri/i965/intel_blit.h
@@ -31,7 +31,7 @@
#include "intel_context.h"
#include "intel_ioctl.h"
-dri_bo;
+struct buffer;
extern void intelCopyBuffer( const __DRIdrawablePrivate *dpriv,
const drm_clip_rect_t *rect );
@@ -40,11 +40,11 @@ extern void intelClearWithBlit(GLcontext *ctx, GLbitfield mask);
extern void intelEmitCopyBlit( struct intel_context *intel,
GLuint cpp,
GLshort src_pitch,
- dri_bo *src_buffer,
+ struct buffer *src_buffer,
GLuint src_offset,
GLboolean src_tiled,
GLshort dst_pitch,
- dri_bo *dst_buffer,
+ struct buffer *dst_buffer,
GLuint dst_offset,
GLboolean dst_tiled,
GLshort srcx, GLshort srcy,
@@ -55,7 +55,7 @@ extern void intelEmitCopyBlit( struct intel_context *intel,
extern void intelEmitFillBlit( struct intel_context *intel,
GLuint cpp,
GLshort dst_pitch,
- dri_bo *dst_buffer,
+ struct buffer *dst_buffer,
GLuint dst_offset,
GLboolean dst_tiled,
GLshort x, GLshort y,
@@ -68,7 +68,7 @@ intelEmitImmediateColorExpandBlit(struct intel_context *intel,
GLubyte *src_bits, GLuint src_size,
GLuint fg_color,
GLshort dst_pitch,
- dri_bo *dst_buffer,
+ struct buffer *dst_buffer,
GLuint dst_offset,
GLboolean dst_tiled,
GLshort dst_x, GLshort dst_y,
diff --git a/src/mesa/drivers/dri/i965/intel_buffer_objects.c b/src/mesa/drivers/dri/i965/intel_buffer_objects.c
index 32c458987d7..3349284f5db 100644
--- a/src/mesa/drivers/dri/i965/intel_buffer_objects.c
+++ b/src/mesa/drivers/dri/i965/intel_buffer_objects.c
@@ -32,7 +32,7 @@
#include "intel_context.h"
#include "intel_buffer_objects.h"
-#include "dri_bufmgr.h"
+#include "bufmgr.h"
/**
@@ -185,7 +185,7 @@ static GLboolean intel_bufferobj_unmap( GLcontext *ctx,
return GL_TRUE;
}
-dri_bo *intel_bufferobj_buffer( const struct intel_buffer_object *intel_obj )
+struct buffer *intel_bufferobj_buffer( const struct intel_buffer_object *intel_obj )
{
assert(intel_obj->Base.Name);
assert(intel_obj->buffer);
diff --git a/src/mesa/drivers/dri/i965/intel_buffer_objects.h b/src/mesa/drivers/dri/i965/intel_buffer_objects.h
index a80f448716d..4b38803e576 100644
--- a/src/mesa/drivers/dri/i965/intel_buffer_objects.h
+++ b/src/mesa/drivers/dri/i965/intel_buffer_objects.h
@@ -39,13 +39,13 @@ struct gl_buffer_object;
*/
struct intel_buffer_object {
struct gl_buffer_object Base;
- dri_bo *buffer; /* the low-level buffer manager's buffer handle */
+ struct buffer *buffer; /* the low-level buffer manager's buffer handle */
};
/* Get the bm buffer associated with a GL bufferobject:
*/
-dri_bo *intel_bufferobj_buffer( const struct intel_buffer_object *obj );
+struct buffer *intel_bufferobj_buffer( const struct intel_buffer_object *obj );
/* Hook the bufferobject implementation into mesa:
*/
diff --git a/src/mesa/drivers/dri/i965/intel_context.c b/src/mesa/drivers/dri/i965/intel_context.c
index 37c9fa3ec31..022819d5821 100644
--- a/src/mesa/drivers/dri/i965/intel_context.c
+++ b/src/mesa/drivers/dri/i965/intel_context.c
@@ -58,7 +58,7 @@
#include "intel_regions.h"
#include "intel_buffer_objects.h"
-#include "dri_bufmgr.h"
+#include "bufmgr.h"
#include "utils.h"
#include "vblank.h"
@@ -635,10 +635,10 @@ static void intelContendedLock( struct intel_context *intel, GLuint flags )
/* As above, but don't evict the texture data on transitions
* between contexts which all share a local buffer manager.
*/
- if (sarea->texAge != intel->hHWContext) {
+ if (sarea->texAge != my_bufmgr) {
DBG("Lost Textures: sarea->texAge %x my_bufmgr %x\n", sarea->ctxOwner, my_bufmgr);
- sarea->texAge = intel->hHWContext;
- dri_bufmgr_fake_contended_lock_take(intel->intelScreen->bufmgr);
+ sarea->texAge = my_bufmgr;
+ bm_fake_NotifyContendedLockTake( intel );
}
/* Drawable changed?
@@ -668,6 +668,11 @@ void LOCK_HARDWARE( struct intel_context *intel )
intel->locked = 1;
+ if (bmError(intel)) {
+ bmEvictAll(intel);
+ intel->vtbl.lost_hardware( intel );
+ }
+
/* Make sure nothing has been emitted prior to getting the lock:
*/
assert(intel->batch->map == 0);
@@ -675,8 +680,16 @@ void LOCK_HARDWARE( struct intel_context *intel )
/* XXX: postpone, may not be needed:
*/
if (!intel_batchbuffer_map(intel->batch)) {
- _mesa_printf("failure to map batchbuffer\n");
- assert(0);
+ bmEvictAll(intel);
+ intel->vtbl.lost_hardware( intel );
+
+ /* This could only fail if the batchbuffer was greater in size
+ * than the available texture memory:
+ */
+ if (!intel_batchbuffer_map(intel->batch)) {
+ _mesa_printf("double failure to map batchbuffer\n");
+ assert(0);
+ }
}
}
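
The texAge check in intelContendedLock is how a context notices that some other process held the hardware lock: every holder stamps the shared SAREA field with its own id, so finding any other value there means foreign work ran and texture memory must be treated as lost. A schematic of that handshake, with hypothetical names (shared_age standing in for sarea->texAge, my_id for my_bufmgr):

    /* Hypothetical sketch of the lost-context handshake used above. */
    static unsigned shared_age;   /* stands in for sarea->texAge */

    static void on_lock_take(unsigned my_id, void (*evict_all)(void))
    {
       if (shared_age != my_id) {
          /* Someone outside our share group held the lock since we
           * last did; our view of texture memory is stale.
           */
          shared_age = my_id;
          evict_all();
       }
    }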
diff --git a/src/mesa/drivers/dri/i965/intel_ioctl.c b/src/mesa/drivers/dri/i965/intel_ioctl.c
index 88d59830272..e7e736079f4 100644
--- a/src/mesa/drivers/dri/i965/intel_ioctl.c
+++ b/src/mesa/drivers/dri/i965/intel_ioctl.c
@@ -41,7 +41,7 @@
#include "intel_blit.h"
#include "intel_regions.h"
#include "drm.h"
-#include "dri_bufmgr.h"
+#include "bufmgr.h"
static int intelWaitIdleLocked( struct intel_context *intel )
{
diff --git a/src/mesa/drivers/dri/i965/intel_mipmap_tree.c b/src/mesa/drivers/dri/i965/intel_mipmap_tree.c
index 8db61267b55..0fb33e27f47 100644
--- a/src/mesa/drivers/dri/i965/intel_mipmap_tree.c
+++ b/src/mesa/drivers/dri/i965/intel_mipmap_tree.c
@@ -28,7 +28,7 @@
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
-#include "dri_bufmgr.h"
+#include "bufmgr.h"
#include "enums.h"
#include "imports.h"
diff --git a/src/mesa/drivers/dri/i965/intel_regions.c b/src/mesa/drivers/dri/i965/intel_regions.c
index 9bf858cacb4..b78eba898ff 100644
--- a/src/mesa/drivers/dri/i965/intel_regions.c
+++ b/src/mesa/drivers/dri/i965/intel_regions.c
@@ -42,7 +42,7 @@
#include "intel_context.h"
#include "intel_regions.h"
#include "intel_blit.h"
-#include "dri_bufmgr.h"
+#include "bufmgr.h"
#include "imports.h"
/* XXX: Thread safety?
@@ -51,7 +51,7 @@ GLubyte *intel_region_map(struct intel_context *intel, struct intel_region *regi
{
DBG("%s\n", __FUNCTION__);
if (!region->map_refcount++) {
- region->map = dri_bo_map(region->buffer, GL_TRUE);
+ region->map = bmMapBuffer(intel, region->buffer, 0);
if (!region->map)
region->map_refcount--;
}
@@ -64,7 +64,7 @@ void intel_region_unmap(struct intel_context *intel,
{
DBG("%s\n", __FUNCTION__);
if (!--region->map_refcount) {
- dri_bo_unmap(region->buffer);
+ bmUnmapBuffer(intel, region->buffer);
region->map = NULL;
}
}
@@ -84,8 +84,8 @@ struct intel_region *intel_region_alloc( struct intel_context *intel,
region->height = height; /* needed? */
region->refcount = 1;
- region->buffer = dri_bo_alloc(intelScreen->bufmgr, "region",
- pitch * cpp * height, 64, DRM_BO_FLAG_MEM_TT);
+ bmGenBuffers(intel, "tex", 1, &region->buffer, 6);
+ bmBufferData(intel, region->buffer, pitch * cpp * height, NULL, 0);
return region;
}
@@ -108,7 +108,7 @@ void intel_region_release( struct intel_context *intel,
if (--(*region)->refcount == 0) {
assert((*region)->map_refcount == 0);
- dri_bo_unreference((*region)->buffer);
+ bmDeleteBuffers(intel, 1, &(*region)->buffer);
free(*region);
}
*region = NULL;
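
intel_region_map() and intel_region_unmap() nest through map_refcount, so only the outermost map and the final unmap actually reach the buffer manager. The same idiom in a standalone, runnable form, with malloc/free standing in for bmMapBuffer/bmUnmapBuffer:

    #include <stdio.h>
    #include <stdlib.h>

    struct region {
       void *map;
       int map_refcount;
       size_t size;
    };

    static void *region_map(struct region *r)
    {
       if (!r->map_refcount++) {
          r->map = malloc(r->size);   /* stands in for bmMapBuffer() */
          if (!r->map)
             r->map_refcount--;
       }
       return r->map;
    }

    static void region_unmap(struct region *r)
    {
       if (!--r->map_refcount) {
          free(r->map);               /* stands in for bmUnmapBuffer() */
          r->map = NULL;
       }
    }

    int main(void)
    {
       struct region r = { NULL, 0, 4096 };
       void *a = region_map(&r);
       void *b = region_map(&r);      /* nested map returns same pointer */
       printf("nested maps agree: %s\n", a == b ? "yes" : "no");
       region_unmap(&r);
       region_unmap(&r);              /* last unmap releases the mapping */
       return 0;
    }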
diff --git a/src/mesa/drivers/dri/i965/intel_regions.h b/src/mesa/drivers/dri/i965/intel_regions.h
index 8505d7eedc2..d2235f1275b 100644
--- a/src/mesa/drivers/dri/i965/intel_regions.h
+++ b/src/mesa/drivers/dri/i965/intel_regions.h
@@ -29,7 +29,7 @@
#define INTEL_REGIONS_H
#include "mtypes.h"
-#include "dri_bufmgr.h" /* for DBG! */
+#include "bufmgr.h" /* for DBG! */
struct intel_context;
/* A layer on top of the bufmgr buffers that adds a few useful things:
@@ -40,7 +40,7 @@ struct intel_context;
* - Blitter commands for copying 2D regions between buffers.
*/
struct intel_region {
- dri_bo *buffer;
+ struct buffer *buffer;
GLuint refcount;
GLuint cpp;
GLuint pitch;
diff --git a/src/mesa/drivers/dri/i965/intel_screen.c b/src/mesa/drivers/dri/i965/intel_screen.c
index 06bf9214f05..5dac50df32c 100644
--- a/src/mesa/drivers/dri/i965/intel_screen.c
+++ b/src/mesa/drivers/dri/i965/intel_screen.c
@@ -109,18 +109,12 @@ intelMapScreenRegions(__DRIscreenPrivate *sPriv)
return GL_FALSE;
}
- if (intelScreen->tex.size != 0) {
- intelScreen->ttm = GL_FALSE;
-
- if (drmMap(sPriv->fd,
- intelScreen->tex.handle,
- intelScreen->tex.size,
- (drmAddress *)&intelScreen->tex.map) != 0) {
- intelUnmapScreenRegions(intelScreen);
- return GL_FALSE;
- }
- } else {
- intelScreen->ttm = GL_TRUE;
+ if (drmMap(sPriv->fd,
+ intelScreen->tex.handle,
+ intelScreen->tex.size,
+ (drmAddress *)&intelScreen->tex.map) != 0) {
+ intelUnmapScreenRegions(intelScreen);
+ return GL_FALSE;
}
if (0)
@@ -169,32 +163,6 @@ intelUnmapScreenRegions(intelScreenPrivate *intelScreen)
}
}
-/** Driver-specific fence emit implementation for the fake memory manager. */
-static unsigned int
-intel_fence_emit(void *private)
-{
- intelScreenPrivate *intelScreen = (intelScreenPrivate *)private;
- unsigned int fence;
-
- /* XXX: Need to emit a flush, if we haven't already (at least with the
- * current batchbuffer implementation, we have).
- */
-
- fence = intelEmitIrqLocked(intelScreen);
-
- return fence;
-}
-
-/** Driver-specific fence wait implementation for the fake memory manager. */
-static int
-intel_fence_wait(void *private, unsigned int cookie)
-{
- intelScreenPrivate *intelScreen = (intelScreenPrivate *)private;
-
- intelWaitIrq(intelScreen, cookie);
-
- return 0;
-}
static void
intelPrintDRIInfo(intelScreenPrivate *intelScreen,
@@ -391,19 +359,7 @@ static GLboolean intelInitDriver(__DRIscreenPrivate *sPriv)
(*glx_enable_extension)( psc, "GLX_SGI_make_current_read" );
(*glx_enable_extension)( psc, "GLX_MESA_copy_sub_buffer" );
}
-
- assert(!intelScreen->ttm);
- intelScreen->bufmgr = dri_bufmgr_fake_init(intelScreen->tex.offset,
- intelScreen->tex.map,
- intelScreen->tex.size,
- intel_fence_emit,
- intel_fence_wait,
- intelScreen);
- if (intelScreen->bufmgr == FALSE) {
- fprintf(stderr, "Couldn't initialize buffer manager\n");
- return GL_FALSE;
- }
-
+
return GL_TRUE;
}
@@ -413,7 +369,6 @@ static void intelDestroyScreen(__DRIscreenPrivate *sPriv)
intelScreenPrivate *intelScreen = (intelScreenPrivate *)sPriv->private;
intelUnmapScreenRegions(intelScreen);
- dri_bufmgr_destroy(intelScreen->bufmgr);
FREE(intelScreen);
sPriv->private = NULL;
}
diff --git a/src/mesa/drivers/dri/i965/intel_screen.h b/src/mesa/drivers/dri/i965/intel_screen.h
index bb004822c43..bf9a716082d 100644
--- a/src/mesa/drivers/dri/i965/intel_screen.h
+++ b/src/mesa/drivers/dri/i965/intel_screen.h
@@ -80,13 +80,6 @@ typedef struct
* Configuration cache with default values for all contexts
*/
driOptionCache optionCache;
-
- /**
- * This value indicates that the kernel memory manager is being used
- * instead of the fake client-side memory manager.
- */
- GLboolean ttm;
- dri_bufmgr *bufmgr;
} intelScreenPrivate;
diff --git a/src/mesa/drivers/dri/i965/intel_tex_layout.c b/src/mesa/drivers/dri/i965/intel_tex_layout.c
index fcb5cc39068..fe61b441945 100644..120000
--- a/src/mesa/drivers/dri/i965/intel_tex_layout.c
+++ b/src/mesa/drivers/dri/i965/intel_tex_layout.c
@@ -1,102 +1 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
- /*
- * Authors:
- * Keith Whitwell <[email protected]>
- * Michel Dänzer <[email protected]>
- */
-
-#include "intel_mipmap_tree.h"
-#include "intel_tex_layout.h"
-#include "macros.h"
-
-
-static int align(int value, int alignment)
-{
- return (value + alignment - 1) & ~(alignment - 1);
-}
-
-void i945_miptree_layout_2d( struct intel_mipmap_tree *mt )
-{
- GLint align_h = 2, align_w = 4;
- GLuint level;
- GLuint x = 0;
- GLuint y = 0;
- GLuint width = mt->width0;
- GLuint height = mt->height0;
-
- mt->pitch = mt->width0;
-
- /* May need to adjust pitch to accomodate the placement of
- * the 2nd mipmap. This occurs when the alignment
- * constraints of mipmap placement push the right edge of the
- * 2nd mipmap out past the width of its parent.
- */
- if (mt->first_level != mt->last_level) {
- GLuint mip1_width = align(minify(mt->width0), align_w)
- + minify(minify(mt->width0));
-
- if (mip1_width > mt->width0)
- mt->pitch = mip1_width;
- }
-
- /* Pitch must be a whole number of dwords, even though we
- * express it in texels.
- */
- mt->pitch = align(mt->pitch * mt->cpp, 4) / mt->cpp;
- mt->total_height = 0;
-
- for ( level = mt->first_level ; level <= mt->last_level ; level++ ) {
- GLuint img_height;
-
- intel_miptree_set_level_info(mt, level, 1, x, y, width,
- height, 1);
-
- if (mt->compressed)
- img_height = MAX2(1, height/4);
- else
- img_height = align(height, align_h);
-
-
- /* Because the images are packed better, the final offset
- * might not be the maximal one:
- */
- mt->total_height = MAX2(mt->total_height, y + img_height);
-
- /* Layout_below: step right after second mipmap.
- */
- if (level == mt->first_level + 1) {
- x += align(width, align_w);
- }
- else {
- y += img_height;
- }
-
- width = minify(width);
- height = minify(height);
- }
-}
+../intel/intel_tex_layout.c
\ No newline at end of file
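
The deleted copy of intel_tex_layout.c (now a symlink into ../intel) used the classic power-of-two round-up in its align() helper, the same arithmetic as the batchbuffer's (offset + 63) & ~63. A standalone check of that trick:

    #include <assert.h>

    /* Round value up to a multiple of alignment (a power of two). */
    static int align_up(int value, int alignment)
    {
       return (value + alignment - 1) & ~(alignment - 1);
    }

    int main(void)
    {
       assert(align_up(1, 4) == 4);
       assert(align_up(4, 4) == 4);      /* already aligned: unchanged */
       assert(align_up(5, 4) == 8);
       assert(align_up(100, 64) == 128); /* same math as (offset+63) & ~63 */
       return 0;
    }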
diff --git a/src/mesa/drivers/dri/i965/intel_tex_validate.c b/src/mesa/drivers/dri/i965/intel_tex_validate.c
index 0cfb32f76d1..8c05e7cdabc 100644
--- a/src/mesa/drivers/dri/i965/intel_tex_validate.c
+++ b/src/mesa/drivers/dri/i965/intel_tex_validate.c
@@ -31,7 +31,7 @@
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_tex.h"
-#include "dri_bufmgr.h"
+#include "bufmgr.h"
/**
* Compute which mipmap levels that really need to be sent to the hardware.