author     Eric Anholt <[email protected]>   2008-06-03 14:43:48 -0700
committer  Eric Anholt <[email protected]>   2008-06-03 14:43:48 -0700
commit     4b5b008d54e86ac4f0a2176429d062100978ca8c (patch)
tree       034024ec34df06863f7568273872b22bb895a45b /src
parent     fccc427aac17b3fa17160332e6e6f3c2cef25ca5 (diff)
[intel] Convert drivers to using libdrm bufmgr code.
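
This commit removes the driver-local buffer-manager layer (dri/common/dri_bufmgr.c and dri_bufmgr.h) along with the per-driver symlinks to the shared intel_bufmgr_* sources, and switches the i915/i965/intel code over to the bufmgr implementation that now lives in libdrm. At the call sites this shows up in two ways, both visible in the hunks below: dri_bo_alloc() no longer takes a DRM_BO_FLAG_* location mask, and relocation emission goes through intel_bo_emit_reloc() instead of dri_emit_reloc(). A minimal before/after sketch of that call-site change (the buffer name, size, and offset variables here are made up for illustration, not taken from this commit):

    /* Before: driver-local dri_bufmgr, with a location/cache mask. */
    bo = dri_bo_alloc(intel->bufmgr, "example buffer", 4096, 4096,
                      DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
                      DRM_BO_FLAG_CACHED_MAPPED);
    dri_emit_reloc(state_bo,
                   DRM_GEM_DOMAIN_I915_INSTRUCTION, 0,
                   0 /* delta */, reloc_offset, target_bo);

    /* After: libdrm bufmgr; the mask is dropped and relocations use the
     * intel-specific entry point with the same domain/delta/offset
     * arguments. */
    bo = dri_bo_alloc(intel->bufmgr, "example buffer", 4096, 4096);
    intel_bo_emit_reloc(state_bo,
                        DRM_GEM_DOMAIN_I915_INSTRUCTION, 0,
                        0 /* delta */, reloc_offset, target_bo);
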
Diffstat (limited to 'src')
-rw-r--r--  src/mesa/drivers/dri/Makefile.template            |    4
-rw-r--r--  src/mesa/drivers/dri/common/dri_bufmgr.c          |  171
-rw-r--r--  src/mesa/drivers/dri/common/dri_bufmgr.h          |  216
-rw-r--r--  src/mesa/drivers/dri/i915/Makefile                |    5
l---------  src/mesa/drivers/dri/i915/intel_bufmgr_fake.c     |    1
l---------  src/mesa/drivers/dri/i915/intel_bufmgr_gem.c      |    1
l---------  src/mesa/drivers/dri/i915/intel_bufmgr_ttm.c      |    1
-rw-r--r--  src/mesa/drivers/dri/i965/Makefile                |    3
-rw-r--r--  src/mesa/drivers/dri/i965/brw_cc.c                |   12
-rw-r--r--  src/mesa/drivers/dri/i965/brw_clip_state.c        |   12
-rw-r--r--  src/mesa/drivers/dri/i965/brw_curbe.c             |    5
-rw-r--r--  src/mesa/drivers/dri/i965/brw_draw_upload.c       |    5
-rw-r--r--  src/mesa/drivers/dri/i965/brw_gs_state.c          |   10
-rw-r--r--  src/mesa/drivers/dri/i965/brw_sf_state.c          |   20
-rw-r--r--  src/mesa/drivers/dri/i965/brw_state_cache.c       |    5
-rw-r--r--  src/mesa/drivers/dri/i965/brw_vs_state.c          |   10
-rw-r--r--  src/mesa/drivers/dri/i965/brw_wm_sampler_state.c  |   12
-rw-r--r--  src/mesa/drivers/dri/i965/brw_wm_state.c          |   32
-rw-r--r--  src/mesa/drivers/dri/i965/brw_wm_surface_state.c  |   34
l---------  src/mesa/drivers/dri/i965/intel_bufmgr_fake.c     |    1
l---------  src/mesa/drivers/dri/i965/intel_bufmgr_gem.c      |    1
l---------  src/mesa/drivers/dri/i965/intel_bufmgr_ttm.c      |    1
-rw-r--r--  src/mesa/drivers/dri/intel/intel_batchbuffer.c    |    8
-rw-r--r--  src/mesa/drivers/dri/intel/intel_buffer_objects.c |    3
-rw-r--r--  src/mesa/drivers/dri/intel/intel_bufmgr_fake.c    | 1177
-rw-r--r--  src/mesa/drivers/dri/intel/intel_bufmgr_fake.h    |   50
-rw-r--r--  src/mesa/drivers/dri/intel/intel_bufmgr_gem.c     |  847
-rw-r--r--  src/mesa/drivers/dri/intel/intel_bufmgr_gem.h     |   16
-rw-r--r--  src/mesa/drivers/dri/intel/intel_bufmgr_ttm.c     | 1102
-rw-r--r--  src/mesa/drivers/dri/intel/intel_bufmgr_ttm.h     |   20
-rw-r--r--  src/mesa/drivers/dri/intel/intel_context.c        |   19
-rw-r--r--  src/mesa/drivers/dri/intel/intel_context.h        |    1
-rw-r--r--  src/mesa/drivers/dri/intel/intel_ioctl.c          |    2
-rw-r--r--  src/mesa/drivers/dri/intel/intel_regions.c        |   33
-rw-r--r--  src/mesa/drivers/dri/intel/intel_screen.c         |    2
35 files changed, 105 insertions, 3737 deletions
diff --git a/src/mesa/drivers/dri/Makefile.template b/src/mesa/drivers/dri/Makefile.template
index cb416627078..864c6234c85 100644
--- a/src/mesa/drivers/dri/Makefile.template
+++ b/src/mesa/drivers/dri/Makefile.template
@@ -11,10 +11,6 @@ COMMON_SOURCES = \
../common/xmlconfig.c \
../common/drirenderbuffer.c
-COMMON_BM_SOURCES = \
- ../common/dri_bufmgr.c
-
-
ifeq ($(WINDOW_SYSTEM),dri)
WINOBJ=
WINLIB=
diff --git a/src/mesa/drivers/dri/common/dri_bufmgr.c b/src/mesa/drivers/dri/common/dri_bufmgr.c
deleted file mode 100644
index be2a7b740c3..00000000000
--- a/src/mesa/drivers/dri/common/dri_bufmgr.c
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Copyright © 2007 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <[email protected]>
- *
- */
-
-#include <string.h>
-#include <stdlib.h>
-#include <assert.h>
-#include "mtypes.h"
-#include "dri_bufmgr.h"
-
-/** @file dri_bufmgr.c
- *
- * Convenience functions for buffer management methods.
- */
-
-dri_bo *
-dri_bo_alloc(dri_bufmgr *bufmgr, const char *name, unsigned long size,
- unsigned int alignment, uint64_t location_mask)
-{
- assert((location_mask & ~(DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_MEM_TT |
- DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_MEM_PRIV0 |
- DRM_BO_FLAG_MEM_PRIV1 | DRM_BO_FLAG_MEM_PRIV2 |
- DRM_BO_FLAG_MEM_PRIV3 | DRM_BO_FLAG_MEM_PRIV4 |
- DRM_BO_FLAG_CACHED | DRM_BO_FLAG_CACHED_MAPPED)) == 0);
- return bufmgr->bo_alloc(bufmgr, name, size, alignment, location_mask);
-}
-
-dri_bo *
-dri_bo_alloc_static(dri_bufmgr *bufmgr, const char *name, unsigned long offset,
- unsigned long size, void *virtual,
- uint64_t location_mask)
-{
- assert((location_mask & ~(DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_MEM_TT |
- DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_MEM_PRIV0 |
- DRM_BO_FLAG_MEM_PRIV1 | DRM_BO_FLAG_MEM_PRIV2 |
- DRM_BO_FLAG_MEM_PRIV3 |
- DRM_BO_FLAG_MEM_PRIV4)) == 0);
-
- return bufmgr->bo_alloc_static(bufmgr, name, offset, size, virtual,
- location_mask);
-}
-
-void
-dri_bo_reference(dri_bo *bo)
-{
- bo->bufmgr->bo_reference(bo);
-}
-
-void
-dri_bo_unreference(dri_bo *bo)
-{
- if (bo == NULL)
- return;
-
- bo->bufmgr->bo_unreference(bo);
-}
-
-int
-dri_bo_map(dri_bo *buf, GLboolean write_enable)
-{
- return buf->bufmgr->bo_map(buf, write_enable);
-}
-
-int
-dri_bo_unmap(dri_bo *buf)
-{
- return buf->bufmgr->bo_unmap(buf);
-}
-
-int
-dri_bo_subdata(dri_bo *bo, unsigned long offset,
- unsigned long size, const void *data)
-{
- int ret;
- if (bo->bufmgr->bo_subdata)
- return bo->bufmgr->bo_subdata(bo, offset, size, data);
- if (size == 0 || data == NULL)
- return 0;
-
- ret = dri_bo_map(bo, GL_TRUE);
- if (ret)
- return ret;
- memcpy((unsigned char *)bo->virtual + offset, data, size);
- dri_bo_unmap(bo);
- return 0;
-}
-
-int
-dri_bo_get_subdata(dri_bo *bo, unsigned long offset,
- unsigned long size, void *data)
-{
- int ret;
- if (bo->bufmgr->bo_get_subdata)
- return bo->bufmgr->bo_get_subdata(bo, offset, size, data);
-
- if (size == 0 || data == NULL)
- return 0;
-
- ret = dri_bo_map(bo, GL_FALSE);
- if (ret)
- return ret;
- memcpy(data, (unsigned char *)bo->virtual + offset, size);
- dri_bo_unmap(bo);
- return 0;
-}
-
-void
-dri_bo_wait_rendering(dri_bo *bo)
-{
- bo->bufmgr->bo_wait_rendering(bo);
-}
-
-void
-dri_bufmgr_destroy(dri_bufmgr *bufmgr)
-{
- bufmgr->destroy(bufmgr);
-}
-
-
-int dri_emit_reloc(dri_bo *reloc_buf,
- uint32_t read_domains, uint32_t write_domain,
- uint32_t delta, uint32_t offset, dri_bo *target_buf)
-{
- return reloc_buf->bufmgr->emit_reloc(reloc_buf, read_domains, write_domain,
- delta, offset, target_buf);
-}
-
-void *dri_process_relocs(dri_bo *batch_buf)
-{
- return batch_buf->bufmgr->process_relocs(batch_buf);
-}
-
-void dri_post_submit(dri_bo *batch_buf)
-{
- batch_buf->bufmgr->post_submit(batch_buf);
-}
-
-void
-dri_bufmgr_set_debug(dri_bufmgr *bufmgr, GLboolean enable_debug)
-{
- bufmgr->debug = enable_debug;
-}
-
-int
-dri_bufmgr_check_aperture_space(dri_bo *bo)
-{
- return bo->bufmgr->check_aperture_space(bo);
-}
diff --git a/src/mesa/drivers/dri/common/dri_bufmgr.h b/src/mesa/drivers/dri/common/dri_bufmgr.h
deleted file mode 100644
index 1abca08cc8a..00000000000
--- a/src/mesa/drivers/dri/common/dri_bufmgr.h
+++ /dev/null
@@ -1,216 +0,0 @@
-/**************************************************************************
- *
- * Copyright © 2007 Intel Corporation
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
- * Eric Anholt <[email protected]>
- */
-
-#ifndef _DRI_BUFMGR_H_
-#define _DRI_BUFMGR_H_
-#include <xf86drm.h>
-
-typedef struct _dri_bufmgr dri_bufmgr;
-typedef struct _dri_bo dri_bo;
-
-struct _dri_bo {
- /**
- * Size in bytes of the buffer object.
- *
- * The size may be larger than the size originally requested for the
- * allocation, such as being aligned to page size.
- */
- unsigned long size;
- /**
- * Card virtual address (offset from the beginning of the aperture) for the
- * object. Only valid while validated.
- */
- unsigned long offset;
- /**
- * Virtual address for accessing the buffer data. Only valid while mapped.
- */
- void *virtual;
- /** Buffer manager context associated with this buffer object */
- dri_bufmgr *bufmgr;
-};
-
-/**
- * Context for a buffer manager instance.
- *
- * Contains public methods followed by private storage for the buffer manager.
- */
-struct _dri_bufmgr {
- /**
- * Allocate a buffer object.
- *
- * Buffer objects are not necessarily initially mapped into CPU virtual
- * address space or graphics device aperture. They must be mapped using
- * bo_map() to be used by the CPU, and validated for use using bo_validate()
- * to be used from the graphics device.
- */
- dri_bo *(*bo_alloc)(dri_bufmgr *bufmgr_ctx, const char *name,
- unsigned long size, unsigned int alignment,
- uint64_t location_mask);
-
- /**
- * Allocates a buffer object for a static allocation.
- *
- * Static allocations are ones such as the front buffer that are offered by
- * the X Server, which are never evicted and never moved.
- */
- dri_bo *(*bo_alloc_static)(dri_bufmgr *bufmgr_ctx, const char *name,
- unsigned long offset, unsigned long size,
- void *virtual, uint64_t location_mask);
-
- /** Takes a reference on a buffer object */
- void (*bo_reference)(dri_bo *bo);
-
- /**
- * Releases a reference on a buffer object, freeing the data if no
- * references remain.
- */
- void (*bo_unreference)(dri_bo *bo);
-
- /**
- * Maps the buffer into userspace.
- *
- * This function will block waiting for any existing execution on the
- * buffer to complete, first. The resulting mapping is available at
- * buf->virtual.
- */
- int (*bo_map)(dri_bo *buf, GLboolean write_enable);
-
- /** Reduces the refcount on the userspace mapping of the buffer object. */
- int (*bo_unmap)(dri_bo *buf);
-
- /**
- * Write data into an object.
- *
- * This is an optional function, if missing,
- * dri_bo will map/memcpy/unmap.
- */
- int (*bo_subdata) (dri_bo *buf, unsigned long offset,
- unsigned long size, const void *data);
-
- /**
- * Read data from an object
- *
- * This is an optional function, if missing,
- * dri_bo will map/memcpy/unmap.
- */
- int (*bo_get_subdata) (dri_bo *bo, unsigned long offset,
- unsigned long size, void *data);
-
- /**
- * Waits for rendering to an object by the GPU to have completed.
- *
- * This is not required for any access to the BO by bo_map, bo_subdata, etc.
- * It is merely a way for the driver to implement glFinish.
- */
- void (*bo_wait_rendering) (dri_bo *bo);
-
- /**
- * Tears down the buffer manager instance.
- */
- void (*destroy)(dri_bufmgr *bufmgr);
-
- /**
- * Add relocation entry in reloc_buf, which will be updated with the
- * target buffer's real offset on command submission.
- *
- * Relocations remain in place for the lifetime of the buffer object.
- *
- * \param reloc_buf Buffer to write the relocation into.
- * \param flags BO flags to be used in validating the target buffer.
- * Applicable flags include:
- * - DRM_BO_FLAG_READ: The buffer will be read in the process of
- * command execution.
- * - DRM_BO_FLAG_WRITE: The buffer will be written in the process of
- * command execution.
- * - DRM_BO_FLAG_MEM_TT: The buffer should be validated in TT memory.
- * - DRM_BO_FLAG_MEM_VRAM: The buffer should be validated in video
- * memory.
- * \param delta Constant value to be added to the relocation target's offset.
- * \param offset Byte offset within batch_buf of the relocated pointer.
- * \param target Buffer whose offset should be written into the relocation
- * entry.
- */
- int (*emit_reloc)(dri_bo *reloc_buf,
- uint32_t read_domains, uint32_t write_domain,
- uint32_t delta, uint32_t offset, dri_bo *target);
-
- /**
- * Processes the relocations, either in userland or by converting the list
- * for use in batchbuffer submission.
- *
- * Kernel-based implementations will return a pointer to the arguments
- * to be handed with batchbuffer submission to the kernel. The userland
- * implementation performs the buffer validation and emits relocations
- * into them in the appropriate order.
- *
- * \param batch_buf buffer at the root of the tree of relocations
- * \return argument to be completed and passed to the execbuffers ioctl
- * (if any).
- */
- void *(*process_relocs)(dri_bo *batch_buf);
-
- void (*post_submit)(dri_bo *batch_buf);
-
- int (*check_aperture_space)(dri_bo *bo);
- GLboolean debug; /**< Enables verbose debugging printouts */
-};
-
-dri_bo *dri_bo_alloc(dri_bufmgr *bufmgr, const char *name, unsigned long size,
- unsigned int alignment, uint64_t location_mask);
-dri_bo *dri_bo_alloc_static(dri_bufmgr *bufmgr, const char *name,
- unsigned long offset, unsigned long size,
- void *virtual, uint64_t location_mask);
-void dri_bo_reference(dri_bo *bo);
-void dri_bo_unreference(dri_bo *bo);
-int dri_bo_map(dri_bo *buf, GLboolean write_enable);
-int dri_bo_unmap(dri_bo *buf);
-
-int dri_bo_subdata(dri_bo *bo, unsigned long offset,
- unsigned long size, const void *data);
-int dri_bo_get_subdata(dri_bo *bo, unsigned long offset,
- unsigned long size, void *data);
-void dri_bo_wait_rendering(dri_bo *bo);
-
-void dri_bufmgr_set_debug(dri_bufmgr *bufmgr, GLboolean enable_debug);
-void dri_bufmgr_destroy(dri_bufmgr *bufmgr);
-
-int dri_emit_reloc(dri_bo *reloc_buf,
- uint32_t read_domains, uint32_t write_domain,
- uint32_t delta, uint32_t offset, dri_bo *target_buf);
-void *dri_process_relocs(dri_bo *batch_buf);
-void dri_post_process_relocs(dri_bo *batch_buf);
-void dri_post_submit(dri_bo *batch_buf);
-int dri_bufmgr_check_aperture_space(dri_bo *bo);
-
-#endif
diff --git a/src/mesa/drivers/dri/i915/Makefile b/src/mesa/drivers/dri/i915/Makefile
index 476814c4ec4..74f6169b2ea 100644
--- a/src/mesa/drivers/dri/i915/Makefile
+++ b/src/mesa/drivers/dri/i915/Makefile
@@ -53,13 +53,10 @@ DRIVER_SOURCES = \
intel_state.c \
intel_tris.c \
intel_fbo.c \
- intel_depthstencil.c \
- intel_bufmgr_fake.c \
- intel_bufmgr_gem.c
+ intel_depthstencil.c
C_SOURCES = \
$(COMMON_SOURCES) \
- $(COMMON_BM_SOURCES) \
$(DRIVER_SOURCES)
ASM_SOURCES =
diff --git a/src/mesa/drivers/dri/i915/intel_bufmgr_fake.c b/src/mesa/drivers/dri/i915/intel_bufmgr_fake.c
deleted file mode 120000
index 9b840a8123a..00000000000
--- a/src/mesa/drivers/dri/i915/intel_bufmgr_fake.c
+++ /dev/null
@@ -1 +0,0 @@
-../intel/intel_bufmgr_fake.c
\ No newline at end of file
diff --git a/src/mesa/drivers/dri/i915/intel_bufmgr_gem.c b/src/mesa/drivers/dri/i915/intel_bufmgr_gem.c
deleted file mode 120000
index dee0daf9c04..00000000000
--- a/src/mesa/drivers/dri/i915/intel_bufmgr_gem.c
+++ /dev/null
@@ -1 +0,0 @@
-../intel/intel_bufmgr_gem.c
\ No newline at end of file
diff --git a/src/mesa/drivers/dri/i915/intel_bufmgr_ttm.c b/src/mesa/drivers/dri/i915/intel_bufmgr_ttm.c
deleted file mode 120000
index e9df5c62794..00000000000
--- a/src/mesa/drivers/dri/i915/intel_bufmgr_ttm.c
+++ /dev/null
@@ -1 +0,0 @@
-../intel/intel_bufmgr_ttm.c
\ No newline at end of file
diff --git a/src/mesa/drivers/dri/i965/Makefile b/src/mesa/drivers/dri/i965/Makefile
index 001f63ba12a..c15418df062 100644
--- a/src/mesa/drivers/dri/i965/Makefile
+++ b/src/mesa/drivers/dri/i965/Makefile
@@ -9,8 +9,6 @@ DRIVER_SOURCES = \
intel_blit.c \
intel_buffer_objects.c \
intel_buffers.c \
- intel_bufmgr_fake.c \
- intel_bufmgr_gem.c \
intel_context.c \
intel_decode.c \
intel_depthstencil.c \
@@ -85,7 +83,6 @@ DRIVER_SOURCES = \
C_SOURCES = \
$(COMMON_SOURCES) \
- $(COMMON_BM_SOURCES) \
$(MINIGLX_SOURCES) \
$(DRIVER_SOURCES)
diff --git a/src/mesa/drivers/dri/i965/brw_cc.c b/src/mesa/drivers/dri/i965/brw_cc.c
index b9338db0f56..afcfbcccb93 100644
--- a/src/mesa/drivers/dri/i965/brw_cc.c
+++ b/src/mesa/drivers/dri/i965/brw_cc.c
@@ -256,12 +256,12 @@ cc_unit_create_from_key(struct brw_context *brw, struct brw_cc_unit_key *key)
NULL, NULL);
/* Emit CC viewport relocation */
- dri_emit_reloc(bo,
- DRM_GEM_DOMAIN_I915_INSTRUCTION,
- 0,
- 0,
- offsetof(struct brw_cc_unit_state, cc4),
- brw->cc.vp_bo);
+ intel_bo_emit_reloc(bo,
+ DRM_GEM_DOMAIN_I915_INSTRUCTION,
+ 0,
+ 0,
+ offsetof(struct brw_cc_unit_state, cc4),
+ brw->cc.vp_bo);
return bo;
}
diff --git a/src/mesa/drivers/dri/i965/brw_clip_state.c b/src/mesa/drivers/dri/i965/brw_clip_state.c
index 26c322672c2..fd5157bdb72 100644
--- a/src/mesa/drivers/dri/i965/brw_clip_state.c
+++ b/src/mesa/drivers/dri/i965/brw_clip_state.c
@@ -119,12 +119,12 @@ clip_unit_create_from_key(struct brw_context *brw,
/* Emit clip program relocation */
assert(brw->clip.prog_bo);
- dri_emit_reloc(bo,
- DRM_GEM_DOMAIN_I915_INSTRUCTION,
- 0,
- clip.thread0.grf_reg_count << 1,
- offsetof(struct brw_clip_unit_state, thread0),
- brw->clip.prog_bo);
+ intel_bo_emit_reloc(bo,
+ DRM_GEM_DOMAIN_I915_INSTRUCTION,
+ 0,
+ clip.thread0.grf_reg_count << 1,
+ offsetof(struct brw_clip_unit_state, thread0),
+ brw->clip.prog_bo);
return bo;
}
diff --git a/src/mesa/drivers/dri/i965/brw_curbe.c b/src/mesa/drivers/dri/i965/brw_curbe.c
index 1b5e22f130f..bd0b04c36fc 100644
--- a/src/mesa/drivers/dri/i965/brw_curbe.c
+++ b/src/mesa/drivers/dri/i965/brw_curbe.c
@@ -306,10 +306,7 @@ static int prepare_constant_buffer(struct brw_context *brw)
* They're generally around 64b.
*/
brw->curbe.curbe_bo = dri_bo_alloc(brw->intel.bufmgr, "CURBE",
- 4096, 1 << 6,
- DRM_BO_FLAG_MEM_LOCAL |
- DRM_BO_FLAG_CACHED |
- DRM_BO_FLAG_CACHED_MAPPED);
+ 4096, 1 << 6);
brw->curbe.curbe_next_offset = 0;
}
diff --git a/src/mesa/drivers/dri/i965/brw_draw_upload.c b/src/mesa/drivers/dri/i965/brw_draw_upload.c
index 5222d2e450a..026c8ed8982 100644
--- a/src/mesa/drivers/dri/i965/brw_draw_upload.c
+++ b/src/mesa/drivers/dri/i965/brw_draw_upload.c
@@ -247,10 +247,7 @@ static void wrap_buffers( struct brw_context *brw,
if (brw->vb.upload.bo != NULL)
dri_bo_unreference(brw->vb.upload.bo);
brw->vb.upload.bo = dri_bo_alloc(brw->intel.bufmgr, "temporary VBO",
- size, 1,
- DRM_BO_FLAG_MEM_LOCAL |
- DRM_BO_FLAG_CACHED |
- DRM_BO_FLAG_CACHED_MAPPED);
+ size, 1);
/* Set the internal VBO to no-backing-store. We only use them as a
* temporary within a brw_try_draw_prims while the lock is held.
diff --git a/src/mesa/drivers/dri/i965/brw_gs_state.c b/src/mesa/drivers/dri/i965/brw_gs_state.c
index 2bf86f55738..953ccf777f5 100644
--- a/src/mesa/drivers/dri/i965/brw_gs_state.c
+++ b/src/mesa/drivers/dri/i965/brw_gs_state.c
@@ -106,11 +106,11 @@ gs_unit_create_from_key(struct brw_context *brw, struct brw_gs_unit_key *key)
if (key->prog_active) {
/* Emit GS program relocation */
- dri_emit_reloc(bo,
- DRM_GEM_DOMAIN_I915_INSTRUCTION, 0,
- gs.thread0.grf_reg_count << 1,
- offsetof(struct brw_gs_unit_state, thread0),
- brw->gs.prog_bo);
+ intel_bo_emit_reloc(bo,
+ DRM_GEM_DOMAIN_I915_INSTRUCTION, 0,
+ gs.thread0.grf_reg_count << 1,
+ offsetof(struct brw_gs_unit_state, thread0),
+ brw->gs.prog_bo);
}
return bo;
diff --git a/src/mesa/drivers/dri/i965/brw_sf_state.c b/src/mesa/drivers/dri/i965/brw_sf_state.c
index 5cf32284862..e8f36718a3a 100644
--- a/src/mesa/drivers/dri/i965/brw_sf_state.c
+++ b/src/mesa/drivers/dri/i965/brw_sf_state.c
@@ -253,18 +253,18 @@ sf_unit_create_from_key(struct brw_context *brw, struct brw_sf_unit_key *key,
NULL, NULL);
/* Emit SF program relocation */
- dri_emit_reloc(bo,
- DRM_GEM_DOMAIN_I915_INSTRUCTION, 0,
- sf.thread0.grf_reg_count << 1,
- offsetof(struct brw_sf_unit_state, thread0),
- brw->sf.prog_bo);
+ intel_bo_emit_reloc(bo,
+ DRM_GEM_DOMAIN_I915_INSTRUCTION, 0,
+ sf.thread0.grf_reg_count << 1,
+ offsetof(struct brw_sf_unit_state, thread0),
+ brw->sf.prog_bo);
/* Emit SF viewport relocation */
- dri_emit_reloc(bo,
- DRM_GEM_DOMAIN_I915_INSTRUCTION, 0,
- sf.sf5.front_winding | (sf.sf5.viewport_transform << 1),
- offsetof(struct brw_sf_unit_state, sf5),
- brw->sf.vp_bo);
+ intel_bo_emit_reloc(bo,
+ DRM_GEM_DOMAIN_I915_INSTRUCTION, 0,
+ sf.sf5.front_winding | (sf.sf5.viewport_transform << 1),
+ offsetof(struct brw_sf_unit_state, sf5),
+ brw->sf.vp_bo);
return bo;
}
diff --git a/src/mesa/drivers/dri/i965/brw_state_cache.c b/src/mesa/drivers/dri/i965/brw_state_cache.c
index d617650fadd..fc0c3bd9ffd 100644
--- a/src/mesa/drivers/dri/i965/brw_state_cache.c
+++ b/src/mesa/drivers/dri/i965/brw_state_cache.c
@@ -214,10 +214,7 @@ brw_upload_cache( struct brw_cache *cache,
/* Create the buffer object to contain the data */
bo = dri_bo_alloc(cache->brw->intel.bufmgr,
- cache->name[cache_id], data_size, 1 << 6,
- DRM_BO_FLAG_MEM_LOCAL |
- DRM_BO_FLAG_CACHED |
- DRM_BO_FLAG_CACHED_MAPPED);
+ cache->name[cache_id], data_size, 1 << 6);
/* Set up the memory containing the key, aux_data, and reloc_bufs */
diff --git a/src/mesa/drivers/dri/i965/brw_vs_state.c b/src/mesa/drivers/dri/i965/brw_vs_state.c
index 73f52d74284..a6b3db69ea4 100644
--- a/src/mesa/drivers/dri/i965/brw_vs_state.c
+++ b/src/mesa/drivers/dri/i965/brw_vs_state.c
@@ -115,11 +115,11 @@ vs_unit_create_from_key(struct brw_context *brw, struct brw_vs_unit_key *key)
NULL, NULL);
/* Emit VS program relocation */
- dri_emit_reloc(bo,
- DRM_GEM_DOMAIN_I915_INSTRUCTION, 0,
- vs.thread0.grf_reg_count << 1,
- offsetof(struct brw_vs_unit_state, thread0),
- brw->vs.prog_bo);
+ intel_bo_emit_reloc(bo,
+ DRM_GEM_DOMAIN_I915_INSTRUCTION, 0,
+ vs.thread0.grf_reg_count << 1,
+ offsetof(struct brw_vs_unit_state, thread0),
+ brw->vs.prog_bo);
return bo;
}
diff --git a/src/mesa/drivers/dri/i965/brw_wm_sampler_state.c b/src/mesa/drivers/dri/i965/brw_wm_sampler_state.c
index 13f7f218006..2e0aff7ab28 100644
--- a/src/mesa/drivers/dri/i965/brw_wm_sampler_state.c
+++ b/src/mesa/drivers/dri/i965/brw_wm_sampler_state.c
@@ -305,12 +305,12 @@ static int upload_wm_samplers( struct brw_context *brw )
continue;
ret |= dri_bufmgr_check_aperture_space(brw->wm.sdc_bo[i]);
- dri_emit_reloc(brw->wm.sampler_bo,
- DRM_GEM_DOMAIN_I915_INSTRUCTION, 0,
- 0,
- i * sizeof(struct brw_sampler_state) +
- offsetof(struct brw_sampler_state, ss2),
- brw->wm.sdc_bo[i]);
+ intel_bo_emit_reloc(brw->wm.sampler_bo,
+ DRM_GEM_DOMAIN_I915_INSTRUCTION, 0,
+ 0,
+ i * sizeof(struct brw_sampler_state) +
+ offsetof(struct brw_sampler_state, ss2),
+ brw->wm.sdc_bo[i]);
}
}
diff --git a/src/mesa/drivers/dri/i965/brw_wm_state.c b/src/mesa/drivers/dri/i965/brw_wm_state.c
index f79b58ba7ae..ef78d71bbb7 100644
--- a/src/mesa/drivers/dri/i965/brw_wm_state.c
+++ b/src/mesa/drivers/dri/i965/brw_wm_state.c
@@ -199,28 +199,28 @@ wm_unit_create_from_key(struct brw_context *brw, struct brw_wm_unit_key *key,
NULL, NULL);
/* Emit WM program relocation */
- dri_emit_reloc(bo,
- DRM_GEM_DOMAIN_I915_INSTRUCTION, 0,
- wm.thread0.grf_reg_count << 1,
- offsetof(struct brw_wm_unit_state, thread0),
- brw->wm.prog_bo);
+ intel_bo_emit_reloc(bo,
+ DRM_GEM_DOMAIN_I915_INSTRUCTION, 0,
+ wm.thread0.grf_reg_count << 1,
+ offsetof(struct brw_wm_unit_state, thread0),
+ brw->wm.prog_bo);
/* Emit scratch space relocation */
if (key->total_scratch != 0) {
- dri_emit_reloc(bo,
- 0, 0,
- wm.thread2.per_thread_scratch_space,
- offsetof(struct brw_wm_unit_state, thread2),
- brw->wm.scratch_buffer);
+ intel_bo_emit_reloc(bo,
+ 0, 0,
+ wm.thread2.per_thread_scratch_space,
+ offsetof(struct brw_wm_unit_state, thread2),
+ brw->wm.scratch_buffer);
}
/* Emit sampler state relocation */
if (key->sampler_count != 0) {
- dri_emit_reloc(bo,
- DRM_GEM_DOMAIN_I915_INSTRUCTION, 0,
- wm.wm4.stats_enable | (wm.wm4.sampler_count << 2),
- offsetof(struct brw_wm_unit_state, wm4),
- brw->wm.sampler_bo);
+ intel_bo_emit_reloc(bo,
+ DRM_GEM_DOMAIN_I915_INSTRUCTION, 0,
+ wm.wm4.stats_enable | (wm.wm4.sampler_count << 2),
+ offsetof(struct brw_wm_unit_state, wm4),
+ brw->wm.sampler_bo);
}
return bo;
@@ -251,7 +251,7 @@ static int upload_wm_unit( struct brw_context *brw )
brw->wm.scratch_buffer = dri_bo_alloc(intel->bufmgr,
"wm scratch",
total,
- 4096, DRM_BO_FLAG_MEM_TT);
+ 4096);
}
}
diff --git a/src/mesa/drivers/dri/i965/brw_wm_surface_state.c b/src/mesa/drivers/dri/i965/brw_wm_surface_state.c
index 73f4b2b4a38..6fc6d9dfd82 100644
--- a/src/mesa/drivers/dri/i965/brw_wm_surface_state.c
+++ b/src/mesa/drivers/dri/i965/brw_wm_surface_state.c
@@ -203,11 +203,11 @@ brw_create_texture_surface( struct brw_context *brw,
NULL, NULL);
/* Emit relocation to surface contents */
- dri_emit_reloc(bo,
- DRM_GEM_DOMAIN_I915_SAMPLER, 0,
- 0,
- offsetof(struct brw_surface_state, ss1),
- key->bo);
+ intel_bo_emit_reloc(bo,
+ DRM_GEM_DOMAIN_I915_SAMPLER, 0,
+ 0,
+ offsetof(struct brw_surface_state, ss1),
+ key->bo);
return bo;
}
@@ -341,13 +341,13 @@ brw_update_region_surface(struct brw_context *brw, struct intel_region *region,
* them both. We might be able to figure out from other state
* a more restrictive relocation to emit.
*/
- dri_emit_reloc(brw->wm.surf_bo[unit],
- DRM_GEM_DOMAIN_I915_RENDER |
- DRM_GEM_DOMAIN_I915_SAMPLER,
- DRM_GEM_DOMAIN_I915_RENDER,
- 0,
- offsetof(struct brw_surface_state, ss1),
- region_bo);
+ intel_bo_emit_reloc(brw->wm.surf_bo[unit],
+ DRM_GEM_DOMAIN_I915_RENDER |
+ DRM_GEM_DOMAIN_I915_SAMPLER,
+ DRM_GEM_DOMAIN_I915_RENDER,
+ 0,
+ offsetof(struct brw_surface_state, ss1),
+ region_bo);
}
}
@@ -391,11 +391,11 @@ brw_wm_get_binding_table(struct brw_context *brw)
/* Emit binding table relocations to surface state */
for (i = 0; i < BRW_WM_MAX_SURF; i++) {
if (brw->wm.surf_bo[i] != NULL) {
- dri_emit_reloc(bind_bo,
- DRM_GEM_DOMAIN_I915_INSTRUCTION, 0,
- 0,
- i * sizeof(GLuint),
- brw->wm.surf_bo[i]);
+ intel_bo_emit_reloc(bind_bo,
+ DRM_GEM_DOMAIN_I915_INSTRUCTION, 0,
+ 0,
+ i * sizeof(GLuint),
+ brw->wm.surf_bo[i]);
}
}
diff --git a/src/mesa/drivers/dri/i965/intel_bufmgr_fake.c b/src/mesa/drivers/dri/i965/intel_bufmgr_fake.c
deleted file mode 120000
index 9b840a8123a..00000000000
--- a/src/mesa/drivers/dri/i965/intel_bufmgr_fake.c
+++ /dev/null
@@ -1 +0,0 @@
-../intel/intel_bufmgr_fake.c
\ No newline at end of file
diff --git a/src/mesa/drivers/dri/i965/intel_bufmgr_gem.c b/src/mesa/drivers/dri/i965/intel_bufmgr_gem.c
deleted file mode 120000
index dee0daf9c04..00000000000
--- a/src/mesa/drivers/dri/i965/intel_bufmgr_gem.c
+++ /dev/null
@@ -1 +0,0 @@
-../intel/intel_bufmgr_gem.c
\ No newline at end of file
diff --git a/src/mesa/drivers/dri/i965/intel_bufmgr_ttm.c b/src/mesa/drivers/dri/i965/intel_bufmgr_ttm.c
deleted file mode 120000
index e9df5c62794..00000000000
--- a/src/mesa/drivers/dri/i965/intel_bufmgr_ttm.c
+++ /dev/null
@@ -1 +0,0 @@
-../intel/intel_bufmgr_ttm.c
\ No newline at end of file
diff --git a/src/mesa/drivers/dri/intel/intel_batchbuffer.c b/src/mesa/drivers/dri/intel/intel_batchbuffer.c
index 803ff5e90ee..019880581a2 100644
--- a/src/mesa/drivers/dri/intel/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/intel/intel_batchbuffer.c
@@ -29,6 +29,7 @@
#include "intel_ioctl.h"
#include "intel_decode.h"
#include "intel_reg.h"
+#include "intel_bufmgr.h"
/* Relocations in kernel space:
* - pass dma buffer separately
@@ -82,8 +83,7 @@ intel_batchbuffer_reset(struct intel_batchbuffer *batch)
batch->buffer = malloc (intel->maxBatchSize);
batch->buf = dri_bo_alloc(intel->bufmgr, "batchbuffer",
- intel->maxBatchSize, 4096,
- DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED | DRM_BO_FLAG_CACHED_MAPPED);
+ intel->maxBatchSize, 4096);
if (batch->buffer)
batch->map = batch->buffer;
else {
@@ -290,8 +290,8 @@ intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
if (batch->ptr - batch->map > batch->buf->size)
_mesa_printf ("bad relocation ptr %p map %p offset %d size %d\n",
batch->ptr, batch->map, batch->ptr - batch->map, batch->buf->size);
- ret = dri_emit_reloc(batch->buf, read_domains, write_domain,
- delta, batch->ptr - batch->map, buffer);
+ ret = intel_bo_emit_reloc(batch->buf, read_domains, write_domain,
+ delta, batch->ptr - batch->map, buffer);
/*
* Using the old buffer offset, write in what the right data would be, in case
diff --git a/src/mesa/drivers/dri/intel/intel_buffer_objects.c b/src/mesa/drivers/dri/intel/intel_buffer_objects.c
index 951b8cbfb76..4227f0c9734 100644
--- a/src/mesa/drivers/dri/intel/intel_buffer_objects.c
+++ b/src/mesa/drivers/dri/intel/intel_buffer_objects.c
@@ -45,8 +45,7 @@ intel_bufferobj_alloc_buffer(struct intel_context *intel,
struct intel_buffer_object *intel_obj)
{
intel_obj->buffer = dri_bo_alloc(intel->bufmgr, "bufferobj",
- intel_obj->Base.Size, 64,
- DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED | DRM_BO_FLAG_CACHED_MAPPED);
+ intel_obj->Base.Size, 64);
}
/**
diff --git a/src/mesa/drivers/dri/intel/intel_bufmgr_fake.c b/src/mesa/drivers/dri/intel/intel_bufmgr_fake.c
deleted file mode 100644
index 2aed3d85be8..00000000000
--- a/src/mesa/drivers/dri/intel/intel_bufmgr_fake.c
+++ /dev/null
@@ -1,1177 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-/* Originally a fake version of the buffer manager so that we can
- * prototype the changes in a driver fairly quickly, has been fleshed
- * out to a fully functional interim solution.
- *
- * Basically wraps the old style memory management in the new
- * programming interface, but is more expressive and avoids many of
- * the bugs in the old texture manager.
- */
-#include "mtypes.h"
-#include "dri_bufmgr.h"
-#include "intel_bufmgr_fake.h"
-#include "drm.h"
-#include "i915_drm.h"
-
-#include "simple_list.h"
-#include "mm.h"
-#include "imports.h"
-
-#define DBG(...) do { \
- if (bufmgr_fake->bufmgr.debug) \
- _mesa_printf(__VA_ARGS__); \
-} while (0)
-
-/* Internal flags:
- */
-#define BM_NO_BACKING_STORE 0x00000001
-#define BM_NO_FENCE_SUBDATA 0x00000002
-#define BM_PINNED 0x00000004
-
-/* Wrapper around mm.c's mem_block, which understands that you must
- * wait for fences to expire before memory can be freed. This is
- * specific to our use of memcpy for uploads - an upload that was
- * processed through the command queue wouldn't need to care about
- * fences.
- */
-#define MAX_RELOCS 4096
-
-struct fake_buffer_reloc
-{
- /** Buffer object that the relocation points at. */
- dri_bo *target_buf;
- /** Offset of the relocation entry within reloc_buf. */
- GLuint offset;
- /** Cached value of the offset when we last performed this relocation. */
- GLuint last_target_offset;
- /** Value added to target_buf's offset to get the relocation entry. */
- GLuint delta;
- /** Cache domains the target buffer is read into. */
- uint32_t read_domains;
- /** Cache domain the target buffer will have dirty cachelines in. */
- uint32_t write_domain;
-};
-
-struct block {
- struct block *next, *prev;
- struct mem_block *mem; /* BM_MEM_AGP */
-
- /**
- * Marks that the block is currently in the aperture and has yet to be
- * fenced.
- */
- unsigned on_hardware:1;
- /**
- * Marks that the block is currently fenced (being used by rendering) and
- * can't be freed until @fence is passed.
- */
- unsigned fenced:1;
-
- /** Fence cookie for the block. */
- unsigned fence; /* Split to read_fence, write_fence */
-
- dri_bo *bo;
- void *virtual;
-};
-
-typedef struct _bufmgr_fake {
- dri_bufmgr bufmgr;
-
- unsigned long low_offset;
- unsigned long size;
- void *virtual;
-
- struct mem_block *heap;
- struct block lru; /* only allocated, non-fence-pending blocks here */
-
- unsigned buf_nr; /* for generating ids */
-
- struct block on_hardware; /* after bmValidateBuffers */
- struct block fenced; /* after bmFenceBuffers (mi_flush, emit irq, write dword) */
- /* then to bufmgr->lru or free() */
-
- unsigned int last_fence;
-
- unsigned fail:1;
- unsigned need_fence:1;
- GLboolean thrashing;
-
- /**
- * Driver callback to emit a fence, returning the cookie.
- *
- * Currently, this also requires that a write flush be emitted before
- * emitting the fence, but this should change.
- */
- unsigned int (*fence_emit)(void *private);
- /** Driver callback to wait for a fence cookie to have passed. */
- int (*fence_wait)(void *private, unsigned int fence_cookie);
- /** Driver-supplied argument to driver callbacks */
- void *driver_priv;
-
- GLboolean debug;
-
- GLboolean performed_rendering;
-
- /* keep track of the current total size of objects we have relocs for */
- unsigned long current_total_size;
-} dri_bufmgr_fake;
-
-typedef struct _dri_bo_fake {
- dri_bo bo;
-
- unsigned id; /* debug only */
- const char *name;
-
- unsigned dirty:1;
- unsigned size_accounted:1; /* this buffer's size has been accounted against the aperture */
- unsigned card_dirty:1; /* has the card written to this buffer - we may need to copy it back */
- unsigned int refcount;
- /* Flags may consist of any of the DRM_BO flags, plus
- * DRM_BO_NO_BACKING_STORE and BM_NO_FENCE_SUBDATA, which are the first two
- * driver private flags.
- */
- uint64_t flags;
- /** Cache domains the target buffer is read into. */
- uint32_t read_domains;
- /** Cache domain the target buffer will have dirty cachelines in. */
- uint32_t write_domain;
-
- unsigned int alignment;
- GLboolean is_static, validated;
- unsigned int map_count;
-
- /** relocation list */
- struct fake_buffer_reloc *relocs;
- GLuint nr_relocs;
-
- struct block *block;
- void *backing_store;
- void (*invalidate_cb)(dri_bo *bo, void *ptr);
- void *invalidate_ptr;
-} dri_bo_fake;
-
-static int clear_fenced(dri_bufmgr_fake *bufmgr_fake,
- unsigned int fence_cookie);
-
-static int dri_fake_check_aperture_space(dri_bo *bo);
-
-#define MAXFENCE 0x7fffffff
-
-static GLboolean FENCE_LTE( unsigned a, unsigned b )
-{
- if (a == b)
- return GL_TRUE;
-
- if (a < b && b - a < (1<<24))
- return GL_TRUE;
-
- if (a > b && MAXFENCE - a + b < (1<<24))
- return GL_TRUE;
-
- return GL_FALSE;
-}
-
-static unsigned int
-_fence_emit_internal(dri_bufmgr_fake *bufmgr_fake)
-{
- bufmgr_fake->last_fence = bufmgr_fake->fence_emit(bufmgr_fake->driver_priv);
- return bufmgr_fake->last_fence;
-}
-
-static void
-_fence_wait_internal(dri_bufmgr_fake *bufmgr_fake, unsigned int cookie)
-{
- int ret;
-
- ret = bufmgr_fake->fence_wait(bufmgr_fake->driver_priv, cookie);
- if (ret != 0) {
- _mesa_printf("%s:%d: Error %d waiting for fence.\n",
- __FILE__, __LINE__, ret);
- abort();
- }
- clear_fenced(bufmgr_fake, cookie);
-}
-
-static GLboolean
-_fence_test(dri_bufmgr_fake *bufmgr_fake, unsigned fence)
-{
- /* Slight problem with wrap-around:
- */
- return fence == 0 || FENCE_LTE(fence, bufmgr_fake->last_fence);
-}
-
-/**
- * Allocate a memory manager block for the buffer.
- */
-static GLboolean
-alloc_block(dri_bo *bo)
-{
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
- dri_bufmgr_fake *bufmgr_fake= (dri_bufmgr_fake *)bo->bufmgr;
- struct block *block = (struct block *)calloc(sizeof *block, 1);
- unsigned int align_log2 = _mesa_ffs(bo_fake->alignment) - 1;
- GLuint sz;
-
- if (!block)
- return GL_FALSE;
-
- sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1);
-
- block->mem = mmAllocMem(bufmgr_fake->heap, sz, align_log2, 0);
- if (!block->mem) {
- free(block);
- return GL_FALSE;
- }
-
- make_empty_list(block);
-
- /* Insert at head or at tail???
- */
- insert_at_tail(&bufmgr_fake->lru, block);
-
- block->virtual = bufmgr_fake->virtual +
- block->mem->ofs - bufmgr_fake->low_offset;
- block->bo = bo;
-
- bo_fake->block = block;
-
- return GL_TRUE;
-}
-
-/* Release the card storage associated with buf:
- */
-static void free_block(dri_bufmgr_fake *bufmgr_fake, struct block *block)
-{
- dri_bo_fake *bo_fake;
- DBG("free block %p %08x %d %d\n", block, block->mem->ofs, block->on_hardware, block->fenced);
-
- if (!block)
- return;
-
- bo_fake = (dri_bo_fake *)block->bo;
- if (!(bo_fake->flags & BM_NO_BACKING_STORE) && (bo_fake->card_dirty == 1)) {
- memcpy(bo_fake->backing_store, block->virtual, block->bo->size);
- bo_fake->card_dirty = 1;
- bo_fake->dirty = 1;
- }
-
- if (block->on_hardware) {
- block->bo = NULL;
- }
- else if (block->fenced) {
- block->bo = NULL;
- }
- else {
- DBG(" - free immediately\n");
- remove_from_list(block);
-
- mmFreeMem(block->mem);
- free(block);
- }
-}
-
-static void
-alloc_backing_store(dri_bo *bo)
-{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
- assert(!bo_fake->backing_store);
- assert(!(bo_fake->flags & (BM_PINNED|BM_NO_BACKING_STORE)));
-
- bo_fake->backing_store = ALIGN_MALLOC(bo->size, 64);
-
- DBG("alloc_backing - buf %d %p %d\n", bo_fake->id, bo_fake->backing_store, bo->size);
- assert(bo_fake->backing_store);
-}
-
-static void
-free_backing_store(dri_bo *bo)
-{
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
-
- if (bo_fake->backing_store) {
- assert(!(bo_fake->flags & (BM_PINNED|BM_NO_BACKING_STORE)));
- ALIGN_FREE(bo_fake->backing_store);
- bo_fake->backing_store = NULL;
- }
-}
-
-static void
-set_dirty(dri_bo *bo)
-{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
-
- if (bo_fake->flags & BM_NO_BACKING_STORE && bo_fake->invalidate_cb != NULL)
- bo_fake->invalidate_cb(bo, bo_fake->invalidate_ptr);
-
- assert(!(bo_fake->flags & BM_PINNED));
-
- DBG("set_dirty - buf %d\n", bo_fake->id);
- bo_fake->dirty = 1;
-}
-
-static GLboolean
-evict_lru(dri_bufmgr_fake *bufmgr_fake, GLuint max_fence)
-{
- struct block *block, *tmp;
-
- DBG("%s\n", __FUNCTION__);
-
- foreach_s(block, tmp, &bufmgr_fake->lru) {
- dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo;
-
- if (bo_fake != NULL && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
- continue;
-
- if (block->fence && max_fence && !FENCE_LTE(block->fence, max_fence))
- return 0;
-
- set_dirty(&bo_fake->bo);
- bo_fake->block = NULL;
-
- free_block(bufmgr_fake, block);
- return GL_TRUE;
- }
-
- return GL_FALSE;
-}
-
-#define foreach_s_rev(ptr, t, list) \
- for(ptr=(list)->prev,t=(ptr)->prev; list != ptr; ptr=t, t=(t)->prev)
-
-static GLboolean
-evict_mru(dri_bufmgr_fake *bufmgr_fake)
-{
- struct block *block, *tmp;
-
- DBG("%s\n", __FUNCTION__);
-
- foreach_s_rev(block, tmp, &bufmgr_fake->lru) {
- dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo;
-
- if (bo_fake && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
- continue;
-
- set_dirty(&bo_fake->bo);
- bo_fake->block = NULL;
-
- free_block(bufmgr_fake, block);
- return GL_TRUE;
- }
-
- return GL_FALSE;
-}
-
-/**
- * Removes all objects from the fenced list older than the given fence.
- */
-static int clear_fenced(dri_bufmgr_fake *bufmgr_fake,
- unsigned int fence_cookie)
-{
- struct block *block, *tmp;
- int ret = 0;
-
- foreach_s(block, tmp, &bufmgr_fake->fenced) {
- assert(block->fenced);
-
- if (_fence_test(bufmgr_fake, block->fence)) {
-
- block->fenced = 0;
-
- if (!block->bo) {
- DBG("delayed free: offset %x sz %x\n",
- block->mem->ofs, block->mem->size);
- remove_from_list(block);
- mmFreeMem(block->mem);
- free(block);
- }
- else {
- DBG("return to lru: offset %x sz %x\n",
- block->mem->ofs, block->mem->size);
- move_to_tail(&bufmgr_fake->lru, block);
- }
-
- ret = 1;
- }
- else {
- /* Blocks are ordered by fence, so if one fails, all from
- * here will fail also:
- */
- DBG("fence not passed: offset %x sz %x %d %d \n",
- block->mem->ofs, block->mem->size, block->fence, bufmgr_fake->last_fence);
- break;
- }
- }
-
- DBG("%s: %d\n", __FUNCTION__, ret);
- return ret;
-}
-
-static void fence_blocks(dri_bufmgr_fake *bufmgr_fake, unsigned fence)
-{
- struct block *block, *tmp;
-
- foreach_s (block, tmp, &bufmgr_fake->on_hardware) {
- DBG("Fence block %p (sz 0x%x ofs %x buf %p) with fence %d\n", block,
- block->mem->size, block->mem->ofs, block->bo, fence);
- block->fence = fence;
-
- block->on_hardware = 0;
- block->fenced = 1;
-
- /* Move to tail of pending list here
- */
- move_to_tail(&bufmgr_fake->fenced, block);
- }
-
- assert(is_empty_list(&bufmgr_fake->on_hardware));
-}
-
-static GLboolean evict_and_alloc_block(dri_bo *bo)
-{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
-
- assert(bo_fake->block == NULL);
-
- /* Search for already free memory:
- */
- if (alloc_block(bo))
- return GL_TRUE;
-
- /* If we're not thrashing, allow lru eviction to dig deeper into
- * recently used textures. We'll probably be thrashing soon:
- */
- if (!bufmgr_fake->thrashing) {
- while (evict_lru(bufmgr_fake, 0))
- if (alloc_block(bo))
- return GL_TRUE;
- }
-
- /* Keep thrashing counter alive?
- */
- if (bufmgr_fake->thrashing)
- bufmgr_fake->thrashing = 20;
-
- /* Wait on any already pending fences - here we are waiting for any
- * freed memory that has been submitted to hardware and fenced to
- * become available:
- */
- while (!is_empty_list(&bufmgr_fake->fenced)) {
- GLuint fence = bufmgr_fake->fenced.next->fence;
- _fence_wait_internal(bufmgr_fake, fence);
-
- if (alloc_block(bo))
- return GL_TRUE;
- }
-
- if (!is_empty_list(&bufmgr_fake->on_hardware)) {
- while (!is_empty_list(&bufmgr_fake->fenced)) {
- GLuint fence = bufmgr_fake->fenced.next->fence;
- _fence_wait_internal(bufmgr_fake, fence);
- }
-
- if (!bufmgr_fake->thrashing) {
- DBG("thrashing\n");
- }
- bufmgr_fake->thrashing = 20;
-
- if (alloc_block(bo))
- return GL_TRUE;
- }
-
- while (evict_mru(bufmgr_fake))
- if (alloc_block(bo))
- return GL_TRUE;
-
- DBG("%s 0x%x bytes failed\n", __FUNCTION__, bo->size);
-
- return GL_FALSE;
-}
-
-/***********************************************************************
- * Public functions
- */
-
-/**
- * Wait for hardware idle by emitting a fence and waiting for it.
- */
-static void
-dri_bufmgr_fake_wait_idle(dri_bufmgr_fake *bufmgr_fake)
-{
- unsigned int cookie;
-
- cookie = bufmgr_fake->fence_emit(bufmgr_fake->driver_priv);
- _fence_wait_internal(bufmgr_fake, cookie);
-}
-
-/**
- * Wait for rendering to a buffer to complete.
- *
- * It is assumed that the batchbuffer which performed the rendering included
- * the necessary flushing.
- */
-static void
-dri_fake_bo_wait_rendering(dri_bo *bo)
-{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
-
- if (bo_fake->block == NULL || !bo_fake->block->fenced)
- return;
-
- _fence_wait_internal(bufmgr_fake, bo_fake->block->fence);
-}
-
-/* Specifically ignore texture memory sharing.
- * -- just evict everything
- * -- and wait for idle
- */
-void
-dri_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr)
-{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
- struct block *block, *tmp;
-
- bufmgr_fake->need_fence = 1;
- bufmgr_fake->fail = 0;
-
- /* Wait for hardware idle. We don't know where acceleration has been
- * happening, so we'll need to wait anyway before letting anything get
- * put on the card again.
- */
- dri_bufmgr_fake_wait_idle(bufmgr_fake);
-
- /* Check that we hadn't released the lock without having fenced the last
- * set of buffers.
- */
- assert(is_empty_list(&bufmgr_fake->fenced));
- assert(is_empty_list(&bufmgr_fake->on_hardware));
-
- foreach_s(block, tmp, &bufmgr_fake->lru) {
- assert(_fence_test(bufmgr_fake, block->fence));
- set_dirty(block->bo);
- }
-}
-
-static dri_bo *
-dri_fake_bo_alloc(dri_bufmgr *bufmgr, const char *name,
- unsigned long size, unsigned int alignment,
- uint64_t location_mask)
-{
- dri_bufmgr_fake *bufmgr_fake;
- dri_bo_fake *bo_fake;
-
- bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
-
- assert(size != 0);
-
- bo_fake = calloc(1, sizeof(*bo_fake));
- if (!bo_fake)
- return NULL;
-
- bo_fake->bo.size = size;
- bo_fake->bo.offset = -1;
- bo_fake->bo.virtual = NULL;
- bo_fake->bo.bufmgr = bufmgr;
- bo_fake->refcount = 1;
-
- /* Alignment must be a power of two */
- assert((alignment & (alignment - 1)) == 0);
- if (alignment == 0)
- alignment = 1;
- bo_fake->alignment = alignment;
- bo_fake->id = ++bufmgr_fake->buf_nr;
- bo_fake->name = name;
- bo_fake->flags = 0;
- bo_fake->is_static = GL_FALSE;
-
- DBG("drm_bo_alloc: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
- bo_fake->bo.size / 1024);
-
- return &bo_fake->bo;
-}
-
-static dri_bo *
-dri_fake_bo_alloc_static(dri_bufmgr *bufmgr, const char *name,
- unsigned long offset, unsigned long size,
- void *virtual, uint64_t location_mask)
-{
- dri_bufmgr_fake *bufmgr_fake;
- dri_bo_fake *bo_fake;
-
- bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
-
- assert(size != 0);
-
- bo_fake = calloc(1, sizeof(*bo_fake));
- if (!bo_fake)
- return NULL;
-
- bo_fake->bo.size = size;
- bo_fake->bo.offset = offset;
- bo_fake->bo.virtual = virtual;
- bo_fake->bo.bufmgr = bufmgr;
- bo_fake->refcount = 1;
- bo_fake->id = ++bufmgr_fake->buf_nr;
- bo_fake->name = name;
- bo_fake->flags = BM_PINNED | DRM_BO_FLAG_NO_MOVE;
- bo_fake->is_static = GL_TRUE;
-
- DBG("drm_bo_alloc_static: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
- bo_fake->bo.size / 1024);
-
- return &bo_fake->bo;
-}
-
-static void
-dri_fake_bo_reference(dri_bo *bo)
-{
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
-
- bo_fake->refcount++;
-}
-
-static void
-dri_fake_bo_unreference(dri_bo *bo)
-{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
- int i;
-
- if (!bo)
- return;
-
- if (--bo_fake->refcount == 0) {
- assert(bo_fake->map_count == 0);
- /* No remaining references, so free it */
- if (bo_fake->block)
- free_block(bufmgr_fake, bo_fake->block);
- free_backing_store(bo);
-
- for (i = 0; i < bo_fake->nr_relocs; i++)
- dri_bo_unreference(bo_fake->relocs[i].target_buf);
-
- DBG("drm_bo_unreference: free buf %d %s\n", bo_fake->id, bo_fake->name);
-
- free(bo_fake->relocs);
- free(bo);
-
- return;
- }
-}
-
-/**
- * Set the buffer as not requiring backing store, and instead get the callback
- * invoked whenever it would be set dirty.
- */
-void dri_bo_fake_disable_backing_store(dri_bo *bo,
- void (*invalidate_cb)(dri_bo *bo,
- void *ptr),
- void *ptr)
-{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
-
- if (bo_fake->backing_store)
- free_backing_store(bo);
-
- bo_fake->flags |= BM_NO_BACKING_STORE;
-
- DBG("disable_backing_store set buf %d dirty\n", bo_fake->id);
- bo_fake->dirty = 1;
- bo_fake->invalidate_cb = invalidate_cb;
- bo_fake->invalidate_ptr = ptr;
-
- /* Note that it is invalid right from the start. Also note
- * invalidate_cb is called with the bufmgr locked, so cannot
- * itself make bufmgr calls.
- */
- if (invalidate_cb != NULL)
- invalidate_cb(bo, ptr);
-}
-
-/**
- * Map a buffer into bo->virtual, allocating either card memory space (If
- * BM_NO_BACKING_STORE or BM_PINNED) or backing store, as necessary.
- */
-static int
-dri_fake_bo_map(dri_bo *bo, GLboolean write_enable)
-{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
-
- /* Static buffers are always mapped. */
- if (bo_fake->is_static)
- return 0;
-
- /* Allow recursive mapping. Mesa may recursively map buffers with
- * nested display loops, and it is used internally in bufmgr_fake
- * for relocation.
- */
- if (bo_fake->map_count++ != 0)
- return 0;
-
- {
- DBG("drm_bo_map: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
- bo_fake->bo.size / 1024);
-
- if (bo->virtual != NULL) {
- _mesa_printf("%s: already mapped\n", __FUNCTION__);
- abort();
- }
- else if (bo_fake->flags & (BM_NO_BACKING_STORE|BM_PINNED)) {
-
- if (!bo_fake->block && !evict_and_alloc_block(bo)) {
- DBG("%s: alloc failed\n", __FUNCTION__);
- bufmgr_fake->fail = 1;
- return 1;
- }
- else {
- assert(bo_fake->block);
- bo_fake->dirty = 0;
-
- if (!(bo_fake->flags & BM_NO_FENCE_SUBDATA) &&
- bo_fake->block->fenced) {
- dri_fake_bo_wait_rendering(bo);
- }
-
- bo->virtual = bo_fake->block->virtual;
- }
- }
- else {
- if (write_enable)
- set_dirty(bo);
-
- if (bo_fake->backing_store == 0)
- alloc_backing_store(bo);
-
- bo->virtual = bo_fake->backing_store;
- }
- }
-
- return 0;
-}
-
-static int
-dri_fake_bo_unmap(dri_bo *bo)
-{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
-
- /* Static buffers are always mapped. */
- if (bo_fake->is_static)
- return 0;
-
- assert(bo_fake->map_count != 0);
- if (--bo_fake->map_count != 0)
- return 0;
-
- DBG("drm_bo_unmap: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
- bo_fake->bo.size / 1024);
-
- bo->virtual = NULL;
-
- return 0;
-}
-
-static void
-dri_fake_kick_all(dri_bufmgr_fake *bufmgr_fake)
-{
- struct block *block, *tmp;
-
- bufmgr_fake->performed_rendering = GL_FALSE;
- /* okay, for every BO that is on the HW kick it off.
- seriously not afraid of the POLICE right now */
- foreach_s(block, tmp, &bufmgr_fake->on_hardware) {
- dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo;
-
- block->on_hardware = 0;
- free_block(bufmgr_fake, block);
- bo_fake->block = NULL;
- bo_fake->validated = GL_FALSE;
- if (!(bo_fake->flags & BM_NO_BACKING_STORE))
- bo_fake->dirty = 1;
- }
-}
-
-static int
-dri_fake_bo_validate(dri_bo *bo)
-{
- dri_bufmgr_fake *bufmgr_fake;
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
-
- /* XXX: Sanity-check whether we've already validated this one under
- * different flags. See drmAddValidateItem().
- */
- bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
-
- DBG("drm_bo_validate: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
- bo_fake->bo.size / 1024);
-
- /* Sanity check: Buffers should be unmapped before being validated.
- * This is not so much of a problem for bufmgr_fake, but TTM refuses,
- * and the problem is harder to debug there.
- */
- assert(bo_fake->map_count == 0);
-
- if (bo_fake->is_static) {
- /* Add it to the needs-fence list */
- bufmgr_fake->need_fence = 1;
- return 0;
- }
-
- /* reset size accounted */
- bo_fake->size_accounted = 0;
-
- /* Allocate the card memory */
- if (!bo_fake->block && !evict_and_alloc_block(bo)) {
- bufmgr_fake->fail = 1;
- DBG("Failed to validate buf %d:%s\n", bo_fake->id, bo_fake->name);
- return -1;
- }
-
- assert(bo_fake->block);
- assert(bo_fake->block->bo == &bo_fake->bo);
-
- bo->offset = bo_fake->block->mem->ofs;
-
- /* Upload the buffer contents if necessary */
- if (bo_fake->dirty) {
- DBG("Upload dirty buf %d:%s, sz %d offset 0x%x\n", bo_fake->id,
- bo_fake->name, bo->size, bo_fake->block->mem->ofs);
-
- assert(!(bo_fake->flags &
- (BM_NO_BACKING_STORE|BM_PINNED)));
-
- /* Actually, should be able to just wait for a fence on the memory,
- * which we would be tracking when we free it. Waiting for idle is
- * a sufficiently large hammer for now.
- */
- dri_bufmgr_fake_wait_idle(bufmgr_fake);
-
- /* we may never have mapped this BO, so it might not have any backing
- * store; if this happens it should be rare, but zero the card memory
- * in any case */
- if (bo_fake->backing_store)
- memcpy(bo_fake->block->virtual, bo_fake->backing_store, bo->size);
- else
- memset(bo_fake->block->virtual, 0, bo->size);
-
- bo_fake->dirty = 0;
- }
-
- bo_fake->block->fenced = 0;
- bo_fake->block->on_hardware = 1;
- move_to_tail(&bufmgr_fake->on_hardware, bo_fake->block);
-
- bo_fake->validated = GL_TRUE;
- bufmgr_fake->need_fence = 1;
-
- return 0;
-}
-
-static void
-dri_fake_fence_validated(dri_bufmgr *bufmgr)
-{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
- unsigned int cookie;
-
- cookie = _fence_emit_internal(bufmgr_fake);
- fence_blocks(bufmgr_fake, cookie);
-
- DBG("drm_fence_validated: 0x%08x cookie\n", cookie);
-}
-
-static void
-dri_fake_destroy(dri_bufmgr *bufmgr)
-{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
-
- mmDestroy(bufmgr_fake->heap);
- free(bufmgr);
-}
-
-static int
-dri_fake_emit_reloc(dri_bo *reloc_buf,
- uint32_t read_domains, uint32_t write_domain,
- uint32_t delta, uint32_t offset, dri_bo *target_buf)
-{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)reloc_buf->bufmgr;
- struct fake_buffer_reloc *r;
- dri_bo_fake *reloc_fake = (dri_bo_fake *)reloc_buf;
- dri_bo_fake *target_fake = (dri_bo_fake *)target_buf;
- int i;
-
- assert(reloc_buf);
- assert(target_buf);
-
- assert(target_fake->is_static || target_fake->size_accounted);
-
- if (reloc_fake->relocs == NULL) {
- reloc_fake->relocs = malloc(sizeof(struct fake_buffer_reloc) *
- MAX_RELOCS);
- }
-
- r = &reloc_fake->relocs[reloc_fake->nr_relocs++];
-
- assert(reloc_fake->nr_relocs <= MAX_RELOCS);
-
- dri_bo_reference(target_buf);
-
- r->target_buf = target_buf;
- r->offset = offset;
- r->last_target_offset = target_buf->offset;
- r->delta = delta;
- r->read_domains = read_domains;
- r->write_domain = write_domain;
-
- if (bufmgr_fake->debug) {
- /* Check that a conflicting relocation hasn't already been emitted. */
- for (i = 0; i < reloc_fake->nr_relocs - 1; i++) {
- struct fake_buffer_reloc *r2 = &reloc_fake->relocs[i];
-
- assert(r->offset != r2->offset);
- }
- }
-
- return 0;
-}
-
-/**
- * Incorporates the validation flags associated with each relocation into
- * the combined validation flags for the buffer on this batchbuffer submission.
- */
-static void
-dri_fake_calculate_domains(dri_bo *bo)
-{
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
- int i;
-
- for (i = 0; i < bo_fake->nr_relocs; i++) {
- struct fake_buffer_reloc *r = &bo_fake->relocs[i];
- dri_bo_fake *target_fake = (dri_bo_fake *)r->target_buf;
-
- /* Do the same for the tree of buffers we depend on */
- dri_fake_calculate_domains(r->target_buf);
-
- target_fake->read_domains |= r->read_domains;
- if (target_fake->write_domain != 0)
- target_fake->write_domain = r->write_domain;
- }
-}
-
-
-static int
-dri_fake_reloc_and_validate_buffer(dri_bo *bo)
-{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
- int i, ret;
-
- assert(bo_fake->map_count == 0);
-
- for (i = 0; i < bo_fake->nr_relocs; i++) {
- struct fake_buffer_reloc *r = &bo_fake->relocs[i];
- dri_bo_fake *target_fake = (dri_bo_fake *)r->target_buf;
- uint32_t reloc_data;
-
- /* Validate the target buffer if that hasn't been done. */
- if (!target_fake->validated) {
- ret = dri_fake_reloc_and_validate_buffer(r->target_buf);
- if (ret != 0) {
- if (bo->virtual != NULL)
- dri_bo_unmap(bo);
- return ret;
- }
- }
-
- /* Calculate the value of the relocation entry. */
- if (r->target_buf->offset != r->last_target_offset) {
- reloc_data = r->target_buf->offset + r->delta;
-
- if (bo->virtual == NULL)
- dri_bo_map(bo, GL_TRUE);
-
- *(uint32_t *)(bo->virtual + r->offset) = reloc_data;
-
- r->last_target_offset = r->target_buf->offset;
- }
- }
-
- if (bo->virtual != NULL)
- dri_bo_unmap(bo);
-
- if (bo_fake->write_domain != 0) {
- if (!(bo_fake->flags & (BM_NO_BACKING_STORE|BM_PINNED))) {
- if (bo_fake->backing_store == 0)
- alloc_backing_store(bo);
-
- bo_fake->card_dirty = 1;
- }
- bufmgr_fake->performed_rendering = GL_TRUE;
- }
-
- return dri_fake_bo_validate(bo);
-}
-
-static void *
-dri_fake_process_relocs(dri_bo *batch_buf)
-{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)batch_buf->bufmgr;
- dri_bo_fake *batch_fake = (dri_bo_fake *)batch_buf;
- int ret;
- int retry_count = 0;
-
- bufmgr_fake->performed_rendering = GL_FALSE;
-
- dri_fake_calculate_domains(batch_buf);
-
- batch_fake->read_domains = DRM_GEM_DOMAIN_I915_COMMAND;
-
-   /* we've run out of RAM, so blow the whole lot away and retry */
- restart:
- ret = dri_fake_reloc_and_validate_buffer(batch_buf);
- if (bufmgr_fake->fail == 1) {
- if (retry_count == 0) {
- retry_count++;
- dri_fake_kick_all(bufmgr_fake);
- bufmgr_fake->fail = 0;
- goto restart;
- } else /* dump out the memory here */
- mmDumpMemInfo(bufmgr_fake->heap);
- }
-
- assert(ret == 0);
-
- bufmgr_fake->current_total_size = 0;
- return NULL;
-}
-
-static void
-dri_bo_fake_post_submit(dri_bo *bo)
-{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
- int i;
-
- for (i = 0; i < bo_fake->nr_relocs; i++) {
- struct fake_buffer_reloc *r = &bo_fake->relocs[i];
- dri_bo_fake *target_fake = (dri_bo_fake *)r->target_buf;
-
- if (target_fake->validated)
- dri_bo_fake_post_submit(r->target_buf);
-
- DBG("%s@0x%08x + 0x%08x -> %s@0x%08x + 0x%08x\n",
- bo_fake->name, (uint32_t)bo->offset, r->offset,
- target_fake->name, (uint32_t)r->target_buf->offset, r->delta);
- }
-
- assert(bo_fake->map_count == 0);
- bo_fake->validated = GL_FALSE;
- bo_fake->read_domains = 0;
- bo_fake->write_domain = 0;
-}
-
-
-static void
-dri_fake_post_submit(dri_bo *batch_buf)
-{
- dri_fake_fence_validated(batch_buf->bufmgr);
-
- dri_bo_fake_post_submit(batch_buf);
-}
-
-static int
-dri_fake_check_aperture_space(dri_bo *bo)
-{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
- GLuint sz;
-
- sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1);
-
- if (bo_fake->size_accounted || bo_fake->is_static)
- return 0;
-
- if (bufmgr_fake->current_total_size + sz > bufmgr_fake->size) {
- DBG("check_space: %s bo %d %d overflowed bufmgr size %d\n", bo_fake->name, bo_fake->id, sz, bufmgr_fake->size);
- return -1;
- }
-
- bufmgr_fake->current_total_size += sz;
- bo_fake->size_accounted = 1;
- DBG("drm_check_space: buf %d, %s %d %d\n", bo_fake->id, bo_fake->name, bo->size, bufmgr_fake->current_total_size);
- return 0;
-}
-
-dri_bufmgr *
-dri_bufmgr_fake_init(unsigned long low_offset, void *low_virtual,
- unsigned long size,
- unsigned int (*fence_emit)(void *private),
- int (*fence_wait)(void *private, unsigned int cookie),
- void *driver_priv)
-{
- dri_bufmgr_fake *bufmgr_fake;
-
- bufmgr_fake = calloc(1, sizeof(*bufmgr_fake));
-
- /* Initialize allocator */
- make_empty_list(&bufmgr_fake->fenced);
- make_empty_list(&bufmgr_fake->on_hardware);
- make_empty_list(&bufmgr_fake->lru);
-
- bufmgr_fake->low_offset = low_offset;
- bufmgr_fake->virtual = low_virtual;
- bufmgr_fake->size = size;
- bufmgr_fake->heap = mmInit(low_offset, size);
-
- /* Hook in methods */
- bufmgr_fake->bufmgr.bo_alloc = dri_fake_bo_alloc;
- bufmgr_fake->bufmgr.bo_alloc_static = dri_fake_bo_alloc_static;
- bufmgr_fake->bufmgr.bo_reference = dri_fake_bo_reference;
- bufmgr_fake->bufmgr.bo_unreference = dri_fake_bo_unreference;
- bufmgr_fake->bufmgr.bo_map = dri_fake_bo_map;
- bufmgr_fake->bufmgr.bo_unmap = dri_fake_bo_unmap;
- bufmgr_fake->bufmgr.bo_wait_rendering = dri_fake_bo_wait_rendering;
- bufmgr_fake->bufmgr.destroy = dri_fake_destroy;
- bufmgr_fake->bufmgr.emit_reloc = dri_fake_emit_reloc;
- bufmgr_fake->bufmgr.process_relocs = dri_fake_process_relocs;
- bufmgr_fake->bufmgr.post_submit = dri_fake_post_submit;
- bufmgr_fake->bufmgr.check_aperture_space = dri_fake_check_aperture_space;
- bufmgr_fake->bufmgr.debug = GL_FALSE;
-
- bufmgr_fake->fence_emit = fence_emit;
- bufmgr_fake->fence_wait = fence_wait;
- bufmgr_fake->driver_priv = driver_priv;
-
- return &bufmgr_fake->bufmgr;
-}
-
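[Illustrative note, not part of the patch: the sketch below shows the submission flow that the batchbuffer code drives through the hooks installed above (emit_reloc, process_relocs, post_submit). It assumes the fake/GEM hook signatures shown in this diff; the TTM backend's emit_reloc and process_relocs take slightly different arguments. submit_batch_sketch() and exec_batch() are hypothetical names.]

/* Illustrative sketch only.  exec_batch() stands in for the driver's
 * execution ioctl and is hypothetical. */
static void
submit_batch_sketch(dri_bo *batch, dri_bo *target,
                    uint32_t read_domains, uint32_t write_domain,
                    uint32_t reloc_offset)
{
   dri_bufmgr *bufmgr = batch->bufmgr;
   void *exec_args;

   /* Record that `batch' references `target' at byte offset `reloc_offset'. */
   bufmgr->emit_reloc(batch, read_domains, write_domain,
                      0 /* delta */, reloc_offset, target);

   /* Walk the relocation tree and build the validation list. */
   exec_args = bufmgr->process_relocs(batch);

   exec_batch(exec_args);       /* hypothetical execution-ioctl wrapper */

   /* Fence the validated buffers and reset per-submission state. */
   bufmgr->post_submit(batch);
}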
diff --git a/src/mesa/drivers/dri/intel/intel_bufmgr_fake.h b/src/mesa/drivers/dri/intel/intel_bufmgr_fake.h
deleted file mode 100644
index bc7e59e61db..00000000000
--- a/src/mesa/drivers/dri/intel/intel_bufmgr_fake.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/**************************************************************************
- *
- * Copyright © 2007 Intel Corporation
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
- * Eric Anholt <[email protected]>
- */
-
-#ifndef _INTEL_BUFMGR_FAKE_H_
-#define _INTEL_BUFMGR_FAKE_H_
-
-void dri_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr);
-dri_bufmgr *dri_bufmgr_fake_init(unsigned long low_offset, void *low_virtual,
- unsigned long size,
- unsigned int (*fence_emit)(void *private),
- int (*fence_wait)(void *private,
- unsigned int cookie),
- void *driver_priv);
-void dri_bo_fake_disable_backing_store(dri_bo *bo,
- void (*invalidate_cb)(dri_bo *bo,
- void *ptr),
- void *ptr);
-#endif /* _INTEL_BUFMGR_FAKE_H_ */
-
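[Illustrative note, not part of the patch: a minimal sketch of how a driver could bring up the fake buffer manager declared above. The driver struct, its fields, and the IRQ helpers are hypothetical; only the dri_bufmgr_fake_init() call reflects the interface removed here.]

/* Illustrative sketch only -- names outside dri_bufmgr_fake_init() are
 * hypothetical. */
struct example_driver {
   unsigned long aper_offset;   /* start of the aperture range to manage */
   void *aper_virtual;          /* CPU mapping of that range */
   unsigned long aper_size;     /* size of the range in bytes */
};

static unsigned int
example_fence_emit(void *priv)
{
   /* Emit a hardware fence/IRQ and return its cookie. */
   return example_emit_irq(priv);              /* hypothetical helper */
}

static int
example_fence_wait(void *priv, unsigned int cookie)
{
   /* Block until the fence identified by `cookie' has retired. */
   return example_wait_irq(priv, cookie);      /* hypothetical helper */
}

static dri_bufmgr *
example_create_fake_bufmgr(struct example_driver *drv)
{
   return dri_bufmgr_fake_init(drv->aper_offset, drv->aper_virtual,
                               drv->aper_size,
                               example_fence_emit, example_fence_wait,
                               drv);
}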
diff --git a/src/mesa/drivers/dri/intel/intel_bufmgr_gem.c b/src/mesa/drivers/dri/intel/intel_bufmgr_gem.c
deleted file mode 100644
index 3c1c3157e13..00000000000
--- a/src/mesa/drivers/dri/intel/intel_bufmgr_gem.c
+++ /dev/null
@@ -1,847 +0,0 @@
-/**************************************************************************
- *
- * Copyright © 2007 Red Hat Inc.
- * Copyright © 2007 Intel Corporation
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
- * Eric Anholt <[email protected]>
- * Dave Airlie <[email protected]>
- */
-
-#include <xf86drm.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <assert.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-
-#include "errno.h"
-#include "mtypes.h"
-#include "dri_bufmgr.h"
-#include "string.h"
-#include "imports.h"
-
-#include "i915_drm.h"
-
-#include "intel_bufmgr_gem.h"
-
-#define DBG(...) do { \
- if (bufmgr_gem->bufmgr.debug) \
- fprintf(stderr, __VA_ARGS__); \
-} while (0)
-
-struct intel_validate_entry {
- dri_bo *bo;
- struct drm_i915_op_arg bo_arg;
-};
-
-struct dri_gem_bo_bucket_entry {
- uint32_t gem_handle;
- uint32_t last_offset;
- struct dri_gem_bo_bucket_entry *next;
-};
-
-struct dri_gem_bo_bucket {
- struct dri_gem_bo_bucket_entry *head;
- struct dri_gem_bo_bucket_entry **tail;
- /**
- * Limit on the number of entries in this bucket.
- *
- * 0 means that this caching at this bucket size is disabled.
- * -1 means that there is no limit to caching at this size.
- */
- int max_entries;
- int num_entries;
-};
-
-/* Arbitrarily chosen, 16 means that the maximum size we'll cache for reuse
- * is 1 << 16 pages, or 256MB.
- */
-#define INTEL_GEM_BO_BUCKETS 16
-typedef struct _dri_bufmgr_gem {
- dri_bufmgr bufmgr;
-
- int fd;
-
- uint32_t max_relocs;
-
- struct drm_i915_gem_exec_object *exec_objects;
- dri_bo **exec_bos;
- int exec_size;
- int exec_count;
-
- /** Array of lists of cached gem objects of power-of-two sizes */
- struct dri_gem_bo_bucket cache_bucket[INTEL_GEM_BO_BUCKETS];
-
- struct drm_i915_gem_execbuffer exec_arg;
-} dri_bufmgr_gem;
-
-typedef struct _dri_bo_gem {
- dri_bo bo;
-
- int refcount;
- GLboolean mapped;
- uint32_t gem_handle;
- const char *name;
-
- /**
- * Index of the buffer within the validation list while preparing a
- * batchbuffer execution.
- */
- int validate_index;
-
- /**
- * Tracks whether set_domain to CPU is current
- * Set when set_domain has been called
- * Cleared when a batch has been submitted
- */
- GLboolean cpu_domain_set;
-
- /** Array passed to the DRM containing relocation information. */
- struct drm_i915_gem_relocation_entry *relocs;
- /** Array of bos corresponding to relocs[i].target_handle */
- dri_bo **reloc_target_bo;
- /** Number of entries in relocs */
- int reloc_count;
- /** Mapped address for the buffer */
- void *virtual;
-} dri_bo_gem;
-
-static int
-logbase2(int n)
-{
- GLint i = 1;
- GLint log2 = 0;
-
- while (n > i) {
- i *= 2;
- log2++;
- }
-
- return log2;
-}
-
-static struct dri_gem_bo_bucket *
-dri_gem_bo_bucket_for_size(dri_bufmgr_gem *bufmgr_gem, unsigned long size)
-{
- int i;
-
- /* We only do buckets in power of two increments */
- if ((size & (size - 1)) != 0)
- return NULL;
-
- /* We should only see sizes rounded to pages. */
- assert((size % 4096) == 0);
-
- /* We always allocate in units of pages */
- i = ffs(size / 4096) - 1;
- if (i >= INTEL_GEM_BO_BUCKETS)
- return NULL;
-
- return &bufmgr_gem->cache_bucket[i];
-}
-
-
-static void dri_gem_dump_validation_list(dri_bufmgr_gem *bufmgr_gem)
-{
- int i, j;
-
- for (i = 0; i < bufmgr_gem->exec_count; i++) {
- dri_bo *bo = bufmgr_gem->exec_bos[i];
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
-
- if (bo_gem->relocs == NULL) {
- DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle, bo_gem->name);
- continue;
- }
-
- for (j = 0; j < bo_gem->reloc_count; j++) {
- dri_bo *target_bo = bo_gem->reloc_target_bo[j];
- dri_bo_gem *target_gem = (dri_bo_gem *)target_bo;
-
- DBG("%2d: %d (%s)@0x%08llx -> %d (%s)@0x%08lx + 0x%08x\n",
- i,
- bo_gem->gem_handle, bo_gem->name, bo_gem->relocs[j].offset,
- target_gem->gem_handle, target_gem->name, target_bo->offset,
- bo_gem->relocs[j].delta);
- }
- }
-}
-
-/**
- * Adds the given buffer to the list of buffers to be validated (moved into the
- * appropriate memory type) with the next batch submission.
- *
- * If a buffer is validated multiple times in a batch submission, it ends up
- * with the intersection of the memory type flags and the union of the
- * access flags.
- */
-static void
-intel_add_validate_buffer(dri_bo *bo)
-{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
- int index;
-
- if (bo_gem->validate_index != -1)
- return;
-
- /* Extend the array of validation entries as necessary. */
- if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
- int new_size = bufmgr_gem->exec_size * 2;
-
- if (new_size == 0)
- new_size = 5;
-
- bufmgr_gem->exec_objects =
- realloc(bufmgr_gem->exec_objects,
- sizeof(*bufmgr_gem->exec_objects) * new_size);
- bufmgr_gem->exec_bos =
- realloc(bufmgr_gem->exec_bos,
- sizeof(*bufmgr_gem->exec_bos) * new_size);
- bufmgr_gem->exec_size = new_size;
- }
-
- index = bufmgr_gem->exec_count;
- bo_gem->validate_index = index;
- /* Fill in array entry */
- bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
- bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
- bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
- bufmgr_gem->exec_objects[index].alignment = 0;
- bufmgr_gem->exec_objects[index].offset = 0;
- bufmgr_gem->exec_bos[index] = bo;
- dri_bo_reference(bo);
- bufmgr_gem->exec_count++;
-}
-
-
-#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
- sizeof(uint32_t))
-
-static int
-intel_setup_reloc_list(dri_bo *bo)
-{
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
-
- bo_gem->relocs = malloc(bufmgr_gem->max_relocs *
- sizeof(struct drm_i915_gem_relocation_entry));
- bo_gem->reloc_target_bo = malloc(bufmgr_gem->max_relocs * sizeof(dri_bo *));
-
- return 0;
-}
-
-static dri_bo *
-dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
- unsigned long size, unsigned int alignment,
- uint64_t location_mask)
-{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
- dri_bo_gem *bo_gem;
- unsigned int page_size = getpagesize();
- int ret;
- struct dri_gem_bo_bucket *bucket;
- GLboolean alloc_from_cache = GL_FALSE;
-
- bo_gem = calloc(1, sizeof(*bo_gem));
- if (!bo_gem)
- return NULL;
-
- /* Round the allocated size up to a power of two number of pages. */
- bo_gem->bo.size = 1 << logbase2(size);
- if (bo_gem->bo.size < page_size)
- bo_gem->bo.size = page_size;
- bucket = dri_gem_bo_bucket_for_size(bufmgr_gem, bo_gem->bo.size);
-
- /* If we don't have caching at this size, don't actually round the
- * allocation up.
- */
- if (bucket == NULL || bucket->max_entries == 0) {
- bo_gem->bo.size = size;
- if (bo_gem->bo.size < page_size)
- bo_gem->bo.size = page_size;
- }
-
- /* Get a buffer out of the cache if available */
- if (bucket != NULL && bucket->num_entries > 0) {
- struct dri_gem_bo_bucket_entry *entry = bucket->head;
- struct drm_i915_gem_busy busy;
-
- busy.handle = entry->gem_handle;
- ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
- alloc_from_cache = (ret == 0 && busy.busy == 0);
-
- if (alloc_from_cache) {
- bucket->head = entry->next;
- if (entry->next == NULL)
- bucket->tail = &bucket->head;
- bucket->num_entries--;
-
- bo_gem->gem_handle = entry->gem_handle;
- bo_gem->bo.offset = entry->last_offset;
- free(entry);
- }
- }
-
- if (!alloc_from_cache) {
- struct drm_gem_create create;
-
- memset(&create, 0, sizeof(create));
- create.size = bo_gem->bo.size;
-
- ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CREATE, &create);
- bo_gem->gem_handle = create.handle;
- if (ret != 0) {
- free(bo_gem);
- return NULL;
- }
- }
-
- bo_gem->bo.virtual = NULL;
- bo_gem->bo.bufmgr = bufmgr;
- bo_gem->name = name;
- bo_gem->refcount = 1;
- bo_gem->validate_index = -1;
-
- DBG("bo_create: buf %d (%s) %ldb\n",
- bo_gem->gem_handle, bo_gem->name, size);
-
- return &bo_gem->bo;
-}
-
-/* Our GEM backend doesn't allow creation of static buffers, as that requires
- * privilege for the non-fake case, and the lock in the fake case where we were
- * working around the X Server not creating buffers and passing handles to us.
- */
-static dri_bo *
-dri_gem_bo_alloc_static(dri_bufmgr *bufmgr, const char *name,
- unsigned long offset, unsigned long size, void *virtual,
- uint64_t location_mask)
-{
- return NULL;
-}
-
-/**
- * Returns a dri_bo wrapping the given buffer object handle.
- *
- * This can be used when one application needs to pass a buffer object
- * to another.
- */
-dri_bo *
-intel_gem_bo_create_from_handle(dri_bufmgr *bufmgr, const char *name,
- unsigned int handle)
-{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
- dri_bo_gem *bo_gem;
- int ret;
- struct drm_gem_open open_arg;
-
- bo_gem = calloc(1, sizeof(*bo_gem));
- if (!bo_gem)
- return NULL;
-
- memset(&open_arg, 0, sizeof(open_arg));
- open_arg.name = handle;
- ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
- if (ret != 0) {
- fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
- name, handle, strerror(-ret));
- free(bo_gem);
- return NULL;
- }
- bo_gem->bo.size = open_arg.size;
- bo_gem->bo.offset = 0;
- bo_gem->bo.virtual = NULL;
- bo_gem->bo.bufmgr = bufmgr;
- bo_gem->name = name;
- bo_gem->refcount = 1;
- bo_gem->validate_index = -1;
- bo_gem->gem_handle = open_arg.handle;
-
- DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
-
- return &bo_gem->bo;
-}
-
-static void
-dri_gem_bo_reference(dri_bo *bo)
-{
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
-
- bo_gem->refcount++;
-}
-
-static void
-dri_gem_bo_unreference(dri_bo *bo)
-{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
-
- if (!bo)
- return;
-
- if (--bo_gem->refcount == 0) {
- struct dri_gem_bo_bucket *bucket;
- int ret;
-
- if (bo_gem->mapped)
- munmap (bo_gem->virtual, bo->size);
-
- if (bo_gem->relocs != NULL) {
- int i;
-
- /* Unreference all the target buffers */
- for (i = 0; i < bo_gem->reloc_count; i++)
- dri_bo_unreference(bo_gem->reloc_target_bo[i]);
- free(bo_gem->reloc_target_bo);
- free(bo_gem->relocs);
- }
-
- bucket = dri_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
- /* Put the buffer into our internal cache for reuse if we can. */
- if (bucket != NULL &&
- (bucket->max_entries == -1 ||
- (bucket->max_entries > 0 &&
- bucket->num_entries < bucket->max_entries)))
- {
- struct dri_gem_bo_bucket_entry *entry;
-
- entry = calloc(1, sizeof(*entry));
- entry->gem_handle = bo_gem->gem_handle;
- entry->last_offset = bo->offset;
-
- entry->next = NULL;
- *bucket->tail = entry;
- bucket->tail = &entry->next;
- bucket->num_entries++;
- } else {
- struct drm_gem_close close;
-
- /* Close this object */
- close.handle = bo_gem->gem_handle;
- ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
- if (ret != 0) {
- fprintf(stderr,
- "DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
- bo_gem->gem_handle, bo_gem->name, strerror(-ret));
- }
- }
-
- DBG("bo_unreference final: %d (%s)\n",
- bo_gem->gem_handle, bo_gem->name);
-
- free(bo);
- return;
- }
-}
-
-static int
-dri_gem_bo_map(dri_bo *bo, GLboolean write_enable)
-{
- dri_bufmgr_gem *bufmgr_gem;
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
- struct drm_gem_set_domain set_domain;
- int ret;
-
- bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
-
- /* Allow recursive mapping. Mesa may recursively map buffers with
- * nested display loops.
- */
- if (!bo_gem->mapped) {
-
- assert(bo->virtual == NULL);
-
- DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
-
- if (bo_gem->virtual == NULL) {
- struct drm_gem_mmap mmap_arg;
-
- memset(&mmap_arg, 0, sizeof(mmap_arg));
- mmap_arg.handle = bo_gem->gem_handle;
- mmap_arg.offset = 0;
- mmap_arg.size = bo->size;
- ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_MMAP, &mmap_arg);
- if (ret != 0) {
- fprintf(stderr, "%s:%d: Error mapping buffer %d (%s): %s .\n",
- __FILE__, __LINE__,
- bo_gem->gem_handle, bo_gem->name, strerror(errno));
- }
- bo_gem->virtual = (void *)(uintptr_t)mmap_arg.addr_ptr;
- }
- bo->virtual = bo_gem->virtual;
- bo_gem->mapped = GL_TRUE;
- DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name, bo_gem->virtual);
- }
-
- if (!bo_gem->cpu_domain_set) {
- set_domain.handle = bo_gem->gem_handle;
- set_domain.read_domains = DRM_GEM_DOMAIN_CPU;
- set_domain.write_domain = write_enable ? DRM_GEM_DOMAIN_CPU : 0;
- ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_SET_DOMAIN, &set_domain);
- if (ret != 0) {
- fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
- __FILE__, __LINE__,
- bo_gem->gem_handle, set_domain.read_domains, set_domain.write_domain,
- strerror (errno));
- }
- bo_gem->cpu_domain_set = GL_TRUE;
- }
-
- return 0;
-}
-
-static int
-dri_gem_bo_unmap(dri_bo *bo)
-{
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
-
- if (bo == NULL)
- return 0;
-
- assert(bo_gem->mapped);
-
- return 0;
-}
-
-static int
-dri_gem_bo_subdata (dri_bo *bo, unsigned long offset,
- unsigned long size, const void *data)
-{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
- struct drm_gem_pwrite pwrite;
- int ret;
-
- memset (&pwrite, 0, sizeof (pwrite));
- pwrite.handle = bo_gem->gem_handle;
- pwrite.offset = offset;
- pwrite.size = size;
- pwrite.data_ptr = (uint64_t) (uintptr_t) data;
- ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_PWRITE, &pwrite);
- if (ret != 0) {
- fprintf (stderr, "%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
- __FILE__, __LINE__,
- bo_gem->gem_handle, (int) offset, (int) size,
- strerror (errno));
- }
- return 0;
-}
-
-static int
-dri_gem_bo_get_subdata (dri_bo *bo, unsigned long offset,
- unsigned long size, void *data)
-{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
- struct drm_gem_pread pread;
- int ret;
-
- memset (&pread, 0, sizeof (pread));
- pread.handle = bo_gem->gem_handle;
- pread.offset = offset;
- pread.size = size;
- pread.data_ptr = (uint64_t) (uintptr_t) data;
- ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_PREAD, &pread);
- if (ret != 0) {
- fprintf (stderr, "%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
- __FILE__, __LINE__,
- bo_gem->gem_handle, (int) offset, (int) size,
- strerror (errno));
- }
- return 0;
-}
-
-static void
-dri_gem_bo_wait_rendering(dri_bo *bo)
-{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
- struct drm_gem_set_domain set_domain;
- int ret;
-
- set_domain.handle = bo_gem->gem_handle;
- set_domain.read_domains = DRM_GEM_DOMAIN_CPU;
- set_domain.write_domain = 0;
- ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_SET_DOMAIN, &set_domain);
- if (ret != 0) {
- fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
- __FILE__, __LINE__,
- bo_gem->gem_handle, set_domain.read_domains, set_domain.write_domain,
- strerror (errno));
- }
-}
-
-static void
-dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr)
-{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
- int i;
-
- free(bufmgr_gem->exec_objects);
- free(bufmgr_gem->exec_bos);
-
- /* Free any cached buffer objects we were going to reuse */
- for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) {
- struct dri_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
- struct dri_gem_bo_bucket_entry *entry;
-
- while ((entry = bucket->head) != NULL) {
- struct drm_gem_close close;
- int ret;
-
- bucket->head = entry->next;
- if (entry->next == NULL)
- bucket->tail = &bucket->head;
- bucket->num_entries--;
-
- /* Close this object */
- close.handle = entry->gem_handle;
- ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
- if (ret != 0) {
- fprintf(stderr, "DRM_IOCTL_GEM_CLOSE failed: %s\n",
- strerror(-ret));
- }
-
- free(entry);
- }
- }
-
- free(bufmgr);
-}
-
-/**
- * Adds the target buffer to the validation list and adds the relocation
- * to the reloc_buffer's relocation list.
- *
- * The relocation entry at the given offset must already contain the
- * precomputed relocation value, because the kernel will optimize out
- * the relocation entry write when the buffer hasn't moved from the
- * last known offset in target_bo.
- */
-static int
-dri_gem_emit_reloc(dri_bo *bo, uint32_t read_domains, uint32_t write_domain,
- uint32_t delta, uint32_t offset, dri_bo *target_bo)
-{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
- dri_bo_gem *target_bo_gem = (dri_bo_gem *)target_bo;
-
- /* Create a new relocation list if needed */
- if (bo_gem->relocs == NULL)
- intel_setup_reloc_list(bo);
-
- /* Check overflow */
- assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
-
- /* Check args */
- assert (offset <= bo->size - 4);
- assert ((write_domain & (write_domain-1)) == 0);
-
- bo_gem->relocs[bo_gem->reloc_count].offset = offset;
- bo_gem->relocs[bo_gem->reloc_count].delta = delta;
- bo_gem->relocs[bo_gem->reloc_count].target_handle =
- target_bo_gem->gem_handle;
- bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
- bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
- bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
-
- bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo;
- dri_bo_reference(target_bo);
-
- bo_gem->reloc_count++;
- return 0;
-}
-
-/**
- * Walk the tree of relocations rooted at BO and accumulate the list of
- * validations to be performed and update the relocation buffers with
- * index values into the validation list.
- */
-static void
-dri_gem_bo_process_reloc(dri_bo *bo)
-{
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
- int i;
-
- if (bo_gem->relocs == NULL)
- return;
-
- for (i = 0; i < bo_gem->reloc_count; i++) {
- dri_bo *target_bo = bo_gem->reloc_target_bo[i];
-
- /* Continue walking the tree depth-first. */
- dri_gem_bo_process_reloc(target_bo);
-
- /* Add the target to the validate list */
- intel_add_validate_buffer(target_bo);
- }
-}
-
-static void *
-dri_gem_process_reloc(dri_bo *batch_buf)
-{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *) batch_buf->bufmgr;
-
- /* Update indices and set up the validate list. */
- dri_gem_bo_process_reloc(batch_buf);
-
- /* Add the batch buffer to the validation list. There are no relocations
- * pointing to it.
- */
- intel_add_validate_buffer(batch_buf);
-
- bufmgr_gem->exec_arg.buffers_ptr = (uintptr_t)bufmgr_gem->exec_objects;
- bufmgr_gem->exec_arg.buffer_count = bufmgr_gem->exec_count;
- bufmgr_gem->exec_arg.batch_start_offset = 0;
- bufmgr_gem->exec_arg.batch_len = 0; /* written in intel_exec_ioctl */
-
- return &bufmgr_gem->exec_arg;
-}
-
-static void
-intel_update_buffer_offsets (dri_bufmgr_gem *bufmgr_gem)
-{
- int i;
-
- for (i = 0; i < bufmgr_gem->exec_count; i++) {
- dri_bo *bo = bufmgr_gem->exec_bos[i];
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
-
- /* Update the buffer offset */
- if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
- DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
- bo_gem->gem_handle, bo_gem->name, bo->offset,
- bufmgr_gem->exec_objects[i].offset);
- bo->offset = bufmgr_gem->exec_objects[i].offset;
- }
- }
-}
-
-static void
-dri_gem_post_submit(dri_bo *batch_buf)
-{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)batch_buf->bufmgr;
- int i;
-
- intel_update_buffer_offsets (bufmgr_gem);
-
- if (bufmgr_gem->bufmgr.debug)
- dri_gem_dump_validation_list(bufmgr_gem);
-
- for (i = 0; i < bufmgr_gem->exec_count; i++) {
- dri_bo *bo = bufmgr_gem->exec_bos[i];
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
-
- /* Need to call set_domain on next bo_map */
- bo_gem->cpu_domain_set = GL_FALSE;
-
- /* Disconnect the buffer from the validate list */
- bo_gem->validate_index = -1;
- dri_bo_unreference(bo);
- bufmgr_gem->exec_bos[i] = NULL;
- }
- bufmgr_gem->exec_count = 0;
-}
-
-/**
- * Enables unlimited caching of buffer objects for reuse.
- *
- * This is potentially very memory expensive, as the cache at each bucket
- * size is only bounded by how many buffers of that size we've managed to have
- * in flight at once.
- */
-void
-intel_gem_enable_bo_reuse(dri_bufmgr *bufmgr)
-{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
- int i;
-
- for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) {
- bufmgr_gem->cache_bucket[i].max_entries = -1;
- }
-}
-
-/*
- * No aperture accounting is done for the GEM backend; always report that
- * the buffer fits.
- */
-static int
-dri_gem_check_aperture_space(dri_bo *bo)
-{
- return 0;
-}
-
-/**
- * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
- * and manage buffer objects.
- *
- * \param fd File descriptor of the opened DRM device.
- */
-dri_bufmgr *
-intel_bufmgr_gem_init(int fd, int batch_size)
-{
- dri_bufmgr_gem *bufmgr_gem;
- int i;
-
- bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
- bufmgr_gem->fd = fd;
-
- /* Let's go with one relocation per every 2 dwords (but round down a bit
- * since a power of two will mean an extra page allocation for the reloc
- * buffer).
- *
- * Every 4 was too few for the blender benchmark.
- */
- bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
-
- bufmgr_gem->bufmgr.bo_alloc = dri_gem_bo_alloc;
- bufmgr_gem->bufmgr.bo_alloc_static = dri_gem_bo_alloc_static;
- bufmgr_gem->bufmgr.bo_reference = dri_gem_bo_reference;
- bufmgr_gem->bufmgr.bo_unreference = dri_gem_bo_unreference;
- bufmgr_gem->bufmgr.bo_map = dri_gem_bo_map;
- bufmgr_gem->bufmgr.bo_unmap = dri_gem_bo_unmap;
- bufmgr_gem->bufmgr.bo_subdata = dri_gem_bo_subdata;
- bufmgr_gem->bufmgr.bo_get_subdata = dri_gem_bo_get_subdata;
- bufmgr_gem->bufmgr.bo_wait_rendering = dri_gem_bo_wait_rendering;
- bufmgr_gem->bufmgr.destroy = dri_bufmgr_gem_destroy;
- bufmgr_gem->bufmgr.emit_reloc = dri_gem_emit_reloc;
- bufmgr_gem->bufmgr.process_relocs = dri_gem_process_reloc;
- bufmgr_gem->bufmgr.post_submit = dri_gem_post_submit;
- bufmgr_gem->bufmgr.debug = GL_FALSE;
- bufmgr_gem->bufmgr.check_aperture_space = dri_gem_check_aperture_space;
- /* Initialize the linked lists for BO reuse cache. */
- for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++)
- bufmgr_gem->cache_bucket[i].tail = &bufmgr_gem->cache_bucket[i].head;
-
- return &bufmgr_gem->bufmgr;
-}
-
diff --git a/src/mesa/drivers/dri/intel/intel_bufmgr_gem.h b/src/mesa/drivers/dri/intel/intel_bufmgr_gem.h
deleted file mode 100644
index 36caeba2147..00000000000
--- a/src/mesa/drivers/dri/intel/intel_bufmgr_gem.h
+++ /dev/null
@@ -1,16 +0,0 @@
-
-#ifndef INTEL_BUFMGR_GEM_H
-#define INTEL_BUFMGR_GEM_H
-
-#include "dri_bufmgr.h"
-
-extern dri_bo *intel_gem_bo_create_from_handle(dri_bufmgr *bufmgr,
- const char *name,
- unsigned int handle);
-
-dri_bufmgr *intel_bufmgr_gem_init(int fd, int batch_size);
-
-void
-intel_gem_enable_bo_reuse(dri_bufmgr *bufmgr);
-
-#endif /* INTEL_BUFMGR_GEM_H */
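[Illustrative note, not part of the patch: a short sketch of how the GEM interface declared above is meant to be used. BATCH_SZ, drm_fd, and flink_name are hypothetical placeholders.]

/* Illustrative sketch only. */
#define BATCH_SZ (16 * 1024)

static void
example_setup_gem_bufmgr(int drm_fd, unsigned int flink_name)
{
   dri_bufmgr *bufmgr = intel_bufmgr_gem_init(drm_fd, BATCH_SZ);
   dri_bo *shared_bo;

   /* Keep freed buffers cached for reuse instead of closing them right away. */
   intel_gem_enable_bo_reuse(bufmgr);

   /* Wrap a buffer object flink-named by another process (e.g. the X server). */
   shared_bo = intel_gem_bo_create_from_handle(bufmgr, "shared front",
                                               flink_name);
   (void)shared_bo;
}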
diff --git a/src/mesa/drivers/dri/intel/intel_bufmgr_ttm.c b/src/mesa/drivers/dri/intel/intel_bufmgr_ttm.c
deleted file mode 100644
index 545913fa31b..00000000000
--- a/src/mesa/drivers/dri/intel/intel_bufmgr_ttm.c
+++ /dev/null
@@ -1,1102 +0,0 @@
-/**************************************************************************
- *
- * Copyright © 2007 Red Hat Inc.
- * Copyright © 2007 Intel Corporation
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
- * Eric Anholt <[email protected]>
- * Dave Airlie <[email protected]>
- */
-
-#include <xf86drm.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <assert.h>
-
-#include "errno.h"
-#include "mtypes.h"
-#include "dri_bufmgr.h"
-#include "string.h"
-#include "imports.h"
-
-#include "i915_drm.h"
-
-#include "intel_bufmgr_ttm.h"
-
-#define DBG(...) do { \
- if (bufmgr_ttm->bufmgr.debug) \
- fprintf(stderr, __VA_ARGS__); \
-} while (0)
-
-/*
- * These bits are always specified in each validation
- * request. Other bits are not supported at this point
- * as it would require a bit of investigation to figure
- * out what mask value should be used.
- */
-#define INTEL_BO_MASK (DRM_BO_MASK_MEM | \
- DRM_BO_FLAG_READ | \
- DRM_BO_FLAG_WRITE | \
- DRM_BO_FLAG_EXE)
-
-struct intel_validate_entry {
- dri_bo *bo;
- struct drm_i915_op_arg bo_arg;
-};
-
-struct dri_ttm_bo_bucket_entry {
- drmBO drm_bo;
- struct dri_ttm_bo_bucket_entry *next;
-};
-
-struct dri_ttm_bo_bucket {
- struct dri_ttm_bo_bucket_entry *head;
- struct dri_ttm_bo_bucket_entry **tail;
- /**
- * Limit on the number of entries in this bucket.
- *
- * 0 means that this caching at this bucket size is disabled.
- * -1 means that there is no limit to caching at this size.
- */
- int max_entries;
- int num_entries;
-};
-
-/* Arbitrarily chosen, 16 means that the maximum size we'll cache for reuse
- * is 1 << 16 pages, or 256MB.
- */
-#define INTEL_TTM_BO_BUCKETS 16
-typedef struct _dri_bufmgr_ttm {
- dri_bufmgr bufmgr;
-
- int fd;
- unsigned int fence_type;
- unsigned int fence_type_flush;
-
- uint32_t max_relocs;
-
- struct intel_validate_entry *validate_array;
- int validate_array_size;
- int validate_count;
-
- /** Array of lists of cached drmBOs of power-of-two sizes */
- struct dri_ttm_bo_bucket cache_bucket[INTEL_TTM_BO_BUCKETS];
-} dri_bufmgr_ttm;
-
-/**
- * Private information associated with a relocation that isn't already stored
- * in the relocation buffer to be passed to the kernel.
- */
-struct dri_ttm_reloc {
- dri_bo *target_buf;
- uint64_t validate_flags;
- /** Offset of target_buf after last execution of this relocation entry. */
- unsigned int last_target_offset;
-};
-
-typedef struct _dri_bo_ttm {
- dri_bo bo;
-
- int refcount;
- unsigned int map_count;
- drmBO drm_bo;
- const char *name;
-
- uint64_t last_flags;
-
- /**
- * Index of the buffer within the validation list while preparing a
- * batchbuffer execution.
- */
- int validate_index;
-
- /** DRM buffer object containing relocation list */
- uint32_t *reloc_buf_data;
- struct dri_ttm_reloc *relocs;
-
- /**
- * Indicates that the buffer may be shared with other processes, so we
- * can't hold maps beyond when the user does.
- */
- GLboolean shared;
-
- GLboolean delayed_unmap;
- /* Virtual address from the dri_bo_map whose unmap was delayed. */
- void *saved_virtual;
-} dri_bo_ttm;
-
-typedef struct _dri_fence_ttm
-{
- dri_fence fence;
-
- int refcount;
- const char *name;
- drmFence drm_fence;
-} dri_fence_ttm;
-
-static int
-logbase2(int n)
-{
- GLint i = 1;
- GLint log2 = 0;
-
- while (n > i) {
- i *= 2;
- log2++;
- }
-
- return log2;
-}
-
-static struct dri_ttm_bo_bucket *
-dri_ttm_bo_bucket_for_size(dri_bufmgr_ttm *bufmgr_ttm, unsigned long size)
-{
- int i;
-
- /* We only do buckets in power of two increments */
- if ((size & (size - 1)) != 0)
- return NULL;
-
- /* We should only see sizes rounded to pages. */
- assert((size % 4096) == 0);
-
- /* We always allocate in units of pages */
- i = ffs(size / 4096) - 1;
- if (i >= INTEL_TTM_BO_BUCKETS)
- return NULL;
-
- return &bufmgr_ttm->cache_bucket[i];
-}
-
-
-static void dri_ttm_dump_validation_list(dri_bufmgr_ttm *bufmgr_ttm)
-{
- int i, j;
-
- for (i = 0; i < bufmgr_ttm->validate_count; i++) {
- dri_bo *bo = bufmgr_ttm->validate_array[i].bo;
- dri_bo_ttm *bo_ttm = (dri_bo_ttm *)bo;
-
- if (bo_ttm->reloc_buf_data != NULL) {
- for (j = 0; j < (bo_ttm->reloc_buf_data[0] & 0xffff); j++) {
- uint32_t *reloc_entry = bo_ttm->reloc_buf_data +
- I915_RELOC_HEADER +
- j * I915_RELOC0_STRIDE;
- dri_bo *target_bo = bo_ttm->relocs[j].target_buf;
- dri_bo_ttm *target_ttm = (dri_bo_ttm *)target_bo;
-
- DBG("%2d: %s@0x%08x -> %s@0x%08lx + 0x%08x\n",
- i,
- bo_ttm->name, reloc_entry[0],
- target_ttm->name, target_bo->offset,
- reloc_entry[1]);
- }
- } else {
- DBG("%2d: %s\n", i, bo_ttm->name);
- }
- }
-}
-
-/**
- * Adds the given buffer to the list of buffers to be validated (moved into the
- * appropriate memory type) with the next batch submission.
- *
- * If a buffer is validated multiple times in a batch submission, it ends up
- * with the intersection of the memory type flags and the union of the
- * access flags.
- */
-static void
-intel_add_validate_buffer(dri_bo *buf,
- uint64_t flags)
-{
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
- dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
-
- /* If we delayed doing an unmap to mitigate map/unmap syscall thrashing,
- * do that now.
- */
- if (ttm_buf->delayed_unmap) {
- drmBOUnmap(bufmgr_ttm->fd, &ttm_buf->drm_bo);
- ttm_buf->delayed_unmap = GL_FALSE;
- }
-
- if (ttm_buf->validate_index == -1) {
- struct intel_validate_entry *entry;
- struct drm_i915_op_arg *arg;
- struct drm_bo_op_req *req;
- int index;
-
- /* Extend the array of validation entries as necessary. */
- if (bufmgr_ttm->validate_count == bufmgr_ttm->validate_array_size) {
- int i, new_size = bufmgr_ttm->validate_array_size * 2;
-
- if (new_size == 0)
- new_size = 5;
-
- bufmgr_ttm->validate_array =
- realloc(bufmgr_ttm->validate_array,
- sizeof(struct intel_validate_entry) * new_size);
- bufmgr_ttm->validate_array_size = new_size;
-
- /* Update pointers for realloced mem. */
- for (i = 0; i < bufmgr_ttm->validate_count - 1; i++) {
- bufmgr_ttm->validate_array[i].bo_arg.next = (unsigned long)
- &bufmgr_ttm->validate_array[i + 1].bo_arg;
- }
- }
-
- /* Pick out the new array entry for ourselves */
- index = bufmgr_ttm->validate_count;
- ttm_buf->validate_index = index;
- entry = &bufmgr_ttm->validate_array[index];
- bufmgr_ttm->validate_count++;
-
- /* Fill in array entry */
- entry->bo = buf;
- dri_bo_reference(buf);
-
- /* Fill in kernel arg */
- arg = &entry->bo_arg;
- req = &arg->d.req;
-
- memset(arg, 0, sizeof(*arg));
- req->bo_req.handle = ttm_buf->drm_bo.handle;
- req->op = drm_bo_validate;
- req->bo_req.flags = flags;
- req->bo_req.hint = 0;
-#ifdef DRM_BO_HINT_PRESUMED_OFFSET
- /* PRESUMED_OFFSET indicates that all relocations pointing at this
- * buffer have the correct offset. If any of our relocations don't,
- * this flag will be cleared off the buffer later in the relocation
- * processing.
- */
- req->bo_req.hint |= DRM_BO_HINT_PRESUMED_OFFSET;
- req->bo_req.presumed_offset = buf->offset;
-#endif
- req->bo_req.mask = INTEL_BO_MASK;
- req->bo_req.fence_class = 0; /* Backwards compat. */
-
- if (ttm_buf->reloc_buf_data != NULL)
- arg->reloc_ptr = (unsigned long)(void *)ttm_buf->reloc_buf_data;
- else
- arg->reloc_ptr = 0;
-
- /* Hook up the linked list of args for the kernel */
- arg->next = 0;
- if (index != 0) {
- bufmgr_ttm->validate_array[index - 1].bo_arg.next =
- (unsigned long)arg;
- }
- } else {
- struct intel_validate_entry *entry =
- &bufmgr_ttm->validate_array[ttm_buf->validate_index];
- struct drm_i915_op_arg *arg = &entry->bo_arg;
- struct drm_bo_op_req *req = &arg->d.req;
- uint64_t memFlags = req->bo_req.flags & flags & DRM_BO_MASK_MEM;
- uint64_t modeFlags = (req->bo_req.flags | flags) & ~DRM_BO_MASK_MEM;
-
- /* Buffer was already in the validate list. Extend its flags as
- * necessary.
- */
-
- if (memFlags == 0) {
- fprintf(stderr,
- "%s: No shared memory types between "
- "0x%16llx and 0x%16llx\n",
- __FUNCTION__, req->bo_req.flags, flags);
- abort();
- }
- if (flags & ~INTEL_BO_MASK) {
- fprintf(stderr,
- "%s: Flags bits 0x%16llx are not supposed to be used in a relocation\n",
- __FUNCTION__, flags & ~INTEL_BO_MASK);
- abort();
- }
- req->bo_req.flags = memFlags | modeFlags;
- }
-}
-
-
-#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
- sizeof(uint32_t))
-
-static int
-intel_setup_reloc_list(dri_bo *bo)
-{
- dri_bo_ttm *bo_ttm = (dri_bo_ttm *)bo;
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)bo->bufmgr;
-
- bo_ttm->relocs = calloc(bufmgr_ttm->max_relocs,
- sizeof(struct dri_ttm_reloc));
- bo_ttm->reloc_buf_data = calloc(1, RELOC_BUF_SIZE(bufmgr_ttm->max_relocs));
-
- /* Initialize the relocation list with the header:
- * DWORD 0: relocation count
- * DWORD 1: relocation type
- * DWORD 2+3: handle to next relocation list (currently none) 64-bits
- */
- bo_ttm->reloc_buf_data[0] = 0;
- bo_ttm->reloc_buf_data[1] = I915_RELOC_TYPE_0;
- bo_ttm->reloc_buf_data[2] = 0;
- bo_ttm->reloc_buf_data[3] = 0;
-
- return 0;
-}
-
-#if 0
-int
-driFenceSignaled(DriFenceObject * fence, unsigned type)
-{
- int signaled;
- int ret;
-
- if (fence == NULL)
- return GL_TRUE;
-
- ret = drmFenceSignaled(bufmgr_ttm->fd, &fence->fence, type, &signaled);
- BM_CKFATAL(ret);
- return signaled;
-}
-#endif
-
-static dri_bo *
-dri_ttm_alloc(dri_bufmgr *bufmgr, const char *name,
- unsigned long size, unsigned int alignment,
- uint64_t location_mask)
-{
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)bufmgr;
- dri_bo_ttm *ttm_buf;
- unsigned int pageSize = getpagesize();
- int ret;
- uint64_t flags;
- unsigned int hint;
- unsigned long alloc_size;
- struct dri_ttm_bo_bucket *bucket;
- GLboolean alloc_from_cache = GL_FALSE;
-
- ttm_buf = calloc(1, sizeof(*ttm_buf));
- if (!ttm_buf)
- return NULL;
-
-   /* The mask argument does nothing for us beyond determining which pool
-    * (TTM or local) the buffer is allocated into, so just pass all of the
-    * allocation class flags.
-    */
- flags = location_mask | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
- DRM_BO_FLAG_EXE;
- /* No hints we want to use. */
- hint = 0;
-
- /* Round the allocated size up to a power of two number of pages. */
- alloc_size = 1 << logbase2(size);
- if (alloc_size < pageSize)
- alloc_size = pageSize;
- bucket = dri_ttm_bo_bucket_for_size(bufmgr_ttm, alloc_size);
-
- /* If we don't have caching at this size, don't actually round the
- * allocation up.
- */
- if (bucket == NULL || bucket->max_entries == 0)
- alloc_size = size;
-
- /* Get a buffer out of the cache if available */
- if (bucket != NULL && bucket->num_entries > 0) {
- struct dri_ttm_bo_bucket_entry *entry = bucket->head;
- int busy;
-
- /* Check if the buffer is still in flight. If not, reuse it. */
- ret = drmBOBusy(bufmgr_ttm->fd, &entry->drm_bo, &busy);
- alloc_from_cache = (ret == 0 && busy == 0);
-
- if (alloc_from_cache) {
- bucket->head = entry->next;
- if (entry->next == NULL)
- bucket->tail = &bucket->head;
- bucket->num_entries--;
-
- ttm_buf->drm_bo = entry->drm_bo;
- free(entry);
- }
- }
-
- if (!alloc_from_cache) {
- ret = drmBOCreate(bufmgr_ttm->fd, alloc_size, alignment / pageSize,
- NULL, flags, hint, &ttm_buf->drm_bo);
- if (ret != 0) {
- free(ttm_buf);
- return NULL;
- }
- }
-
- ttm_buf->bo.size = size;
- ttm_buf->bo.offset = ttm_buf->drm_bo.offset;
- ttm_buf->bo.virtual = NULL;
- ttm_buf->bo.bufmgr = bufmgr;
- ttm_buf->name = name;
- ttm_buf->refcount = 1;
- ttm_buf->reloc_buf_data = NULL;
- ttm_buf->relocs = NULL;
- ttm_buf->last_flags = ttm_buf->drm_bo.flags;
- ttm_buf->shared = GL_FALSE;
- ttm_buf->delayed_unmap = GL_FALSE;
- ttm_buf->validate_index = -1;
-
- DBG("bo_create: %p (%s) %ldb\n", &ttm_buf->bo, ttm_buf->name, size);
-
- return &ttm_buf->bo;
-}
-
-/* Our TTM backend doesn't allow creation of static buffers, as that requires
- * privilege for the non-fake case, and the lock in the fake case where we were
- * working around the X Server not creating buffers and passing handles to us.
- */
-static dri_bo *
-dri_ttm_alloc_static(dri_bufmgr *bufmgr, const char *name,
- unsigned long offset, unsigned long size, void *virtual,
- uint64_t location_mask)
-{
- return NULL;
-}
-
-/**
- * Returns a dri_bo wrapping the given buffer object handle.
- *
- * This can be used when one application needs to pass a buffer object
- * to another.
- */
-dri_bo *
-intel_ttm_bo_create_from_handle(dri_bufmgr *bufmgr, const char *name,
- unsigned int handle)
-{
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)bufmgr;
- dri_bo_ttm *ttm_buf;
- int ret;
-
- ttm_buf = calloc(1, sizeof(*ttm_buf));
- if (!ttm_buf)
- return NULL;
-
- ret = drmBOReference(bufmgr_ttm->fd, handle, &ttm_buf->drm_bo);
- if (ret != 0) {
- fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
- name, handle, strerror(-ret));
- free(ttm_buf);
- return NULL;
- }
- ttm_buf->bo.size = ttm_buf->drm_bo.size;
- ttm_buf->bo.offset = ttm_buf->drm_bo.offset;
- ttm_buf->bo.virtual = NULL;
- ttm_buf->bo.bufmgr = bufmgr;
- ttm_buf->name = name;
- ttm_buf->refcount = 1;
- ttm_buf->reloc_buf_data = NULL;
- ttm_buf->relocs = NULL;
- ttm_buf->last_flags = ttm_buf->drm_bo.flags;
- ttm_buf->shared = GL_TRUE;
- ttm_buf->delayed_unmap = GL_FALSE;
- ttm_buf->validate_index = -1;
-
- DBG("bo_create_from_handle: %p %08x (%s)\n",
- &ttm_buf->bo, handle, ttm_buf->name);
-
- return &ttm_buf->bo;
-}
-
-static void
-dri_ttm_bo_reference(dri_bo *buf)
-{
- dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
-
- ttm_buf->refcount++;
-}
-
-static void
-dri_ttm_bo_unreference(dri_bo *buf)
-{
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
- dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
-
- if (!buf)
- return;
-
- if (--ttm_buf->refcount == 0) {
- struct dri_ttm_bo_bucket *bucket;
- int ret;
-
- assert(ttm_buf->map_count == 0);
-
- if (ttm_buf->reloc_buf_data) {
- int i;
-
- /* Unreference all the target buffers */
- for (i = 0; i < (ttm_buf->reloc_buf_data[0] & 0xffff); i++)
- dri_bo_unreference(ttm_buf->relocs[i].target_buf);
- free(ttm_buf->relocs);
-
- /* Free the kernel BO containing relocation entries */
- free(ttm_buf->reloc_buf_data);
- ttm_buf->reloc_buf_data = NULL;
- }
-
- if (ttm_buf->delayed_unmap) {
- int ret = drmBOUnmap(bufmgr_ttm->fd, &ttm_buf->drm_bo);
-
- if (ret != 0) {
- fprintf(stderr, "%s:%d: Error unmapping buffer %s: %s.\n",
- __FILE__, __LINE__, ttm_buf->name, strerror(-ret));
- }
- }
-
- bucket = dri_ttm_bo_bucket_for_size(bufmgr_ttm, ttm_buf->drm_bo.size);
- /* Put the buffer into our internal cache for reuse if we can. */
- if (!ttm_buf->shared &&
- bucket != NULL &&
- (bucket->max_entries == -1 ||
- (bucket->max_entries > 0 &&
- bucket->num_entries < bucket->max_entries)))
- {
- struct dri_ttm_bo_bucket_entry *entry;
-
- entry = calloc(1, sizeof(*entry));
- entry->drm_bo = ttm_buf->drm_bo;
-
- entry->next = NULL;
- *bucket->tail = entry;
- bucket->tail = &entry->next;
- bucket->num_entries++;
- } else {
- /* Decrement the kernel refcount for the buffer. */
- ret = drmBOUnreference(bufmgr_ttm->fd, &ttm_buf->drm_bo);
- if (ret != 0) {
- fprintf(stderr, "drmBOUnreference failed (%s): %s\n",
- ttm_buf->name, strerror(-ret));
- }
- }
-
- DBG("bo_unreference final: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
-
- free(buf);
- return;
- }
-}
-
-static int
-dri_ttm_bo_map(dri_bo *buf, GLboolean write_enable)
-{
- dri_bufmgr_ttm *bufmgr_ttm;
- dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
- uint64_t flags;
- int ret;
-
- bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
-
- flags = DRM_BO_FLAG_READ;
- if (write_enable)
- flags |= DRM_BO_FLAG_WRITE;
-
- /* Allow recursive mapping. Mesa may recursively map buffers with
- * nested display loops.
- */
- if (ttm_buf->map_count++ != 0)
- return 0;
-
- assert(buf->virtual == NULL);
-
- DBG("bo_map: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
-
- /* XXX: What about if we're upgrading from READ to WRITE? */
- if (ttm_buf->delayed_unmap) {
- buf->virtual = ttm_buf->saved_virtual;
- return 0;
- }
-
- ret = drmBOMap(bufmgr_ttm->fd, &ttm_buf->drm_bo, flags, 0, &buf->virtual);
- if (ret != 0) {
- fprintf(stderr, "%s:%d: Error mapping buffer %s: %s .\n",
- __FILE__, __LINE__, ttm_buf->name, strerror(-ret));
- }
-
- return ret;
-}
-
-static int
-dri_ttm_bo_unmap(dri_bo *buf)
-{
- dri_bufmgr_ttm *bufmgr_ttm;
- dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
- int ret;
-
- if (buf == NULL)
- return 0;
-
- assert(ttm_buf->map_count != 0);
- if (--ttm_buf->map_count != 0)
- return 0;
-
- bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
-
- assert(buf->virtual != NULL);
-
- DBG("bo_unmap: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
-
- if (!ttm_buf->shared) {
- ttm_buf->saved_virtual = buf->virtual;
- ttm_buf->delayed_unmap = GL_TRUE;
- buf->virtual = NULL;
-
- return 0;
- }
-
- buf->virtual = NULL;
-
- ret = drmBOUnmap(bufmgr_ttm->fd, &ttm_buf->drm_bo);
- if (ret != 0) {
- fprintf(stderr, "%s:%d: Error unmapping buffer %s: %s.\n",
- __FILE__, __LINE__, ttm_buf->name, strerror(-ret));
- }
-
- return ret;
-}
-
-/**
- * Returns a dri_fence wrapping the given DRM fence arg.
- *
- * This can be used when one piece of code needs to pass a fence
- * to another.
- */
-dri_fence *
-intel_ttm_fence_create_from_arg(dri_bufmgr *bufmgr, const char *name,
- drm_fence_arg_t *arg)
-{
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)bufmgr;
- dri_fence_ttm *ttm_fence;
-
- ttm_fence = malloc(sizeof(*ttm_fence));
- if (!ttm_fence)
- return NULL;
-
- ttm_fence->drm_fence.handle = arg->handle;
- ttm_fence->drm_fence.fence_class = arg->fence_class;
- ttm_fence->drm_fence.type = arg->type;
- ttm_fence->drm_fence.flags = arg->flags;
- ttm_fence->drm_fence.signaled = 0;
- ttm_fence->drm_fence.sequence = arg->sequence;
-
- ttm_fence->fence.bufmgr = bufmgr;
- ttm_fence->name = name;
- ttm_fence->refcount = 1;
-
- DBG("fence_create_from_handle: %p (%s)\n",
- &ttm_fence->fence, ttm_fence->name);
-
- return &ttm_fence->fence;
-}
-
-
-static void
-dri_ttm_fence_reference(dri_fence *fence)
-{
- dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;
-
- ++fence_ttm->refcount;
- DBG("fence_reference: %p (%s)\n", &fence_ttm->fence, fence_ttm->name);
-}
-
-static void
-dri_ttm_fence_unreference(dri_fence *fence)
-{
- dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;
-
- if (!fence)
- return;
-
- DBG("fence_unreference: %p (%s)\n", &fence_ttm->fence, fence_ttm->name);
-
- if (--fence_ttm->refcount == 0) {
- int ret;
-
- ret = drmFenceUnreference(bufmgr_ttm->fd, &fence_ttm->drm_fence);
- if (ret != 0) {
- fprintf(stderr, "drmFenceUnreference failed (%s): %s\n",
- fence_ttm->name, strerror(-ret));
- }
-
- free(fence);
- return;
- }
-}
-
-static void
-dri_ttm_fence_wait(dri_fence *fence)
-{
- dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;
- int ret;
-
- ret = drmFenceWait(bufmgr_ttm->fd, DRM_FENCE_FLAG_WAIT_LAZY, &fence_ttm->drm_fence, 0);
- if (ret != 0) {
- fprintf(stderr, "%s:%d: Error waiting for fence %s: %s.\n",
- __FILE__, __LINE__, fence_ttm->name, strerror(-ret));
- abort();
- }
-
- DBG("fence_wait: %p (%s)\n", &fence_ttm->fence, fence_ttm->name);
-}
-
-static void
-dri_bufmgr_ttm_destroy(dri_bufmgr *bufmgr)
-{
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)bufmgr;
- int i;
-
- free(bufmgr_ttm->validate_array);
-
- /* Free any cached buffer objects we were going to reuse */
- for (i = 0; i < INTEL_TTM_BO_BUCKETS; i++) {
- struct dri_ttm_bo_bucket *bucket = &bufmgr_ttm->cache_bucket[i];
- struct dri_ttm_bo_bucket_entry *entry;
-
- while ((entry = bucket->head) != NULL) {
- int ret;
-
- bucket->head = entry->next;
- if (entry->next == NULL)
- bucket->tail = &bucket->head;
- bucket->num_entries--;
-
- /* Decrement the kernel refcount for the buffer. */
- ret = drmBOUnreference(bufmgr_ttm->fd, &entry->drm_bo);
- if (ret != 0) {
- fprintf(stderr, "drmBOUnreference failed: %s\n",
- strerror(-ret));
- }
-
- free(entry);
- }
- }
-
- free(bufmgr);
-}
-
-/**
- * Adds the target buffer to the validation list and adds the relocation
- * to the reloc_buffer's relocation list.
- *
- * The relocation entry at the given offset must already contain the
- * precomputed relocation value, because the kernel will optimize out
- * the relocation entry write when the buffer hasn't moved from the
- * last known offset in target_buf.
- */
-static int
-dri_ttm_emit_reloc(dri_bo *reloc_buf, uint64_t flags, GLuint delta,
- GLuint offset, dri_bo *target_buf)
-{
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)reloc_buf->bufmgr;
- dri_bo_ttm *reloc_buf_ttm = (dri_bo_ttm *)reloc_buf;
- dri_bo_ttm *target_buf_ttm = (dri_bo_ttm *)target_buf;
- int num_relocs;
- uint32_t *this_reloc;
-
- /* Create a new relocation list if needed */
- if (reloc_buf_ttm->reloc_buf_data == NULL)
- intel_setup_reloc_list(reloc_buf);
-
- num_relocs = reloc_buf_ttm->reloc_buf_data[0];
-
- /* Check overflow */
- assert(num_relocs < bufmgr_ttm->max_relocs);
-
- this_reloc = reloc_buf_ttm->reloc_buf_data + I915_RELOC_HEADER +
- num_relocs * I915_RELOC0_STRIDE;
-
- this_reloc[0] = offset;
- this_reloc[1] = delta;
- this_reloc[2] = target_buf_ttm->drm_bo.handle; /* To be filled in at exec time */
- this_reloc[3] = 0;
-
- reloc_buf_ttm->relocs[num_relocs].validate_flags = flags;
- reloc_buf_ttm->relocs[num_relocs].target_buf = target_buf;
- dri_bo_reference(target_buf);
-
- reloc_buf_ttm->reloc_buf_data[0]++; /* Increment relocation count */
- /* Check wraparound */
- assert(reloc_buf_ttm->reloc_buf_data[0] != 0);
- return 0;
-}
-
-/**
- * Walk the tree of relocations rooted at BO and accumulate the list of
- * validations to be performed and update the relocation buffers with
- * index values into the validation list.
- */
-static void
-dri_ttm_bo_process_reloc(dri_bo *bo)
-{
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)bo->bufmgr;
- dri_bo_ttm *bo_ttm = (dri_bo_ttm *)bo;
- unsigned int nr_relocs;
- int i;
-
- if (bo_ttm->reloc_buf_data == NULL)
- return;
-
- nr_relocs = bo_ttm->reloc_buf_data[0] & 0xffff;
-
- for (i = 0; i < nr_relocs; i++) {
- struct dri_ttm_reloc *r = &bo_ttm->relocs[i];
-
- /* Continue walking the tree depth-first. */
- dri_ttm_bo_process_reloc(r->target_buf);
-
- /* Add the target to the validate list */
- intel_add_validate_buffer(r->target_buf, r->validate_flags);
-
- /* Clear the PRESUMED_OFFSET flag from the validate list entry of the
- * target if this buffer has a stale relocated pointer at it.
- */
- if (r->last_target_offset != r->target_buf->offset) {
- dri_bo_ttm *target_buf_ttm = (dri_bo_ttm *)r->target_buf;
- struct intel_validate_entry *entry =
- &bufmgr_ttm->validate_array[target_buf_ttm->validate_index];
-
- entry->bo_arg.d.req.bo_req.hint &= ~DRM_BO_HINT_PRESUMED_OFFSET;
- }
- }
-}
-
-static void *
-dri_ttm_process_reloc(dri_bo *batch_buf, GLuint *count)
-{
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)batch_buf->bufmgr;
-
- /* Update indices and set up the validate list. */
- dri_ttm_bo_process_reloc(batch_buf);
-
- /* Add the batch buffer to the validation list. There are no relocations
- * pointing to it.
- */
- intel_add_validate_buffer(batch_buf,
- DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE);
-
- *count = bufmgr_ttm->validate_count;
- return &bufmgr_ttm->validate_array[0].bo_arg;
-}
-
-static const char *
-intel_get_flags_mem_type_string(uint64_t flags)
-{
- switch (flags & DRM_BO_MASK_MEM) {
- case DRM_BO_FLAG_MEM_LOCAL: return "local";
- case DRM_BO_FLAG_MEM_TT: return "ttm";
- case DRM_BO_FLAG_MEM_VRAM: return "vram";
- case DRM_BO_FLAG_MEM_PRIV0: return "priv0";
- case DRM_BO_FLAG_MEM_PRIV1: return "priv1";
- case DRM_BO_FLAG_MEM_PRIV2: return "priv2";
- case DRM_BO_FLAG_MEM_PRIV3: return "priv3";
- case DRM_BO_FLAG_MEM_PRIV4: return "priv4";
- default: return NULL;
- }
-}
-
-static const char *
-intel_get_flags_caching_string(uint64_t flags)
-{
- switch (flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_CACHED_MAPPED)) {
- case 0: return "UU";
- case DRM_BO_FLAG_CACHED: return "CU";
- case DRM_BO_FLAG_CACHED_MAPPED: return "UC";
- case DRM_BO_FLAG_CACHED | DRM_BO_FLAG_CACHED_MAPPED: return "CC";
- default: return NULL;
- }
-}
-
-static void
-intel_update_buffer_offsets (dri_bufmgr_ttm *bufmgr_ttm)
-{
- int i;
-
- for (i = 0; i < bufmgr_ttm->validate_count; i++) {
- dri_bo *bo = bufmgr_ttm->validate_array[i].bo;
- dri_bo_ttm *bo_ttm = (dri_bo_ttm *)bo;
- struct drm_i915_op_arg *arg = &bufmgr_ttm->validate_array[i].bo_arg;
- struct drm_bo_arg_rep *rep = &arg->d.rep;
-
- /* Update the flags */
- if (rep->bo_info.flags != bo_ttm->last_flags) {
- DBG("BO %s migrated: %s/%s -> %s/%s\n",
- bo_ttm->name,
- intel_get_flags_mem_type_string(bo_ttm->last_flags),
- intel_get_flags_caching_string(bo_ttm->last_flags),
- intel_get_flags_mem_type_string(rep->bo_info.flags),
- intel_get_flags_caching_string(rep->bo_info.flags));
-
- bo_ttm->last_flags = rep->bo_info.flags;
- }
- /* Update the buffer offset */
- if (rep->bo_info.offset != bo->offset) {
- DBG("BO %s migrated: 0x%08lx -> 0x%08lx\n",
- bo_ttm->name, bo->offset, (unsigned long)rep->bo_info.offset);
- bo->offset = rep->bo_info.offset;
- }
- }
-}
-
-/**
- * Record each relocation target's current offset so the next submission can
- * decide whether the PRESUMED_OFFSET hint is still valid.
- */
-static void
-dri_ttm_bo_post_submit(dri_bo *bo)
-{
- dri_bo_ttm *bo_ttm = (dri_bo_ttm *)bo;
- unsigned int nr_relocs;
- int i;
-
- if (bo_ttm->reloc_buf_data == NULL)
- return;
-
- nr_relocs = bo_ttm->reloc_buf_data[0] & 0xffff;
-
- for (i = 0; i < nr_relocs; i++) {
- struct dri_ttm_reloc *r = &bo_ttm->relocs[i];
-
- /* Continue walking the tree depth-first. */
- dri_ttm_bo_post_submit(r->target_buf);
-
- r->last_target_offset = r->target_buf->offset;
- }
-}
-
-static void
-dri_ttm_post_submit(dri_bo *batch_buf, dri_fence **last_fence)
-{
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)batch_buf->bufmgr;
- int i;
-
- intel_update_buffer_offsets (bufmgr_ttm);
-
- dri_ttm_bo_post_submit(batch_buf);
-
- if (bufmgr_ttm->bufmgr.debug)
- dri_ttm_dump_validation_list(bufmgr_ttm);
-
- for (i = 0; i < bufmgr_ttm->validate_count; i++) {
- dri_bo *bo = bufmgr_ttm->validate_array[i].bo;
- dri_bo_ttm *bo_ttm = (dri_bo_ttm *)bo;
-
- /* Disconnect the buffer from the validate list */
- bo_ttm->validate_index = -1;
- dri_bo_unreference(bo);
- bufmgr_ttm->validate_array[i].bo = NULL;
- }
- bufmgr_ttm->validate_count = 0;
-}
-
-/**
- * Enables unlimited caching of buffer objects for reuse.
- *
- * This is potentially very memory expensive, as the cache at each bucket
- * size is only bounded by how many buffers of that size we've managed to have
- * in flight at once.
- */
-void
-intel_ttm_enable_bo_reuse(dri_bufmgr *bufmgr)
-{
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)bufmgr;
- int i;
-
- for (i = 0; i < INTEL_TTM_BO_BUCKETS; i++) {
- bufmgr_ttm->cache_bucket[i].max_entries = -1;
- }
-}
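intel_ttm_enable_bo_reuse() is part of the TTM path this patch removes; its GEM counterpart survives and is called as intel_bufmgr_gem_enable_reuse() in the intel_context.c hunk further down. As a hedged sketch of how a caller would have enabled reuse on the old TTM manager (the fd, fence, and batch-size arguments are placeholders, not values from this patch):

    #include "dri_bufmgr.h"
    #include "intel_bufmgr_ttm.h"

    /* Hypothetical call site; all arguments are caller-supplied placeholders. */
    static dri_bufmgr *
    init_ttm_bufmgr_with_reuse(int fd, unsigned int fence_type,
                               unsigned int fence_type_flush, int batch_size)
    {
        dri_bufmgr *bufmgr = intel_bufmgr_ttm_init(fd, fence_type,
                                                   fence_type_flush, batch_size);

        if (bufmgr != NULL)
            intel_ttm_enable_bo_reuse(bufmgr);
        return bufmgr;
    }
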
-
-/**
- * No-op aperture check for the TTM backend; it always returns 0.
- */
-static int
-dri_ttm_check_aperture_space(dri_bo *bo)
-{
- return 0;
-}
-
-/**
- * Initializes the TTM buffer manager, which uses the kernel to allocate,
- * map, and manage buffer objects.
- *
- * \param fd File descriptor of the opened DRM device.
- * \param fence_type Driver-specific fence type used for fences with no flush.
- * \param fence_type_flush Driver-specific fence type used for fences with a
- * flush.
- */
-dri_bufmgr *
-intel_bufmgr_ttm_init(int fd, unsigned int fence_type,
- unsigned int fence_type_flush, int batch_size)
-{
- dri_bufmgr_ttm *bufmgr_ttm;
- int i;
-
- bufmgr_ttm = calloc(1, sizeof(*bufmgr_ttm));
- bufmgr_ttm->fd = fd;
- bufmgr_ttm->fence_type = fence_type;
- bufmgr_ttm->fence_type_flush = fence_type_flush;
-
- /* Let's go with one relocation for every two dwords (but round down a bit,
- * since a power of two would mean an extra page allocation for the reloc
- * buffer).
- *
- * One relocation per four dwords was too few for the blender benchmark.
- */
- bufmgr_ttm->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
-
- bufmgr_ttm->bufmgr.bo_alloc = dri_ttm_alloc;
- bufmgr_ttm->bufmgr.bo_alloc_static = dri_ttm_alloc_static;
- bufmgr_ttm->bufmgr.bo_reference = dri_ttm_bo_reference;
- bufmgr_ttm->bufmgr.bo_unreference = dri_ttm_bo_unreference;
- bufmgr_ttm->bufmgr.bo_map = dri_ttm_bo_map;
- bufmgr_ttm->bufmgr.bo_unmap = dri_ttm_bo_unmap;
- bufmgr_ttm->bufmgr.fence_reference = dri_ttm_fence_reference;
- bufmgr_ttm->bufmgr.fence_unreference = dri_ttm_fence_unreference;
- bufmgr_ttm->bufmgr.fence_wait = dri_ttm_fence_wait;
- bufmgr_ttm->bufmgr.destroy = dri_bufmgr_ttm_destroy;
- bufmgr_ttm->bufmgr.emit_reloc = dri_ttm_emit_reloc;
- bufmgr_ttm->bufmgr.process_relocs = dri_ttm_process_reloc;
- bufmgr_ttm->bufmgr.post_submit = dri_ttm_post_submit;
- bufmgr_ttm->bufmgr.debug = GL_FALSE;
- bufmgr_ttm->bufmgr.check_aperture_space = dri_ttm_check_aperture_space;
- /* Initialize the linked lists for BO reuse cache. */
- for (i = 0; i < INTEL_TTM_BO_BUCKETS; i++)
- bufmgr_ttm->cache_bucket[i].tail = &bufmgr_ttm->cache_bucket[i].head;
-
- return &bufmgr_ttm->bufmgr;
-}
-
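As a quick check on the relocation-sizing comment in intel_bufmgr_ttm_init() above: assuming, purely for illustration, a 16 KiB batch buffer, max_relocs comes out to 16384 / sizeof(uint32_t) / 2 - 2 = 16384 / 4 / 2 - 2 = 2046 relocations, just under the power-of-two figure of 2048 that the comment deliberately rounds away from.
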
diff --git a/src/mesa/drivers/dri/intel/intel_bufmgr_ttm.h b/src/mesa/drivers/dri/intel/intel_bufmgr_ttm.h
deleted file mode 100644
index d267a168cd4..00000000000
--- a/src/mesa/drivers/dri/intel/intel_bufmgr_ttm.h
+++ /dev/null
@@ -1,20 +0,0 @@
-
-#ifndef INTEL_BUFMGR_TTM_H
-#define INTEL_BUFMGR_TTM_H
-
-#include "dri_bufmgr.h"
-
-extern dri_bo *intel_ttm_bo_create_from_handle(dri_bufmgr *bufmgr, const char *name,
- unsigned int handle);
-
-dri_fence *intel_ttm_fence_create_from_arg(dri_bufmgr *bufmgr, const char *name,
- drm_fence_arg_t *arg);
-
-
-dri_bufmgr *intel_bufmgr_ttm_init(int fd, unsigned int fence_type,
- unsigned int fence_type_flush, int batch_size);
-
-void
-intel_ttm_enable_bo_reuse(dri_bufmgr *bufmgr);
-
-#endif
diff --git a/src/mesa/drivers/dri/intel/intel_context.c b/src/mesa/drivers/dri/intel/intel_context.c
index e1941c302ce..f33805ba050 100644
--- a/src/mesa/drivers/dri/intel/intel_context.c
+++ b/src/mesa/drivers/dri/intel/intel_context.c
@@ -59,8 +59,7 @@
#include "intel_buffer_objects.h"
#include "intel_fbo.h"
#include "intel_decode.h"
-#include "intel_bufmgr_fake.h"
-#include "intel_bufmgr_gem.h"
+#include "intel_bufmgr.h"
#include "drirenderbuffer.h"
#include "vblank.h"
@@ -474,7 +473,7 @@ intel_init_bufmgr(struct intel_context *intel)
case DRI_CONF_BO_REUSE_DISABLED:
break;
case DRI_CONF_BO_REUSE_ALL:
- intel_gem_enable_bo_reuse(intel->bufmgr);
+ intel_bufmgr_gem_enable_reuse(intel->bufmgr);
break;
}
}
@@ -493,12 +492,12 @@ intel_init_bufmgr(struct intel_context *intel)
return GL_FALSE;
}
- intel->bufmgr = dri_bufmgr_fake_init(intelScreen->tex.offset,
- intelScreen->tex.map,
- intelScreen->tex.size,
- intel_fence_emit,
- intel_fence_wait,
- intel);
+ intel->bufmgr = intel_bufmgr_fake_init(intelScreen->tex.offset,
+ intelScreen->tex.map,
+ intelScreen->tex.size,
+ intel_fence_emit,
+ intel_fence_wait,
+ intel);
}
/* XXX bufmgr should be per-screen, not per-context */
@@ -873,7 +872,7 @@ intelContendedLock(struct intel_context *intel, GLuint flags)
*/
if (!intel->ttm && sarea->texAge != intel->hHWContext) {
sarea->texAge = intel->hHWContext;
- dri_bufmgr_fake_contended_lock_take(intel->bufmgr);
+ intel_bufmgr_fake_contended_lock_take(intel->bufmgr);
if (INTEL_DEBUG & DEBUG_BATCH)
intel_decode_context_reset();
if (INTEL_DEBUG & DEBUG_BUFMGR)
diff --git a/src/mesa/drivers/dri/intel/intel_context.h b/src/mesa/drivers/dri/intel/intel_context.h
index 35ef22aa270..579883437fc 100644
--- a/src/mesa/drivers/dri/intel/intel_context.h
+++ b/src/mesa/drivers/dri/intel/intel_context.h
@@ -35,6 +35,7 @@
#include "mm.h"
#include "texmem.h"
#include "dri_bufmgr.h"
+#include "intel_bufmgr.h"
#include "intel_screen.h"
#include "intel_tex_obj.h"
diff --git a/src/mesa/drivers/dri/intel/intel_ioctl.c b/src/mesa/drivers/dri/intel/intel_ioctl.c
index 591548ae85b..58c81766cdd 100644
--- a/src/mesa/drivers/dri/intel/intel_ioctl.c
+++ b/src/mesa/drivers/dri/intel/intel_ioctl.c
@@ -45,7 +45,7 @@
#include "drm.h"
#include "i915_drm.h"
-#include "intel_bufmgr_gem.h"
+#include "intel_bufmgr.h"
#define FILE_DEBUG_FLAG DEBUG_IOCTL
diff --git a/src/mesa/drivers/dri/intel/intel_regions.c b/src/mesa/drivers/dri/intel/intel_regions.c
index 7d78e4eca72..c7e2c551ddf 100644
--- a/src/mesa/drivers/dri/intel/intel_regions.c
+++ b/src/mesa/drivers/dri/intel/intel_regions.c
@@ -44,7 +44,7 @@
#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "dri_bufmgr.h"
-#include "intel_bufmgr_gem.h"
+#include "intel_bufmgr.h"
#include "intel_batchbuffer.h"
#define FILE_DEBUG_FLAG DEBUG_REGION
@@ -106,10 +106,7 @@ intel_region_alloc(struct intel_context *intel,
dri_bo *buffer;
buffer = dri_bo_alloc(intel->bufmgr, "region",
- pitch * cpp * height, 64,
- DRM_BO_FLAG_MEM_LOCAL |
- DRM_BO_FLAG_CACHED |
- DRM_BO_FLAG_CACHED_MAPPED);
+ pitch * cpp * height, 64);
return intel_region_alloc_internal(intel, cpp, pitch, height, 0, buffer);
}
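The hunk above also shows the slimmed-down allocation interface: dri_bo_alloc() no longer takes DRM_BO_FLAG_* placement flags, presumably because placement is now the buffer manager's decision rather than the caller's. The post-patch call, read straight from the hunk:

    /* bufmgr, debug name, size in bytes, alignment -- no placement flags */
    buffer = dri_bo_alloc(intel->bufmgr, "region",
                          pitch * cpp * height, 64);
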
@@ -121,7 +118,7 @@ intel_region_alloc_for_handle(struct intel_context *intel,
{
dri_bo *buffer;
- buffer = intel_gem_bo_create_from_handle(intel->bufmgr, "region", handle);
+ buffer = intel_bo_gem_create_from_name(intel->bufmgr, "region", handle);
return intel_region_alloc_internal(intel,
cpp, pitch, height, tiled, buffer);
@@ -355,10 +352,7 @@ intel_region_release_pbo(struct intel_context *intel,
region->buffer = dri_bo_alloc(intel->bufmgr, "region",
region->pitch * region->cpp * region->height,
- 64,
- DRM_BO_FLAG_MEM_LOCAL |
- DRM_BO_FLAG_CACHED |
- DRM_BO_FLAG_CACHED_MAPPED);
+ 64);
}
/* Break the COW tie to the pbo. Both the pbo and the region end up
@@ -440,17 +434,16 @@ intel_recreate_static(struct intel_context *intel,
if (intel->ttm) {
assert(region_desc->bo_handle != -1);
- region->buffer = intel_gem_bo_create_from_handle(intel->bufmgr,
- name,
- region_desc->bo_handle);
+ region->buffer = intel_bo_gem_create_from_name(intel->bufmgr,
+ name,
+ region_desc->bo_handle);
} else {
- region->buffer = dri_bo_alloc_static(intel->bufmgr,
- name,
- region_desc->offset,
- intelScreen->pitch *
- intelScreen->height,
- region_desc->map,
- DRM_BO_FLAG_MEM_TT);
+ region->buffer = intel_bo_fake_alloc_static(intel->bufmgr,
+ name,
+ region_desc->offset,
+ intelScreen->pitch *
+ intelScreen->height,
+ region_desc->map);
}
assert(region->buffer != NULL);
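Taken together, the rename hunks in this file and in intel_context.c follow one pattern: backend-specific entry points move behind the single intel_bufmgr.h include and gain intel_bufmgr_* / intel_bo_* prefixes. Read directly off the hunks above:

    /* removed entry point                       replacement in this patch
     * intel_gem_enable_bo_reuse()            -> intel_bufmgr_gem_enable_reuse()
     * dri_bufmgr_fake_init()                 -> intel_bufmgr_fake_init()
     * dri_bufmgr_fake_contended_lock_take()  -> intel_bufmgr_fake_contended_lock_take()
     * intel_gem_bo_create_from_handle()      -> intel_bo_gem_create_from_name()
     * dri_bo_alloc_static()                  -> intel_bo_fake_alloc_static()
     */
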
diff --git a/src/mesa/drivers/dri/intel/intel_screen.c b/src/mesa/drivers/dri/intel/intel_screen.c
index a243324a39b..f325e703595 100644
--- a/src/mesa/drivers/dri/intel/intel_screen.c
+++ b/src/mesa/drivers/dri/intel/intel_screen.c
@@ -49,7 +49,7 @@
#include "i830_dri.h"
#include "intel_regions.h"
#include "intel_batchbuffer.h"
-#include "intel_bufmgr_gem.h"
+#include "intel_bufmgr.h"
PUBLIC const char __driConfigOptions[] =
DRI_CONF_BEGIN