Diffstat (limited to 'src/mesa/drivers')
-rw-r--r--  src/mesa/drivers/dri/i965/brw_bufmgr.h | 66
-rw-r--r--  src/mesa/drivers/dri/i965/brw_compute.c | 4
-rw-r--r--  src/mesa/drivers/dri/i965/brw_context.c | 32
-rw-r--r--  src/mesa/drivers/dri/i965/brw_context.h | 64
-rw-r--r--  src/mesa/drivers/dri/i965/brw_cs.c | 4
-rw-r--r--  src/mesa/drivers/dri/i965/brw_draw.c | 14
-rw-r--r--  src/mesa/drivers/dri/i965/brw_draw.h | 2
-rw-r--r--  src/mesa/drivers/dri/i965/brw_draw_upload.c | 12
-rw-r--r--  src/mesa/drivers/dri/i965/brw_gs.c | 4
-rw-r--r--  src/mesa/drivers/dri/i965/brw_object_purgeable.c | 10
-rw-r--r--  src/mesa/drivers/dri/i965/brw_performance_query.c | 44
-rw-r--r--  src/mesa/drivers/dri/i965/brw_pipe_control.c | 10
-rw-r--r--  src/mesa/drivers/dri/i965/brw_program.c | 24
-rw-r--r--  src/mesa/drivers/dri/i965/brw_program_cache.c | 34
-rw-r--r--  src/mesa/drivers/dri/i965/brw_queryobj.c | 28
-rw-r--r--  src/mesa/drivers/dri/i965/brw_state.h | 2
-rw-r--r--  src/mesa/drivers/dri/i965/brw_sync.c | 16
-rw-r--r--  src/mesa/drivers/dri/i965/brw_tcs.c | 4
-rw-r--r--  src/mesa/drivers/dri/i965/brw_tes.c | 4
-rw-r--r--  src/mesa/drivers/dri/i965/brw_vs.c | 4
-rw-r--r--  src/mesa/drivers/dri/i965/brw_vs_surface_state.c | 4
-rw-r--r--  src/mesa/drivers/dri/i965/brw_wm.c | 4
-rw-r--r--  src/mesa/drivers/dri/i965/brw_wm_surface_state.c | 22
-rw-r--r--  src/mesa/drivers/dri/i965/gen6_queryobj.c | 20
-rw-r--r--  src/mesa/drivers/dri/i965/gen6_sol.c | 14
-rw-r--r--  src/mesa/drivers/dri/i965/gen7_sol_state.c | 2
-rw-r--r--  src/mesa/drivers/dri/i965/gen8_sol_state.c | 2
-rw-r--r--  src/mesa/drivers/dri/i965/genX_blorp_exec.c | 4
-rw-r--r--  src/mesa/drivers/dri/i965/hsw_queryobj.c | 6
-rw-r--r--  src/mesa/drivers/dri/i965/intel_batchbuffer.c | 60
-rw-r--r--  src/mesa/drivers/dri/i965/intel_batchbuffer.h | 4
-rw-r--r--  src/mesa/drivers/dri/i965/intel_blit.c | 10
-rw-r--r--  src/mesa/drivers/dri/i965/intel_blit.h | 10
-rw-r--r--  src/mesa/drivers/dri/i965/intel_buffer_objects.c | 62
-rw-r--r--  src/mesa/drivers/dri/i965/intel_buffer_objects.h | 10
-rw-r--r--  src/mesa/drivers/dri/i965/intel_bufmgr_gem.c | 146
-rw-r--r--  src/mesa/drivers/dri/i965/intel_fbo.c | 4
-rw-r--r--  src/mesa/drivers/dri/i965/intel_fbo.h | 4
-rw-r--r--  src/mesa/drivers/dri/i965/intel_image.h | 2
-rw-r--r--  src/mesa/drivers/dri/i965/intel_mipmap_tree.c | 64
-rw-r--r--  src/mesa/drivers/dri/i965/intel_mipmap_tree.h | 10
-rw-r--r--  src/mesa/drivers/dri/i965/intel_pixel_draw.c | 2
-rw-r--r--  src/mesa/drivers/dri/i965/intel_pixel_read.c | 6
-rw-r--r--  src/mesa/drivers/dri/i965/intel_screen.c | 60
-rw-r--r--  src/mesa/drivers/dri/i965/intel_tex.c | 2
-rw-r--r--  src/mesa/drivers/dri/i965/intel_tex_image.c | 8
-rw-r--r--  src/mesa/drivers/dri/i965/intel_tex_subimage.c | 8
-rw-r--r--  src/mesa/drivers/dri/i965/intel_upload.c | 20
48 files changed, 475 insertions(+), 477 deletions(-)
diff --git a/src/mesa/drivers/dri/i965/brw_bufmgr.h b/src/mesa/drivers/dri/i965/brw_bufmgr.h
index 7609425a35d..c05b67dda7f 100644
--- a/src/mesa/drivers/dri/i965/brw_bufmgr.h
+++ b/src/mesa/drivers/dri/i965/brw_bufmgr.h
@@ -45,9 +45,7 @@ extern "C" {
struct gen_device_info;
-typedef struct _drm_bacon_bo drm_bacon_bo;
-
-struct _drm_bacon_bo {
+struct brw_bo {
/**
* Size in bytes of the buffer object.
*
@@ -82,7 +80,7 @@ struct _drm_bacon_bo {
/**
* Last seen card virtual address (offset from the beginning of the
* aperture) for the object. This should be used to fill relocation
- * entries when calling drm_bacon_bo_emit_reloc()
+ * entries when calling brw_bo_emit_reloc()
*/
uint64_t offset64;
@@ -139,9 +137,9 @@ struct _drm_bacon_bo {
*
* Buffer objects are not necessarily initially mapped into CPU virtual
* address space or graphics device aperture. They must be mapped
- * using bo_map() or drm_bacon_gem_bo_map_gtt() to be used by the CPU.
+ * using bo_map() or brw_bo_map_gtt() to be used by the CPU.
*/
-drm_bacon_bo *drm_bacon_bo_alloc(struct brw_bufmgr *bufmgr, const char *name,
+struct brw_bo *brw_bo_alloc(struct brw_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment);
/**
* Allocate a buffer object, hinting that it will be used as a
@@ -149,7 +147,7 @@ drm_bacon_bo *drm_bacon_bo_alloc(struct brw_bufmgr *bufmgr, const char *name,
*
* This is otherwise the same as bo_alloc.
*/
-drm_bacon_bo *drm_bacon_bo_alloc_for_render(struct brw_bufmgr *bufmgr,
+struct brw_bo *brw_bo_alloc_for_render(struct brw_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment);
@@ -169,7 +167,7 @@ drm_bacon_bo *drm_bacon_bo_alloc_for_render(struct brw_bufmgr *bufmgr,
* 'tiling_mode' field on return, as well as the pitch value, which
* may have been rounded up to accommodate for tiling restrictions.
*/
-drm_bacon_bo *drm_bacon_bo_alloc_tiled(struct brw_bufmgr *bufmgr,
+struct brw_bo *brw_bo_alloc_tiled(struct brw_bufmgr *bufmgr,
const char *name,
int x, int y, int cpp,
uint32_t *tiling_mode,
@@ -177,13 +175,13 @@ drm_bacon_bo *drm_bacon_bo_alloc_tiled(struct brw_bufmgr *bufmgr,
unsigned long flags);
/** Takes a reference on a buffer object */
-void drm_bacon_bo_reference(drm_bacon_bo *bo);
+void brw_bo_reference(struct brw_bo *bo);
/**
* Releases a reference on a buffer object, freeing the data if
* no references remain.
*/
-void drm_bacon_bo_unreference(drm_bacon_bo *bo);
+void brw_bo_unreference(struct brw_bo *bo);
/**
* Maps the buffer into userspace.
@@ -192,19 +190,19 @@ void drm_bacon_bo_unreference(drm_bacon_bo *bo);
* buffer to complete, first. The resulting mapping is available at
* buf->virtual.
*/
-int drm_bacon_bo_map(drm_bacon_bo *bo, int write_enable);
+int brw_bo_map(struct brw_bo *bo, int write_enable);
/**
* Reduces the refcount on the userspace mapping of the buffer
* object.
*/
-int drm_bacon_bo_unmap(drm_bacon_bo *bo);
+int brw_bo_unmap(struct brw_bo *bo);
/** Write data into an object. */
-int drm_bacon_bo_subdata(drm_bacon_bo *bo, unsigned long offset,
+int brw_bo_subdata(struct brw_bo *bo, unsigned long offset,
unsigned long size, const void *data);
/** Read data from an object. */
-int drm_bacon_bo_get_subdata(drm_bacon_bo *bo, unsigned long offset,
+int brw_bo_get_subdata(struct brw_bo *bo, unsigned long offset,
unsigned long size, void *data);
/**
* Waits for rendering to an object by the GPU to have completed.
@@ -213,7 +211,7 @@ int drm_bacon_bo_get_subdata(drm_bacon_bo *bo, unsigned long offset,
* bo_subdata, etc. It is merely a way for the driver to implement
* glFinish.
*/
-void drm_bacon_bo_wait_rendering(drm_bacon_bo *bo);
+void brw_bo_wait_rendering(struct brw_bo *bo);
/**
* Tears down the buffer manager instance.
@@ -226,7 +224,7 @@ void brw_bufmgr_destroy(struct brw_bufmgr *bufmgr);
* \param buf Buffer to set tiling mode for
* \param tiling_mode desired, and returned tiling mode
*/
-int drm_bacon_bo_set_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
+int brw_bo_set_tiling(struct brw_bo *bo, uint32_t * tiling_mode,
uint32_t stride);
/**
* Get the current tiling (and resulting swizzling) mode for the bo.
@@ -235,7 +233,7 @@ int drm_bacon_bo_set_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
* \param tiling_mode returned tiling mode
* \param swizzle_mode returned swizzling mode
*/
-int drm_bacon_bo_get_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
+int brw_bo_get_tiling(struct brw_bo *bo, uint32_t * tiling_mode,
uint32_t * swizzle_mode);
/**
@@ -244,13 +242,13 @@ int drm_bacon_bo_get_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
* \param buf Buffer to create a name for
* \param name Returned name
*/
-int drm_bacon_bo_flink(drm_bacon_bo *bo, uint32_t * name);
+int brw_bo_flink(struct brw_bo *bo, uint32_t * name);
/**
* Returns 1 if mapping the buffer for write could cause the process
* to block, due to the object being active in the GPU.
*/
-int drm_bacon_bo_busy(drm_bacon_bo *bo);
+int brw_bo_busy(struct brw_bo *bo);
/**
* Specify the volatility of the buffer.
@@ -264,7 +262,7 @@ int drm_bacon_bo_busy(drm_bacon_bo *bo);
* Returns 1 if the buffer was retained, or 0 if it was discarded whilst
* marked as I915_MADV_DONTNEED.
*/
-int drm_bacon_bo_madvise(drm_bacon_bo *bo, int madv);
+int brw_bo_madvise(struct brw_bo *bo, int madv);
/**
* Disable buffer reuse for buffers which will be shared in some way,
@@ -273,40 +271,40 @@ int drm_bacon_bo_madvise(drm_bacon_bo *bo, int madv);
*
* \param bo Buffer to disable reuse for
*/
-int drm_bacon_bo_disable_reuse(drm_bacon_bo *bo);
+int brw_bo_disable_reuse(struct brw_bo *bo);
/**
* Query whether a buffer is reusable.
*
* \param bo Buffer to query
*/
-int drm_bacon_bo_is_reusable(drm_bacon_bo *bo);
+int brw_bo_is_reusable(struct brw_bo *bo);
/* drm_bacon_bufmgr_gem.c */
struct brw_bufmgr *brw_bufmgr_init(struct gen_device_info *devinfo,
int fd, int batch_size);
-drm_bacon_bo *drm_bacon_bo_gem_create_from_name(struct brw_bufmgr *bufmgr,
- const char *name,
- unsigned int handle);
+struct brw_bo *brw_bo_gem_create_from_name(struct brw_bufmgr *bufmgr,
+ const char *name,
+ unsigned int handle);
void brw_bufmgr_enable_reuse(struct brw_bufmgr *bufmgr);
void brw_bufmgr_gem_set_vma_cache_size(struct brw_bufmgr *bufmgr,
int limit);
-int drm_bacon_gem_bo_map_unsynchronized(drm_bacon_bo *bo);
-int drm_bacon_gem_bo_map_gtt(drm_bacon_bo *bo);
+int brw_bo_map_unsynchronized(struct brw_bo *bo);
+int brw_bo_map_gtt(struct brw_bo *bo);
-void *drm_bacon_gem_bo_map__cpu(drm_bacon_bo *bo);
-void *drm_bacon_gem_bo_map__gtt(drm_bacon_bo *bo);
-void *drm_bacon_gem_bo_map__wc(drm_bacon_bo *bo);
+void *brw_bo_map__cpu(struct brw_bo *bo);
+void *brw_bo_map__gtt(struct brw_bo *bo);
+void *brw_bo_map__wc(struct brw_bo *bo);
-void drm_bacon_gem_bo_start_gtt_access(drm_bacon_bo *bo, int write_enable);
+void brw_bo_start_gtt_access(struct brw_bo *bo, int write_enable);
-int drm_bacon_gem_bo_wait(drm_bacon_bo *bo, int64_t timeout_ns);
+int brw_bo_wait(struct brw_bo *bo, int64_t timeout_ns);
uint32_t brw_create_hw_context(struct brw_bufmgr *bufmgr);
void brw_destroy_hw_context(struct brw_bufmgr *bufmgr, uint32_t ctx_id);
-int drm_bacon_bo_gem_export_to_prime(drm_bacon_bo *bo, int *prime_fd);
-drm_bacon_bo *drm_bacon_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr,
+int brw_bo_gem_export_to_prime(struct brw_bo *bo, int *prime_fd);
+struct brw_bo *brw_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr,
int prime_fd, int size);
int brw_reg_read(struct brw_bufmgr *bufmgr, uint32_t offset, uint64_t *result);
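
As a reading aid, here is a minimal usage sketch of the renamed allocation and mapping API declared above. It is not part of the patch: the helper name is hypothetical, it assumes a struct brw_bufmgr obtained elsewhere (e.g. from brw_bufmgr_init()), and it elides error handling.

#include <stdint.h>
#include <string.h>

#include "brw_bufmgr.h"

static void
example_bo_roundtrip(struct brw_bufmgr *bufmgr)
{
   /* Allocate a 4 KiB buffer with 4 KiB alignment. */
   struct brw_bo *bo = brw_bo_alloc(bufmgr, "example bo", 4096, 4096);
   if (!bo)
      return;

   /* brw_bo_map() waits for any outstanding rendering and exposes the
    * mapping at bo->virtual, per the header comments above. */
   if (brw_bo_map(bo, 1 /* write_enable */) == 0) {
      memset(bo->virtual, 0, 4096);
      brw_bo_unmap(bo);
   }

   /* Alternatively, write without holding a mapping. */
   const uint32_t magic = 0xdeadbeef;
   brw_bo_subdata(bo, 0, sizeof(magic), &magic);

   /* Drop the last reference; a reuse-enabled bufmgr may recycle it. */
   brw_bo_unreference(bo);
}
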
diff --git a/src/mesa/drivers/dri/i965/brw_compute.c b/src/mesa/drivers/dri/i965/brw_compute.c
index e924401c3af..80461536359 100644
--- a/src/mesa/drivers/dri/i965/brw_compute.c
+++ b/src/mesa/drivers/dri/i965/brw_compute.c
@@ -38,7 +38,7 @@ static void
prepare_indirect_gpgpu_walker(struct brw_context *brw)
{
GLintptr indirect_offset = brw->compute.num_work_groups_offset;
- drm_bacon_bo *bo = brw->compute.num_work_groups_bo;
+ struct brw_bo *bo = brw->compute.num_work_groups_bo;
brw_load_register_mem(brw, GEN7_GPGPU_DISPATCHDIMX, bo,
I915_GEM_DOMAIN_VERTEX, 0,
@@ -258,7 +258,7 @@ brw_dispatch_compute_indirect(struct gl_context *ctx, GLintptr indirect)
struct brw_context *brw = brw_context(ctx);
static const GLuint indirect_group_counts[3] = { 0, 0, 0 };
struct gl_buffer_object *indirect_buffer = ctx->DispatchIndirectBuffer;
- drm_bacon_bo *bo =
+ struct brw_bo *bo =
intel_bufferobj_buffer(brw,
intel_buffer_object(indirect_buffer),
indirect, 3 * sizeof(GLuint));
diff --git a/src/mesa/drivers/dri/i965/brw_context.c b/src/mesa/drivers/dri/i965/brw_context.c
index 88acbbc1723..c9e8ca388ef 100644
--- a/src/mesa/drivers/dri/i965/brw_context.c
+++ b/src/mesa/drivers/dri/i965/brw_context.c
@@ -169,7 +169,7 @@ intel_update_framebuffer(struct gl_context *ctx,
}
static bool
-intel_disable_rb_aux_buffer(struct brw_context *brw, const drm_bacon_bo *bo)
+intel_disable_rb_aux_buffer(struct brw_context *brw, const struct brw_bo *bo)
{
const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
bool found = false;
@@ -413,7 +413,7 @@ intel_finish(struct gl_context * ctx)
intel_glFlush(ctx);
if (brw->batch.last_bo)
- drm_bacon_bo_wait_rendering(brw->batch.last_bo);
+ brw_bo_wait_rendering(brw->batch.last_bo);
}
static void
@@ -1188,17 +1188,17 @@ intelDestroyContext(__DRIcontext * driContextPriv)
brw_destroy_state(brw);
brw_draw_destroy(brw);
- drm_bacon_bo_unreference(brw->curbe.curbe_bo);
+ brw_bo_unreference(brw->curbe.curbe_bo);
if (brw->vs.base.scratch_bo)
- drm_bacon_bo_unreference(brw->vs.base.scratch_bo);
+ brw_bo_unreference(brw->vs.base.scratch_bo);
if (brw->tcs.base.scratch_bo)
- drm_bacon_bo_unreference(brw->tcs.base.scratch_bo);
+ brw_bo_unreference(brw->tcs.base.scratch_bo);
if (brw->tes.base.scratch_bo)
- drm_bacon_bo_unreference(brw->tes.base.scratch_bo);
+ brw_bo_unreference(brw->tes.base.scratch_bo);
if (brw->gs.base.scratch_bo)
- drm_bacon_bo_unreference(brw->gs.base.scratch_bo);
+ brw_bo_unreference(brw->gs.base.scratch_bo);
if (brw->wm.base.scratch_bo)
- drm_bacon_bo_unreference(brw->wm.base.scratch_bo);
+ brw_bo_unreference(brw->wm.base.scratch_bo);
brw_destroy_hw_context(brw->bufmgr, brw->hw_ctx);
@@ -1214,8 +1214,8 @@ intelDestroyContext(__DRIcontext * driContextPriv)
brw_fini_pipe_control(brw);
intel_batchbuffer_free(&brw->batch);
- drm_bacon_bo_unreference(brw->throttle_batch[1]);
- drm_bacon_bo_unreference(brw->throttle_batch[0]);
+ brw_bo_unreference(brw->throttle_batch[1]);
+ brw_bo_unreference(brw->throttle_batch[0]);
brw->throttle_batch[1] = NULL;
brw->throttle_batch[0] = NULL;
@@ -1600,7 +1600,7 @@ intel_query_dri2_buffers(struct brw_context *brw,
* DRI2BufferDepthStencil are handled as special cases.
*
* \param buffer_name is a human readable name, such as "dri2 front buffer",
- * that is passed to drm_bacon_bo_gem_create_from_name().
+ * that is passed to brw_bo_gem_create_from_name().
*
* \see intel_update_renderbuffers()
*/
@@ -1612,7 +1612,7 @@ intel_process_dri2_buffer(struct brw_context *brw,
const char *buffer_name)
{
struct gl_framebuffer *fb = drawable->driverPrivate;
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
if (!rb)
return;
@@ -1633,10 +1633,10 @@ intel_process_dri2_buffer(struct brw_context *brw,
if (last_mt) {
/* The bo already has a name because the miptree was created by a
* previous call to intel_process_dri2_buffer(). If a bo already has a
- * name, then drm_bacon_bo_flink() is a low-cost getter. It does not
+ * name, then brw_bo_flink() is a low-cost getter. It does not
* create a new name.
*/
- drm_bacon_bo_flink(last_mt->bo, &old_name);
+ brw_bo_flink(last_mt->bo, &old_name);
}
if (old_name == buffer->name)
@@ -1649,7 +1649,7 @@ intel_process_dri2_buffer(struct brw_context *brw,
buffer->cpp, buffer->pitch);
}
- bo = drm_bacon_bo_gem_create_from_name(brw->bufmgr, buffer_name,
+ bo = brw_bo_gem_create_from_name(brw->bufmgr, buffer_name,
buffer->name);
if (!bo) {
fprintf(stderr,
@@ -1674,7 +1674,7 @@ intel_process_dri2_buffer(struct brw_context *brw,
assert(rb->mt);
- drm_bacon_bo_unreference(bo);
+ brw_bo_unreference(bo);
}
/**
diff --git a/src/mesa/drivers/dri/i965/brw_context.h b/src/mesa/drivers/dri/i965/brw_context.h
index f5854bf8fb7..7b354c4f7ea 100644
--- a/src/mesa/drivers/dri/i965/brw_context.h
+++ b/src/mesa/drivers/dri/i965/brw_context.h
@@ -390,7 +390,7 @@ struct brw_cache {
struct brw_context *brw;
struct brw_cache_item **items;
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
GLuint size, n_items;
uint32_t next_offset;
@@ -422,7 +422,7 @@ enum shader_time_shader_type {
struct brw_vertex_buffer {
/** Buffer object containing the uploaded vertex data */
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
uint32_t offset;
uint32_t size;
/** Byte stride between elements in the uploaded array */
@@ -442,7 +442,7 @@ struct brw_query_object {
struct gl_query_object Base;
/** Last query BO associated with this query. */
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
/** Last index in bo with query data for this object. */
int last_index;
@@ -459,9 +459,9 @@ enum brw_gpu_ring {
struct intel_batchbuffer {
/** Current batchbuffer being queued up. */
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
/** Last BO submitted to the hardware. Used for glFinish(). */
- drm_bacon_bo *last_bo;
+ struct brw_bo *last_bo;
#ifdef DEBUG
uint16_t emit, total;
@@ -482,7 +482,7 @@ struct intel_batchbuffer {
int reloc_array_size;
/** The validation list */
struct drm_i915_gem_exec_object2 *exec_objects;
- drm_bacon_bo **exec_bos;
+ struct brw_bo **exec_bos;
int exec_count;
int exec_array_size;
/** The amount of aperture space (in bytes) used by all exec_bos */
@@ -504,7 +504,7 @@ struct brw_transform_feedback_object {
struct gl_transform_feedback_object base;
/** A buffer to hold SO_WRITE_OFFSET(n) values while paused. */
- drm_bacon_bo *offset_bo;
+ struct brw_bo *offset_bo;
/** If true, SO_WRITE_OFFSET(n) should be reset to zero at next use. */
bool zero_offsets;
@@ -523,7 +523,7 @@ struct brw_transform_feedback_object {
* @{
*/
uint64_t prims_generated[BRW_MAX_XFB_STREAMS];
- drm_bacon_bo *prim_count_bo;
+ struct brw_bo *prim_count_bo;
unsigned prim_count_buffer_index; /**< in number of uint64_t units */
/** @} */
@@ -562,7 +562,7 @@ struct brw_stage_state
* unless you're taking additional measures to synchronize thread execution
* across slot size changes.
*/
- drm_bacon_bo *scratch_bo;
+ struct brw_bo *scratch_bo;
/**
* Scratch slot size allocated for each thread in the buffer object given
@@ -673,11 +673,11 @@ struct brw_context
uint32_t hw_ctx;
/** BO for post-sync nonzero writes for gen6 workaround. */
- drm_bacon_bo *workaround_bo;
+ struct brw_bo *workaround_bo;
uint8_t pipe_controls_since_last_cs_stall;
/**
- * Set of drm_bacon_bo * that have been rendered to within this batchbuffer
+ * Set of struct brw_bo * that have been rendered to within this batchbuffer
* and would need flushing before being used from another cache domain that
* isn't coherent with it (i.e. the sampler).
*/
@@ -695,7 +695,7 @@ struct brw_context
bool no_batch_wrap;
struct {
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
uint32_t next_offset;
} upload;
@@ -708,7 +708,7 @@ struct brw_context
bool front_buffer_dirty;
/** Framerate throttling: @{ */
- drm_bacon_bo *throttle_batch[2];
+ struct brw_bo *throttle_batch[2];
/* Limit the number of outstanding SwapBuffers by waiting for an earlier
* frame of rendering to complete. This gives a very precise cap to the
@@ -822,7 +822,7 @@ struct brw_context
* Buffer and offset used for GL_ARB_shader_draw_parameters
* (for now, only gl_BaseVertex).
*/
- drm_bacon_bo *draw_params_bo;
+ struct brw_bo *draw_params_bo;
uint32_t draw_params_offset;
/**
@@ -831,7 +831,7 @@ struct brw_context
* draw parameters.
*/
int gl_drawid;
- drm_bacon_bo *draw_id_bo;
+ struct brw_bo *draw_id_bo;
uint32_t draw_id_offset;
} draw;
@@ -841,7 +841,7 @@ struct brw_context
* an indirect call, and num_work_groups_offset is valid. Otherwise,
* num_work_groups is set based on glDispatchCompute.
*/
- drm_bacon_bo *num_work_groups_bo;
+ struct brw_bo *num_work_groups_bo;
GLintptr num_work_groups_offset;
const GLuint *num_work_groups;
} compute;
@@ -883,7 +883,7 @@ struct brw_context
const struct _mesa_index_buffer *ib;
/* Updates are signaled by BRW_NEW_INDEX_BUFFER. */
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
uint32_t size;
GLuint type;
@@ -971,7 +971,7 @@ struct brw_context
* Pointer to the (intel_upload.c-generated) BO containing the uniforms
* for upload to the CURBE.
*/
- drm_bacon_bo *curbe_bo;
+ struct brw_bo *curbe_bo;
/** Offset within curbe_bo of space for current curbe entry */
GLuint curbe_offset;
} curbe;
@@ -1077,7 +1077,7 @@ struct brw_context
* Buffer object used in place of multisampled null render targets on
* Gen6. See brw_emit_null_surface_state().
*/
- drm_bacon_bo *multisampled_null_render_target_bo;
+ struct brw_bo *multisampled_null_render_target_bo;
uint32_t fast_clear_op;
float offset_clamp;
@@ -1219,7 +1219,7 @@ struct brw_context
} l3;
struct {
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
const char **names;
int *ids;
enum shader_time_shader_type *types;
@@ -1309,8 +1309,8 @@ uint64_t brw_raw_timestamp_delta(struct brw_context *brw,
/** gen6_queryobj.c */
void gen6_init_queryobj_functions(struct dd_function_table *functions);
-void brw_write_timestamp(struct brw_context *brw, drm_bacon_bo *bo, int idx);
-void brw_write_depth_count(struct brw_context *brw, drm_bacon_bo *bo, int idx);
+void brw_write_timestamp(struct brw_context *brw, struct brw_bo *bo, int idx);
+void brw_write_depth_count(struct brw_context *brw, struct brw_bo *bo, int idx);
/** hsw_queryobj.c */
void hsw_overflow_result_to_gpr0(struct brw_context *brw,
@@ -1325,18 +1325,18 @@ bool brw_check_conditional_render(struct brw_context *brw);
/** intel_batchbuffer.c */
void brw_load_register_mem(struct brw_context *brw,
uint32_t reg,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t read_domains, uint32_t write_domain,
uint32_t offset);
void brw_load_register_mem64(struct brw_context *brw,
uint32_t reg,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t read_domains, uint32_t write_domain,
uint32_t offset);
void brw_store_register_mem32(struct brw_context *brw,
- drm_bacon_bo *bo, uint32_t reg, uint32_t offset);
+ struct brw_bo *bo, uint32_t reg, uint32_t offset);
void brw_store_register_mem64(struct brw_context *brw,
- drm_bacon_bo *bo, uint32_t reg, uint32_t offset);
+ struct brw_bo *bo, uint32_t reg, uint32_t offset);
void brw_load_register_imm32(struct brw_context *brw,
uint32_t reg, uint32_t imm);
void brw_load_register_imm64(struct brw_context *brw,
@@ -1345,9 +1345,9 @@ void brw_load_register_reg(struct brw_context *brw, uint32_t src,
uint32_t dest);
void brw_load_register_reg64(struct brw_context *brw, uint32_t src,
uint32_t dest);
-void brw_store_data_imm32(struct brw_context *brw, drm_bacon_bo *bo,
+void brw_store_data_imm32(struct brw_context *brw, struct brw_bo *bo,
uint32_t offset, uint32_t imm);
-void brw_store_data_imm64(struct brw_context *brw, drm_bacon_bo *bo,
+void brw_store_data_imm64(struct brw_context *brw, struct brw_bo *bo,
uint32_t offset, uint64_t imm);
/*======================================================================
@@ -1372,7 +1372,7 @@ key_debug(struct brw_context *brw, const char *name, int a, int b)
void brwInitFragProgFuncs( struct dd_function_table *functions );
void brw_get_scratch_bo(struct brw_context *brw,
- drm_bacon_bo **scratch_bo, int size);
+ struct brw_bo **scratch_bo, int size);
void brw_alloc_stage_scratch(struct brw_context *brw,
struct brw_stage_state *stage_state,
unsigned per_thread_size,
@@ -1425,12 +1425,12 @@ void brw_prepare_vertices(struct brw_context *brw);
/* brw_wm_surface_state.c */
void brw_init_surface_formats(struct brw_context *brw);
void brw_create_constant_surface(struct brw_context *brw,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t offset,
uint32_t size,
uint32_t *out_offset);
void brw_create_buffer_surface(struct brw_context *brw,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t offset,
uint32_t size,
uint32_t *out_offset);
@@ -1699,7 +1699,7 @@ void brw_fini_pipe_control(struct brw_context *brw);
void brw_emit_pipe_control_flush(struct brw_context *brw, uint32_t flags);
void brw_emit_pipe_control_write(struct brw_context *brw, uint32_t flags,
- drm_bacon_bo *bo, uint32_t offset,
+ struct brw_bo *bo, uint32_t offset,
uint32_t imm_lower, uint32_t imm_upper);
void brw_emit_mi_flush(struct brw_context *brw);
void brw_emit_post_sync_nonzero_flush(struct brw_context *brw);
diff --git a/src/mesa/drivers/dri/i965/brw_cs.c b/src/mesa/drivers/dri/i965/brw_cs.c
index bafe181143a..cc564a012b6 100644
--- a/src/mesa/drivers/dri/i965/brw_cs.c
+++ b/src/mesa/drivers/dri/i965/brw_cs.c
@@ -104,7 +104,7 @@ brw_codegen_cs_prog(struct brw_context *brw,
if (unlikely(brw->perf_debug)) {
start_busy = (brw->batch.last_bo &&
- drm_bacon_bo_busy(brw->batch.last_bo));
+ brw_bo_busy(brw->batch.last_bo));
start_time = get_time();
}
@@ -131,7 +131,7 @@ brw_codegen_cs_prog(struct brw_context *brw,
}
cp->compiled_once = true;
- if (start_busy && !drm_bacon_bo_busy(brw->batch.last_bo)) {
+ if (start_busy && !brw_bo_busy(brw->batch.last_bo)) {
perf_debug("CS compile took %.03f ms and stalled the GPU\n",
(get_time() - start_time) * 1000);
}
diff --git a/src/mesa/drivers/dri/i965/brw_draw.c b/src/mesa/drivers/dri/i965/brw_draw.c
index bf09915d0c3..611cb86536f 100644
--- a/src/mesa/drivers/dri/i965/brw_draw.c
+++ b/src/mesa/drivers/dri/i965/brw_draw.c
@@ -220,7 +220,7 @@ brw_emit_prim(struct brw_context *brw,
ADVANCE_BATCH();
} else if (prim->is_indirect) {
struct gl_buffer_object *indirect_buffer = brw->ctx.DrawIndirectBuffer;
- drm_bacon_bo *bo = intel_bufferobj_buffer(brw,
+ struct brw_bo *bo = intel_bufferobj_buffer(brw,
intel_buffer_object(indirect_buffer),
prim->indirect_offset, 5 * sizeof(GLuint));
@@ -291,7 +291,7 @@ brw_merge_inputs(struct brw_context *brw,
GLuint i;
for (i = 0; i < brw->vb.nr_buffers; i++) {
- drm_bacon_bo_unreference(brw->vb.buffers[i].bo);
+ brw_bo_unreference(brw->vb.buffers[i].bo);
brw->vb.buffers[i].bo = NULL;
}
brw->vb.nr_buffers = 0;
@@ -551,13 +551,13 @@ brw_try_draw_prims(struct gl_context *ctx,
brw->draw.params.gl_basevertex = new_basevertex;
brw->draw.params.gl_baseinstance = new_baseinstance;
- drm_bacon_bo_unreference(brw->draw.draw_params_bo);
+ brw_bo_unreference(brw->draw.draw_params_bo);
if (prims[i].is_indirect) {
/* Point draw_params_bo at the indirect buffer. */
brw->draw.draw_params_bo =
intel_buffer_object(ctx->DrawIndirectBuffer)->buffer;
- drm_bacon_bo_reference(brw->draw.draw_params_bo);
+ brw_bo_reference(brw->draw.draw_params_bo);
brw->draw.draw_params_offset =
prims[i].indirect_offset + (prims[i].indexed ? 12 : 8);
} else {
@@ -575,7 +575,7 @@ brw_try_draw_prims(struct gl_context *ctx,
* the loop.
*/
brw->draw.gl_drawid = prims[i].draw_id;
- drm_bacon_bo_unreference(brw->draw.draw_id_bo);
+ brw_bo_unreference(brw->draw.draw_id_bo);
brw->draw.draw_id_bo = NULL;
if (i > 0 && vs_prog_data->uses_drawid)
brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
@@ -711,7 +711,7 @@ brw_draw_destroy(struct brw_context *brw)
unsigned i;
for (i = 0; i < brw->vb.nr_buffers; i++) {
- drm_bacon_bo_unreference(brw->vb.buffers[i].bo);
+ brw_bo_unreference(brw->vb.buffers[i].bo);
brw->vb.buffers[i].bo = NULL;
}
brw->vb.nr_buffers = 0;
@@ -721,6 +721,6 @@ brw_draw_destroy(struct brw_context *brw)
}
brw->vb.nr_enabled = 0;
- drm_bacon_bo_unreference(brw->ib.bo);
+ brw_bo_unreference(brw->ib.bo);
brw->ib.bo = NULL;
}
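
The unreference-then-reference sequences above (draw_params_bo, the vertex buffer array, ib.bo) all follow the same refcount-swap idiom. A hypothetical helper capturing it, assuming brw_bo_unreference() accepts NULL as the unguarded calls in this patch suggest:

static void
example_bo_swap_ref(struct brw_bo **slot, struct brw_bo *bo)
{
   if (bo)
      brw_bo_reference(bo);     /* take the new reference first */
   brw_bo_unreference(*slot);   /* assumed NULL-safe, per the patch */
   *slot = bo;
}
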
diff --git a/src/mesa/drivers/dri/i965/brw_draw.h b/src/mesa/drivers/dri/i965/brw_draw.h
index 8cac2aba9ed..3b999153c43 100644
--- a/src/mesa/drivers/dri/i965/brw_draw.h
+++ b/src/mesa/drivers/dri/i965/brw_draw.h
@@ -34,7 +34,7 @@ struct brw_context;
uint32_t *
brw_emit_vertex_buffer_state(struct brw_context *brw,
unsigned buffer_nr,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
unsigned start_offset,
unsigned end_offset,
unsigned stride,
diff --git a/src/mesa/drivers/dri/i965/brw_draw_upload.c b/src/mesa/drivers/dri/i965/brw_draw_upload.c
index 843b7d32b7b..14b60a9abc2 100644
--- a/src/mesa/drivers/dri/i965/brw_draw_upload.c
+++ b/src/mesa/drivers/dri/i965/brw_draw_upload.c
@@ -702,7 +702,7 @@ brw_prepare_vertices(struct brw_context *brw)
const uint32_t range = buffer_range_end[i] - buffer_range_start[i];
buffer->bo = intel_bufferobj_buffer(brw, enabled_buffer[i], start, range);
- drm_bacon_bo_reference(buffer->bo);
+ brw_bo_reference(buffer->bo);
}
/* If we need to upload all the arrays, then we can trim those arrays to
@@ -792,7 +792,7 @@ brw_prepare_shader_draw_parameters(struct brw_context *brw)
uint32_t *
brw_emit_vertex_buffer_state(struct brw_context *brw,
unsigned buffer_nr,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
unsigned start_offset,
unsigned end_offset,
unsigned stride,
@@ -1166,7 +1166,7 @@ brw_upload_indices(struct brw_context *brw)
struct gl_context *ctx = &brw->ctx;
const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
GLuint ib_size;
- drm_bacon_bo *old_bo = brw->ib.bo;
+ struct brw_bo *old_bo = brw->ib.bo;
struct gl_buffer_object *bufferobj;
GLuint offset;
GLuint ib_type_size;
@@ -1210,14 +1210,14 @@ brw_upload_indices(struct brw_context *brw)
ctx->Driver.UnmapBuffer(ctx, bufferobj, MAP_INTERNAL);
} else {
- drm_bacon_bo *bo =
+ struct brw_bo *bo =
intel_bufferobj_buffer(brw, intel_buffer_object(bufferobj),
offset, ib_size);
if (bo != brw->ib.bo) {
- drm_bacon_bo_unreference(brw->ib.bo);
+ brw_bo_unreference(brw->ib.bo);
brw->ib.bo = bo;
brw->ib.size = bufferobj->Size;
- drm_bacon_bo_reference(bo);
+ brw_bo_reference(bo);
}
}
}
diff --git a/src/mesa/drivers/dri/i965/brw_gs.c b/src/mesa/drivers/dri/i965/brw_gs.c
index b0ea20a2ed4..0c04ef0dacc 100644
--- a/src/mesa/drivers/dri/i965/brw_gs.c
+++ b/src/mesa/drivers/dri/i965/brw_gs.c
@@ -124,7 +124,7 @@ brw_codegen_gs_prog(struct brw_context *brw,
st_index = brw_get_shader_time_index(brw, &gp->program, ST_GS, true);
if (unlikely(brw->perf_debug)) {
- start_busy = brw->batch.last_bo && drm_bacon_bo_busy(brw->batch.last_bo);
+ start_busy = brw->batch.last_bo && brw_bo_busy(brw->batch.last_bo);
start_time = get_time();
}
@@ -147,7 +147,7 @@ brw_codegen_gs_prog(struct brw_context *brw,
if (gp->compiled_once) {
brw_gs_debug_recompile(brw, &gp->program, key);
}
- if (start_busy && !drm_bacon_bo_busy(brw->batch.last_bo)) {
+ if (start_busy && !brw_bo_busy(brw->batch.last_bo)) {
perf_debug("GS compile took %.03f ms and stalled the GPU\n",
(get_time() - start_time) * 1000);
}
diff --git a/src/mesa/drivers/dri/i965/brw_object_purgeable.c b/src/mesa/drivers/dri/i965/brw_object_purgeable.c
index 40b1b9ef093..c6d4a085f0f 100644
--- a/src/mesa/drivers/dri/i965/brw_object_purgeable.c
+++ b/src/mesa/drivers/dri/i965/brw_object_purgeable.c
@@ -38,12 +38,12 @@
#include "intel_mipmap_tree.h"
static GLenum
-intel_buffer_purgeable(drm_bacon_bo *buffer)
+intel_buffer_purgeable(struct brw_bo *buffer)
{
int retained = 0;
if (buffer != NULL)
- retained = drm_bacon_bo_madvise(buffer, I915_MADV_DONTNEED);
+ retained = brw_bo_madvise(buffer, I915_MADV_DONTNEED);
return retained ? GL_VOLATILE_APPLE : GL_RELEASED_APPLE;
}
@@ -101,13 +101,13 @@ intel_render_object_purgeable(struct gl_context * ctx,
}
static int
-intel_bo_unpurgeable(drm_bacon_bo *buffer)
+intel_bo_unpurgeable(struct brw_bo *buffer)
{
int retained;
retained = 0;
if (buffer != NULL)
- retained = drm_bacon_bo_madvise(buffer, I915_MADV_WILLNEED);
+ retained = brw_bo_madvise(buffer, I915_MADV_WILLNEED);
return retained;
}
@@ -125,7 +125,7 @@ intel_buffer_object_unpurgeable(struct gl_context * ctx,
return GL_UNDEFINED_APPLE;
if (option == GL_UNDEFINED_APPLE || !intel_bo_unpurgeable(intel->buffer)) {
- drm_bacon_bo_unreference(intel->buffer);
+ brw_bo_unreference(intel->buffer);
intel->buffer = NULL;
return GL_UNDEFINED_APPLE;
}
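
A condensed sketch of the purgeable round-trip implemented above. The helper is hypothetical; I915_MADV_DONTNEED and I915_MADV_WILLNEED come from the kernel's i915 uAPI header, here assumed to be reachable as <i915_drm.h> via libdrm's include path.

#include <stdbool.h>

#include <i915_drm.h>   /* I915_MADV_DONTNEED / I915_MADV_WILLNEED */

#include "brw_bufmgr.h"

static bool
example_purge_roundtrip(struct brw_bo *bo)
{
   /* Let the kernel reclaim the pages under memory pressure. */
   brw_bo_madvise(bo, I915_MADV_DONTNEED);

   /* ...later, ask for the pages back. Nonzero means the contents were
    * retained; zero means they were discarded and must be regenerated. */
   return brw_bo_madvise(bo, I915_MADV_WILLNEED) != 0;
}
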
diff --git a/src/mesa/drivers/dri/i965/brw_performance_query.c b/src/mesa/drivers/dri/i965/brw_performance_query.c
index a0f4d9b2903..fa63a3fe800 100644
--- a/src/mesa/drivers/dri/i965/brw_performance_query.c
+++ b/src/mesa/drivers/dri/i965/brw_performance_query.c
@@ -224,7 +224,7 @@ struct brw_perf_query_object
/**
* BO containing OA counter snapshots at query Begin/End time.
*/
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
/**
* The MI_REPORT_PERF_COUNT command lets us specify a unique
@@ -264,7 +264,7 @@ struct brw_perf_query_object
* BO containing starting and ending snapshots for the
* statistics counters.
*/
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
} pipeline_stats;
};
};
@@ -476,7 +476,7 @@ snapshot_statistics_registers(struct brw_context *brw,
*/
static void
emit_mi_report_perf_count(struct brw_context *brw,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t offset_in_bytes,
uint32_t report_id)
{
@@ -713,7 +713,7 @@ accumulate_oa_reports(struct brw_context *brw,
if (!read_oa_samples(brw))
goto error;
- drm_bacon_bo_map(obj->oa.bo, false);
+ brw_bo_map(obj->oa.bo, false);
query_buffer = obj->oa.bo->virtual;
start = last = query_buffer;
@@ -793,7 +793,7 @@ end:
DBG("Marking %d accumulated - results gathered\n", o->Id);
- drm_bacon_bo_unmap(obj->oa.bo);
+ brw_bo_unmap(obj->oa.bo);
obj->oa.results_accumulated = true;
drop_from_unaccumulated_query_list(brw, obj);
dec_n_oa_users(brw);
@@ -802,7 +802,7 @@ end:
error:
- drm_bacon_bo_unmap(obj->oa.bo);
+ brw_bo_unmap(obj->oa.bo);
discard_all_queries(brw);
}
@@ -984,18 +984,18 @@ brw_begin_perf_query(struct gl_context *ctx,
}
if (obj->oa.bo) {
- drm_bacon_bo_unreference(obj->oa.bo);
+ brw_bo_unreference(obj->oa.bo);
obj->oa.bo = NULL;
}
obj->oa.bo =
- drm_bacon_bo_alloc(brw->bufmgr, "perf. query OA MI_RPC bo",
- MI_RPC_BO_SIZE, 64);
+ brw_bo_alloc(brw->bufmgr, "perf. query OA MI_RPC bo",
+ MI_RPC_BO_SIZE, 64);
#ifdef DEBUG
/* Pre-filling the BO helps debug whether writes landed. */
- drm_bacon_bo_map(obj->oa.bo, true);
+ brw_bo_map(obj->oa.bo, true);
memset((char *) obj->oa.bo->virtual, 0x80, MI_RPC_BO_SIZE);
- drm_bacon_bo_unmap(obj->oa.bo);
+ brw_bo_unmap(obj->oa.bo);
#endif
obj->oa.begin_report_id = brw->perfquery.next_query_start_report_id;
@@ -1031,12 +1031,12 @@ brw_begin_perf_query(struct gl_context *ctx,
case PIPELINE_STATS:
if (obj->pipeline_stats.bo) {
- drm_bacon_bo_unreference(obj->pipeline_stats.bo);
+ brw_bo_unreference(obj->pipeline_stats.bo);
obj->pipeline_stats.bo = NULL;
}
obj->pipeline_stats.bo =
- drm_bacon_bo_alloc(brw->bufmgr, "perf. query pipeline stats bo",
+ brw_bo_alloc(brw->bufmgr, "perf. query pipeline stats bo",
STATS_BO_SIZE, 64);
/* Take starting snapshots. */
@@ -1108,7 +1108,7 @@ brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
{
struct brw_context *brw = brw_context(ctx);
struct brw_perf_query_object *obj = brw_perf_query(o);
- drm_bacon_bo *bo = NULL;
+ struct brw_bo *bo = NULL;
assert(!o->Ready);
@@ -1132,11 +1132,11 @@ brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
intel_batchbuffer_flush(brw);
if (unlikely(brw->perf_debug)) {
- if (drm_bacon_bo_busy(bo))
+ if (brw_bo_busy(bo))
perf_debug("Stalling GPU waiting for a performance query object.\n");
}
- drm_bacon_bo_wait_rendering(bo);
+ brw_bo_wait_rendering(bo);
}
static bool
@@ -1154,12 +1154,12 @@ brw_is_perf_query_ready(struct gl_context *ctx,
return (obj->oa.results_accumulated ||
(obj->oa.bo &&
!brw_batch_references(&brw->batch, obj->oa.bo) &&
- !drm_bacon_bo_busy(obj->oa.bo)));
+ !brw_bo_busy(obj->oa.bo)));
case PIPELINE_STATS:
return (obj->pipeline_stats.bo &&
!brw_batch_references(&brw->batch, obj->pipeline_stats.bo) &&
- !drm_bacon_bo_busy(obj->pipeline_stats.bo));
+ !brw_bo_busy(obj->pipeline_stats.bo));
}
unreachable("missing ready check for unknown query kind");
@@ -1220,7 +1220,7 @@ get_pipeline_stats_data(struct brw_context *brw,
int n_counters = obj->query->n_counters;
uint8_t *p = data;
- drm_bacon_bo_map(obj->pipeline_stats.bo, false);
+ brw_bo_map(obj->pipeline_stats.bo, false);
uint64_t *start = obj->pipeline_stats.bo->virtual;
uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));
@@ -1238,7 +1238,7 @@ get_pipeline_stats_data(struct brw_context *brw,
p += 8;
}
- drm_bacon_bo_unmap(obj->pipeline_stats.bo);
+ brw_bo_unmap(obj->pipeline_stats.bo);
return p - data;
}
@@ -1329,7 +1329,7 @@ brw_delete_perf_query(struct gl_context *ctx,
dec_n_oa_users(brw);
}
- drm_bacon_bo_unreference(obj->oa.bo);
+ brw_bo_unreference(obj->oa.bo);
obj->oa.bo = NULL;
}
@@ -1338,7 +1338,7 @@ brw_delete_perf_query(struct gl_context *ctx,
case PIPELINE_STATS:
if (obj->pipeline_stats.bo) {
- drm_bacon_bo_unreference(obj->pipeline_stats.bo);
+ brw_bo_unreference(obj->pipeline_stats.bo);
obj->pipeline_stats.bo = NULL;
}
break;
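
The wait path above encodes a general rule of this driver: flush any batch that still references the BO before blocking on it, or the results would never be written. A hypothetical condensation, using only calls that appear in this patch:

static void
example_wait_for_results(struct brw_context *brw, struct brw_bo *bo)
{
   /* The results only land once the batch referencing the BO executes,
    * so flush first or the wait below could block forever. */
   if (brw_batch_references(&brw->batch, bo))
      intel_batchbuffer_flush(brw);

   if (unlikely(brw->perf_debug) && brw_bo_busy(bo))
      perf_debug("Stalling GPU waiting for query results.\n");

   brw_bo_wait_rendering(bo);
}
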
diff --git a/src/mesa/drivers/dri/i965/brw_pipe_control.c b/src/mesa/drivers/dri/i965/brw_pipe_control.c
index 090d98dcc5b..f4ede2deb3f 100644
--- a/src/mesa/drivers/dri/i965/brw_pipe_control.c
+++ b/src/mesa/drivers/dri/i965/brw_pipe_control.c
@@ -177,7 +177,7 @@ brw_emit_pipe_control_flush(struct brw_context *brw, uint32_t flags)
*/
void
brw_emit_pipe_control_write(struct brw_context *brw, uint32_t flags,
- drm_bacon_bo *bo, uint32_t offset,
+ struct brw_bo *bo, uint32_t offset,
uint32_t imm_lower, uint32_t imm_upper)
{
if (brw->gen >= 8) {
@@ -372,9 +372,9 @@ brw_init_pipe_control(struct brw_context *brw,
* the gen6 workaround because it involves actually writing to
* the buffer, and the kernel doesn't let us write to the batch.
*/
- brw->workaround_bo = drm_bacon_bo_alloc(brw->bufmgr,
- "pipe_control workaround",
- 4096, 4096);
+ brw->workaround_bo = brw_bo_alloc(brw->bufmgr,
+ "pipe_control workaround",
+ 4096, 4096);
if (brw->workaround_bo == NULL)
return -ENOMEM;
@@ -386,5 +386,5 @@ brw_init_pipe_control(struct brw_context *brw,
void
brw_fini_pipe_control(struct brw_context *brw)
{
- drm_bacon_bo_unreference(brw->workaround_bo);
+ brw_bo_unreference(brw->workaround_bo);
}
diff --git a/src/mesa/drivers/dri/i965/brw_program.c b/src/mesa/drivers/dri/i965/brw_program.c
index 5a01a8cb327..ec9eb95f098 100644
--- a/src/mesa/drivers/dri/i965/brw_program.c
+++ b/src/mesa/drivers/dri/i965/brw_program.c
@@ -343,17 +343,17 @@ brw_blend_barrier(struct gl_context *ctx)
void
brw_get_scratch_bo(struct brw_context *brw,
- drm_bacon_bo **scratch_bo, int size)
+ struct brw_bo **scratch_bo, int size)
{
- drm_bacon_bo *old_bo = *scratch_bo;
+ struct brw_bo *old_bo = *scratch_bo;
if (old_bo && old_bo->size < size) {
- drm_bacon_bo_unreference(old_bo);
+ brw_bo_unreference(old_bo);
old_bo = NULL;
}
if (!old_bo) {
- *scratch_bo = drm_bacon_bo_alloc(brw->bufmgr, "scratch bo", size, 4096);
+ *scratch_bo = brw_bo_alloc(brw->bufmgr, "scratch bo", size, 4096);
}
}
@@ -371,11 +371,11 @@ brw_alloc_stage_scratch(struct brw_context *brw,
stage_state->per_thread_scratch = per_thread_size;
if (stage_state->scratch_bo)
- drm_bacon_bo_unreference(stage_state->scratch_bo);
+ brw_bo_unreference(stage_state->scratch_bo);
stage_state->scratch_bo =
- drm_bacon_bo_alloc(brw->bufmgr, "shader scratch space",
- per_thread_size * thread_count, 4096);
+ brw_bo_alloc(brw->bufmgr, "shader scratch space",
+ per_thread_size * thread_count, 4096);
}
}
@@ -404,8 +404,8 @@ brw_init_shader_time(struct brw_context *brw)
{
const int max_entries = 2048;
brw->shader_time.bo =
- drm_bacon_bo_alloc(brw->bufmgr, "shader time",
- max_entries * BRW_SHADER_TIME_STRIDE * 3, 4096);
+ brw_bo_alloc(brw->bufmgr, "shader time",
+ max_entries * BRW_SHADER_TIME_STRIDE * 3, 4096);
brw->shader_time.names = rzalloc_array(brw, const char *, max_entries);
brw->shader_time.ids = rzalloc_array(brw, int, max_entries);
brw->shader_time.types = rzalloc_array(brw, enum shader_time_shader_type,
@@ -580,7 +580,7 @@ brw_collect_shader_time(struct brw_context *brw)
* delaying reading the reports, but it doesn't look like it's a big
* overhead compared to the cost of tracking the time in the first place.
*/
- drm_bacon_bo_map(brw->shader_time.bo, true);
+ brw_bo_map(brw->shader_time.bo, true);
void *bo_map = brw->shader_time.bo->virtual;
for (int i = 0; i < brw->shader_time.num_entries; i++) {
@@ -594,7 +594,7 @@ brw_collect_shader_time(struct brw_context *brw)
/* Zero the BO out to clear it out for our next collection.
*/
memset(bo_map, 0, brw->shader_time.bo->size);
- drm_bacon_bo_unmap(brw->shader_time.bo);
+ brw_bo_unmap(brw->shader_time.bo);
}
void
@@ -643,7 +643,7 @@ brw_get_shader_time_index(struct brw_context *brw, struct gl_program *prog,
void
brw_destroy_shader_time(struct brw_context *brw)
{
- drm_bacon_bo_unreference(brw->shader_time.bo);
+ brw_bo_unreference(brw->shader_time.bo);
brw->shader_time.bo = NULL;
}
diff --git a/src/mesa/drivers/dri/i965/brw_program_cache.c b/src/mesa/drivers/dri/i965/brw_program_cache.c
index a6f4bae8c27..867e0e53458 100644
--- a/src/mesa/drivers/dri/i965/brw_program_cache.c
+++ b/src/mesa/drivers/dri/i965/brw_program_cache.c
@@ -213,27 +213,27 @@ static void
brw_cache_new_bo(struct brw_cache *cache, uint32_t new_size)
{
struct brw_context *brw = cache->brw;
- drm_bacon_bo *new_bo;
+ struct brw_bo *new_bo;
- new_bo = drm_bacon_bo_alloc(brw->bufmgr, "program cache", new_size, 64);
+ new_bo = brw_bo_alloc(brw->bufmgr, "program cache", new_size, 64);
if (brw->has_llc)
- drm_bacon_gem_bo_map_unsynchronized(new_bo);
+ brw_bo_map_unsynchronized(new_bo);
/* Copy any existing data that needs to be saved. */
if (cache->next_offset != 0) {
if (brw->has_llc) {
memcpy(new_bo->virtual, cache->bo->virtual, cache->next_offset);
} else {
- drm_bacon_bo_map(cache->bo, false);
- drm_bacon_bo_subdata(new_bo, 0, cache->next_offset,
+ brw_bo_map(cache->bo, false);
+ brw_bo_subdata(new_bo, 0, cache->next_offset,
cache->bo->virtual);
- drm_bacon_bo_unmap(cache->bo);
+ brw_bo_unmap(cache->bo);
}
}
if (brw->has_llc)
- drm_bacon_bo_unmap(cache->bo);
- drm_bacon_bo_unreference(cache->bo);
+ brw_bo_unmap(cache->bo);
+ brw_bo_unreference(cache->bo);
cache->bo = new_bo;
cache->bo_used_by_gpu = false;
@@ -264,10 +264,10 @@ brw_lookup_prog(const struct brw_cache *cache,
continue;
if (!brw->has_llc)
- drm_bacon_bo_map(cache->bo, false);
+ brw_bo_map(cache->bo, false);
ret = memcmp(cache->bo->virtual + item->offset, data, item->size);
if (!brw->has_llc)
- drm_bacon_bo_unmap(cache->bo);
+ brw_bo_unmap(cache->bo);
if (ret)
continue;
@@ -369,7 +369,7 @@ brw_upload_cache(struct brw_cache *cache,
if (brw->has_llc) {
memcpy((char *)cache->bo->virtual + item->offset, data, data_size);
} else {
- drm_bacon_bo_subdata(cache->bo, item->offset, data_size, data);
+ brw_bo_subdata(cache->bo, item->offset, data_size, data);
}
}
@@ -406,9 +406,9 @@ brw_init_caches(struct brw_context *brw)
cache->items =
calloc(cache->size, sizeof(struct brw_cache_item *));
- cache->bo = drm_bacon_bo_alloc(brw->bufmgr, "program cache", 4096, 64);
+ cache->bo = brw_bo_alloc(brw->bufmgr, "program cache", 4096, 64);
if (brw->has_llc)
- drm_bacon_gem_bo_map_unsynchronized(cache->bo);
+ brw_bo_map_unsynchronized(cache->bo);
}
static void
@@ -486,8 +486,8 @@ brw_destroy_cache(struct brw_context *brw, struct brw_cache *cache)
DBG("%s\n", __func__);
if (brw->has_llc)
- drm_bacon_bo_unmap(cache->bo);
- drm_bacon_bo_unreference(cache->bo);
+ brw_bo_unmap(cache->bo);
+ brw_bo_unreference(cache->bo);
cache->bo = NULL;
brw_clear_cache(brw, cache);
free(cache->items);
@@ -536,7 +536,7 @@ brw_print_program_cache(struct brw_context *brw)
struct brw_cache_item *item;
if (!brw->has_llc)
- drm_bacon_bo_map(cache->bo, false);
+ brw_bo_map(cache->bo, false);
for (unsigned i = 0; i < cache->size; i++) {
for (item = cache->items[i]; item; item = item->next) {
@@ -547,5 +547,5 @@ brw_print_program_cache(struct brw_context *brw)
}
if (!brw->has_llc)
- drm_bacon_bo_unmap(cache->bo);
+ brw_bo_unmap(cache->bo);
}
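
The has_llc branches above reflect the two write paths for the program cache BO: on LLC systems it stays mapped via brw_bo_map_unsynchronized() and is written with memcpy, while elsewhere writes go through brw_bo_subdata(). A hypothetical condensation of that pattern:

static void
example_cache_write(struct brw_cache *cache, bool has_llc,
                    uint32_t offset, const void *data, uint32_t size)
{
   if (has_llc) {
      /* The cache BO was mapped once with brw_bo_map_unsynchronized();
       * CPU and GPU share the LLC, so a plain memcpy is coherent. */
      memcpy((char *) cache->bo->virtual + offset, data, size);
   } else {
      /* No long-lived mapping: route the write through the kernel. */
      brw_bo_subdata(cache->bo, offset, size, data);
   }
}
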
diff --git a/src/mesa/drivers/dri/i965/brw_queryobj.c b/src/mesa/drivers/dri/i965/brw_queryobj.c
index ff6c0c87c9d..68696cc63ed 100644
--- a/src/mesa/drivers/dri/i965/brw_queryobj.c
+++ b/src/mesa/drivers/dri/i965/brw_queryobj.c
@@ -82,7 +82,7 @@ brw_raw_timestamp_delta(struct brw_context *brw, uint64_t time0, uint64_t time1)
* Emit PIPE_CONTROLs to write the current GPU timestamp into a buffer.
*/
void
-brw_write_timestamp(struct brw_context *brw, drm_bacon_bo *query_bo, int idx)
+brw_write_timestamp(struct brw_context *brw, struct brw_bo *query_bo, int idx)
{
if (brw->gen == 6) {
/* Emit Sandybridge workaround flush: */
@@ -104,7 +104,7 @@ brw_write_timestamp(struct brw_context *brw, drm_bacon_bo *query_bo, int idx)
* Emit PIPE_CONTROLs to write the PS_DEPTH_COUNT register into a buffer.
*/
void
-brw_write_depth_count(struct brw_context *brw, drm_bacon_bo *query_bo, int idx)
+brw_write_depth_count(struct brw_context *brw, struct brw_bo *query_bo, int idx)
{
uint32_t flags = PIPE_CONTROL_WRITE_DEPTH_COUNT | PIPE_CONTROL_DEPTH_STALL;
@@ -141,12 +141,12 @@ brw_queryobj_get_results(struct gl_context *ctx,
intel_batchbuffer_flush(brw);
if (unlikely(brw->perf_debug)) {
- if (drm_bacon_bo_busy(query->bo)) {
+ if (brw_bo_busy(query->bo)) {
perf_debug("Stalling on the GPU waiting for a query object.\n");
}
}
- drm_bacon_bo_map(query->bo, false);
+ brw_bo_map(query->bo, false);
results = query->bo->virtual;
switch (query->Base.Target) {
case GL_TIME_ELAPSED_EXT:
@@ -199,12 +199,12 @@ brw_queryobj_get_results(struct gl_context *ctx,
default:
unreachable("Unrecognized query target in brw_queryobj_get_results()");
}
- drm_bacon_bo_unmap(query->bo);
+ brw_bo_unmap(query->bo);
/* Now that we've processed the data stored in the query's buffer object,
* we can release it.
*/
- drm_bacon_bo_unreference(query->bo);
+ brw_bo_unreference(query->bo);
query->bo = NULL;
}
@@ -236,7 +236,7 @@ brw_delete_query(struct gl_context *ctx, struct gl_query_object *q)
{
struct brw_query_object *query = (struct brw_query_object *)q;
- drm_bacon_bo_unreference(query->bo);
+ brw_bo_unreference(query->bo);
free(query);
}
@@ -275,8 +275,8 @@ brw_begin_query(struct gl_context *ctx, struct gl_query_object *q)
* obtain the time elapsed. Notably, this includes time elapsed while
* the system was doing other work, such as running other applications.
*/
- drm_bacon_bo_unreference(query->bo);
- query->bo = drm_bacon_bo_alloc(brw->bufmgr, "timer query", 4096, 4096);
+ brw_bo_unreference(query->bo);
+ query->bo = brw_bo_alloc(brw->bufmgr, "timer query", 4096, 4096);
brw_write_timestamp(brw, query->bo, 0);
break;
@@ -290,7 +290,7 @@ brw_begin_query(struct gl_context *ctx, struct gl_query_object *q)
* Since we're starting a new query, we need to be sure to throw away
* any previous occlusion query results.
*/
- drm_bacon_bo_unreference(query->bo);
+ brw_bo_unreference(query->bo);
query->bo = NULL;
query->last_index = -1;
@@ -405,7 +405,7 @@ static void brw_check_query(struct gl_context *ctx, struct gl_query_object *q)
if (query->bo && brw_batch_references(&brw->batch, query->bo))
intel_batchbuffer_flush(brw);
- if (query->bo == NULL || !drm_bacon_bo_busy(query->bo)) {
+ if (query->bo == NULL || !brw_bo_busy(query->bo)) {
brw_queryobj_get_results(ctx, query);
query->Base.Ready = true;
}
@@ -434,7 +434,7 @@ ensure_bo_has_space(struct gl_context *ctx, struct brw_query_object *query)
brw_queryobj_get_results(ctx, query);
}
- query->bo = drm_bacon_bo_alloc(brw->bufmgr, "query", 4096, 1);
+ query->bo = brw_bo_alloc(brw->bufmgr, "query", 4096, 1);
query->last_index = 0;
}
}
@@ -519,8 +519,8 @@ brw_query_counter(struct gl_context *ctx, struct gl_query_object *q)
assert(q->Target == GL_TIMESTAMP);
- drm_bacon_bo_unreference(query->bo);
- query->bo = drm_bacon_bo_alloc(brw->bufmgr, "timestamp query", 4096, 4096);
+ brw_bo_unreference(query->bo);
+ query->bo = brw_bo_alloc(brw->bufmgr, "timestamp query", 4096, 4096);
brw_write_timestamp(brw, query->bo, 0);
query->flushed = false;
diff --git a/src/mesa/drivers/dri/i965/brw_state.h b/src/mesa/drivers/dri/i965/brw_state.h
index 5b5b35e7104..ec79a4e041a 100644
--- a/src/mesa/drivers/dri/i965/brw_state.h
+++ b/src/mesa/drivers/dri/i965/brw_state.h
@@ -274,7 +274,7 @@ int brw_get_texture_swizzle(const struct gl_context *ctx,
void brw_emit_buffer_surface_state(struct brw_context *brw,
uint32_t *out_offset,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
unsigned buffer_offset,
unsigned surface_format,
unsigned buffer_size,
diff --git a/src/mesa/drivers/dri/i965/brw_sync.c b/src/mesa/drivers/dri/i965/brw_sync.c
index 83a6b3d7c09..5b78503b34f 100644
--- a/src/mesa/drivers/dri/i965/brw_sync.c
+++ b/src/mesa/drivers/dri/i965/brw_sync.c
@@ -57,7 +57,7 @@ struct brw_fence {
} type;
union {
- drm_bacon_bo *batch_bo;
+ struct brw_bo *batch_bo;
/* This struct owns the fd. */
int sync_fd;
@@ -96,7 +96,7 @@ brw_fence_finish(struct brw_fence *fence)
switch (fence->type) {
case BRW_FENCE_TYPE_BO_WAIT:
if (fence->batch_bo)
- drm_bacon_bo_unreference(fence->batch_bo);
+ brw_bo_unreference(fence->batch_bo);
break;
case BRW_FENCE_TYPE_SYNC_FD:
if (fence->sync_fd != -1)
@@ -118,10 +118,10 @@ brw_fence_insert_locked(struct brw_context *brw, struct brw_fence *fence)
assert(!fence->signalled);
fence->batch_bo = brw->batch.bo;
- drm_bacon_bo_reference(fence->batch_bo);
+ brw_bo_reference(fence->batch_bo);
if (intel_batchbuffer_flush(brw) < 0) {
- drm_bacon_bo_unreference(fence->batch_bo);
+ brw_bo_unreference(fence->batch_bo);
fence->batch_bo = NULL;
return false;
}
@@ -179,10 +179,10 @@ brw_fence_has_completed_locked(struct brw_fence *fence)
return false;
}
- if (drm_bacon_bo_busy(fence->batch_bo))
+ if (brw_bo_busy(fence->batch_bo))
return false;
- drm_bacon_bo_unreference(fence->batch_bo);
+ brw_bo_unreference(fence->batch_bo);
fence->batch_bo = NULL;
fence->signalled = true;
@@ -238,11 +238,11 @@ brw_fence_client_wait_locked(struct brw_context *brw, struct brw_fence *fence,
if (timeout > INT64_MAX)
timeout = INT64_MAX;
- if (drm_bacon_gem_bo_wait(fence->batch_bo, timeout) != 0)
+ if (brw_bo_wait(fence->batch_bo, timeout) != 0)
return false;
fence->signalled = true;
- drm_bacon_bo_unreference(fence->batch_bo);
+ brw_bo_unreference(fence->batch_bo);
fence->batch_bo = NULL;
return true;
diff --git a/src/mesa/drivers/dri/i965/brw_tcs.c b/src/mesa/drivers/dri/i965/brw_tcs.c
index 00fad98f47f..3cc6cdbf3c0 100644
--- a/src/mesa/drivers/dri/i965/brw_tcs.c
+++ b/src/mesa/drivers/dri/i965/brw_tcs.c
@@ -237,7 +237,7 @@ brw_codegen_tcs_prog(struct brw_context *brw, struct brw_program *tcp,
st_index = brw_get_shader_time_index(brw, &tep->program, ST_TCS, true);
if (unlikely(brw->perf_debug)) {
- start_busy = brw->batch.last_bo && drm_bacon_bo_busy(brw->batch.last_bo);
+ start_busy = brw->batch.last_bo && brw_bo_busy(brw->batch.last_bo);
start_time = get_time();
}
@@ -267,7 +267,7 @@ brw_codegen_tcs_prog(struct brw_context *brw, struct brw_program *tcp,
tcp->compiled_once = true;
}
- if (start_busy && !drm_bacon_bo_busy(brw->batch.last_bo)) {
+ if (start_busy && !brw_bo_busy(brw->batch.last_bo)) {
perf_debug("TCS compile took %.03f ms and stalled the GPU\n",
(get_time() - start_time) * 1000);
}
diff --git a/src/mesa/drivers/dri/i965/brw_tes.c b/src/mesa/drivers/dri/i965/brw_tes.c
index 88205901b84..449f946d854 100644
--- a/src/mesa/drivers/dri/i965/brw_tes.c
+++ b/src/mesa/drivers/dri/i965/brw_tes.c
@@ -108,7 +108,7 @@ brw_codegen_tes_prog(struct brw_context *brw,
st_index = brw_get_shader_time_index(brw, &tep->program, ST_TES, true);
if (unlikely(brw->perf_debug)) {
- start_busy = brw->batch.last_bo && drm_bacon_bo_busy(brw->batch.last_bo);
+ start_busy = brw->batch.last_bo && brw_bo_busy(brw->batch.last_bo);
start_time = get_time();
}
@@ -137,7 +137,7 @@ brw_codegen_tes_prog(struct brw_context *brw,
if (tep->compiled_once) {
brw_tes_debug_recompile(brw, &tep->program, key);
}
- if (start_busy && !drm_bacon_bo_busy(brw->batch.last_bo)) {
+ if (start_busy && !brw_bo_busy(brw->batch.last_bo)) {
perf_debug("TES compile took %.03f ms and stalled the GPU\n",
(get_time() - start_time) * 1000);
}
diff --git a/src/mesa/drivers/dri/i965/brw_vs.c b/src/mesa/drivers/dri/i965/brw_vs.c
index 4212ccbdc67..74b07cb3ccc 100644
--- a/src/mesa/drivers/dri/i965/brw_vs.c
+++ b/src/mesa/drivers/dri/i965/brw_vs.c
@@ -227,7 +227,7 @@ brw_codegen_vs_prog(struct brw_context *brw,
if (unlikely(brw->perf_debug)) {
start_busy = (brw->batch.last_bo &&
- drm_bacon_bo_busy(brw->batch.last_bo));
+ brw_bo_busy(brw->batch.last_bo));
start_time = get_time();
}
@@ -266,7 +266,7 @@ brw_codegen_vs_prog(struct brw_context *brw,
if (vp->compiled_once) {
brw_vs_debug_recompile(brw, &vp->program, key);
}
- if (start_busy && !drm_bacon_bo_busy(brw->batch.last_bo)) {
+ if (start_busy && !brw_bo_busy(brw->batch.last_bo)) {
perf_debug("VS compile took %.03f ms and stalled the GPU\n",
(get_time() - start_time) * 1000);
}
diff --git a/src/mesa/drivers/dri/i965/brw_vs_surface_state.c b/src/mesa/drivers/dri/i965/brw_vs_surface_state.c
index 521c5324242..016da7456af 100644
--- a/src/mesa/drivers/dri/i965/brw_vs_surface_state.c
+++ b/src/mesa/drivers/dri/i965/brw_vs_surface_state.c
@@ -74,7 +74,7 @@ brw_upload_pull_constants(struct brw_context *brw,
/* BRW_NEW_*_PROG_DATA | _NEW_PROGRAM_CONSTANTS */
uint32_t size = prog_data->nr_pull_params * 4;
- drm_bacon_bo *const_bo = NULL;
+ struct brw_bo *const_bo = NULL;
uint32_t const_offset;
gl_constant_value *constants = intel_upload_space(brw, size, 64,
&const_bo, &const_offset);
@@ -95,7 +95,7 @@ brw_upload_pull_constants(struct brw_context *brw,
brw_create_constant_surface(brw, const_bo, const_offset, size,
&stage_state->surf_offset[surf_index]);
- drm_bacon_bo_unreference(const_bo);
+ brw_bo_unreference(const_bo);
brw->ctx.NewDriverState |= brw_new_constbuf;
}
diff --git a/src/mesa/drivers/dri/i965/brw_wm.c b/src/mesa/drivers/dri/i965/brw_wm.c
index bedfec13f23..59d503e746b 100644
--- a/src/mesa/drivers/dri/i965/brw_wm.c
+++ b/src/mesa/drivers/dri/i965/brw_wm.c
@@ -172,7 +172,7 @@ brw_codegen_wm_prog(struct brw_context *brw,
if (unlikely(brw->perf_debug)) {
start_busy = (brw->batch.last_bo &&
- drm_bacon_bo_busy(brw->batch.last_bo));
+ brw_bo_busy(brw->batch.last_bo));
start_time = get_time();
}
@@ -208,7 +208,7 @@ brw_codegen_wm_prog(struct brw_context *brw,
brw_wm_debug_recompile(brw, &fp->program, key);
fp->compiled_once = true;
- if (start_busy && !drm_bacon_bo_busy(brw->batch.last_bo)) {
+ if (start_busy && !brw_bo_busy(brw->batch.last_bo)) {
perf_debug("FS compile took %.03f ms and stalled the GPU\n",
(get_time() - start_time) * 1000);
}
diff --git a/src/mesa/drivers/dri/i965/brw_wm_surface_state.c b/src/mesa/drivers/dri/i965/brw_wm_surface_state.c
index c5824844bdc..49383c7463b 100644
--- a/src/mesa/drivers/dri/i965/brw_wm_surface_state.c
+++ b/src/mesa/drivers/dri/i965/brw_wm_surface_state.c
@@ -133,7 +133,7 @@ brw_emit_surface_state(struct brw_context *brw,
union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
- drm_bacon_bo *aux_bo;
+ struct brw_bo *aux_bo;
struct isl_surf *aux_surf = NULL, aux_surf_s;
uint64_t aux_offset = 0;
enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
@@ -645,7 +645,7 @@ brw_update_texture_surface(struct gl_context *ctx,
void
brw_emit_buffer_surface_state(struct brw_context *brw,
uint32_t *out_offset,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
unsigned buffer_offset,
unsigned surface_format,
unsigned buffer_size,
@@ -682,7 +682,7 @@ brw_update_buffer_texture_surface(struct gl_context *ctx,
struct intel_buffer_object *intel_obj =
intel_buffer_object(tObj->BufferObject);
uint32_t size = tObj->BufferSize;
- drm_bacon_bo *bo = NULL;
+ struct brw_bo *bo = NULL;
mesa_format format = tObj->_BufferObjectFormat;
uint32_t brw_format = brw_isl_format_for_mesa_format(format);
int texel_size = _mesa_get_format_bytes(format);
@@ -729,7 +729,7 @@ brw_update_buffer_texture_surface(struct gl_context *ctx,
*/
void
brw_create_constant_surface(struct brw_context *brw,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t offset,
uint32_t size,
uint32_t *out_offset)
@@ -746,7 +746,7 @@ brw_create_constant_surface(struct brw_context *brw,
*/
void
brw_create_buffer_surface(struct brw_context *brw,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t offset,
uint32_t size,
uint32_t *out_offset)
@@ -775,7 +775,7 @@ brw_update_sol_surface(struct brw_context *brw,
{
struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
uint32_t offset_bytes = 4 * offset_dwords;
- drm_bacon_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
+ struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
offset_bytes,
buffer_obj->Size - offset_bytes);
uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
@@ -909,7 +909,7 @@ brw_emit_null_surface_state(struct brw_context *brw,
* - Surface Format must be R8G8B8A8_UNORM.
*/
unsigned surface_type = BRW_SURFACE_NULL;
- drm_bacon_bo *bo = NULL;
+ struct brw_bo *bo = NULL;
unsigned pitch_minus_1 = 0;
uint32_t multisampling_state = 0;
uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
@@ -1399,7 +1399,7 @@ brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
if (!binding->AutomaticSize)
size = MIN2(size, binding->Size);
- drm_bacon_bo *bo =
+ struct brw_bo *bo =
intel_bufferobj_buffer(brw, intel_bo,
binding->Offset,
size);
@@ -1424,7 +1424,7 @@ brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
if (!binding->AutomaticSize)
size = MIN2(size, binding->Size);
- drm_bacon_bo *bo =
+ struct brw_bo *bo =
intel_bufferobj_buffer(brw, intel_bo,
binding->Offset,
size);
@@ -1499,7 +1499,7 @@ brw_upload_abo_surfaces(struct brw_context *brw,
&ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
struct intel_buffer_object *intel_bo =
intel_buffer_object(binding->BufferObject);
- drm_bacon_bo *bo = intel_bufferobj_buffer(
+ struct brw_bo *bo = intel_bufferobj_buffer(
brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
@@ -1854,7 +1854,7 @@ brw_upload_cs_work_groups_surface(struct brw_context *brw)
const unsigned surf_idx =
cs_prog_data->binding_table.work_groups_start;
uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
uint32_t bo_offset;
if (brw->compute.num_work_groups_bo == NULL) {
diff --git a/src/mesa/drivers/dri/i965/gen6_queryobj.c b/src/mesa/drivers/dri/i965/gen6_queryobj.c
index d8edc079174..7295461c371 100644
--- a/src/mesa/drivers/dri/i965/gen6_queryobj.c
+++ b/src/mesa/drivers/dri/i965/gen6_queryobj.c
@@ -69,7 +69,7 @@ set_query_availability(struct brw_context *brw, struct brw_query_object *query,
static void
write_primitives_generated(struct brw_context *brw,
- drm_bacon_bo *query_bo, int stream, int idx)
+ struct brw_bo *query_bo, int stream, int idx)
{
brw_emit_mi_flush(brw);
@@ -85,7 +85,7 @@ write_primitives_generated(struct brw_context *brw,
static void
write_xfb_primitives_written(struct brw_context *brw,
- drm_bacon_bo *bo, int stream, int idx)
+ struct brw_bo *bo, int stream, int idx)
{
brw_emit_mi_flush(brw);
@@ -100,7 +100,7 @@ write_xfb_primitives_written(struct brw_context *brw,
static void
write_xfb_overflow_streams(struct gl_context *ctx,
- drm_bacon_bo *bo, int stream, int count,
+ struct brw_bo *bo, int stream, int count,
int idx)
{
struct brw_context *brw = brw_context(ctx);
@@ -156,7 +156,7 @@ pipeline_target_to_index(int target)
}
static void
-emit_pipeline_stat(struct brw_context *brw, drm_bacon_bo *bo,
+emit_pipeline_stat(struct brw_context *brw, struct brw_bo *bo,
int stream, int target, int idx)
{
/* One source of confusion is the tessellation shader statistics. The
@@ -212,7 +212,7 @@ gen6_queryobj_get_results(struct gl_context *ctx,
if (query->bo == NULL)
return;
- drm_bacon_bo_map(query->bo, false);
+ brw_bo_map(query->bo, false);
uint64_t *results = query->bo->virtual;
switch (query->Base.Target) {
case GL_TIME_ELAPSED:
@@ -288,12 +288,12 @@ gen6_queryobj_get_results(struct gl_context *ctx,
default:
unreachable("Unrecognized query target in brw_queryobj_get_results()");
}
- drm_bacon_bo_unmap(query->bo);
+ brw_bo_unmap(query->bo);
/* Now that we've processed the data stored in the query's buffer object,
* we can release it.
*/
- drm_bacon_bo_unreference(query->bo);
+ brw_bo_unreference(query->bo);
query->bo = NULL;
query->Base.Ready = true;
@@ -312,8 +312,8 @@ gen6_begin_query(struct gl_context *ctx, struct gl_query_object *q)
struct brw_query_object *query = (struct brw_query_object *)q;
/* Since we're starting a new query, we need to throw away old results. */
- drm_bacon_bo_unreference(query->bo);
- query->bo = drm_bacon_bo_alloc(brw->bufmgr, "query results", 4096, 4096);
+ brw_bo_unreference(query->bo);
+ query->bo = brw_bo_alloc(brw->bufmgr, "query results", 4096, 4096);
/* For ARB_query_buffer_object: The result is not available */
set_query_availability(brw, query, false);
@@ -519,7 +519,7 @@ static void gen6_check_query(struct gl_context *ctx, struct gl_query_object *q)
*/
flush_batch_if_needed(brw, query);
- if (!drm_bacon_bo_busy(query->bo)) {
+ if (!brw_bo_busy(query->bo)) {
gen6_queryobj_get_results(ctx, query);
}
}
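
gen6_check_query() above is the non-stalling half of the query protocol: flush if the batch still references the BO, then harvest results only once the BO has gone idle. Condensed (a sketch; flush_batch_if_needed() is the helper named in the hunk):

   if (brw_batch_references(&brw->batch, query->bo))
      intel_batchbuffer_flush(brw);          /* otherwise it would never go idle */
   if (!brw_bo_busy(query->bo))
      gen6_queryobj_get_results(ctx, query); /* maps, reads, and frees query->bo */
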
diff --git a/src/mesa/drivers/dri/i965/gen6_sol.c b/src/mesa/drivers/dri/i965/gen6_sol.c
index f7b53b20501..d69248ff20c 100644
--- a/src/mesa/drivers/dri/i965/gen6_sol.c
+++ b/src/mesa/drivers/dri/i965/gen6_sol.c
@@ -194,9 +194,9 @@ brw_new_transform_feedback(struct gl_context *ctx, GLuint name)
_mesa_init_transform_feedback_object(&brw_obj->base, name);
brw_obj->offset_bo =
- drm_bacon_bo_alloc(brw->bufmgr, "transform feedback offsets", 16, 64);
+ brw_bo_alloc(brw->bufmgr, "transform feedback offsets", 16, 64);
brw_obj->prim_count_bo =
- drm_bacon_bo_alloc(brw->bufmgr, "xfb primitive counts", 4096, 64);
+ brw_bo_alloc(brw->bufmgr, "xfb primitive counts", 4096, 64);
return &brw_obj->base;
}
@@ -212,8 +212,8 @@ brw_delete_transform_feedback(struct gl_context *ctx,
_mesa_reference_buffer_object(ctx, &obj->Buffers[i], NULL);
}
- drm_bacon_bo_unreference(brw_obj->offset_bo);
- drm_bacon_bo_unreference(brw_obj->prim_count_bo);
+ brw_bo_unreference(brw_obj->offset_bo);
+ brw_bo_unreference(brw_obj->prim_count_bo);
free(brw_obj);
}
@@ -244,10 +244,10 @@ tally_prims_generated(struct brw_context *brw,
if (brw_batch_references(&brw->batch, obj->prim_count_bo))
intel_batchbuffer_flush(brw);
- if (unlikely(brw->perf_debug && drm_bacon_bo_busy(obj->prim_count_bo)))
+ if (unlikely(brw->perf_debug && brw_bo_busy(obj->prim_count_bo)))
perf_debug("Stalling for # of transform feedback primitives written.\n");
- drm_bacon_bo_map(obj->prim_count_bo, false);
+ brw_bo_map(obj->prim_count_bo, false);
uint64_t *prim_counts = obj->prim_count_bo->virtual;
assert(obj->prim_count_buffer_index % (2 * streams) == 0);
@@ -260,7 +260,7 @@ tally_prims_generated(struct brw_context *brw,
prim_counts += 2 * streams; /* move to the next pair */
}
- drm_bacon_bo_unmap(obj->prim_count_bo);
+ brw_bo_unmap(obj->prim_count_bo);
/* We've already gathered up the old data; we can safely overwrite it now. */
obj->prim_count_buffer_index = 0;
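
tally_prims_generated() is the stalling counterpart: when the counts are actually needed it flushes, then maps, and the map itself blocks until the GPU has written the snapshots. The ordering reads as (a sketch; consume() is a hypothetical stand-in for the begin/end subtraction loop above):

   if (brw_batch_references(&brw->batch, obj->prim_count_bo))
      intel_batchbuffer_flush(brw);          /* submit the pending snapshot writes */
   brw_bo_map(obj->prim_count_bo, false);    /* implicit sync: waits for the GPU */
   consume(obj->prim_count_bo->virtual);     /* hypothetical: tally begin/end pairs */
   brw_bo_unmap(obj->prim_count_bo);
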
diff --git a/src/mesa/drivers/dri/i965/gen7_sol_state.c b/src/mesa/drivers/dri/i965/gen7_sol_state.c
index 9c8e1a059c6..f1bd19c24f0 100644
--- a/src/mesa/drivers/dri/i965/gen7_sol_state.c
+++ b/src/mesa/drivers/dri/i965/gen7_sol_state.c
@@ -52,7 +52,7 @@ upload_3dstate_so_buffers(struct brw_context *brw)
for (i = 0; i < 4; i++) {
struct intel_buffer_object *bufferobj =
intel_buffer_object(xfb_obj->Buffers[i]);
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
uint32_t start, end;
uint32_t stride;
diff --git a/src/mesa/drivers/dri/i965/gen8_sol_state.c b/src/mesa/drivers/dri/i965/gen8_sol_state.c
index b161463bac7..6866539c3b2 100644
--- a/src/mesa/drivers/dri/i965/gen8_sol_state.c
+++ b/src/mesa/drivers/dri/i965/gen8_sol_state.c
@@ -70,7 +70,7 @@ gen8_upload_3dstate_so_buffers(struct brw_context *brw)
uint32_t start = xfb_obj->Offset[i];
assert(start % 4 == 0);
uint32_t end = ALIGN(start + xfb_obj->Size[i], 4);
- drm_bacon_bo *bo =
+ struct brw_bo *bo =
intel_bufferobj_buffer(brw, bufferobj, start, end - start);
assert(end <= bo->size);
diff --git a/src/mesa/drivers/dri/i965/genX_blorp_exec.c b/src/mesa/drivers/dri/i965/genX_blorp_exec.c
index 5c99841568e..3931b8ceee0 100644
--- a/src/mesa/drivers/dri/i965/genX_blorp_exec.c
+++ b/src/mesa/drivers/dri/i965/genX_blorp_exec.c
@@ -67,7 +67,7 @@ blorp_surface_reloc(struct blorp_batch *batch, uint32_t ss_offset,
{
assert(batch->blorp->driver_ctx == batch->driver_batch);
struct brw_context *brw = batch->driver_batch;
- drm_bacon_bo *bo = address.buffer;
+ struct brw_bo *bo = address.buffer;
brw_emit_reloc(&brw->batch, ss_offset, bo, address.offset + delta,
address.read_domains, address.write_domain);
@@ -199,7 +199,7 @@ genX(blorp_exec)(struct blorp_batch *batch,
retry:
intel_batchbuffer_require_space(brw, estimated_max_batch_usage, RENDER_RING);
intel_batchbuffer_save_state(brw);
- drm_bacon_bo *saved_bo = brw->batch.bo;
+ struct brw_bo *saved_bo = brw->batch.bo;
uint32_t saved_used = USED_BATCH(brw->batch);
uint32_t saved_state_batch_offset = brw->batch.state_batch_offset;
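
The retry: fragment in genX(blorp_exec) is the usual aperture-overflow dance: remember where the batch stood, emit, and if the emission wrapped to a new batch BO or blew the aperture budget, roll everything back and retry into a fresh batch. Schematically (a sketch; the exact overflow test is abbreviated here):

   retry:
      intel_batchbuffer_require_space(brw, estimated_max_batch_usage, RENDER_RING);
      intel_batchbuffer_save_state(brw);
      struct brw_bo *saved_bo = brw->batch.bo;
      /* ... emit blorp commands and state ... */
      if (saved_bo != brw->batch.bo ||
          !brw_batch_has_aperture_space(brw, 0)) {
         intel_batchbuffer_reset_to_saved(brw);
         intel_batchbuffer_flush(brw);
         goto retry;
      }
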
diff --git a/src/mesa/drivers/dri/i965/hsw_queryobj.c b/src/mesa/drivers/dri/i965/hsw_queryobj.c
index dd51c08c64e..b81ab3b6f88 100644
--- a/src/mesa/drivers/dri/i965/hsw_queryobj.c
+++ b/src/mesa/drivers/dri/i965/hsw_queryobj.c
@@ -393,7 +393,7 @@ hsw_result_to_gpr0(struct gl_context *ctx, struct brw_query_object *query,
* Store immediate data into the user buffer using the requested size.
*/
static void
-store_query_result_imm(struct brw_context *brw, drm_bacon_bo *bo,
+store_query_result_imm(struct brw_context *brw, struct brw_bo *bo,
uint32_t offset, GLenum ptype, uint64_t imm)
{
switch (ptype) {
@@ -411,7 +411,7 @@ store_query_result_imm(struct brw_context *brw, drm_bacon_bo *bo,
}
static void
-set_predicate(struct brw_context *brw, drm_bacon_bo *query_bo)
+set_predicate(struct brw_context *brw, struct brw_bo *query_bo)
{
brw_load_register_imm64(brw, MI_PREDICATE_SRC1, 0ull);
@@ -435,7 +435,7 @@ set_predicate(struct brw_context *brw, drm_bacon_bo *query_bo)
* query has not finished yet.
*/
static void
-store_query_result_reg(struct brw_context *brw, drm_bacon_bo *bo,
+store_query_result_reg(struct brw_context *brw, struct brw_bo *bo,
uint32_t offset, GLenum ptype, uint32_t reg,
const bool pipelined)
{
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.c b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
index a75ea96fb4c..4c653341506 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
@@ -93,14 +93,14 @@ intel_batchbuffer_reset(struct intel_batchbuffer *batch,
bool has_llc)
{
if (batch->last_bo != NULL) {
- drm_bacon_bo_unreference(batch->last_bo);
+ brw_bo_unreference(batch->last_bo);
batch->last_bo = NULL;
}
batch->last_bo = batch->bo;
- batch->bo = drm_bacon_bo_alloc(bufmgr, "batchbuffer", BATCH_SZ, 4096);
+ batch->bo = brw_bo_alloc(bufmgr, "batchbuffer", BATCH_SZ, 4096);
if (has_llc) {
- drm_bacon_bo_map(batch->bo, true);
+ brw_bo_map(batch->bo, true);
batch->map = batch->bo->virtual;
}
batch->map_next = batch->map;
@@ -140,7 +140,7 @@ intel_batchbuffer_reset_to_saved(struct brw_context *brw)
for (int i = brw->batch.saved.exec_count;
i < brw->batch.exec_count; i++) {
if (brw->batch.exec_bos[i] != brw->batch.bo) {
- drm_bacon_bo_unreference(brw->batch.exec_bos[i]);
+ brw_bo_unreference(brw->batch.exec_bos[i]);
}
}
brw->batch.reloc_count = brw->batch.saved.reloc_count;
@@ -158,15 +158,15 @@ intel_batchbuffer_free(struct intel_batchbuffer *batch)
for (int i = 0; i < batch->exec_count; i++) {
if (batch->exec_bos[i] != batch->bo) {
- drm_bacon_bo_unreference(batch->exec_bos[i]);
+ brw_bo_unreference(batch->exec_bos[i]);
}
}
free(batch->relocs);
free(batch->exec_bos);
free(batch->exec_objects);
- drm_bacon_bo_unreference(batch->last_bo);
- drm_bacon_bo_unreference(batch->bo);
+ brw_bo_unreference(batch->last_bo);
+ brw_bo_unreference(batch->bo);
if (batch->state_batch_sizes)
_mesa_hash_table_destroy(batch->state_batch_sizes, NULL);
}
@@ -240,7 +240,7 @@ do_batch_dump(struct brw_context *brw)
if (batch->ring != RENDER_RING)
return;
- int ret = drm_bacon_bo_map(batch->bo, false);
+ int ret = brw_bo_map(batch->bo, false);
if (ret != 0) {
fprintf(stderr,
"WARNING: failed to map batchbuffer (%s), "
@@ -351,7 +351,7 @@ do_batch_dump(struct brw_context *brw)
}
if (ret == 0) {
- drm_bacon_bo_unmap(batch->bo);
+ brw_bo_unmap(batch->bo);
}
}
#else
@@ -367,7 +367,7 @@ brw_new_batch(struct brw_context *brw)
/* Unreference any BOs held by the previous batch, and reset counts. */
for (int i = 0; i < brw->batch.exec_count; i++) {
if (brw->batch.exec_bos[i] != brw->batch.bo) {
- drm_bacon_bo_unreference(brw->batch.exec_bos[i]);
+ brw_bo_unreference(brw->batch.exec_bos[i]);
}
brw->batch.exec_bos[i] = NULL;
}
@@ -475,8 +475,8 @@ throttle(struct brw_context *brw)
if (brw->need_swap_throttle && brw->throttle_batch[0]) {
if (brw->throttle_batch[1]) {
if (!brw->disable_throttling)
- drm_bacon_bo_wait_rendering(brw->throttle_batch[1]);
- drm_bacon_bo_unreference(brw->throttle_batch[1]);
+ brw_bo_wait_rendering(brw->throttle_batch[1]);
+ brw_bo_unreference(brw->throttle_batch[1]);
}
brw->throttle_batch[1] = brw->throttle_batch[0];
brw->throttle_batch[0] = NULL;
@@ -493,7 +493,7 @@ throttle(struct brw_context *brw)
}
static void
-add_exec_bo(struct intel_batchbuffer *batch, drm_bacon_bo *bo)
+add_exec_bo(struct intel_batchbuffer *batch, struct brw_bo *bo)
{
if (bo != batch->bo) {
for (int i = 0; i < batch->exec_count; i++) {
@@ -501,7 +501,7 @@ add_exec_bo(struct intel_batchbuffer *batch, drm_bacon_bo *bo)
return;
}
- drm_bacon_bo_reference(bo);
+ brw_bo_reference(bo);
}
if (batch->exec_count == batch->exec_array_size) {
@@ -571,11 +571,11 @@ execbuffer(int fd,
ret = -errno;
for (int i = 0; i < batch->exec_count; i++) {
- drm_bacon_bo *bo = batch->exec_bos[i];
+ struct brw_bo *bo = batch->exec_bos[i];
bo->idle = false;
- /* Update drm_bacon_bo::offset64 */
+ /* Update brw_bo::offset64 */
if (batch->exec_objects[i].offset != bo->offset64) {
DBG("BO %d migrated: 0x%" PRIx64 " -> 0x%llx\n",
bo->gem_handle, bo->offset64, batch->exec_objects[i].offset);
@@ -597,11 +597,11 @@ do_flush_locked(struct brw_context *brw, int in_fence_fd, int *out_fence_fd)
int ret = 0;
if (brw->has_llc) {
- drm_bacon_bo_unmap(batch->bo);
+ brw_bo_unmap(batch->bo);
} else {
- ret = drm_bacon_bo_subdata(batch->bo, 0, 4 * USED_BATCH(*batch), batch->map);
+ ret = brw_bo_subdata(batch->bo, 0, 4 * USED_BATCH(*batch), batch->map);
if (ret == 0 && batch->state_batch_offset != batch->bo->size) {
- ret = drm_bacon_bo_subdata(batch->bo,
+ ret = brw_bo_subdata(batch->bo,
batch->state_batch_offset,
batch->bo->size - batch->state_batch_offset,
(char *)batch->map + batch->state_batch_offset);
@@ -666,7 +666,7 @@ _intel_batchbuffer_flush_fence(struct brw_context *brw,
if (brw->throttle_batch[0] == NULL) {
brw->throttle_batch[0] = brw->batch.bo;
- drm_bacon_bo_reference(brw->throttle_batch[0]);
+ brw_bo_reference(brw->throttle_batch[0]);
}
if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
@@ -700,7 +700,7 @@ _intel_batchbuffer_flush_fence(struct brw_context *brw,
if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
fprintf(stderr, "waiting for idle\n");
- drm_bacon_bo_wait_rendering(brw->batch.bo);
+ brw_bo_wait_rendering(brw->batch.bo);
}
/* Start a new batch buffer. */
@@ -717,7 +717,7 @@ brw_batch_has_aperture_space(struct brw_context *brw, unsigned extra_space)
}
bool
-brw_batch_references(struct intel_batchbuffer *batch, drm_bacon_bo *bo)
+brw_batch_references(struct intel_batchbuffer *batch, struct brw_bo *bo)
{
for (int i = 0; i < batch->exec_count; i++) {
if (batch->exec_bos[i] == bo)
@@ -730,7 +730,7 @@ brw_batch_references(struct intel_batchbuffer *batch, drm_bacon_bo *bo)
*/
uint64_t
brw_emit_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
- drm_bacon_bo *target, uint32_t target_offset,
+ struct brw_bo *target, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain)
{
if (batch->reloc_count == batch->reloc_array_size) {
@@ -779,7 +779,7 @@ intel_batchbuffer_data(struct brw_context *brw,
static void
load_sized_register_mem(struct brw_context *brw,
uint32_t reg,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t read_domains, uint32_t write_domain,
uint32_t offset,
int size)
@@ -811,7 +811,7 @@ load_sized_register_mem(struct brw_context *brw,
void
brw_load_register_mem(struct brw_context *brw,
uint32_t reg,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t read_domains, uint32_t write_domain,
uint32_t offset)
{
@@ -821,7 +821,7 @@ brw_load_register_mem(struct brw_context *brw,
void
brw_load_register_mem64(struct brw_context *brw,
uint32_t reg,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t read_domains, uint32_t write_domain,
uint32_t offset)
{
@@ -833,7 +833,7 @@ brw_load_register_mem64(struct brw_context *brw,
*/
void
brw_store_register_mem32(struct brw_context *brw,
- drm_bacon_bo *bo, uint32_t reg, uint32_t offset)
+ struct brw_bo *bo, uint32_t reg, uint32_t offset)
{
assert(brw->gen >= 6);
@@ -859,7 +859,7 @@ brw_store_register_mem32(struct brw_context *brw,
*/
void
brw_store_register_mem64(struct brw_context *brw,
- drm_bacon_bo *bo, uint32_t reg, uint32_t offset)
+ struct brw_bo *bo, uint32_t reg, uint32_t offset)
{
assert(brw->gen >= 6);
@@ -960,7 +960,7 @@ brw_load_register_reg64(struct brw_context *brw, uint32_t src, uint32_t dest)
* Write 32-bits of immediate data to a GPU memory buffer.
*/
void
-brw_store_data_imm32(struct brw_context *brw, drm_bacon_bo *bo,
+brw_store_data_imm32(struct brw_context *brw, struct brw_bo *bo,
uint32_t offset, uint32_t imm)
{
assert(brw->gen >= 6);
@@ -983,7 +983,7 @@ brw_store_data_imm32(struct brw_context *brw, drm_bacon_bo *bo,
* Write 64-bits of immediate data to a GPU memory buffer.
*/
void
-brw_store_data_imm64(struct brw_context *brw, drm_bacon_bo *bo,
+brw_store_data_imm64(struct brw_context *brw, struct brw_bo *bo,
uint32_t offset, uint64_t imm)
{
assert(brw->gen >= 6);
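
The throttle() hunk above preserves the swap-throttle window: the driver retains the batch BOs of the last two frames and, before starting another, waits for the older one to retire, keeping at most roughly two frames in flight. Under the new names (sketch):

   if (brw->throttle_batch[1]) {
      if (!brw->disable_throttling)
         brw_bo_wait_rendering(brw->throttle_batch[1]); /* frame N-2 retires */
      brw_bo_unreference(brw->throttle_batch[1]);
   }
   brw->throttle_batch[1] = brw->throttle_batch[0];     /* slide the window */
   brw->throttle_batch[0] = NULL;           /* re-pinned at the next flush */
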
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.h b/src/mesa/drivers/dri/i965/intel_batchbuffer.h
index 4d7da02e043..2783ba3c0fb 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.h
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.h
@@ -68,10 +68,10 @@ void intel_batchbuffer_data(struct brw_context *brw,
bool brw_batch_has_aperture_space(struct brw_context *brw,
unsigned extra_space_in_bytes);
-bool brw_batch_references(struct intel_batchbuffer *batch, drm_bacon_bo *bo);
+bool brw_batch_references(struct intel_batchbuffer *batch, struct brw_bo *bo);
uint64_t brw_emit_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
- drm_bacon_bo *target, uint32_t target_offset,
+ struct brw_bo *target, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain);
static inline uint32_t
diff --git a/src/mesa/drivers/dri/i965/intel_blit.c b/src/mesa/drivers/dri/i965/intel_blit.c
index ebd4c529e9f..568ed541528 100644
--- a/src/mesa/drivers/dri/i965/intel_blit.c
+++ b/src/mesa/drivers/dri/i965/intel_blit.c
@@ -486,11 +486,11 @@ bool
intelEmitCopyBlit(struct brw_context *brw,
GLuint cpp,
int32_t src_pitch,
- drm_bacon_bo *src_buffer,
+ struct brw_bo *src_buffer,
GLuint src_offset,
uint32_t src_tiling,
int32_t dst_pitch,
- drm_bacon_bo *dst_buffer,
+ struct brw_bo *dst_buffer,
GLuint dst_offset,
uint32_t dst_tiling,
GLshort src_x, GLshort src_y,
@@ -625,7 +625,7 @@ intelEmitImmediateColorExpandBlit(struct brw_context *brw,
GLubyte *src_bits, GLuint src_size,
GLuint fg_color,
GLshort dst_pitch,
- drm_bacon_bo *dst_buffer,
+ struct brw_bo *dst_buffer,
GLuint dst_offset,
uint32_t dst_tiling,
GLshort x, GLshort y,
@@ -709,9 +709,9 @@ intelEmitImmediateColorExpandBlit(struct brw_context *brw,
*/
void
intel_emit_linear_blit(struct brw_context *brw,
- drm_bacon_bo *dst_bo,
+ struct brw_bo *dst_bo,
unsigned int dst_offset,
- drm_bacon_bo *src_bo,
+ struct brw_bo *src_bo,
unsigned int src_offset,
unsigned int size)
{
diff --git a/src/mesa/drivers/dri/i965/intel_blit.h b/src/mesa/drivers/dri/i965/intel_blit.h
index bd6486f0546..2604417e2d5 100644
--- a/src/mesa/drivers/dri/i965/intel_blit.h
+++ b/src/mesa/drivers/dri/i965/intel_blit.h
@@ -32,11 +32,11 @@ bool
intelEmitCopyBlit(struct brw_context *brw,
GLuint cpp,
int32_t src_pitch,
- drm_bacon_bo *src_buffer,
+ struct brw_bo *src_buffer,
GLuint src_offset,
uint32_t src_tiling,
int32_t dst_pitch,
- drm_bacon_bo *dst_buffer,
+ struct brw_bo *dst_buffer,
GLuint dst_offset,
uint32_t dst_tiling,
GLshort srcx, GLshort srcy,
@@ -71,16 +71,16 @@ intelEmitImmediateColorExpandBlit(struct brw_context *brw,
GLubyte *src_bits, GLuint src_size,
GLuint fg_color,
GLshort dst_pitch,
- drm_bacon_bo *dst_buffer,
+ struct brw_bo *dst_buffer,
GLuint dst_offset,
uint32_t dst_tiling,
GLshort x, GLshort y,
GLshort w, GLshort h,
GLenum logic_op);
void intel_emit_linear_blit(struct brw_context *brw,
- drm_bacon_bo *dst_bo,
+ struct brw_bo *dst_bo,
unsigned int dst_offset,
- drm_bacon_bo *src_bo,
+ struct brw_bo *src_bo,
unsigned int src_offset,
unsigned int size);
diff --git a/src/mesa/drivers/dri/i965/intel_buffer_objects.c b/src/mesa/drivers/dri/i965/intel_buffer_objects.c
index e9bd8a095c9..e0cef91dbab 100644
--- a/src/mesa/drivers/dri/i965/intel_buffer_objects.c
+++ b/src/mesa/drivers/dri/i965/intel_buffer_objects.c
@@ -54,12 +54,12 @@ mark_buffer_inactive(struct intel_buffer_object *intel_obj)
intel_obj->gpu_active_end = 0;
}
-/** Allocates a new drm_bacon_bo to store the data for the buffer object. */
+/** Allocates a new brw_bo to store the data for the buffer object. */
static void
alloc_buffer_object(struct brw_context *brw,
struct intel_buffer_object *intel_obj)
{
- intel_obj->buffer = drm_bacon_bo_alloc(brw->bufmgr, "bufferobj",
+ intel_obj->buffer = brw_bo_alloc(brw->bufmgr, "bufferobj",
intel_obj->Base.Size, 64);
/* the buffer might be bound as a uniform buffer, need to update it
@@ -79,7 +79,7 @@ alloc_buffer_object(struct brw_context *brw,
static void
release_buffer(struct intel_buffer_object *intel_obj)
{
- drm_bacon_bo_unreference(intel_obj->buffer);
+ brw_bo_unreference(intel_obj->buffer);
intel_obj->buffer = NULL;
}
@@ -126,7 +126,7 @@ brw_delete_buffer(struct gl_context * ctx, struct gl_buffer_object *obj)
*/
_mesa_buffer_unmap_all_mappings(ctx, obj);
- drm_bacon_bo_unreference(intel_obj->buffer);
+ brw_bo_unreference(intel_obj->buffer);
_mesa_delete_buffer_object(ctx, obj);
}
@@ -173,7 +173,7 @@ brw_buffer_data(struct gl_context *ctx,
return false;
if (data != NULL)
- drm_bacon_bo_subdata(intel_obj->buffer, 0, size, data);
+ brw_bo_subdata(intel_obj->buffer, 0, size, data);
}
return true;
@@ -217,9 +217,9 @@ brw_buffer_subdata(struct gl_context *ctx,
if (offset + size <= intel_obj->gpu_active_start ||
intel_obj->gpu_active_end <= offset) {
if (brw->has_llc) {
- drm_bacon_gem_bo_map_unsynchronized(intel_obj->buffer);
+ brw_bo_map_unsynchronized(intel_obj->buffer);
memcpy(intel_obj->buffer->virtual + offset, data, size);
- drm_bacon_bo_unmap(intel_obj->buffer);
+ brw_bo_unmap(intel_obj->buffer);
if (intel_obj->gpu_active_end > intel_obj->gpu_active_start)
intel_obj->prefer_stall_to_blit = true;
@@ -230,13 +230,13 @@ brw_buffer_subdata(struct gl_context *ctx,
}
busy =
- drm_bacon_bo_busy(intel_obj->buffer) ||
+ brw_bo_busy(intel_obj->buffer) ||
brw_batch_references(&brw->batch, intel_obj->buffer);
if (busy) {
if (size == intel_obj->Base.Size) {
/* Replace the current busy bo so the subdata doesn't stall. */
- drm_bacon_bo_unreference(intel_obj->buffer);
+ brw_bo_unreference(intel_obj->buffer);
alloc_buffer_object(brw, intel_obj);
} else if (!intel_obj->prefer_stall_to_blit) {
perf_debug("Using a blit copy to avoid stalling on "
@@ -245,17 +245,17 @@ brw_buffer_subdata(struct gl_context *ctx,
(long)offset, (long)offset + size, (long)(size/1024),
intel_obj->gpu_active_start,
intel_obj->gpu_active_end);
- drm_bacon_bo *temp_bo =
- drm_bacon_bo_alloc(brw->bufmgr, "subdata temp", size, 64);
+ struct brw_bo *temp_bo =
+ brw_bo_alloc(brw->bufmgr, "subdata temp", size, 64);
- drm_bacon_bo_subdata(temp_bo, 0, size, data);
+ brw_bo_subdata(temp_bo, 0, size, data);
intel_emit_linear_blit(brw,
intel_obj->buffer, offset,
temp_bo, 0,
size);
- drm_bacon_bo_unreference(temp_bo);
+ brw_bo_unreference(temp_bo);
return;
} else {
perf_debug("Stalling on glBufferSubData(%ld, %ld) (%ldkb) to a busy "
@@ -268,7 +268,7 @@ brw_buffer_subdata(struct gl_context *ctx,
}
}
- drm_bacon_bo_subdata(intel_obj->buffer, offset, size, data);
+ brw_bo_subdata(intel_obj->buffer, offset, size, data);
mark_buffer_inactive(intel_obj);
}
@@ -293,7 +293,7 @@ brw_get_buffer_subdata(struct gl_context *ctx,
if (brw_batch_references(&brw->batch, intel_obj->buffer)) {
intel_batchbuffer_flush(brw);
}
- drm_bacon_bo_get_subdata(intel_obj->buffer, offset, size, data);
+ brw_bo_get_subdata(intel_obj->buffer, offset, size, data);
mark_buffer_inactive(intel_obj);
}
@@ -351,16 +351,16 @@ brw_map_buffer_range(struct gl_context *ctx,
if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
if (brw_batch_references(&brw->batch, intel_obj->buffer)) {
if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
- drm_bacon_bo_unreference(intel_obj->buffer);
+ brw_bo_unreference(intel_obj->buffer);
alloc_buffer_object(brw, intel_obj);
} else {
perf_debug("Stalling on the GPU for mapping a busy buffer "
"object\n");
intel_batchbuffer_flush(brw);
}
- } else if (drm_bacon_bo_busy(intel_obj->buffer) &&
+ } else if (brw_bo_busy(intel_obj->buffer) &&
(access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
- drm_bacon_bo_unreference(intel_obj->buffer);
+ brw_bo_unreference(intel_obj->buffer);
alloc_buffer_object(brw, intel_obj);
}
}
@@ -376,23 +376,23 @@ brw_map_buffer_range(struct gl_context *ctx,
*/
if (!(access & (GL_MAP_UNSYNCHRONIZED_BIT | GL_MAP_PERSISTENT_BIT)) &&
(access & GL_MAP_INVALIDATE_RANGE_BIT) &&
- drm_bacon_bo_busy(intel_obj->buffer)) {
+ brw_bo_busy(intel_obj->buffer)) {
/* Ensure that the base alignment of the allocation meets the alignment
* guarantees the driver has advertised to the application.
*/
const unsigned alignment = ctx->Const.MinMapBufferAlignment;
intel_obj->map_extra[index] = (uintptr_t) offset % alignment;
- intel_obj->range_map_bo[index] = drm_bacon_bo_alloc(brw->bufmgr,
+ intel_obj->range_map_bo[index] = brw_bo_alloc(brw->bufmgr,
"BO blit temp",
length +
intel_obj->map_extra[index],
alignment);
if (brw->has_llc) {
- drm_bacon_bo_map(intel_obj->range_map_bo[index],
+ brw_bo_map(intel_obj->range_map_bo[index],
(access & GL_MAP_WRITE_BIT) != 0);
} else {
- drm_bacon_gem_bo_map_gtt(intel_obj->range_map_bo[index]);
+ brw_bo_map_gtt(intel_obj->range_map_bo[index]);
}
obj->Mappings[index].Pointer =
intel_obj->range_map_bo[index]->virtual + intel_obj->map_extra[index];
@@ -401,16 +401,16 @@ brw_map_buffer_range(struct gl_context *ctx,
if (access & GL_MAP_UNSYNCHRONIZED_BIT) {
if (!brw->has_llc && brw->perf_debug &&
- drm_bacon_bo_busy(intel_obj->buffer)) {
+ brw_bo_busy(intel_obj->buffer)) {
perf_debug("MapBufferRange with GL_MAP_UNSYNCHRONIZED_BIT stalling (it's actually synchronized on non-LLC platforms)\n");
}
- drm_bacon_gem_bo_map_unsynchronized(intel_obj->buffer);
+ brw_bo_map_unsynchronized(intel_obj->buffer);
} else if (!brw->has_llc && (!(access & GL_MAP_READ_BIT) ||
(access & GL_MAP_PERSISTENT_BIT))) {
- drm_bacon_gem_bo_map_gtt(intel_obj->buffer);
+ brw_bo_map_gtt(intel_obj->buffer);
mark_buffer_inactive(intel_obj);
} else {
- drm_bacon_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
+ brw_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
mark_buffer_inactive(intel_obj);
}
@@ -502,7 +502,7 @@ brw_unmap_buffer(struct gl_context *ctx,
assert(intel_obj);
assert(obj->Mappings[index].Pointer);
if (intel_obj->range_map_bo[index] != NULL) {
- drm_bacon_bo_unmap(intel_obj->range_map_bo[index]);
+ brw_bo_unmap(intel_obj->range_map_bo[index]);
if (!(obj->Mappings[index].AccessFlags & GL_MAP_FLUSH_EXPLICIT_BIT)) {
intel_emit_linear_blit(brw,
@@ -521,10 +521,10 @@ brw_unmap_buffer(struct gl_context *ctx,
*/
brw_emit_mi_flush(brw);
- drm_bacon_bo_unreference(intel_obj->range_map_bo[index]);
+ brw_bo_unreference(intel_obj->range_map_bo[index]);
intel_obj->range_map_bo[index] = NULL;
} else if (intel_obj->buffer != NULL) {
- drm_bacon_bo_unmap(intel_obj->buffer);
+ brw_bo_unmap(intel_obj->buffer);
}
obj->Mappings[index].Pointer = NULL;
obj->Mappings[index].Offset = 0;
@@ -540,7 +540,7 @@ brw_unmap_buffer(struct gl_context *ctx,
* Anywhere that uses buffer objects in the pipeline should be using this to
* mark the range of the buffer that is being accessed by the pipeline.
*/
-drm_bacon_bo *
+struct brw_bo *
intel_bufferobj_buffer(struct brw_context *brw,
struct intel_buffer_object *intel_obj,
uint32_t offset, uint32_t size)
@@ -574,7 +574,7 @@ brw_copy_buffer_subdata(struct gl_context *ctx,
struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *intel_src = intel_buffer_object(src);
struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
- drm_bacon_bo *src_bo, *dst_bo;
+ struct brw_bo *src_bo, *dst_bo;
if (size == 0)
return;
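
brw_buffer_subdata() is the busiest consumer of the renamed API and encodes a three-way trade-off for writes to a busy buffer: orphan the whole BO when everything is being replaced, blit through an idle temporary for partial writes, or simply stall when the app's access pattern has made blits a loss. The core of it (a sketch, eliding the unsynchronized fast path shown above):

   bool busy = brw_bo_busy(intel_obj->buffer) ||
               brw_batch_references(&brw->batch, intel_obj->buffer);
   if (busy && size == intel_obj->Base.Size) {
      /* Full replacement: swap in a fresh, idle BO instead of waiting. */
      brw_bo_unreference(intel_obj->buffer);
      alloc_buffer_object(brw, intel_obj);
   } else if (busy && !intel_obj->prefer_stall_to_blit) {
      /* Partial write: stage in a temporary and let the blitter copy it. */
      struct brw_bo *temp_bo = brw_bo_alloc(brw->bufmgr, "subdata temp", size, 64);
      brw_bo_subdata(temp_bo, 0, size, data);
      intel_emit_linear_blit(brw, intel_obj->buffer, offset, temp_bo, 0, size);
      brw_bo_unreference(temp_bo);
      return;
   }
   brw_bo_subdata(intel_obj->buffer, offset, size, data); /* may stall */
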
diff --git a/src/mesa/drivers/dri/i965/intel_buffer_objects.h b/src/mesa/drivers/dri/i965/intel_buffer_objects.h
index 0f83fadd8bf..a1bfaa9ebc4 100644
--- a/src/mesa/drivers/dri/i965/intel_buffer_objects.h
+++ b/src/mesa/drivers/dri/i965/intel_buffer_objects.h
@@ -38,9 +38,9 @@ struct gl_buffer_object;
struct intel_buffer_object
{
struct gl_buffer_object Base;
- drm_bacon_bo *buffer; /* the low-level buffer manager's buffer handle */
+ struct brw_bo *buffer; /* the low-level buffer manager's buffer handle */
- drm_bacon_bo *range_map_bo[MAP_COUNT];
+ struct brw_bo *range_map_bo[MAP_COUNT];
/**
* Alignment offset from the range_map_bo temporary mapping to the returned
@@ -82,7 +82,7 @@ struct intel_buffer_object
/* Get the bm buffer associated with a GL bufferobject:
*/
-drm_bacon_bo *intel_bufferobj_buffer(struct brw_context *brw,
+struct brw_bo *intel_bufferobj_buffer(struct brw_context *brw,
struct intel_buffer_object *obj,
uint32_t offset,
uint32_t size);
@@ -91,13 +91,13 @@ void intel_upload_data(struct brw_context *brw,
const void *data,
uint32_t size,
uint32_t alignment,
- drm_bacon_bo **out_bo,
+ struct brw_bo **out_bo,
uint32_t *out_offset);
void *intel_upload_space(struct brw_context *brw,
uint32_t size,
uint32_t alignment,
- drm_bacon_bo **out_bo,
+ struct brw_bo **out_bo,
uint32_t *out_offset);
void intel_upload_finish(struct brw_context *brw);
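
intel_upload_data() and intel_upload_space() only change their out-parameter type here; the contract is unchanged: the caller gets back a shared streaming BO plus an offset into it, not a dedicated allocation, and drops the reference when done, as existing callers do. Typical use (a sketch; data and size are the caller's own):

   struct brw_bo *bo;
   uint32_t offset;
   intel_upload_data(brw, data, size, 64, &bo, &offset);
   /* Point GPU state at (bo, offset); many small uploads share one BO. */
   brw_bo_unreference(bo);   /* drop the reference the upload took for us */
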
diff --git a/src/mesa/drivers/dri/i965/intel_bufmgr_gem.c b/src/mesa/drivers/dri/i965/intel_bufmgr_gem.c
index 885548f3d39..241c1376d85 100644
--- a/src/mesa/drivers/dri/i965/intel_bufmgr_gem.c
+++ b/src/mesa/drivers/dri/i965/intel_bufmgr_gem.c
@@ -116,9 +116,9 @@ struct brw_bufmgr {
};
static int
-bo_set_tiling_internal(drm_bacon_bo *bo, uint32_t tiling_mode, uint32_t stride);
+bo_set_tiling_internal(struct brw_bo *bo, uint32_t tiling_mode, uint32_t stride);
-static void bo_free(drm_bacon_bo *bo);
+static void bo_free(struct brw_bo *bo);
static uint32_t
key_hash_uint(const void *key)
@@ -132,11 +132,11 @@ key_uint_equal(const void *a, const void *b)
return *((unsigned *) a) == *((unsigned *) b);
}
-static drm_bacon_bo *
+static struct brw_bo *
hash_find_bo(struct hash_table *ht, unsigned int key)
{
struct hash_entry *entry = _mesa_hash_table_search(ht, &key);
- return entry ? (drm_bacon_bo *) entry->data : NULL;
+ return entry ? (struct brw_bo *) entry->data : NULL;
}
static unsigned long
@@ -193,13 +193,13 @@ bucket_for_size(struct brw_bufmgr *bufmgr, unsigned long size)
}
inline void
-drm_bacon_bo_reference(drm_bacon_bo *bo)
+brw_bo_reference(struct brw_bo *bo)
{
p_atomic_inc(&bo->refcount);
}
int
-drm_bacon_bo_busy(drm_bacon_bo *bo)
+brw_bo_busy(struct brw_bo *bo)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
struct drm_i915_gem_busy busy;
@@ -219,7 +219,7 @@ drm_bacon_bo_busy(drm_bacon_bo *bo)
}
int
-drm_bacon_bo_madvise(drm_bacon_bo *bo, int state)
+brw_bo_madvise(struct brw_bo *bo, int state)
{
struct drm_i915_gem_madvise madv;
@@ -234,14 +234,14 @@ drm_bacon_bo_madvise(drm_bacon_bo *bo, int state)
/* drop the oldest entries that have been purged by the kernel */
static void
-drm_bacon_gem_bo_cache_purge_bucket(struct brw_bufmgr *bufmgr,
+brw_bo_cache_purge_bucket(struct brw_bufmgr *bufmgr,
struct bo_cache_bucket *bucket)
{
while (!list_empty(&bucket->head)) {
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
- bo = LIST_ENTRY(drm_bacon_bo, bucket->head.next, head);
- if (drm_bacon_bo_madvise(bo, I915_MADV_DONTNEED))
+ bo = LIST_ENTRY(struct brw_bo, bucket->head.next, head);
+ if (brw_bo_madvise(bo, I915_MADV_DONTNEED))
break;
list_del(&bo->head);
@@ -249,7 +249,7 @@ drm_bacon_gem_bo_cache_purge_bucket(struct brw_bufmgr *bufmgr,
}
}
-static drm_bacon_bo *
+static struct brw_bo *
bo_alloc_internal(struct brw_bufmgr *bufmgr,
const char *name,
unsigned long size,
@@ -258,7 +258,7 @@ bo_alloc_internal(struct brw_bufmgr *bufmgr,
unsigned long stride,
unsigned int alignment)
{
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
unsigned int page_size = getpagesize();
int ret;
struct bo_cache_bucket *bucket;
@@ -293,7 +293,7 @@ retry:
* of the list, as it will likely be hot in the GPU
* cache and in the aperture for us.
*/
- bo = LIST_ENTRY(drm_bacon_bo, bucket->head.prev, head);
+ bo = LIST_ENTRY(struct brw_bo, bucket->head.prev, head);
list_del(&bo->head);
alloc_from_cache = true;
bo->align = alignment;
@@ -306,17 +306,17 @@ retry:
* allocating a new buffer is probably faster than
* waiting for the GPU to finish.
*/
- bo = LIST_ENTRY(drm_bacon_bo, bucket->head.next, head);
- if (!drm_bacon_bo_busy(bo)) {
+ bo = LIST_ENTRY(struct brw_bo, bucket->head.next, head);
+ if (!brw_bo_busy(bo)) {
alloc_from_cache = true;
list_del(&bo->head);
}
}
if (alloc_from_cache) {
- if (!drm_bacon_bo_madvise(bo, I915_MADV_WILLNEED)) {
+ if (!brw_bo_madvise(bo, I915_MADV_WILLNEED)) {
bo_free(bo);
- drm_bacon_gem_bo_cache_purge_bucket(bufmgr,
+ brw_bo_cache_purge_bucket(bufmgr,
bucket);
goto retry;
}
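
The retry loop just above is the heart of the BO cache, and the comments in it describe a selection policy worth spelling out: GPU-bound allocations take the most recently freed BO, which is still hot in the GPU cache and aperture, while CPU-bound ones take the oldest BO and only if it has gone idle, since stalling would cost more than a fresh allocation. In outline (a sketch; for_render abbreviates the BO_ALLOC_FOR_RENDER flag check):

   if (for_render) {
      bo = LIST_ENTRY(struct brw_bo, bucket->head.prev, head);  /* newest */
      list_del(&bo->head);
      alloc_from_cache = true;
   } else {
      bo = LIST_ENTRY(struct brw_bo, bucket->head.next, head);  /* oldest */
      if (!brw_bo_busy(bo)) {
         alloc_from_cache = true;
         list_del(&bo->head);
      }
   }

Either way the winner must still survive brw_bo_madvise(bo, I915_MADV_WILLNEED), since the kernel may have purged its pages while it sat in the bucket.
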
@@ -385,8 +385,8 @@ err:
return NULL;
}
-drm_bacon_bo *
-drm_bacon_bo_alloc_for_render(struct brw_bufmgr *bufmgr,
+struct brw_bo *
+brw_bo_alloc_for_render(struct brw_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment)
@@ -395,8 +395,8 @@ drm_bacon_bo_alloc_for_render(struct brw_bufmgr *bufmgr,
I915_TILING_NONE, 0, alignment);
}
-drm_bacon_bo *
-drm_bacon_bo_alloc(struct brw_bufmgr *bufmgr,
+struct brw_bo *
+brw_bo_alloc(struct brw_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment)
@@ -404,8 +404,8 @@ drm_bacon_bo_alloc(struct brw_bufmgr *bufmgr,
return bo_alloc_internal(bufmgr, name, size, 0, I915_TILING_NONE, 0, 0);
}
-drm_bacon_bo *
-drm_bacon_bo_alloc_tiled(struct brw_bufmgr *bufmgr, const char *name,
+struct brw_bo *
+brw_bo_alloc_tiled(struct brw_bufmgr *bufmgr, const char *name,
int x, int y, int cpp, uint32_t *tiling_mode,
unsigned long *pitch, unsigned long flags)
{
@@ -451,17 +451,17 @@ drm_bacon_bo_alloc_tiled(struct brw_bufmgr *bufmgr, const char *name,
}
/**
- * Returns a drm_bacon_bo wrapping the given buffer object handle.
+ * Returns a brw_bo wrapping the given buffer object handle.
*
* This can be used when one application needs to pass a buffer object
* to another.
*/
-drm_bacon_bo *
-drm_bacon_bo_gem_create_from_name(struct brw_bufmgr *bufmgr,
+struct brw_bo *
+brw_bo_gem_create_from_name(struct brw_bufmgr *bufmgr,
const char *name,
unsigned int handle)
{
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
int ret;
struct drm_gem_open open_arg;
struct drm_i915_gem_get_tiling get_tiling;
@@ -475,7 +475,7 @@ drm_bacon_bo_gem_create_from_name(struct brw_bufmgr *bufmgr,
pthread_mutex_lock(&bufmgr->lock);
bo = hash_find_bo(bufmgr->name_table, handle);
if (bo) {
- drm_bacon_bo_reference(bo);
+ brw_bo_reference(bo);
goto out;
}
@@ -496,7 +496,7 @@ drm_bacon_bo_gem_create_from_name(struct brw_bufmgr *bufmgr,
*/
bo = hash_find_bo(bufmgr->handle_table, open_arg.handle);
if (bo) {
- drm_bacon_bo_reference(bo);
+ brw_bo_reference(bo);
goto out;
}
@@ -543,7 +543,7 @@ err_unref:
}
static void
-bo_free(drm_bacon_bo *bo)
+bo_free(struct brw_bo *bo)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
struct drm_gem_close close;
@@ -587,7 +587,7 @@ bo_free(drm_bacon_bo *bo)
}
static void
-bo_mark_mmaps_incoherent(drm_bacon_bo *bo)
+bo_mark_mmaps_incoherent(struct brw_bo *bo)
{
#if HAVE_VALGRIND
if (bo->mem_virtual)
@@ -615,9 +615,9 @@ cleanup_bo_cache(struct brw_bufmgr *bufmgr, time_t time)
&bufmgr->cache_bucket[i];
while (!list_empty(&bucket->head)) {
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
- bo = LIST_ENTRY(drm_bacon_bo, bucket->head.next, head);
+ bo = LIST_ENTRY(struct brw_bo, bucket->head.next, head);
if (time - bo->free_time <= 1)
break;
@@ -647,9 +647,9 @@ bo_purge_vma_cache(struct brw_bufmgr *bufmgr)
limit = 0;
while (bufmgr->vma_count > limit) {
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
- bo = LIST_ENTRY(drm_bacon_bo, bufmgr->vma_cache.next, vma_list);
+ bo = LIST_ENTRY(struct brw_bo, bufmgr->vma_cache.next, vma_list);
assert(bo->map_count == 0);
list_delinit(&bo->vma_list);
@@ -672,7 +672,7 @@ bo_purge_vma_cache(struct brw_bufmgr *bufmgr)
}
static void
-bo_close_vma(struct brw_bufmgr *bufmgr, drm_bacon_bo *bo)
+bo_close_vma(struct brw_bufmgr *bufmgr, struct brw_bo *bo)
{
bufmgr->vma_open--;
list_addtail(&bo->vma_list, &bufmgr->vma_cache);
@@ -686,7 +686,7 @@ bo_close_vma(struct brw_bufmgr *bufmgr, drm_bacon_bo *bo)
}
static void
-bo_open_vma(struct brw_bufmgr *bufmgr, drm_bacon_bo *bo)
+bo_open_vma(struct brw_bufmgr *bufmgr, struct brw_bo *bo)
{
bufmgr->vma_open++;
list_del(&bo->vma_list);
@@ -700,7 +700,7 @@ bo_open_vma(struct brw_bufmgr *bufmgr, drm_bacon_bo *bo)
}
static void
-bo_unreference_final(drm_bacon_bo *bo, time_t time)
+bo_unreference_final(struct brw_bo *bo, time_t time)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
struct bo_cache_bucket *bucket;
@@ -719,7 +719,7 @@ bo_unreference_final(drm_bacon_bo *bo, time_t time)
bucket = bucket_for_size(bufmgr, bo->size);
/* Put the buffer into our internal cache for reuse if we can. */
if (bufmgr->bo_reuse && bo->reusable && bucket != NULL &&
- drm_bacon_bo_madvise(bo, I915_MADV_DONTNEED)) {
+ brw_bo_madvise(bo, I915_MADV_DONTNEED)) {
bo->free_time = time;
bo->name = NULL;
@@ -731,7 +731,7 @@ bo_unreference_final(drm_bacon_bo *bo, time_t time)
}
void
-drm_bacon_bo_unreference(drm_bacon_bo *bo)
+brw_bo_unreference(struct brw_bo *bo)
{
if (bo == NULL)
return;
@@ -756,7 +756,7 @@ drm_bacon_bo_unreference(drm_bacon_bo *bo)
}
int
-drm_bacon_bo_map(drm_bacon_bo *bo, int write_enable)
+brw_bo_map(struct brw_bo *bo, int write_enable)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
struct drm_i915_gem_set_domain set_domain;
@@ -820,7 +820,7 @@ drm_bacon_bo_map(drm_bacon_bo *bo, int write_enable)
}
static int
-map_gtt(drm_bacon_bo *bo)
+map_gtt(struct brw_bo *bo)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
int ret;
@@ -879,7 +879,7 @@ map_gtt(drm_bacon_bo *bo)
}
int
-drm_bacon_gem_bo_map_gtt(drm_bacon_bo *bo)
+brw_bo_map_gtt(struct brw_bo *bo)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
struct drm_i915_gem_set_domain set_domain;
@@ -937,7 +937,7 @@ drm_bacon_gem_bo_map_gtt(drm_bacon_bo *bo)
*/
int
-drm_bacon_gem_bo_map_unsynchronized(drm_bacon_bo *bo)
+brw_bo_map_unsynchronized(struct brw_bo *bo)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
int ret;
@@ -945,12 +945,12 @@ drm_bacon_gem_bo_map_unsynchronized(drm_bacon_bo *bo)
/* If the CPU cache isn't coherent with the GTT, then use a
* regular synchronized mapping. The problem is that we don't
* track where the buffer was last used on the CPU side in
- * terms of drm_bacon_bo_map vs drm_bacon_gem_bo_map_gtt, so
+ * terms of brw_bo_map vs brw_bo_map_gtt, so
* we would potentially corrupt the buffer even when the user
* does reasonable things.
*/
if (!bufmgr->has_llc)
- return drm_bacon_gem_bo_map_gtt(bo);
+ return brw_bo_map_gtt(bo);
pthread_mutex_lock(&bufmgr->lock);
@@ -966,7 +966,7 @@ drm_bacon_gem_bo_map_unsynchronized(drm_bacon_bo *bo)
}
int
-drm_bacon_bo_unmap(drm_bacon_bo *bo)
+brw_bo_unmap(struct brw_bo *bo)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
int ret = 0;
@@ -1000,7 +1000,7 @@ drm_bacon_bo_unmap(drm_bacon_bo *bo)
}
int
-drm_bacon_bo_subdata(drm_bacon_bo *bo, unsigned long offset,
+brw_bo_subdata(struct brw_bo *bo, unsigned long offset,
unsigned long size, const void *data)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
@@ -1026,7 +1026,7 @@ drm_bacon_bo_subdata(drm_bacon_bo *bo, unsigned long offset,
}
int
-drm_bacon_bo_get_subdata(drm_bacon_bo *bo, unsigned long offset,
+brw_bo_get_subdata(struct brw_bo *bo, unsigned long offset,
unsigned long size, void *data)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
@@ -1053,9 +1053,9 @@ drm_bacon_bo_get_subdata(drm_bacon_bo *bo, unsigned long offset,
/** Waits for all GPU rendering with the object to have completed. */
void
-drm_bacon_bo_wait_rendering(drm_bacon_bo *bo)
+brw_bo_wait_rendering(struct brw_bo *bo)
{
- drm_bacon_gem_bo_start_gtt_access(bo, 1);
+ brw_bo_start_gtt_access(bo, 1);
}
/**
@@ -1070,7 +1070,7 @@ drm_bacon_bo_wait_rendering(drm_bacon_bo *bo)
* value describes the error. Of particular interest is -ETIME when the wait has
* failed to yield the desired result.
*
- * Similar to drm_bacon_gem_bo_wait_rendering except a timeout parameter allows
+ * Similar to brw_bo_wait_rendering except a timeout parameter allows
* the operation to give up after a certain amount of time. Another subtle
* difference is the internal locking semantics are different (this variant does
* not hold the lock for the duration of the wait). This makes the wait subject
@@ -1086,7 +1086,7 @@ drm_bacon_bo_wait_rendering(drm_bacon_bo *bo)
* promise, upgrade to latest stable kernels if this is the case.
*/
int
-drm_bacon_gem_bo_wait(drm_bacon_bo *bo, int64_t timeout_ns)
+brw_bo_wait(struct brw_bo *bo, int64_t timeout_ns)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
struct drm_i915_gem_wait wait;
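
brw_bo_wait() is the timeout-based variant whose caveats the comment above spells out: the timeout is in nanoseconds, -ETIME means it expired before the BO went idle, and on well-behaved kernels a negative timeout waits indefinitely. A bounded caller looks like (sketch):

   int ret = brw_bo_wait(bo, 1000000000ll);  /* wait up to one second */
   if (ret == 0) {
      /* BO is idle: safe to map and read without a further stall. */
   } else if (ret == -ETIME) {
      /* Still busy: poll again later, or fall back to a blocking wait. */
   }
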
@@ -1104,13 +1104,13 @@ drm_bacon_gem_bo_wait(drm_bacon_bo *bo, int64_t timeout_ns)
/**
* Sets the object to the GTT read and possibly write domain, used by the X
- * 2D driver in the absence of kernel support to do drm_bacon_gem_bo_map_gtt().
+ * 2D driver in the absence of kernel support to do brw_bo_map_gtt().
*
- * In combination with drm_bacon_gem_bo_pin() and manual fence management, we
+ * In combination with brw_bo_pin() and manual fence management, we
* can do tiled pixmaps this way.
*/
void
-drm_bacon_gem_bo_start_gtt_access(drm_bacon_bo *bo, int write_enable)
+brw_bo_start_gtt_access(struct brw_bo *bo, int write_enable)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
struct drm_i915_gem_set_domain set_domain;
@@ -1140,10 +1140,10 @@ brw_bufmgr_destroy(struct brw_bufmgr *bufmgr)
for (int i = 0; i < bufmgr->num_buckets; i++) {
struct bo_cache_bucket *bucket =
&bufmgr->cache_bucket[i];
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
while (!list_empty(&bucket->head)) {
- bo = LIST_ENTRY(drm_bacon_bo, bucket->head.next, head);
+ bo = LIST_ENTRY(struct brw_bo, bucket->head.next, head);
list_del(&bo->head);
bo_free(bo);
@@ -1157,7 +1157,7 @@ brw_bufmgr_destroy(struct brw_bufmgr *bufmgr)
}
static int
-bo_set_tiling_internal(drm_bacon_bo *bo, uint32_t tiling_mode, uint32_t stride)
+bo_set_tiling_internal(struct brw_bo *bo, uint32_t tiling_mode, uint32_t stride)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
struct drm_i915_gem_set_tiling set_tiling;
@@ -1192,7 +1192,7 @@ bo_set_tiling_internal(drm_bacon_bo *bo, uint32_t tiling_mode, uint32_t stride)
}
int
-drm_bacon_bo_set_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
+brw_bo_set_tiling(struct brw_bo *bo, uint32_t * tiling_mode,
uint32_t stride)
{
int ret;
@@ -1210,7 +1210,7 @@ drm_bacon_bo_set_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
}
int
-drm_bacon_bo_get_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
+brw_bo_get_tiling(struct brw_bo *bo, uint32_t * tiling_mode,
uint32_t *swizzle_mode)
{
*tiling_mode = bo->tiling_mode;
@@ -1218,12 +1218,12 @@ drm_bacon_bo_get_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
return 0;
}
-drm_bacon_bo *
-drm_bacon_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr, int prime_fd, int size)
+struct brw_bo *
+brw_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr, int prime_fd, int size)
{
int ret;
uint32_t handle;
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
struct drm_i915_gem_get_tiling get_tiling;
pthread_mutex_lock(&bufmgr->lock);
@@ -1241,7 +1241,7 @@ drm_bacon_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr, int prime_fd, int
*/
bo = hash_find_bo(bufmgr->handle_table, handle);
if (bo) {
- drm_bacon_bo_reference(bo);
+ brw_bo_reference(bo);
goto out;
}
@@ -1294,7 +1294,7 @@ err:
}
int
-drm_bacon_bo_gem_export_to_prime(drm_bacon_bo *bo, int *prime_fd)
+brw_bo_gem_export_to_prime(struct brw_bo *bo, int *prime_fd)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
@@ -1308,7 +1308,7 @@ drm_bacon_bo_gem_export_to_prime(drm_bacon_bo *bo, int *prime_fd)
}
int
-drm_bacon_bo_flink(drm_bacon_bo *bo, uint32_t *name)
+brw_bo_flink(struct brw_bo *bo, uint32_t *name)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
@@ -1353,14 +1353,14 @@ brw_bufmgr_enable_reuse(struct brw_bufmgr *bufmgr)
* as scanout buffers
*/
int
-drm_bacon_bo_disable_reuse(drm_bacon_bo *bo)
+brw_bo_disable_reuse(struct brw_bo *bo)
{
bo->reusable = false;
return 0;
}
int
-drm_bacon_bo_is_reusable(drm_bacon_bo *bo)
+brw_bo_is_reusable(struct brw_bo *bo)
{
return bo->reusable;
}
@@ -1456,7 +1456,7 @@ brw_reg_read(struct brw_bufmgr *bufmgr, uint32_t offset, uint64_t *result)
return ret;
}
-void *drm_bacon_gem_bo_map__gtt(drm_bacon_bo *bo)
+void *brw_bo_map__gtt(struct brw_bo *bo)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
@@ -1500,7 +1500,7 @@ void *drm_bacon_gem_bo_map__gtt(drm_bacon_bo *bo)
return bo->gtt_virtual;
}
-void *drm_bacon_gem_bo_map__cpu(drm_bacon_bo *bo)
+void *brw_bo_map__cpu(struct brw_bo *bo)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
@@ -1538,7 +1538,7 @@ void *drm_bacon_gem_bo_map__cpu(drm_bacon_bo *bo)
return bo->mem_virtual;
}
-void *drm_bacon_gem_bo_map__wc(drm_bacon_bo *bo)
+void *brw_bo_map__wc(struct brw_bo *bo)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
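
The renamed bufmgr leaves three mapping calls in play, and the callers touched by this patch choose between them consistently: brw_bo_map_gtt() for anything tiled (the fenced aperture view detiles for the CPU), brw_bo_map_unsynchronized() when the caller already knows the range is safe to touch (LLC only; on non-LLC it silently degrades to a GTT map, per the comment above), and plain brw_bo_map() otherwise. A hypothetical chooser mirroring those callers (a sketch, not a function in this patch):

   static void *
   map_for_cpu(struct brw_bo *bo, bool tiled, bool has_llc, bool unsync)
   {
      if (tiled)
         brw_bo_map_gtt(bo);            /* fenced view; CPU sees linear data */
      else if (unsync && has_llc)
         brw_bo_map_unsynchronized(bo); /* no set_domain ioctl, no stall */
      else
         brw_bo_map(bo, true);          /* synchronized CPU map */
      return bo->virtual;
   }
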
diff --git a/src/mesa/drivers/dri/i965/intel_fbo.c b/src/mesa/drivers/dri/i965/intel_fbo.c
index dc47b29130c..21e8e86d2c1 100644
--- a/src/mesa/drivers/dri/i965/intel_fbo.c
+++ b/src/mesa/drivers/dri/i965/intel_fbo.c
@@ -1058,7 +1058,7 @@ brw_render_cache_set_clear(struct brw_context *brw)
}
void
-brw_render_cache_set_add_bo(struct brw_context *brw, drm_bacon_bo *bo)
+brw_render_cache_set_add_bo(struct brw_context *brw, struct brw_bo *bo)
{
_mesa_set_add(brw->render_cache, bo);
}
@@ -1076,7 +1076,7 @@ brw_render_cache_set_add_bo(struct brw_context *brw, drm_bacon_bo *bo)
* different caches within a batchbuffer, it's all our responsibility.
*/
void
-brw_render_cache_set_check_flush(struct brw_context *brw, drm_bacon_bo *bo)
+brw_render_cache_set_check_flush(struct brw_context *brw, struct brw_bo *bo)
{
if (!_mesa_set_search(brw->render_cache, bo))
return;
diff --git a/src/mesa/drivers/dri/i965/intel_fbo.h b/src/mesa/drivers/dri/i965/intel_fbo.h
index 9051f78d532..08b82e89348 100644
--- a/src/mesa/drivers/dri/i965/intel_fbo.h
+++ b/src/mesa/drivers/dri/i965/intel_fbo.h
@@ -236,8 +236,8 @@ intel_renderbuffer_upsample(struct brw_context *brw,
struct intel_renderbuffer *irb);
void brw_render_cache_set_clear(struct brw_context *brw);
-void brw_render_cache_set_add_bo(struct brw_context *brw, drm_bacon_bo *bo);
-void brw_render_cache_set_check_flush(struct brw_context *brw, drm_bacon_bo *bo);
+void brw_render_cache_set_add_bo(struct brw_context *brw, struct brw_bo *bo);
+void brw_render_cache_set_check_flush(struct brw_context *brw, struct brw_bo *bo);
unsigned
intel_quantize_num_samples(struct intel_screen *intel, unsigned num_samples);
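
The render-cache set in intel_fbo.c is the driver's own read-after-write tracking: as the comment above notes, the hardware will not flush between its caches within a batch, so this bookkeeping is entirely our responsibility. The two renamed entry points pair up as (sketch):

   brw_render_cache_set_add_bo(brw, bo);      /* record: bo written via render cache */
   /* ... later, before sampling or blitting from bo in the same batch ... */
   brw_render_cache_set_check_flush(brw, bo); /* flushes only if bo is dirty */
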
diff --git a/src/mesa/drivers/dri/i965/intel_image.h b/src/mesa/drivers/dri/i965/intel_image.h
index 16872e3d545..ad426910e41 100644
--- a/src/mesa/drivers/dri/i965/intel_image.h
+++ b/src/mesa/drivers/dri/i965/intel_image.h
@@ -66,7 +66,7 @@ struct intel_image_format {
struct __DRIimageRec {
struct intel_screen *screen;
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
uint32_t pitch; /**< in bytes */
GLenum internal_format;
uint32_t dri_format;
diff --git a/src/mesa/drivers/dri/i965/intel_mipmap_tree.c b/src/mesa/drivers/dri/i965/intel_mipmap_tree.c
index d970fa98be2..7dca6900dac 100644
--- a/src/mesa/drivers/dri/i965/intel_mipmap_tree.c
+++ b/src/mesa/drivers/dri/i965/intel_mipmap_tree.c
@@ -616,16 +616,16 @@ miptree_create(struct brw_context *brw,
if (format == MESA_FORMAT_S_UINT8) {
/* Align to size of W tile, 64x64. */
- mt->bo = drm_bacon_bo_alloc_tiled(brw->bufmgr, "miptree",
- ALIGN(mt->total_width, 64),
- ALIGN(mt->total_height, 64),
- mt->cpp, &mt->tiling, &pitch,
- alloc_flags);
+ mt->bo = brw_bo_alloc_tiled(brw->bufmgr, "miptree",
+ ALIGN(mt->total_width, 64),
+ ALIGN(mt->total_height, 64),
+ mt->cpp, &mt->tiling, &pitch,
+ alloc_flags);
} else {
- mt->bo = drm_bacon_bo_alloc_tiled(brw->bufmgr, "miptree",
- mt->total_width, mt->total_height,
- mt->cpp, &mt->tiling, &pitch,
- alloc_flags);
+ mt->bo = brw_bo_alloc_tiled(brw->bufmgr, "miptree",
+ mt->total_width, mt->total_height,
+ mt->cpp, &mt->tiling, &pitch,
+ alloc_flags);
}
mt->pitch = pitch;
@@ -665,8 +665,8 @@ intel_miptree_create(struct brw_context *brw,
mt->total_width, mt->total_height);
mt->tiling = I915_TILING_X;
- drm_bacon_bo_unreference(mt->bo);
- mt->bo = drm_bacon_bo_alloc_tiled(brw->bufmgr, "miptree",
+ brw_bo_unreference(mt->bo);
+ mt->bo = brw_bo_alloc_tiled(brw->bufmgr, "miptree",
mt->total_width, mt->total_height, mt->cpp,
&mt->tiling, &pitch, alloc_flags);
mt->pitch = pitch;
@@ -719,7 +719,7 @@ intel_miptree_create(struct brw_context *brw,
struct intel_mipmap_tree *
intel_miptree_create_for_bo(struct brw_context *brw,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
mesa_format format,
uint32_t offset,
uint32_t width,
@@ -732,7 +732,7 @@ intel_miptree_create_for_bo(struct brw_context *brw,
uint32_t tiling, swizzle;
GLenum target;
- drm_bacon_bo_get_tiling(bo, &tiling, &swizzle);
+ brw_bo_get_tiling(bo, &tiling, &swizzle);
/* Nothing will be able to use this miptree with the BO if the offset isn't
* aligned.
@@ -761,7 +761,7 @@ intel_miptree_create_for_bo(struct brw_context *brw,
if (!mt)
return NULL;
- drm_bacon_bo_reference(bo);
+ brw_bo_reference(bo);
mt->bo = bo;
mt->pitch = pitch;
mt->offset = offset;
@@ -783,7 +783,7 @@ intel_miptree_create_for_bo(struct brw_context *brw,
void
intel_update_winsys_renderbuffer_miptree(struct brw_context *intel,
struct intel_renderbuffer *irb,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t width, uint32_t height,
uint32_t pitch)
{
@@ -914,7 +914,7 @@ intel_miptree_hiz_buffer_free(struct intel_miptree_hiz_buffer *hiz_buf)
if (hiz_buf->mt)
intel_miptree_release(&hiz_buf->mt);
else
- drm_bacon_bo_unreference(hiz_buf->aux_base.bo);
+ brw_bo_unreference(hiz_buf->aux_base.bo);
free(hiz_buf);
}
@@ -931,12 +931,12 @@ intel_miptree_release(struct intel_mipmap_tree **mt)
DBG("%s deleting %p\n", __func__, *mt);
- drm_bacon_bo_unreference((*mt)->bo);
+ brw_bo_unreference((*mt)->bo);
intel_miptree_release(&(*mt)->stencil_mt);
intel_miptree_release(&(*mt)->r8stencil_mt);
intel_miptree_hiz_buffer_free((*mt)->hiz_buf);
if ((*mt)->mcs_buf) {
- drm_bacon_bo_unreference((*mt)->mcs_buf->bo);
+ brw_bo_unreference((*mt)->mcs_buf->bo);
free((*mt)->mcs_buf);
}
intel_resolve_map_clear(&(*mt)->hiz_map);
@@ -1386,16 +1386,16 @@ intel_miptree_init_mcs(struct brw_context *brw,
*
* Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
*/
- const int ret = drm_bacon_gem_bo_map_gtt(mt->mcs_buf->bo);
+ const int ret = brw_bo_map_gtt(mt->mcs_buf->bo);
if (unlikely(ret)) {
fprintf(stderr, "Failed to map mcs buffer into GTT\n");
- drm_bacon_bo_unreference(mt->mcs_buf->bo);
+ brw_bo_unreference(mt->mcs_buf->bo);
free(mt->mcs_buf);
return;
}
void *data = mt->mcs_buf->bo->virtual;
memset(data, init_value, mt->mcs_buf->size);
- drm_bacon_bo_unmap(mt->mcs_buf->bo);
+ brw_bo_unmap(mt->mcs_buf->bo);
}
static struct intel_miptree_aux_buffer *
@@ -1442,7 +1442,7 @@ intel_mcs_miptree_buf_create(struct brw_context *brw,
* structure should go away. We use miptree create simply as a means to make
* sure all the constraints for the buffer are satisfied.
*/
- drm_bacon_bo_reference(temp_mt->bo);
+ brw_bo_reference(temp_mt->bo);
intel_miptree_release(&temp_mt);
return buf;
@@ -1551,7 +1551,7 @@ intel_miptree_alloc_non_msrt_mcs(struct brw_context *brw,
* Therefore one can pass the ISL dimensions in terms of bytes instead of
* trying to recalculate based on different format block sizes.
*/
- buf->bo = drm_bacon_bo_alloc_tiled(brw->bufmgr, "ccs-miptree",
+ buf->bo = brw_bo_alloc_tiled(brw->bufmgr, "ccs-miptree",
buf->pitch, buf->size / buf->pitch,
1, &tiling, &pitch, alloc_flags);
if (buf->bo) {
@@ -1688,7 +1688,7 @@ intel_gen7_hiz_buf_create(struct brw_context *brw,
unsigned long pitch;
uint32_t tiling = I915_TILING_Y;
- buf->aux_base.bo = drm_bacon_bo_alloc_tiled(brw->bufmgr, "hiz",
+ buf->aux_base.bo = brw_bo_alloc_tiled(brw->bufmgr, "hiz",
hz_width, hz_height, 1,
&tiling, &pitch,
BO_ALLOC_FOR_RENDER);
@@ -1696,7 +1696,7 @@ intel_gen7_hiz_buf_create(struct brw_context *brw,
free(buf);
return NULL;
} else if (tiling != I915_TILING_Y) {
- drm_bacon_bo_unreference(buf->aux_base.bo);
+ brw_bo_unreference(buf->aux_base.bo);
free(buf);
return NULL;
}
@@ -1785,7 +1785,7 @@ intel_gen8_hiz_buf_create(struct brw_context *brw,
unsigned long pitch;
uint32_t tiling = I915_TILING_Y;
- buf->aux_base.bo = drm_bacon_bo_alloc_tiled(brw->bufmgr, "hiz",
+ buf->aux_base.bo = brw_bo_alloc_tiled(brw->bufmgr, "hiz",
hz_width, hz_height, 1,
&tiling, &pitch,
BO_ALLOC_FOR_RENDER);
@@ -1793,7 +1793,7 @@ intel_gen8_hiz_buf_create(struct brw_context *brw,
free(buf);
return NULL;
} else if (tiling != I915_TILING_Y) {
- drm_bacon_bo_unreference(buf->aux_base.bo);
+ brw_bo_unreference(buf->aux_base.bo);
free(buf);
return NULL;
}
@@ -2272,7 +2272,7 @@ intel_miptree_make_shareable(struct brw_context *brw,
if (mt->mcs_buf) {
intel_miptree_all_slices_resolve_color(brw, mt, 0);
mt->aux_disable |= (INTEL_AUX_DISABLE_CCS | INTEL_AUX_DISABLE_MCS);
- drm_bacon_bo_unreference(mt->mcs_buf->bo);
+ brw_bo_unreference(mt->mcs_buf->bo);
free(mt->mcs_buf);
mt->mcs_buf = NULL;
@@ -2455,7 +2455,7 @@ intel_miptree_map_raw(struct brw_context *brw, struct intel_mipmap_tree *mt)
*/
intel_miptree_all_slices_resolve_color(brw, mt, 0);
- drm_bacon_bo *bo = mt->bo;
+ struct brw_bo *bo = mt->bo;
if (brw_batch_references(&brw->batch, bo))
intel_batchbuffer_flush(brw);
@@ -2473,9 +2473,9 @@ intel_miptree_map_raw(struct brw_context *brw, struct intel_mipmap_tree *mt)
* long as cache consistency is maintained).
*/
if (mt->tiling != I915_TILING_NONE || mt->is_scanout)
- drm_bacon_gem_bo_map_gtt(bo);
+ brw_bo_map_gtt(bo);
else
- drm_bacon_bo_map(bo, true);
+ brw_bo_map(bo, true);
return bo->virtual;
}
@@ -2483,7 +2483,7 @@ intel_miptree_map_raw(struct brw_context *brw, struct intel_mipmap_tree *mt)
static void
intel_miptree_unmap_raw(struct intel_mipmap_tree *mt)
{
- drm_bacon_bo_unmap(mt->bo);
+ brw_bo_unmap(mt->bo);
}
static void
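
intel_miptree_map_raw() picks its map the same way after the rename: flush if the batch still references the BO, then GTT-map tiled or scanout surfaces (the aperture presents them linearly) and CPU-map the rest. Distilled (sketch):

   struct brw_bo *bo = mt->bo;
   if (brw_batch_references(&brw->batch, bo))
      intel_batchbuffer_flush(brw);
   if (mt->tiling != I915_TILING_NONE || mt->is_scanout)
      brw_bo_map_gtt(bo);       /* fenced aperture view: detiled for the CPU */
   else
      brw_bo_map(bo, true);     /* direct map of the linear pages */
   return bo->virtual;
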
diff --git a/src/mesa/drivers/dri/i965/intel_mipmap_tree.h b/src/mesa/drivers/dri/i965/intel_mipmap_tree.h
index 508f942fa54..7aabac006f3 100644
--- a/src/mesa/drivers/dri/i965/intel_mipmap_tree.h
+++ b/src/mesa/drivers/dri/i965/intel_mipmap_tree.h
@@ -31,7 +31,7 @@
* The hardware has a fixed layout of a texture depending on parameters such
* as the target/type (2D, 3D, CUBE), width, height, pitch, and number of
* mipmap levels. The individual level/layer slices are each 2D rectangles of
- * pixels at some x/y offset from the start of the drm_bacon_bo.
+ * pixels at some x/y offset from the start of the brw_bo.
*
* Original OpenGL allowed texture miplevels to be specified in arbitrary
* order, and a texture may change size over time. Thus, each
@@ -279,7 +279,7 @@ struct intel_miptree_aux_buffer
* @see RENDER_SURFACE_STATE.AuxiliarySurfaceBaseAddress
* @see 3DSTATE_HIER_DEPTH_BUFFER.AuxiliarySurfaceBaseAddress
*/
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
/**
* Offset into bo where the surface starts.
@@ -345,7 +345,7 @@ struct intel_mipmap_tree
* @see 3DSTATE_HIER_DEPTH_BUFFER.SurfaceBaseAddress
* @see 3DSTATE_STENCIL_BUFFER.SurfaceBaseAddress
*/
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
/**
* Pitch in bytes.
@@ -698,7 +698,7 @@ struct intel_mipmap_tree *intel_miptree_create(struct brw_context *brw,
struct intel_mipmap_tree *
intel_miptree_create_for_bo(struct brw_context *brw,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
mesa_format format,
uint32_t offset,
uint32_t width,
@@ -710,7 +710,7 @@ intel_miptree_create_for_bo(struct brw_context *brw,
void
intel_update_winsys_renderbuffer_miptree(struct brw_context *intel,
struct intel_renderbuffer *irb,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t width, uint32_t height,
uint32_t pitch);
diff --git a/src/mesa/drivers/dri/i965/intel_pixel_draw.c b/src/mesa/drivers/dri/i965/intel_pixel_draw.c
index cd435d5a596..e84e4739df4 100644
--- a/src/mesa/drivers/dri/i965/intel_pixel_draw.c
+++ b/src/mesa/drivers/dri/i965/intel_pixel_draw.c
@@ -57,7 +57,7 @@ do_blit_drawpixels(struct gl_context * ctx,
struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *src = intel_buffer_object(unpack->BufferObj);
GLuint src_offset;
- drm_bacon_bo *src_buffer;
+ struct brw_bo *src_buffer;
DBG("%s\n", __func__);
diff --git a/src/mesa/drivers/dri/i965/intel_pixel_read.c b/src/mesa/drivers/dri/i965/intel_pixel_read.c
index bbd0cac0460..050d51735f7 100644
--- a/src/mesa/drivers/dri/i965/intel_pixel_read.c
+++ b/src/mesa/drivers/dri/i965/intel_pixel_read.c
@@ -82,7 +82,7 @@ intel_readpixels_tiled_memcpy(struct gl_context * ctx,
int dst_pitch;
/* The miptree's buffer. */
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
int error = 0;
@@ -147,7 +147,7 @@ intel_readpixels_tiled_memcpy(struct gl_context * ctx,
intel_batchbuffer_flush(brw);
}
- error = drm_bacon_bo_map(bo, false /* write enable */);
+ error = brw_bo_map(bo, false /* write enable */);
if (error) {
DBG("%s: failed to map bo\n", __func__);
return false;
@@ -195,7 +195,7 @@ intel_readpixels_tiled_memcpy(struct gl_context * ctx,
mem_copy
);
- drm_bacon_bo_unmap(bo);
+ brw_bo_unmap(bo);
return true;
}
diff --git a/src/mesa/drivers/dri/i965/intel_screen.c b/src/mesa/drivers/dri/i965/intel_screen.c
index b903cdef0d5..776f461a499 100644
--- a/src/mesa/drivers/dri/i965/intel_screen.c
+++ b/src/mesa/drivers/dri/i965/intel_screen.c
@@ -294,7 +294,7 @@ static void
intel_image_warn_if_unaligned(__DRIimage *image, const char *func)
{
uint32_t tiling, swizzle;
- drm_bacon_bo_get_tiling(image->bo, &tiling, &swizzle);
+ brw_bo_get_tiling(image->bo, &tiling, &swizzle);
if (tiling != I915_TILING_NONE && (image->offset & 0xfff)) {
_mesa_warning(NULL, "%s: offset 0x%08x not on tile boundary",
@@ -375,9 +375,9 @@ intel_setup_image_from_mipmap_tree(struct brw_context *brw, __DRIimage *image,
&image->tile_x,
&image->tile_y);
- drm_bacon_bo_unreference(image->bo);
+ brw_bo_unreference(image->bo);
image->bo = mt->bo;
- drm_bacon_bo_reference(mt->bo);
+ brw_bo_reference(mt->bo);
}
static __DRIimage *
@@ -401,7 +401,7 @@ intel_create_image_from_name(__DRIscreen *dri_screen,
image->width = width;
image->height = height;
image->pitch = pitch * cpp;
- image->bo = drm_bacon_bo_gem_create_from_name(screen->bufmgr, "image",
+ image->bo = brw_bo_gem_create_from_name(screen->bufmgr, "image",
name);
if (!image->bo) {
free(image);
@@ -437,9 +437,9 @@ intel_create_image_from_renderbuffer(__DRIcontext *context,
image->format = rb->Format;
image->offset = 0;
image->data = loaderPrivate;
- drm_bacon_bo_unreference(image->bo);
+ brw_bo_unreference(image->bo);
image->bo = irb->mt->bo;
- drm_bacon_bo_reference(irb->mt->bo);
+ brw_bo_reference(irb->mt->bo);
image->width = rb->Width;
image->height = rb->Height;
image->pitch = irb->mt->pitch;
@@ -513,7 +513,7 @@ intel_create_image_from_texture(__DRIcontext *context, int target,
static void
intel_destroy_image(__DRIimage *image)
{
- drm_bacon_bo_unreference(image->bo);
+ brw_bo_unreference(image->bo);
free(image);
}
@@ -613,7 +613,7 @@ intel_create_image_common(__DRIscreen *dri_screen,
return NULL;
cpp = _mesa_get_format_bytes(image->format);
- image->bo = drm_bacon_bo_alloc_tiled(screen->bufmgr, "image",
+ image->bo = brw_bo_alloc_tiled(screen->bufmgr, "image",
width, height, cpp, &tiling,
&pitch, 0);
if (image->bo == NULL) {
@@ -660,7 +660,7 @@ intel_query_image(__DRIimage *image, int attrib, int *value)
*value = image->bo->gem_handle;
return true;
case __DRI_IMAGE_ATTRIB_NAME:
- return !drm_bacon_bo_flink(image->bo, (uint32_t *) value);
+ return !brw_bo_flink(image->bo, (uint32_t *) value);
case __DRI_IMAGE_ATTRIB_FORMAT:
*value = image->dri_format;
return true;
@@ -676,7 +676,7 @@ intel_query_image(__DRIimage *image, int attrib, int *value)
*value = image->planar_format->components;
return true;
case __DRI_IMAGE_ATTRIB_FD:
- return !drm_bacon_bo_gem_export_to_prime(image->bo, value);
+ return !brw_bo_gem_export_to_prime(image->bo, value);
case __DRI_IMAGE_ATTRIB_FOURCC:
return intel_lookup_fourcc(image->dri_format, value);
case __DRI_IMAGE_ATTRIB_NUM_PLANES:
@@ -706,7 +706,7 @@ intel_dup_image(__DRIimage *orig_image, void *loaderPrivate)
if (image == NULL)
return NULL;
- drm_bacon_bo_reference(orig_image->bo);
+ brw_bo_reference(orig_image->bo);
image->bo = orig_image->bo;
image->internal_format = orig_image->internal_format;
image->planar_format = orig_image->planar_format;
@@ -824,7 +824,7 @@ intel_create_image_from_fds(__DRIscreen *dri_screen,
size = end;
}
- image->bo = drm_bacon_bo_gem_create_from_prime(screen->bufmgr,
+ image->bo = brw_bo_gem_create_from_prime(screen->bufmgr,
fds[0], size);
if (image->bo == NULL) {
free(image);
@@ -916,7 +916,7 @@ intel_from_planar(__DRIimage *parent, int plane, void *loaderPrivate)
}
image->bo = parent->bo;
- drm_bacon_bo_reference(parent->bo);
+ brw_bo_reference(parent->bo);
image->width = width;
image->height = height;
@@ -1291,20 +1291,20 @@ intel_init_bufmgr(struct intel_screen *screen)
static bool
intel_detect_swizzling(struct intel_screen *screen)
{
- drm_bacon_bo *buffer;
+ struct brw_bo *buffer;
unsigned long flags = 0;
unsigned long aligned_pitch;
uint32_t tiling = I915_TILING_X;
uint32_t swizzle_mode = 0;
- buffer = drm_bacon_bo_alloc_tiled(screen->bufmgr, "swizzle test",
+ buffer = brw_bo_alloc_tiled(screen->bufmgr, "swizzle test",
64, 64, 4,
&tiling, &aligned_pitch, flags);
if (buffer == NULL)
return false;
- drm_bacon_bo_get_tiling(buffer, &tiling, &swizzle_mode);
- drm_bacon_bo_unreference(buffer);
+ brw_bo_get_tiling(buffer, &tiling, &swizzle_mode);
+ brw_bo_unreference(buffer);
if (swizzle_mode == I915_BIT_6_SWIZZLE_NONE)
return false;
@@ -1370,21 +1370,21 @@ intel_detect_pipelined_register(struct intel_screen *screen,
if (screen->no_hw)
return false;
- drm_bacon_bo *results, *bo;
+ struct brw_bo *results, *bo;
uint32_t *batch;
uint32_t offset = 0;
bool success = false;
/* Create a zero'ed temporary buffer for reading our results */
- results = drm_bacon_bo_alloc(screen->bufmgr, "registers", 4096, 0);
+ results = brw_bo_alloc(screen->bufmgr, "registers", 4096, 0);
if (results == NULL)
goto err;
- bo = drm_bacon_bo_alloc(screen->bufmgr, "batchbuffer", 4096, 0);
+ bo = brw_bo_alloc(screen->bufmgr, "batchbuffer", 4096, 0);
if (bo == NULL)
goto err_results;
- if (drm_bacon_bo_map(bo, 1))
+ if (brw_bo_map(bo, 1))
goto err_batch;
batch = bo->virtual;
@@ -1440,15 +1440,15 @@ intel_detect_pipelined_register(struct intel_screen *screen,
drmIoctl(dri_screen->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
/* Check whether the value got written. */
- if (drm_bacon_bo_map(results, false) == 0) {
+ if (brw_bo_map(results, false) == 0) {
success = *((uint32_t *)results->virtual + offset) == expected_value;
- drm_bacon_bo_unmap(results);
+ brw_bo_unmap(results);
}
err_batch:
- drm_bacon_bo_unreference(bo);
+ brw_bo_unreference(bo);
err_results:
- drm_bacon_bo_unreference(results);
+ brw_bo_unreference(results);
err:
return success;
}
@@ -1856,7 +1856,7 @@ __DRIconfig **intelInitScreen2(__DRIscreen *dri_screen)
* Currently the entire (global) address space for all GTT maps is
* limited to 64bits. That is all objects on the system that are
* setup for GTT mmapping must fit within 64bits. An attempt to use
- * one that exceeds the limit with fail in drm_bacon_bo_map_gtt().
+ * one that exceeds the limit will fail in brw_bo_map_gtt().
*
* Long before we hit that limit, we will be practically limited by
* that any single object must fit in physical memory (RAM). The upper
@@ -2077,7 +2077,7 @@ __DRIconfig **intelInitScreen2(__DRIscreen *dri_screen)
struct intel_buffer {
__DRIbuffer base;
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
};
static __DRIbuffer *
@@ -2101,7 +2101,7 @@ intelAllocateBuffer(__DRIscreen *dri_screen,
uint32_t tiling = I915_TILING_X;
unsigned long pitch;
int cpp = format / 8;
- intelBuffer->bo = drm_bacon_bo_alloc_tiled(screen->bufmgr,
+ intelBuffer->bo = brw_bo_alloc_tiled(screen->bufmgr,
"intelAllocateBuffer",
width,
height,
@@ -2114,7 +2114,7 @@ intelAllocateBuffer(__DRIscreen *dri_screen,
return NULL;
}
- drm_bacon_bo_flink(intelBuffer->bo, &intelBuffer->base.name);
+ brw_bo_flink(intelBuffer->bo, &intelBuffer->base.name);
intelBuffer->base.attachment = attachment;
intelBuffer->base.cpp = cpp;
@@ -2128,7 +2128,7 @@ intelReleaseBuffer(__DRIscreen *dri_screen, __DRIbuffer *buffer)
{
struct intel_buffer *intelBuffer = (struct intel_buffer *) buffer;
- drm_bacon_bo_unreference(intelBuffer->bo);
+ brw_bo_unreference(intelBuffer->bo);
free(intelBuffer);
}
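
intel_screen.c carries most of the reference-count churn: every place a __DRIimage stores a bo pointer takes its own reference and drops the old one. A hedged sketch of that adopt pattern (the helper is hypothetical; brw_bo_unreference() is assumed NULL-safe, as its libdrm ancestor was):

/* Store 'bo' in the image, transferring bookkeeping the way
 * intel_dup_image() and intel_from_planar() above do. */
static void
image_adopt_bo(__DRIimage *image, struct brw_bo *bo)
{
   brw_bo_reference(bo);            /* new holder's reference first */
   brw_bo_unreference(image->bo);   /* then drop whatever was there */
   image->bo = bo;
}

Taking the new reference before releasing the old one keeps the sequence correct even when both pointers already name the same bo.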
diff --git a/src/mesa/drivers/dri/i965/intel_tex.c b/src/mesa/drivers/dri/i965/intel_tex.c
index 0c75d5e56ad..6da666c8845 100644
--- a/src/mesa/drivers/dri/i965/intel_tex.c
+++ b/src/mesa/drivers/dri/i965/intel_tex.c
@@ -333,7 +333,7 @@ intel_set_texture_storage_for_buffer_object(struct gl_context *ctx,
assert(intel_texobj->mt == NULL);
- drm_bacon_bo *bo = intel_bufferobj_buffer(brw, intel_buffer_obj,
+ struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_buffer_obj,
buffer_offset,
row_stride * image->Height);
intel_texobj->mt =
diff --git a/src/mesa/drivers/dri/i965/intel_tex_image.c b/src/mesa/drivers/dri/i965/intel_tex_image.c
index 3c1ff5372ce..c5f8c97dc91 100644
--- a/src/mesa/drivers/dri/i965/intel_tex_image.c
+++ b/src/mesa/drivers/dri/i965/intel_tex_image.c
@@ -128,7 +128,7 @@ intelTexImage(struct gl_context * ctx,
struct intel_texture_image *intelImage = intel_texture_image(texImage);
bool ok;
- bool tex_busy = intelImage->mt && drm_bacon_bo_busy(intelImage->mt->bo);
+ bool tex_busy = intelImage->mt && brw_bo_busy(intelImage->mt->bo);
DBG("%s mesa_format %s target %s format %s type %s level %d %dx%dx%d\n",
__func__, _mesa_get_format_name(texImage->TexFormat),
@@ -467,7 +467,7 @@ intel_gettexsubimage_tiled_memcpy(struct gl_context *ctx,
int dst_pitch;
/* The miptree's buffer. */
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
int error = 0;
@@ -532,7 +532,7 @@ intel_gettexsubimage_tiled_memcpy(struct gl_context *ctx,
intel_batchbuffer_flush(brw);
}
- error = drm_bacon_bo_map(bo, false /* write enable */);
+ error = brw_bo_map(bo, false /* write enable */);
if (error) {
DBG("%s: failed to map bo\n", __func__);
return false;
@@ -565,7 +565,7 @@ intel_gettexsubimage_tiled_memcpy(struct gl_context *ctx,
mem_copy
);
- drm_bacon_bo_unmap(bo);
+ brw_bo_unmap(bo);
return true;
}
diff --git a/src/mesa/drivers/dri/i965/intel_tex_subimage.c b/src/mesa/drivers/dri/i965/intel_tex_subimage.c
index 43ef9085901..1d20ac327e5 100644
--- a/src/mesa/drivers/dri/i965/intel_tex_subimage.c
+++ b/src/mesa/drivers/dri/i965/intel_tex_subimage.c
@@ -84,7 +84,7 @@ intel_texsubimage_tiled_memcpy(struct gl_context * ctx,
int src_pitch;
/* The miptree's buffer. */
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
int error = 0;
@@ -148,7 +148,7 @@ intel_texsubimage_tiled_memcpy(struct gl_context * ctx,
intel_batchbuffer_flush(brw);
}
- error = drm_bacon_bo_map(bo, true /* write enable */);
+ error = brw_bo_map(bo, true /* write enable */);
if (error || bo->virtual == NULL) {
DBG("%s: failed to map bo\n", __func__);
return false;
@@ -185,7 +185,7 @@ intel_texsubimage_tiled_memcpy(struct gl_context * ctx,
mem_copy
);
- drm_bacon_bo_unmap(bo);
+ brw_bo_unmap(bo);
return true;
}
@@ -202,7 +202,7 @@ intelTexSubImage(struct gl_context * ctx,
struct intel_mipmap_tree *mt = intel_texture_image(texImage)->mt;
bool ok;
- bool tex_busy = mt && drm_bacon_bo_busy(mt->bo);
+ bool tex_busy = mt && brw_bo_busy(mt->bo);
if (mt && mt->format == MESA_FORMAT_S_UINT8)
mt->r8stencil_needs_update = true;
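
The subimage upload is the write-side mirror of the read-back paths: probe brw_bo_busy(), map with write enabled, and additionally verify bo->virtual before touching it. A condensed sketch (the real code flushes only when the current batch still references the bo, and goes linear-to-tiled rather than memcpy):

#include <stdbool.h>
#include <string.h>

static bool
write_through(struct brw_context *brw, struct brw_bo *bo,
              const void *src, size_t size)
{
   if (brw_bo_busy(bo))
      intel_batchbuffer_flush(brw);  /* simplified; see note above */
   if (brw_bo_map(bo, true /* write enable */) != 0 || bo->virtual == NULL)
      return false;
   memcpy(bo->virtual, src, size);   /* real code tiles the pixels here */
   brw_bo_unmap(bo);
   return true;
}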
diff --git a/src/mesa/drivers/dri/i965/intel_upload.c b/src/mesa/drivers/dri/i965/intel_upload.c
index 9535d35c1d7..a2b7ba375c9 100644
--- a/src/mesa/drivers/dri/i965/intel_upload.c
+++ b/src/mesa/drivers/dri/i965/intel_upload.c
@@ -50,8 +50,8 @@ intel_upload_finish(struct brw_context *brw)
if (!brw->upload.bo)
return;
- drm_bacon_bo_unmap(brw->upload.bo);
- drm_bacon_bo_unreference(brw->upload.bo);
+ brw_bo_unmap(brw->upload.bo);
+ brw_bo_unreference(brw->upload.bo);
brw->upload.bo = NULL;
brw->upload.next_offset = 0;
}
@@ -83,7 +83,7 @@ void *
intel_upload_space(struct brw_context *brw,
uint32_t size,
uint32_t alignment,
- drm_bacon_bo **out_bo,
+ struct brw_bo **out_bo,
uint32_t *out_offset)
{
uint32_t offset;
@@ -95,21 +95,21 @@ intel_upload_space(struct brw_context *brw,
}
if (!brw->upload.bo) {
- brw->upload.bo = drm_bacon_bo_alloc(brw->bufmgr, "streamed data",
- MAX2(INTEL_UPLOAD_SIZE, size), 4096);
+ brw->upload.bo = brw_bo_alloc(brw->bufmgr, "streamed data",
+ MAX2(INTEL_UPLOAD_SIZE, size), 4096);
if (brw->has_llc)
- drm_bacon_bo_map(brw->upload.bo, true);
+ brw_bo_map(brw->upload.bo, true);
else
- drm_bacon_gem_bo_map_gtt(brw->upload.bo);
+ brw_bo_map_gtt(brw->upload.bo);
}
brw->upload.next_offset = offset + size;
*out_offset = offset;
if (*out_bo != brw->upload.bo) {
- drm_bacon_bo_unreference(*out_bo);
+ brw_bo_unreference(*out_bo);
*out_bo = brw->upload.bo;
- drm_bacon_bo_reference(brw->upload.bo);
+ brw_bo_reference(brw->upload.bo);
}
return brw->upload.bo->virtual + offset;
@@ -125,7 +125,7 @@ intel_upload_data(struct brw_context *brw,
const void *data,
uint32_t size,
uint32_t alignment,
- drm_bacon_bo **out_bo,
+ struct brw_bo **out_bo,
uint32_t *out_offset)
{
void *dst = intel_upload_space(brw, size, alignment, out_bo, out_offset);
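
Taken together, the intel_upload_* helpers above form a small streaming arena: callers ask for aligned space in a shared bo and get back a referenced bo plus an offset to relocate against. A hedged usage sketch (the vertex names and 64-byte alignment are illustrative; passing a NULL-initialized bo relies on brw_bo_unreference() tolerating NULL):

struct brw_bo *bo = NULL;
uint32_t offset;

/* Copies 'vertices' into the shared upload bo; on return 'bo' holds a
 * reference owned by this caller, per the out_bo handoff shown above. */
intel_upload_data(brw, vertices, vertices_size, 64, &bo, &offset);

/* ...emit state that points at (bo, offset)... */

brw_bo_unreference(bo);   /* drop the reference the helper handed back */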