Diffstat (limited to 'src/mesa/drivers/dri/i965')
-rw-r--r--  src/mesa/drivers/dri/i965/brw_draw_upload.c      |  5
-rw-r--r--  src/mesa/drivers/dri/i965/intel_buffer_objects.c | 97
-rw-r--r--  src/mesa/drivers/dri/i965/intel_buffer_objects.h |  5
-rw-r--r--  src/mesa/drivers/dri/i965/intel_pixel_bitmap.c   |  5
4 files changed, 62 insertions(+), 50 deletions(-)
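
This change threads a gl_map_buffer_index parameter through the i965 buffer-object
driver hooks (MapBufferRange, FlushMappedBufferRange, UnmapBuffer), so mappings made
on behalf of the application and mappings the driver makes for its own use are tracked
in separate slots of the buffer object. The sketch below shows the shape of the core-Mesa
types the hunks rely on; the MAP_USER/MAP_INTERNAL/MAP_COUNT values and the
Mappings[index].Pointer/Offset/Length/AccessFlags fields appear verbatim in the diff,
while the type names themselves are assumptions made for illustration only.

/* Sketch of the core-Mesa side this driver change builds on (names assumed). */
typedef enum {
   MAP_USER,      /* glMapBufferRange() and friends, on behalf of the app */
   MAP_INTERNAL,  /* mappings core Mesa or the driver makes for itself    */
   MAP_COUNT
} gl_map_buffer_index;

struct gl_buffer_mapping {
   GLbitfield AccessFlags;   /* GL_MAP_READ_BIT, GL_MAP_WRITE_BIT, ...       */
   GLvoid *Pointer;          /* CPU address of the mapping, NULL if unmapped */
   GLintptr Offset;          /* start of the mapped range within the buffer  */
   GLsizeiptr Length;        /* length of the mapped range                   */
};

/* gl_buffer_object then carries one slot per index instead of a single
 * Pointer/Offset/Length/AccessFlags set:
 *
 *    struct gl_buffer_mapping Mappings[MAP_COUNT];
 */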
diff --git a/src/mesa/drivers/dri/i965/brw_draw_upload.c b/src/mesa/drivers/dri/i965/brw_draw_upload.c
index cbaf67b4367..d42c074cd31 100644
--- a/src/mesa/drivers/dri/i965/brw_draw_upload.c
+++ b/src/mesa/drivers/dri/i965/brw_draw_upload.c
@@ -861,12 +861,13 @@ static void brw_upload_indices(struct brw_context *brw)
offset,
ib_size,
GL_MAP_READ_BIT,
- bufferobj);
+ bufferobj,
+ MAP_INTERNAL);
intel_upload_data(brw, map, ib_size, ib_type_size, &bo, &offset);
brw->ib.start_vertex_offset = offset / ib_type_size;
- ctx->Driver.UnmapBuffer(ctx, bufferobj);
+ ctx->Driver.UnmapBuffer(ctx, bufferobj, MAP_INTERNAL);
} else {
/* Use CMD_3D_PRIM's start_vertex_offset to avoid re-uploading
* the index buffer state when we're just moving the start index
diff --git a/src/mesa/drivers/dri/i965/intel_buffer_objects.c b/src/mesa/drivers/dri/i965/intel_buffer_objects.c
index c92ca2fcafe..bd7e88d8762 100644
--- a/src/mesa/drivers/dri/i965/intel_buffer_objects.c
+++ b/src/mesa/drivers/dri/i965/intel_buffer_objects.c
@@ -82,7 +82,8 @@ brw_bo_map_gtt(struct brw_context *brw, drm_intel_bo *bo, const char *bo_name)
}
static GLboolean
-intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj);
+intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
+ gl_map_buffer_index index);
static void
intel_bufferobj_mark_gpu_usage(struct intel_buffer_object *intel_obj,
@@ -159,8 +160,7 @@ intel_bufferobj_free(struct gl_context * ctx, struct gl_buffer_object *obj)
* to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
* (though it does if you call glDeleteBuffers)
*/
- if (obj->Pointer)
- intel_bufferobj_unmap(ctx, obj);
+ _mesa_buffer_unmap_all_mappings(ctx, obj);
drm_intel_bo_unreference(intel_obj->buffer);
free(intel_obj);
@@ -197,7 +197,8 @@ intel_bufferobj_data(struct gl_context * ctx,
intel_obj->Base.Usage = usage;
intel_obj->Base.StorageFlags = storageFlags;
- assert(!obj->Pointer); /* Mesa should have unmapped it */
+ assert(!obj->Mappings[MAP_USER].Pointer); /* Mesa should have unmapped it */
+ assert(!obj->Mappings[MAP_INTERNAL].Pointer);
if (intel_obj->buffer != NULL)
release_buffer(intel_obj);
@@ -351,7 +352,8 @@ intel_bufferobj_get_subdata(struct gl_context * ctx,
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
GLintptr offset, GLsizeiptr length,
- GLbitfield access, struct gl_buffer_object *obj)
+ GLbitfield access, struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
@@ -361,12 +363,12 @@ intel_bufferobj_map_range(struct gl_context * ctx,
/* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
* internally uses our functions directly.
*/
- obj->Offset = offset;
- obj->Length = length;
- obj->AccessFlags = access;
+ obj->Mappings[index].Offset = offset;
+ obj->Mappings[index].Length = length;
+ obj->Mappings[index].AccessFlags = access;
if (intel_obj->buffer == NULL) {
- obj->Pointer = NULL;
+ obj->Mappings[index].Pointer = NULL;
return NULL;
}
@@ -410,23 +412,25 @@ intel_bufferobj_map_range(struct gl_context * ctx,
const unsigned extra = (uintptr_t) offset % alignment;
if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
- intel_obj->range_map_buffer = _mesa_align_malloc(length + extra,
- alignment);
- obj->Pointer = intel_obj->range_map_buffer + extra;
+ intel_obj->range_map_buffer[index] = _mesa_align_malloc(length + extra,
+ alignment);
+ obj->Mappings[index].Pointer =
+ intel_obj->range_map_buffer[index] + extra;
} else {
- intel_obj->range_map_bo = drm_intel_bo_alloc(brw->bufmgr,
- "range map",
- length + extra,
- alignment);
+ intel_obj->range_map_bo[index] = drm_intel_bo_alloc(brw->bufmgr,
+ "range map",
+ length + extra,
+ alignment);
if (!(access & GL_MAP_READ_BIT)) {
- drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
+ drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo[index]);
} else {
- drm_intel_bo_map(intel_obj->range_map_bo,
+ drm_intel_bo_map(intel_obj->range_map_bo[index],
(access & GL_MAP_WRITE_BIT) != 0);
}
- obj->Pointer = intel_obj->range_map_bo->virtual + extra;
+ obj->Mappings[index].Pointer =
+ intel_obj->range_map_bo[index]->virtual + extra;
}
- return obj->Pointer;
+ return obj->Mappings[index].Pointer;
}
if (access & GL_MAP_UNSYNCHRONIZED_BIT)
@@ -439,8 +443,8 @@ intel_bufferobj_map_range(struct gl_context * ctx,
intel_bufferobj_mark_inactive(intel_obj);
}
- obj->Pointer = intel_obj->buffer->virtual + offset;
- return obj->Pointer;
+ obj->Mappings[index].Pointer = intel_obj->buffer->virtual + offset;
+ return obj->Mappings[index].Pointer;
}
/**
@@ -459,7 +463,8 @@ intel_bufferobj_map_range(struct gl_context * ctx,
static void
intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
GLintptr offset, GLsizeiptr length,
- struct gl_buffer_object *obj)
+ struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
@@ -468,7 +473,7 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
/* Unless we're in the range map using a temporary system buffer,
* there's no work to do.
*/
- if (intel_obj->range_map_buffer == NULL)
+ if (intel_obj->range_map_buffer[index] == NULL)
return;
if (length == 0)
@@ -480,13 +485,16 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
* former points to the actual mapping while the latter may be offset to
* meet alignment guarantees.
*/
- drm_intel_bo_subdata(temp_bo, 0, length, obj->Pointer);
+ drm_intel_bo_subdata(temp_bo, 0, length, obj->Mappings[index].Pointer);
intel_emit_linear_blit(brw,
- intel_obj->buffer, obj->Offset + offset,
+ intel_obj->buffer,
+ obj->Mappings[index].Offset + offset,
temp_bo, 0,
length);
- intel_bufferobj_mark_gpu_usage(intel_obj, obj->Offset + offset, length);
+ intel_bufferobj_mark_gpu_usage(intel_obj,
+ obj->Mappings[index].Offset + offset,
+ length);
drm_intel_bo_unreference(temp_bo);
}
@@ -498,32 +506,35 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
* Implements glUnmapBuffer().
*/
static GLboolean
-intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
+intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
assert(intel_obj);
- assert(obj->Pointer);
- if (intel_obj->range_map_buffer != NULL) {
+ assert(obj->Mappings[index].Pointer);
+ if (intel_obj->range_map_buffer[index] != NULL) {
/* Since we've emitted some blits to buffers that will (likely) be used
* in rendering operations in other cache domains in this batch, emit a
* flush. Once again, we wish for a domain tracker in libdrm to cover
* usage inside of a batchbuffer.
*/
intel_batchbuffer_emit_mi_flush(brw);
- _mesa_align_free(intel_obj->range_map_buffer);
- intel_obj->range_map_buffer = NULL;
+ _mesa_align_free(intel_obj->range_map_buffer[index]);
+ intel_obj->range_map_buffer[index] = NULL;
- } else if (intel_obj->range_map_bo != NULL) {
+ } else if (intel_obj->range_map_bo[index] != NULL) {
- const unsigned extra = obj->Pointer - intel_obj->range_map_bo->virtual;
+ const unsigned extra = obj->Mappings[index].Pointer -
+ intel_obj->range_map_bo[index]->virtual;
- drm_intel_bo_unmap(intel_obj->range_map_bo);
+ drm_intel_bo_unmap(intel_obj->range_map_bo[index]);
intel_emit_linear_blit(brw,
- intel_obj->buffer, obj->Offset,
- intel_obj->range_map_bo, extra,
- obj->Length);
- intel_bufferobj_mark_gpu_usage(intel_obj, obj->Offset, obj->Length);
+ intel_obj->buffer, obj->Mappings[index].Offset,
+ intel_obj->range_map_bo[index], extra,
+ obj->Mappings[index].Length);
+ intel_bufferobj_mark_gpu_usage(intel_obj, obj->Mappings[index].Offset,
+ obj->Mappings[index].Length);
/* Since we've emitted some blits to buffers that will (likely) be used
* in rendering operations in other cache domains in this batch, emit a
@@ -532,14 +543,14 @@ intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
*/
intel_batchbuffer_emit_mi_flush(brw);
- drm_intel_bo_unreference(intel_obj->range_map_bo);
- intel_obj->range_map_bo = NULL;
+ drm_intel_bo_unreference(intel_obj->range_map_bo[index]);
+ intel_obj->range_map_bo[index] = NULL;
} else if (intel_obj->buffer != NULL) {
drm_intel_bo_unmap(intel_obj->buffer);
}
- obj->Pointer = NULL;
- obj->Offset = 0;
- obj->Length = 0;
+ obj->Mappings[index].Pointer = NULL;
+ obj->Mappings[index].Offset = 0;
+ obj->Mappings[index].Length = 0;
return true;
}
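
Note on the intel_bufferobj_free() hunk above: rather than checking obj->Pointer and
unmapping a single mapping itself, the driver now defers to core Mesa's
_mesa_buffer_unmap_all_mappings(). A minimal sketch of what that helper is assumed to
do follows; the loop structure is illustrative and not copied from bufferobj.c.

/* Assumed shape of the core helper: walk every mapping slot and ask the
 * driver to unmap whichever ones are still active.
 */
static void
unmap_all_mappings_sketch(struct gl_context *ctx, struct gl_buffer_object *obj)
{
   for (int i = 0; i < MAP_COUNT; i++) {
      if (obj->Mappings[i].Pointer) {
         ctx->Driver.UnmapBuffer(ctx, obj, i);
         obj->Mappings[i].AccessFlags = 0;
      }
   }
}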
diff --git a/src/mesa/drivers/dri/i965/intel_buffer_objects.h b/src/mesa/drivers/dri/i965/intel_buffer_objects.h
index 3b6d8352702..2197707c8cc 100644
--- a/src/mesa/drivers/dri/i965/intel_buffer_objects.h
+++ b/src/mesa/drivers/dri/i965/intel_buffer_objects.h
@@ -42,9 +42,8 @@ struct intel_buffer_object
struct gl_buffer_object Base;
drm_intel_bo *buffer; /* the low-level buffer manager's buffer handle */
- drm_intel_bo *range_map_bo;
- void *range_map_buffer;
- unsigned int range_map_offset;
+ drm_intel_bo *range_map_bo[MAP_COUNT];
+ void *range_map_buffer[MAP_COUNT];
/** @{
* Tracking for what range of the BO may currently be in use by the GPU.
diff --git a/src/mesa/drivers/dri/i965/intel_pixel_bitmap.c b/src/mesa/drivers/dri/i965/intel_pixel_bitmap.c
index a9674ca2db3..49a9bed1921 100644
--- a/src/mesa/drivers/dri/i965/intel_pixel_bitmap.c
+++ b/src/mesa/drivers/dri/i965/intel_pixel_bitmap.c
@@ -79,7 +79,8 @@ static const GLubyte *map_pbo( struct gl_context *ctx,
buf = (GLubyte *) ctx->Driver.MapBufferRange(ctx, 0, unpack->BufferObj->Size,
GL_MAP_READ_BIT,
- unpack->BufferObj);
+ unpack->BufferObj,
+ MAP_INTERNAL);
if (!buf) {
_mesa_error(ctx, GL_INVALID_OPERATION, "glBitmap(PBO is mapped)");
return NULL;
@@ -317,7 +318,7 @@ out:
if (_mesa_is_bufferobj(unpack->BufferObj)) {
/* done with PBO so unmap it now */
- ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj);
+ ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj, MAP_INTERNAL);
}
intel_check_front_buffer_rendering(brw);
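
The two call-site updates (brw_upload_indices() and the glBitmap PBO path) show the
intended pattern for driver-internal mappings: map and unmap with MAP_INTERNAL so the
bookkeeping never collides with a live MAP_USER mapping created by the application.
Condensed from the hunks above; bufferobj, offset and size stand in for whatever the
caller has at hand.

/* Driver-internal read of a buffer object, as in brw_upload_indices() and
 * map_pbo() above.  MAP_INTERNAL leaves any application-owned mapping
 * tracked in obj->Mappings[MAP_USER] untouched.
 */
void *map = ctx->Driver.MapBufferRange(ctx, offset, size,
                                       GL_MAP_READ_BIT, bufferobj,
                                       MAP_INTERNAL);
if (map) {
   /* ... read the data ... */
   ctx->Driver.UnmapBuffer(ctx, bufferobj, MAP_INTERNAL);
}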