author     Marek Olšák <[email protected]>          2014-02-06 19:24:23 +0100
committer  Marek Olšák <[email protected]>          2014-02-25 16:07:33 +0100
commit     dca350201e00c7cf1cfb009158f4abf27fbc96d2 (patch)
tree       8e331dfaa4ab2b623ee559f14fb1f678be6ba5df /src/mesa/drivers/dri
parent     86e68b0f1f7f5ff58b38653978acaa736ae3d01c (diff)
mesa: allow buffers to be mapped multiple times
OpenGL allows a buffer to be mapped only once, but we also map buffers
internally, e.g. in the software primitive restart fallback, for PBOs,
vbo_get_minmax_index, etc. This has always been a problem, but it will be
a bigger problem with persistent buffer mappings, which will prevent all
Mesa functions from mapping buffers for internal purposes.

This adds a driver interface to core Mesa which supports multiple buffer
mappings and allows 2 mappings: one for the GL user and one for Mesa.

Note that Gallium supports an unlimited number of buffer and texture
mappings, so it's not really an issue for Gallium.

v2: fix unmapping in xm_dd.c, remove the GL errors there
v3: fix the intel driver (by Fredrik)

Reviewed-by: Fredrik Höglund <[email protected]>
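For reference while reading the diff below: each driver hook now takes a
gl_map_buffer_index (MAP_USER or MAP_INTERNAL), and per-mapping state lives in
obj->Mappings[index] instead of a single Pointer/Offset/Length on the buffer
object. The following is a minimal standalone sketch of that model, not Mesa's
actual code; the enum and field names follow the diff, while the structs and
the small demo around them are simplified stand-ins.

/* Standalone sketch of the two-slot mapping model introduced by this patch.
 * MAP_USER, MAP_INTERNAL, MAP_COUNT and the Mappings[] fields mirror the
 * diff; buffer_object is a simplified stand-in for gl_buffer_object. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

typedef enum {
   MAP_USER,      /* mapping owned by the GL application (glMapBufferRange) */
   MAP_INTERNAL,  /* mapping owned by Mesa itself (PBO upload, minmax, ...) */
   MAP_COUNT
} gl_map_buffer_index;

struct buffer_mapping {
   void *Pointer;
   ptrdiff_t Offset;
   ptrdiff_t Length;
   unsigned AccessFlags;
};

struct buffer_object {
   void *storage;                              /* stand-in for the real BO */
   struct buffer_mapping Mappings[MAP_COUNT];  /* one slot per mapping owner */
};

static void *
map_range(struct buffer_object *obj, ptrdiff_t offset, ptrdiff_t length,
          unsigned access, gl_map_buffer_index index)
{
   assert(!obj->Mappings[index].Pointer);      /* each slot maps at most once */
   obj->Mappings[index].Pointer = (char *)obj->storage + offset;
   obj->Mappings[index].Offset = offset;
   obj->Mappings[index].Length = length;
   obj->Mappings[index].AccessFlags = access;
   return obj->Mappings[index].Pointer;
}

static void
unmap(struct buffer_object *obj, gl_map_buffer_index index)
{
   assert(obj->Mappings[index].Pointer);
   obj->Mappings[index].Pointer = NULL;
   obj->Mappings[index].Offset = 0;
   obj->Mappings[index].Length = 0;
   obj->Mappings[index].AccessFlags = 0;
}

int main(void)
{
   struct buffer_object obj = { .storage = malloc(256) };

   /* A user mapping and an internal mapping can now coexist. */
   void *user = map_range(&obj, 0, 256, 0x1 /* stand-in read flag */, MAP_USER);
   void *internal = map_range(&obj, 64, 64, 0x1, MAP_INTERNAL);
   printf("user=%p internal=%p\n", user, internal);

   unmap(&obj, MAP_INTERNAL);   /* Mesa releases its mapping... */
   unmap(&obj, MAP_USER);       /* ...without disturbing the user's */
   free(obj.storage);
   return 0;
}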
Diffstat (limited to 'src/mesa/drivers/dri')
-rw-r--r--   src/mesa/drivers/dri/i915/intel_buffer_objects.c      110
-rw-r--r--   src/mesa/drivers/dri/i915/intel_buffer_objects.h        6
-rw-r--r--   src/mesa/drivers/dri/i915/intel_pixel_bitmap.c          5
-rw-r--r--   src/mesa/drivers/dri/i965/brw_draw_upload.c             5
-rw-r--r--   src/mesa/drivers/dri/i965/intel_buffer_objects.c       97
-rw-r--r--   src/mesa/drivers/dri/i965/intel_buffer_objects.h        5
-rw-r--r--   src/mesa/drivers/dri/i965/intel_pixel_bitmap.c          5
-rw-r--r--   src/mesa/drivers/dri/nouveau/nouveau_bufferobj.c       28
-rw-r--r--   src/mesa/drivers/dri/radeon/radeon_buffer_objects.c    31
9 files changed, 160 insertions, 132 deletions
diff --git a/src/mesa/drivers/dri/i915/intel_buffer_objects.c b/src/mesa/drivers/dri/i915/intel_buffer_objects.c
index f4fb9998cde..b93dd20ed01 100644
--- a/src/mesa/drivers/dri/i915/intel_buffer_objects.c
+++ b/src/mesa/drivers/dri/i915/intel_buffer_objects.c
@@ -40,7 +40,8 @@
#include "intel_regions.h"
static GLboolean
-intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj);
+intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
+ gl_map_buffer_index index);
/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
@@ -93,8 +94,7 @@ intel_bufferobj_free(struct gl_context * ctx, struct gl_buffer_object *obj)
* to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
* (though it does if you call glDeleteBuffers)
*/
- if (obj->Pointer)
- intel_bufferobj_unmap(ctx, obj);
+ _mesa_buffer_unmap_all_mappings(ctx, obj);
free(intel_obj->sys_buffer);
@@ -127,7 +127,8 @@ intel_bufferobj_data(struct gl_context * ctx,
intel_obj->Base.Usage = usage;
intel_obj->Base.StorageFlags = storageFlags;
- assert(!obj->Pointer); /* Mesa should have unmapped it */
+ assert(!obj->Mappings[MAP_USER].Pointer); /* Mesa should have unmapped it */
+ assert(!obj->Mappings[MAP_INTERNAL].Pointer);
if (intel_obj->buffer != NULL)
release_buffer(intel_obj);
@@ -272,7 +273,8 @@ intel_bufferobj_get_subdata(struct gl_context * ctx,
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
GLintptr offset, GLsizeiptr length,
- GLbitfield access, struct gl_buffer_object *obj)
+ GLbitfield access, struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct intel_context *intel = intel_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
@@ -282,9 +284,9 @@ intel_bufferobj_map_range(struct gl_context * ctx,
/* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
* internally uses our functions directly.
*/
- obj->Offset = offset;
- obj->Length = length;
- obj->AccessFlags = access;
+ obj->Mappings[index].Offset = offset;
+ obj->Mappings[index].Length = length;
+ obj->Mappings[index].AccessFlags = access;
if (intel_obj->sys_buffer) {
const bool read_only =
@@ -294,8 +296,8 @@ intel_bufferobj_map_range(struct gl_context * ctx,
release_buffer(intel_obj);
if (!intel_obj->buffer || intel_obj->source) {
- obj->Pointer = intel_obj->sys_buffer + offset;
- return obj->Pointer;
+ obj->Mappings[index].Pointer = intel_obj->sys_buffer + offset;
+ return obj->Mappings[index].Pointer;
}
free(intel_obj->sys_buffer);
@@ -303,7 +305,7 @@ intel_bufferobj_map_range(struct gl_context * ctx,
}
if (intel_obj->buffer == NULL) {
- obj->Pointer = NULL;
+ obj->Mappings[index].Pointer = NULL;
return NULL;
}
@@ -346,23 +348,25 @@ intel_bufferobj_map_range(struct gl_context * ctx,
const unsigned extra = (uintptr_t) offset % alignment;
if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
- intel_obj->range_map_buffer = _mesa_align_malloc(length + extra,
- alignment);
- obj->Pointer = intel_obj->range_map_buffer + extra;
+ intel_obj->range_map_buffer[index] =
+ _mesa_align_malloc(length + extra, alignment);
+ obj->Mappings[index].Pointer =
+ intel_obj->range_map_buffer[index] + extra;
} else {
- intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
- "range map",
- length + extra,
- alignment);
+ intel_obj->range_map_bo[index] = drm_intel_bo_alloc(intel->bufmgr,
+ "range map",
+ length + extra,
+ alignment);
if (!(access & GL_MAP_READ_BIT)) {
- drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
+ drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo[index]);
} else {
- drm_intel_bo_map(intel_obj->range_map_bo,
+ drm_intel_bo_map(intel_obj->range_map_bo[index],
(access & GL_MAP_WRITE_BIT) != 0);
}
- obj->Pointer = intel_obj->range_map_bo->virtual + extra;
+ obj->Mappings[index].Pointer =
+ intel_obj->range_map_bo[index]->virtual + extra;
}
- return obj->Pointer;
+ return obj->Mappings[index].Pointer;
}
if (access & GL_MAP_UNSYNCHRONIZED_BIT)
@@ -373,8 +377,8 @@ intel_bufferobj_map_range(struct gl_context * ctx,
drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
}
- obj->Pointer = intel_obj->buffer->virtual + offset;
- return obj->Pointer;
+ obj->Mappings[index].Pointer = intel_obj->buffer->virtual + offset;
+ return obj->Mappings[index].Pointer;
}
/* Ideally we'd use a BO to avoid taking up cache space for the temporary
@@ -385,7 +389,8 @@ intel_bufferobj_map_range(struct gl_context * ctx,
static void
intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
GLintptr offset, GLsizeiptr length,
- struct gl_buffer_object *obj)
+ struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct intel_context *intel = intel_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
@@ -394,7 +399,7 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
/* Unless we're in the range map using a temporary system buffer,
* there's no work to do.
*/
- if (intel_obj->range_map_buffer == NULL)
+ if (intel_obj->range_map_buffer[index] == NULL)
return;
if (length == 0)
@@ -406,10 +411,11 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
* former points to the actual mapping while the latter may be offset to
* meet alignment guarantees.
*/
- drm_intel_bo_subdata(temp_bo, 0, length, obj->Pointer);
+ drm_intel_bo_subdata(temp_bo, 0, length, obj->Mappings[index].Pointer);
intel_emit_linear_blit(intel,
- intel_obj->buffer, obj->Offset + offset,
+ intel_obj->buffer,
+ obj->Mappings[index].Offset + offset,
temp_bo, 0,
length);
@@ -421,33 +427,35 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
* Called via glUnmapBuffer().
*/
static GLboolean
-intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
+intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct intel_context *intel = intel_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
assert(intel_obj);
- assert(obj->Pointer);
+ assert(obj->Mappings[index].Pointer);
if (intel_obj->sys_buffer != NULL) {
/* always keep the mapping around. */
- } else if (intel_obj->range_map_buffer != NULL) {
+ } else if (intel_obj->range_map_buffer[index] != NULL) {
/* Since we've emitted some blits to buffers that will (likely) be used
* in rendering operations in other cache domains in this batch, emit a
* flush. Once again, we wish for a domain tracker in libdrm to cover
* usage inside of a batchbuffer.
*/
intel_batchbuffer_emit_mi_flush(intel);
- _mesa_align_free(intel_obj->range_map_buffer);
- intel_obj->range_map_buffer = NULL;
- } else if (intel_obj->range_map_bo != NULL) {
- const unsigned extra = obj->Pointer - intel_obj->range_map_bo->virtual;
+ _mesa_align_free(intel_obj->range_map_buffer[index]);
+ intel_obj->range_map_buffer[index] = NULL;
+ } else if (intel_obj->range_map_bo[index] != NULL) {
+ const unsigned extra = obj->Mappings[index].Pointer -
+ intel_obj->range_map_bo[index]->virtual;
- drm_intel_bo_unmap(intel_obj->range_map_bo);
+ drm_intel_bo_unmap(intel_obj->range_map_bo[index]);
intel_emit_linear_blit(intel,
- intel_obj->buffer, obj->Offset,
- intel_obj->range_map_bo, extra,
- obj->Length);
+ intel_obj->buffer, obj->Mappings[index].Offset,
+ intel_obj->range_map_bo[index], extra,
+ obj->Mappings[index].Length);
/* Since we've emitted some blits to buffers that will (likely) be used
* in rendering operations in other cache domains in this batch, emit a
@@ -456,14 +464,14 @@ intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
*/
intel_batchbuffer_emit_mi_flush(intel);
- drm_intel_bo_unreference(intel_obj->range_map_bo);
- intel_obj->range_map_bo = NULL;
+ drm_intel_bo_unreference(intel_obj->range_map_bo[index]);
+ intel_obj->range_map_bo[index] = NULL;
} else if (intel_obj->buffer != NULL) {
drm_intel_bo_unmap(intel_obj->buffer);
}
- obj->Pointer = NULL;
- obj->Offset = 0;
- obj->Length = 0;
+ obj->Mappings[index].Pointer = NULL;
+ obj->Mappings[index].Offset = 0;
+ obj->Mappings[index].Length = 0;
return true;
}
@@ -607,22 +615,24 @@ intel_bufferobj_copy_subdata(struct gl_context *ctx,
char *ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
GL_MAP_READ_BIT |
GL_MAP_WRITE_BIT,
- dst);
+ dst, MAP_INTERNAL);
memmove(ptr + write_offset, ptr + read_offset, size);
- intel_bufferobj_unmap(ctx, dst);
+ intel_bufferobj_unmap(ctx, dst, MAP_INTERNAL);
} else {
const char *src_ptr;
char *dst_ptr;
src_ptr = intel_bufferobj_map_range(ctx, 0, src->Size,
- GL_MAP_READ_BIT, src);
+ GL_MAP_READ_BIT, src,
+ MAP_INTERNAL);
dst_ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
- GL_MAP_WRITE_BIT, dst);
+ GL_MAP_WRITE_BIT, dst,
+ MAP_INTERNAL);
memcpy(dst_ptr + write_offset, src_ptr + read_offset, size);
- intel_bufferobj_unmap(ctx, src);
- intel_bufferobj_unmap(ctx, dst);
+ intel_bufferobj_unmap(ctx, src, MAP_INTERNAL);
+ intel_bufferobj_unmap(ctx, dst, MAP_INTERNAL);
}
return;
}
diff --git a/src/mesa/drivers/dri/i915/intel_buffer_objects.h b/src/mesa/drivers/dri/i915/intel_buffer_objects.h
index d2671f98c45..9ebfe4844e7 100644
--- a/src/mesa/drivers/dri/i915/intel_buffer_objects.h
+++ b/src/mesa/drivers/dri/i915/intel_buffer_objects.h
@@ -46,10 +46,8 @@ struct intel_buffer_object
/** System memory buffer data, if not using a BO to store the data. */
void *sys_buffer;
- drm_intel_bo *range_map_bo;
- void *range_map_buffer;
- unsigned int range_map_offset;
- GLsizei range_map_size;
+ drm_intel_bo *range_map_bo[MAP_COUNT];
+ void *range_map_buffer[MAP_COUNT];
bool source;
};
diff --git a/src/mesa/drivers/dri/i915/intel_pixel_bitmap.c b/src/mesa/drivers/dri/i915/intel_pixel_bitmap.c
index f4a2293e6da..ce8ea4c07b3 100644
--- a/src/mesa/drivers/dri/i915/intel_pixel_bitmap.c
+++ b/src/mesa/drivers/dri/i915/intel_pixel_bitmap.c
@@ -78,7 +78,8 @@ static const GLubyte *map_pbo( struct gl_context *ctx,
buf = (GLubyte *) ctx->Driver.MapBufferRange(ctx, 0, unpack->BufferObj->Size,
GL_MAP_READ_BIT,
- unpack->BufferObj);
+ unpack->BufferObj,
+ MAP_INTERNAL);
if (!buf) {
_mesa_error(ctx, GL_INVALID_OPERATION, "glBitmap(PBO is mapped)");
return NULL;
@@ -311,7 +312,7 @@ out:
if (_mesa_is_bufferobj(unpack->BufferObj)) {
/* done with PBO so unmap it now */
- ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj);
+ ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj, MAP_INTERNAL);
}
intel_check_front_buffer_rendering(intel);
diff --git a/src/mesa/drivers/dri/i965/brw_draw_upload.c b/src/mesa/drivers/dri/i965/brw_draw_upload.c
index cbaf67b4367..d42c074cd31 100644
--- a/src/mesa/drivers/dri/i965/brw_draw_upload.c
+++ b/src/mesa/drivers/dri/i965/brw_draw_upload.c
@@ -861,12 +861,13 @@ static void brw_upload_indices(struct brw_context *brw)
offset,
ib_size,
GL_MAP_READ_BIT,
- bufferobj);
+ bufferobj,
+ MAP_INTERNAL);
intel_upload_data(brw, map, ib_size, ib_type_size, &bo, &offset);
brw->ib.start_vertex_offset = offset / ib_type_size;
- ctx->Driver.UnmapBuffer(ctx, bufferobj);
+ ctx->Driver.UnmapBuffer(ctx, bufferobj, MAP_INTERNAL);
} else {
/* Use CMD_3D_PRIM's start_vertex_offset to avoid re-uploading
* the index buffer state when we're just moving the start index
diff --git a/src/mesa/drivers/dri/i965/intel_buffer_objects.c b/src/mesa/drivers/dri/i965/intel_buffer_objects.c
index c92ca2fcafe..bd7e88d8762 100644
--- a/src/mesa/drivers/dri/i965/intel_buffer_objects.c
+++ b/src/mesa/drivers/dri/i965/intel_buffer_objects.c
@@ -82,7 +82,8 @@ brw_bo_map_gtt(struct brw_context *brw, drm_intel_bo *bo, const char *bo_name)
}
static GLboolean
-intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj);
+intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
+ gl_map_buffer_index index);
static void
intel_bufferobj_mark_gpu_usage(struct intel_buffer_object *intel_obj,
@@ -159,8 +160,7 @@ intel_bufferobj_free(struct gl_context * ctx, struct gl_buffer_object *obj)
* to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
* (though it does if you call glDeleteBuffers)
*/
- if (obj->Pointer)
- intel_bufferobj_unmap(ctx, obj);
+ _mesa_buffer_unmap_all_mappings(ctx, obj);
drm_intel_bo_unreference(intel_obj->buffer);
free(intel_obj);
@@ -197,7 +197,8 @@ intel_bufferobj_data(struct gl_context * ctx,
intel_obj->Base.Usage = usage;
intel_obj->Base.StorageFlags = storageFlags;
- assert(!obj->Pointer); /* Mesa should have unmapped it */
+ assert(!obj->Mappings[MAP_USER].Pointer); /* Mesa should have unmapped it */
+ assert(!obj->Mappings[MAP_INTERNAL].Pointer);
if (intel_obj->buffer != NULL)
release_buffer(intel_obj);
@@ -351,7 +352,8 @@ intel_bufferobj_get_subdata(struct gl_context * ctx,
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
GLintptr offset, GLsizeiptr length,
- GLbitfield access, struct gl_buffer_object *obj)
+ GLbitfield access, struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
@@ -361,12 +363,12 @@ intel_bufferobj_map_range(struct gl_context * ctx,
/* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
* internally uses our functions directly.
*/
- obj->Offset = offset;
- obj->Length = length;
- obj->AccessFlags = access;
+ obj->Mappings[index].Offset = offset;
+ obj->Mappings[index].Length = length;
+ obj->Mappings[index].AccessFlags = access;
if (intel_obj->buffer == NULL) {
- obj->Pointer = NULL;
+ obj->Mappings[index].Pointer = NULL;
return NULL;
}
@@ -410,23 +412,25 @@ intel_bufferobj_map_range(struct gl_context * ctx,
const unsigned extra = (uintptr_t) offset % alignment;
if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
- intel_obj->range_map_buffer = _mesa_align_malloc(length + extra,
- alignment);
- obj->Pointer = intel_obj->range_map_buffer + extra;
+ intel_obj->range_map_buffer[index] = _mesa_align_malloc(length + extra,
+ alignment);
+ obj->Mappings[index].Pointer =
+ intel_obj->range_map_buffer[index] + extra;
} else {
- intel_obj->range_map_bo = drm_intel_bo_alloc(brw->bufmgr,
- "range map",
- length + extra,
- alignment);
+ intel_obj->range_map_bo[index] = drm_intel_bo_alloc(brw->bufmgr,
+ "range map",
+ length + extra,
+ alignment);
if (!(access & GL_MAP_READ_BIT)) {
- drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
+ drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo[index]);
} else {
- drm_intel_bo_map(intel_obj->range_map_bo,
+ drm_intel_bo_map(intel_obj->range_map_bo[index],
(access & GL_MAP_WRITE_BIT) != 0);
}
- obj->Pointer = intel_obj->range_map_bo->virtual + extra;
+ obj->Mappings[index].Pointer =
+ intel_obj->range_map_bo[index]->virtual + extra;
}
- return obj->Pointer;
+ return obj->Mappings[index].Pointer;
}
if (access & GL_MAP_UNSYNCHRONIZED_BIT)
@@ -439,8 +443,8 @@ intel_bufferobj_map_range(struct gl_context * ctx,
intel_bufferobj_mark_inactive(intel_obj);
}
- obj->Pointer = intel_obj->buffer->virtual + offset;
- return obj->Pointer;
+ obj->Mappings[index].Pointer = intel_obj->buffer->virtual + offset;
+ return obj->Mappings[index].Pointer;
}
/**
@@ -459,7 +463,8 @@ intel_bufferobj_map_range(struct gl_context * ctx,
static void
intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
GLintptr offset, GLsizeiptr length,
- struct gl_buffer_object *obj)
+ struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
@@ -468,7 +473,7 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
/* Unless we're in the range map using a temporary system buffer,
* there's no work to do.
*/
- if (intel_obj->range_map_buffer == NULL)
+ if (intel_obj->range_map_buffer[index] == NULL)
return;
if (length == 0)
@@ -480,13 +485,16 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
* former points to the actual mapping while the latter may be offset to
* meet alignment guarantees.
*/
- drm_intel_bo_subdata(temp_bo, 0, length, obj->Pointer);
+ drm_intel_bo_subdata(temp_bo, 0, length, obj->Mappings[index].Pointer);
intel_emit_linear_blit(brw,
- intel_obj->buffer, obj->Offset + offset,
+ intel_obj->buffer,
+ obj->Mappings[index].Offset + offset,
temp_bo, 0,
length);
- intel_bufferobj_mark_gpu_usage(intel_obj, obj->Offset + offset, length);
+ intel_bufferobj_mark_gpu_usage(intel_obj,
+ obj->Mappings[index].Offset + offset,
+ length);
drm_intel_bo_unreference(temp_bo);
}
@@ -498,32 +506,35 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
* Implements glUnmapBuffer().
*/
static GLboolean
-intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
+intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
assert(intel_obj);
- assert(obj->Pointer);
- if (intel_obj->range_map_buffer != NULL) {
+ assert(obj->Mappings[index].Pointer);
+ if (intel_obj->range_map_buffer[index] != NULL) {
/* Since we've emitted some blits to buffers that will (likely) be used
* in rendering operations in other cache domains in this batch, emit a
* flush. Once again, we wish for a domain tracker in libdrm to cover
* usage inside of a batchbuffer.
*/
intel_batchbuffer_emit_mi_flush(brw);
- _mesa_align_free(intel_obj->range_map_buffer);
- intel_obj->range_map_buffer = NULL;
+ _mesa_align_free(intel_obj->range_map_buffer[index]);
+ intel_obj->range_map_buffer[index] = NULL;
} else if (intel_obj->range_map_bo != NULL) {
- const unsigned extra = obj->Pointer - intel_obj->range_map_bo->virtual;
+ const unsigned extra = obj->Mappings[index].Pointer -
+ intel_obj->range_map_bo[index]->virtual;
- drm_intel_bo_unmap(intel_obj->range_map_bo);
+ drm_intel_bo_unmap(intel_obj->range_map_bo[index]);
intel_emit_linear_blit(brw,
- intel_obj->buffer, obj->Offset,
- intel_obj->range_map_bo, extra,
- obj->Length);
- intel_bufferobj_mark_gpu_usage(intel_obj, obj->Offset, obj->Length);
+ intel_obj->buffer, obj->Mappings[index].Offset,
+ intel_obj->range_map_bo[index], extra,
+ obj->Mappings[index].Length);
+ intel_bufferobj_mark_gpu_usage(intel_obj, obj->Mappings[index].Offset,
+ obj->Mappings[index].Length);
/* Since we've emitted some blits to buffers that will (likely) be used
* in rendering operations in other cache domains in this batch, emit a
@@ -532,14 +543,14 @@ intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
*/
intel_batchbuffer_emit_mi_flush(brw);
- drm_intel_bo_unreference(intel_obj->range_map_bo);
- intel_obj->range_map_bo = NULL;
+ drm_intel_bo_unreference(intel_obj->range_map_bo[index]);
+ intel_obj->range_map_bo[index] = NULL;
} else if (intel_obj->buffer != NULL) {
drm_intel_bo_unmap(intel_obj->buffer);
}
- obj->Pointer = NULL;
- obj->Offset = 0;
- obj->Length = 0;
+ obj->Mappings[index].Pointer = NULL;
+ obj->Mappings[index].Offset = 0;
+ obj->Mappings[index].Length = 0;
return true;
}
diff --git a/src/mesa/drivers/dri/i965/intel_buffer_objects.h b/src/mesa/drivers/dri/i965/intel_buffer_objects.h
index 3b6d8352702..2197707c8cc 100644
--- a/src/mesa/drivers/dri/i965/intel_buffer_objects.h
+++ b/src/mesa/drivers/dri/i965/intel_buffer_objects.h
@@ -42,9 +42,8 @@ struct intel_buffer_object
struct gl_buffer_object Base;
drm_intel_bo *buffer; /* the low-level buffer manager's buffer handle */
- drm_intel_bo *range_map_bo;
- void *range_map_buffer;
- unsigned int range_map_offset;
+ drm_intel_bo *range_map_bo[MAP_COUNT];
+ void *range_map_buffer[MAP_COUNT];
/** @{
* Tracking for what range of the BO may currently be in use by the GPU.
diff --git a/src/mesa/drivers/dri/i965/intel_pixel_bitmap.c b/src/mesa/drivers/dri/i965/intel_pixel_bitmap.c
index a9674ca2db3..49a9bed1921 100644
--- a/src/mesa/drivers/dri/i965/intel_pixel_bitmap.c
+++ b/src/mesa/drivers/dri/i965/intel_pixel_bitmap.c
@@ -79,7 +79,8 @@ static const GLubyte *map_pbo( struct gl_context *ctx,
buf = (GLubyte *) ctx->Driver.MapBufferRange(ctx, 0, unpack->BufferObj->Size,
GL_MAP_READ_BIT,
- unpack->BufferObj);
+ unpack->BufferObj,
+ MAP_INTERNAL);
if (!buf) {
_mesa_error(ctx, GL_INVALID_OPERATION, "glBitmap(PBO is mapped)");
return NULL;
@@ -317,7 +318,7 @@ out:
if (_mesa_is_bufferobj(unpack->BufferObj)) {
/* done with PBO so unmap it now */
- ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj);
+ ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj, MAP_INTERNAL);
}
intel_check_front_buffer_rendering(brw);
diff --git a/src/mesa/drivers/dri/nouveau/nouveau_bufferobj.c b/src/mesa/drivers/dri/nouveau/nouveau_bufferobj.c
index 783a152955f..9a3d41739b9 100644
--- a/src/mesa/drivers/dri/nouveau/nouveau_bufferobj.c
+++ b/src/mesa/drivers/dri/nouveau/nouveau_bufferobj.c
@@ -127,12 +127,13 @@ nouveau_bufferobj_get_subdata(struct gl_context *ctx, GLintptrARB offset,
static void *
nouveau_bufferobj_map_range(struct gl_context *ctx, GLintptr offset,
GLsizeiptr length, GLbitfield access,
- struct gl_buffer_object *obj)
+ struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
unsigned flags = 0;
char *map;
- assert(!obj->Pointer);
+ assert(!obj->Mappings[index].Pointer);
if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
if (access & GL_MAP_READ_BIT)
@@ -145,23 +146,24 @@ nouveau_bufferobj_map_range(struct gl_context *ctx, GLintptr offset,
if (!map)
return NULL;
- obj->Pointer = map + offset;
- obj->Offset = offset;
- obj->Length = length;
- obj->AccessFlags = access;
+ obj->Mappings[index].Pointer = map + offset;
+ obj->Mappings[index].Offset = offset;
+ obj->Mappings[index].Length = length;
+ obj->Mappings[index].AccessFlags = access;
- return obj->Pointer;
+ return obj->Mappings[index].Pointer;
}
static GLboolean
-nouveau_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj)
+nouveau_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
- assert(obj->Pointer);
+ assert(obj->Mappings[index].Pointer);
- obj->Pointer = NULL;
- obj->Offset = 0;
- obj->Length = 0;
- obj->AccessFlags = 0;
+ obj->Mappings[index].Pointer = NULL;
+ obj->Mappings[index].Offset = 0;
+ obj->Mappings[index].Length = 0;
+ obj->Mappings[index].AccessFlags = 0;
return GL_TRUE;
}
diff --git a/src/mesa/drivers/dri/radeon/radeon_buffer_objects.c b/src/mesa/drivers/dri/radeon/radeon_buffer_objects.c
index 7663178d63d..6294dbe1f98 100644
--- a/src/mesa/drivers/dri/radeon/radeon_buffer_objects.c
+++ b/src/mesa/drivers/dri/radeon/radeon_buffer_objects.c
@@ -60,9 +60,12 @@ radeonDeleteBufferObject(struct gl_context * ctx,
struct gl_buffer_object *obj)
{
struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);
+ int i;
- if (obj->Pointer) {
- radeon_bo_unmap(radeon_obj->bo);
+ for (i = 0; i < MAP_COUNT; i++) {
+ if (obj->Mappings[i].Pointer) {
+ radeon_bo_unmap(radeon_obj->bo);
+ }
}
if (radeon_obj->bo) {
@@ -175,7 +178,8 @@ radeonGetBufferSubData(struct gl_context * ctx,
static void *
radeonMapBufferRange(struct gl_context * ctx,
GLintptr offset, GLsizeiptr length,
- GLbitfield access, struct gl_buffer_object *obj)
+ GLbitfield access, struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);
const GLboolean write_only =
@@ -186,18 +190,18 @@ radeonMapBufferRange(struct gl_context * ctx,
}
if (radeon_obj->bo == NULL) {
- obj->Pointer = NULL;
+ obj->Mappings[index].Pointer = NULL;
return NULL;
}
- obj->Offset = offset;
- obj->Length = length;
- obj->AccessFlags = access;
+ obj->Mappings[index].Offset = offset;
+ obj->Mappings[index].Length = length;
+ obj->Mappings[index].AccessFlags = access;
radeon_bo_map(radeon_obj->bo, write_only);
- obj->Pointer = radeon_obj->bo->ptr + offset;
- return obj->Pointer;
+ obj->Mappings[index].Pointer = radeon_obj->bo->ptr + offset;
+ return obj->Mappings[index].Pointer;
}
@@ -206,7 +210,8 @@ radeonMapBufferRange(struct gl_context * ctx,
*/
static GLboolean
radeonUnmapBuffer(struct gl_context * ctx,
- struct gl_buffer_object *obj)
+ struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);
@@ -214,9 +219,9 @@ radeonUnmapBuffer(struct gl_context * ctx,
radeon_bo_unmap(radeon_obj->bo);
}
- obj->Pointer = NULL;
- obj->Offset = 0;
- obj->Length = 0;
+ obj->Mappings[index].Pointer = NULL;
+ obj->Mappings[index].Offset = 0;
+ obj->Mappings[index].Length = 0;
return GL_TRUE;
}