author     Marek Olšák <[email protected]>   2014-02-06 19:24:23 +0100
committer  Marek Olšák <[email protected]>   2014-02-25 16:07:33 +0100
commit     dca350201e00c7cf1cfb009158f4abf27fbc96d2 (patch)
tree       8e331dfaa4ab2b623ee559f14fb1f678be6ba5df /src/mesa/drivers/dri/i915
parent     86e68b0f1f7f5ff58b38653978acaa736ae3d01c (diff)
mesa: allow buffers to be mapped multiple times
OpenGL allows a buffer to be mapped only once, but we also map buffers
internally, e.g. in the software primitive restart fallback, for PBOs,
vbo_get_minmax_index, etc. This has always been a problem, but it becomes
a bigger one with persistent buffer mappings, which would prevent all Mesa
functions from mapping such buffers for internal purposes.
This adds a driver interface to core Mesa that supports multiple buffer
mappings and allows two mappings: one for the GL user and one for Mesa itself.
Note that Gallium supports an unlimited number of buffer and texture
mappings, so it's not really an issue for Gallium.
v2: fix unmapping in xm_dd.c, remove the GL errors there
v3: fix the intel driver (by Fredrik)
Reviewed-by: Fredrik Höglund <[email protected]>
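For context, here is a minimal sketch (not part of the commit) of how core Mesa code can use the new interface to map a buffer for internal purposes without disturbing the application's own mapping; it mirrors the map_pbo()/UnmapBuffer change in intel_pixel_bitmap.c below. The helper name read_pbo_internally is invented for illustration, and error handling is reduced to a NULL check.

/* Sketch only: assumes the usual core-Mesa headers (main/mtypes.h etc.)
 * are in scope.  The helper name is hypothetical. */
static void
read_pbo_internally(struct gl_context *ctx, struct gl_buffer_object *pbo)
{
   /* Map into the MAP_INTERNAL slot; a concurrent user mapping
    * (MAP_USER) of the same buffer is no longer a conflict. */
   void *ptr = ctx->Driver.MapBufferRange(ctx, 0, pbo->Size,
                                          GL_MAP_READ_BIT, pbo,
                                          MAP_INTERNAL);
   if (!ptr)
      return;

   /* ... read the buffer contents through ptr ... */

   /* Release only the internal mapping; the user mapping, if any,
    * stays intact. */
   ctx->Driver.UnmapBuffer(ctx, pbo, MAP_INTERNAL);
}

Drivers, in turn, keep per-slot state: the i915 changes below turn range_map_bo and range_map_buffer into MAP_COUNT-sized arrays indexed by the same gl_map_buffer_index.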
Diffstat (limited to 'src/mesa/drivers/dri/i915')
-rw-r--r--   src/mesa/drivers/dri/i915/intel_buffer_objects.c   110
-rw-r--r--   src/mesa/drivers/dri/i915/intel_buffer_objects.h     6
-rw-r--r--   src/mesa/drivers/dri/i915/intel_pixel_bitmap.c       5
3 files changed, 65 insertions, 56 deletions
diff --git a/src/mesa/drivers/dri/i915/intel_buffer_objects.c b/src/mesa/drivers/dri/i915/intel_buffer_objects.c
index f4fb9998cde..b93dd20ed01 100644
--- a/src/mesa/drivers/dri/i915/intel_buffer_objects.c
+++ b/src/mesa/drivers/dri/i915/intel_buffer_objects.c
@@ -40,7 +40,8 @@
 #include "intel_regions.h"

 static GLboolean
-intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj);
+intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
+                      gl_map_buffer_index index);

 /** Allocates a new drm_intel_bo to store the data for the buffer object. */
 static void
@@ -93,8 +94,7 @@ intel_bufferobj_free(struct gl_context * ctx, struct gl_buffer_object *obj)
    * to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
    * (though it does if you call glDeleteBuffers)
    */
-   if (obj->Pointer)
-      intel_bufferobj_unmap(ctx, obj);
+   _mesa_buffer_unmap_all_mappings(ctx, obj);

    free(intel_obj->sys_buffer);

@@ -127,7 +127,8 @@ intel_bufferobj_data(struct gl_context * ctx,
    intel_obj->Base.Usage = usage;
    intel_obj->Base.StorageFlags = storageFlags;

-   assert(!obj->Pointer); /* Mesa should have unmapped it */
+   assert(!obj->Mappings[MAP_USER].Pointer); /* Mesa should have unmapped it */
+   assert(!obj->Mappings[MAP_INTERNAL].Pointer);

    if (intel_obj->buffer != NULL)
       release_buffer(intel_obj);
@@ -272,7 +273,8 @@ intel_bufferobj_get_subdata(struct gl_context * ctx,
 static void *
 intel_bufferobj_map_range(struct gl_context * ctx,
                           GLintptr offset, GLsizeiptr length,
-                          GLbitfield access, struct gl_buffer_object *obj)
+                          GLbitfield access, struct gl_buffer_object *obj,
+                          gl_map_buffer_index index)
 {
    struct intel_context *intel = intel_context(ctx);
    struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
@@ -282,9 +284,9 @@ intel_bufferobj_map_range(struct gl_context * ctx,
    /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
-   obj->Offset = offset;
-   obj->Length = length;
-   obj->AccessFlags = access;
+   obj->Mappings[index].Offset = offset;
+   obj->Mappings[index].Length = length;
+   obj->Mappings[index].AccessFlags = access;

    if (intel_obj->sys_buffer) {
       const bool read_only =
@@ -294,8 +296,8 @@ intel_bufferobj_map_range(struct gl_context * ctx,
         release_buffer(intel_obj);

       if (!intel_obj->buffer || intel_obj->source) {
-        obj->Pointer = intel_obj->sys_buffer + offset;
-        return obj->Pointer;
+        obj->Mappings[index].Pointer = intel_obj->sys_buffer + offset;
+        return obj->Mappings[index].Pointer;
       }

       free(intel_obj->sys_buffer);
@@ -303,7 +305,7 @@ intel_bufferobj_map_range(struct gl_context * ctx,
    }

    if (intel_obj->buffer == NULL) {
-      obj->Pointer = NULL;
+      obj->Mappings[index].Pointer = NULL;
       return NULL;
    }
@@ -346,23 +348,25 @@ intel_bufferobj_map_range(struct gl_context * ctx,
       const unsigned extra = (uintptr_t) offset % alignment;

       if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
-         intel_obj->range_map_buffer = _mesa_align_malloc(length + extra,
-                                                          alignment);
-         obj->Pointer = intel_obj->range_map_buffer + extra;
+         intel_obj->range_map_buffer[index] =
+            _mesa_align_malloc(length + extra, alignment);
+         obj->Mappings[index].Pointer =
+            intel_obj->range_map_buffer[index] + extra;
       } else {
-         intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
-                                                      "range map",
-                                                      length + extra,
-                                                      alignment);
+         intel_obj->range_map_bo[index] = drm_intel_bo_alloc(intel->bufmgr,
+                                                             "range map",
+                                                             length + extra,
+                                                             alignment);
          if (!(access & GL_MAP_READ_BIT)) {
-            drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
+            drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo[index]);
          } else {
-            drm_intel_bo_map(intel_obj->range_map_bo,
+            drm_intel_bo_map(intel_obj->range_map_bo[index],
                              (access & GL_MAP_WRITE_BIT) != 0);
          }
-         obj->Pointer = intel_obj->range_map_bo->virtual + extra;
+         obj->Mappings[index].Pointer =
+            intel_obj->range_map_bo[index]->virtual + extra;
       }
-      return obj->Pointer;
+      return obj->Mappings[index].Pointer;
    }

    if (access & GL_MAP_UNSYNCHRONIZED_BIT)
@@ -373,8 +377,8 @@ intel_bufferobj_map_range(struct gl_context * ctx,
       drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
    }

-   obj->Pointer = intel_obj->buffer->virtual + offset;
-   return obj->Pointer;
+   obj->Mappings[index].Pointer = intel_obj->buffer->virtual + offset;
+   return obj->Mappings[index].Pointer;
 }

 /* Ideally we'd use a BO to avoid taking up cache space for the temporary
@@ -385,7 +389,8 @@ intel_bufferobj_map_range(struct gl_context * ctx,
 static void
 intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
                                    GLintptr offset, GLsizeiptr length,
-                                   struct gl_buffer_object *obj)
+                                   struct gl_buffer_object *obj,
+                                   gl_map_buffer_index index)
 {
    struct intel_context *intel = intel_context(ctx);
    struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
@@ -394,7 +399,7 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
    /* Unless we're in the range map using a temporary system buffer,
    * there's no work to do.
    */
-   if (intel_obj->range_map_buffer == NULL)
+   if (intel_obj->range_map_buffer[index] == NULL)
       return;

    if (length == 0)
@@ -406,10 +411,11 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
    * former points to the actual mapping while the latter may be offset to
    * meet alignment guarantees.
    */
-   drm_intel_bo_subdata(temp_bo, 0, length, obj->Pointer);
+   drm_intel_bo_subdata(temp_bo, 0, length, obj->Mappings[index].Pointer);

    intel_emit_linear_blit(intel,
-                          intel_obj->buffer, obj->Offset + offset,
+                          intel_obj->buffer,
+                          obj->Mappings[index].Offset + offset,
                           temp_bo, 0,
                           length);
@@ -421,33 +427,35 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
  * Called via glUnmapBuffer().
  */
 static GLboolean
-intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
+intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
+                      gl_map_buffer_index index)
 {
    struct intel_context *intel = intel_context(ctx);
    struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

    assert(intel_obj);
-   assert(obj->Pointer);
+   assert(obj->Mappings[index].Pointer);

    if (intel_obj->sys_buffer != NULL) {
       /* always keep the mapping around. */
-   } else if (intel_obj->range_map_buffer != NULL) {
+   } else if (intel_obj->range_map_buffer[index] != NULL) {
       /* Since we've emitted some blits to buffers that will (likely) be used
        * in rendering operations in other cache domains in this batch, emit a
        * flush.  Once again, we wish for a domain tracker in libdrm to cover
        * usage inside of a batchbuffer.
        */
       intel_batchbuffer_emit_mi_flush(intel);
-      _mesa_align_free(intel_obj->range_map_buffer);
-      intel_obj->range_map_buffer = NULL;
-   } else if (intel_obj->range_map_bo != NULL) {
-      const unsigned extra = obj->Pointer - intel_obj->range_map_bo->virtual;
+      _mesa_align_free(intel_obj->range_map_buffer[index]);
+      intel_obj->range_map_buffer[index] = NULL;
+   } else if (intel_obj->range_map_bo[index] != NULL) {
+      const unsigned extra = obj->Mappings[index].Pointer -
+                             intel_obj->range_map_bo[index]->virtual;

-      drm_intel_bo_unmap(intel_obj->range_map_bo);
+      drm_intel_bo_unmap(intel_obj->range_map_bo[index]);

       intel_emit_linear_blit(intel,
-                             intel_obj->buffer, obj->Offset,
-                             intel_obj->range_map_bo, extra,
-                             obj->Length);
+                             intel_obj->buffer, obj->Mappings[index].Offset,
+                             intel_obj->range_map_bo[index], extra,
+                             obj->Mappings[index].Length);

       /* Since we've emitted some blits to buffers that will (likely) be used
        * in rendering operations in other cache domains in this batch, emit a
@@ -456,14 +464,14 @@ intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
        */
       intel_batchbuffer_emit_mi_flush(intel);

-      drm_intel_bo_unreference(intel_obj->range_map_bo);
-      intel_obj->range_map_bo = NULL;
+      drm_intel_bo_unreference(intel_obj->range_map_bo[index]);
+      intel_obj->range_map_bo[index] = NULL;
    } else if (intel_obj->buffer != NULL) {
       drm_intel_bo_unmap(intel_obj->buffer);
    }

-   obj->Pointer = NULL;
-   obj->Offset = 0;
-   obj->Length = 0;
+   obj->Mappings[index].Pointer = NULL;
+   obj->Mappings[index].Offset = 0;
+   obj->Mappings[index].Length = 0;

    return true;
 }
@@ -607,22 +615,24 @@ intel_bufferobj_copy_subdata(struct gl_context *ctx,
       char *ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
                                             GL_MAP_READ_BIT |
                                             GL_MAP_WRITE_BIT,
-                                            dst);
+                                            dst, MAP_INTERNAL);
       memmove(ptr + write_offset, ptr + read_offset, size);
-      intel_bufferobj_unmap(ctx, dst);
+      intel_bufferobj_unmap(ctx, dst, MAP_INTERNAL);
    } else {
       const char *src_ptr;
       char *dst_ptr;

       src_ptr = intel_bufferobj_map_range(ctx, 0, src->Size,
-                                          GL_MAP_READ_BIT, src);
+                                          GL_MAP_READ_BIT, src,
+                                          MAP_INTERNAL);
       dst_ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
-                                          GL_MAP_WRITE_BIT, dst);
+                                          GL_MAP_WRITE_BIT, dst,
+                                          MAP_INTERNAL);

       memcpy(dst_ptr + write_offset, src_ptr + read_offset, size);

-      intel_bufferobj_unmap(ctx, src);
-      intel_bufferobj_unmap(ctx, dst);
+      intel_bufferobj_unmap(ctx, src, MAP_INTERNAL);
+      intel_bufferobj_unmap(ctx, dst, MAP_INTERNAL);
    }
    return;
 }
diff --git a/src/mesa/drivers/dri/i915/intel_buffer_objects.h b/src/mesa/drivers/dri/i915/intel_buffer_objects.h
index d2671f98c45..9ebfe4844e7 100644
--- a/src/mesa/drivers/dri/i915/intel_buffer_objects.h
+++ b/src/mesa/drivers/dri/i915/intel_buffer_objects.h
@@ -46,10 +46,8 @@ struct intel_buffer_object
    /** System memory buffer data, if not using a BO to store the data. */
    void *sys_buffer;

-   drm_intel_bo *range_map_bo;
-   void *range_map_buffer;
-   unsigned int range_map_offset;
-   GLsizei range_map_size;
+   drm_intel_bo *range_map_bo[MAP_COUNT];
+   void *range_map_buffer[MAP_COUNT];

    bool source;
 };
diff --git a/src/mesa/drivers/dri/i915/intel_pixel_bitmap.c b/src/mesa/drivers/dri/i915/intel_pixel_bitmap.c
index f4a2293e6da..ce8ea4c07b3 100644
--- a/src/mesa/drivers/dri/i915/intel_pixel_bitmap.c
+++ b/src/mesa/drivers/dri/i915/intel_pixel_bitmap.c
@@ -78,7 +78,8 @@ static const GLubyte *map_pbo( struct gl_context *ctx,
    buf = (GLubyte *) ctx->Driver.MapBufferRange(ctx, 0, unpack->BufferObj->Size,
                                                 GL_MAP_READ_BIT,
-                                                unpack->BufferObj);
+                                                unpack->BufferObj,
+                                                MAP_INTERNAL);
    if (!buf) {
       _mesa_error(ctx, GL_INVALID_OPERATION, "glBitmap(PBO is mapped)");
       return NULL;
@@ -311,7 +312,7 @@ out:
    if (_mesa_is_bufferobj(unpack->BufferObj)) {
       /* done with PBO so unmap it now */
-      ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj);
+      ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj, MAP_INTERNAL);
    }

    intel_check_front_buffer_rendering(intel);