Diffstat (limited to 'src/gallium/auxiliary')
20 files changed, 855 insertions, 517 deletions
diff --git a/src/gallium/auxiliary/Makefile b/src/gallium/auxiliary/Makefile
index 8f937e3b4e9..9b6babce33f 100644
--- a/src/gallium/auxiliary/Makefile
+++ b/src/gallium/auxiliary/Makefile
@@ -48,12 +48,10 @@ C_SOURCES = \
 	draw/draw_vs_sse.c \
 	indices/u_indices_gen.c \
 	indices/u_unfilled_gen.c \
-	pipebuffer/pb_buffer_fenced.c \
 	pipebuffer/pb_buffer_malloc.c \
 	pipebuffer/pb_bufmgr_alt.c \
 	pipebuffer/pb_bufmgr_cache.c \
 	pipebuffer/pb_bufmgr_debug.c \
-	pipebuffer/pb_bufmgr_fenced.c \
 	pipebuffer/pb_bufmgr_mm.c \
 	pipebuffer/pb_bufmgr_ondemand.c \
 	pipebuffer/pb_bufmgr_pool.c \
diff --git a/src/gallium/auxiliary/draw/draw_context.c b/src/gallium/auxiliary/draw/draw_context.c
index e90dfc5aec4..fb1bc05af4a 100644
--- a/src/gallium/auxiliary/draw/draw_context.c
+++ b/src/gallium/auxiliary/draw/draw_context.c
@@ -34,7 +34,6 @@
 #include "util/u_memory.h"
 #include "util/u_math.h"
 #include "draw_context.h"
-#include "draw_vbuf.h"
 #include "draw_vs.h"
 #include "draw_gs.h"
 #include "draw_pt.h"
diff --git a/src/gallium/auxiliary/draw/draw_pipe.c b/src/gallium/auxiliary/draw/draw_pipe.c
index 1c6d657297c..11d6485dcf0 100644
--- a/src/gallium/auxiliary/draw/draw_pipe.c
+++ b/src/gallium/auxiliary/draw/draw_pipe.c
@@ -106,10 +106,9 @@ void draw_pipeline_destroy( struct draw_context *draw )
 
 
-
-
-
-
+/**
+ * Build primitive to render a point with vertex at v0.
+ */
 static void do_point( struct draw_context *draw,
                       const char *v0 )
 {
@@ -123,6 +122,10 @@
 }
 
 
+/**
+ * Build primitive to render a line with vertices at v0, v1.
+ * \param flags  bitmask of DRAW_PIPE_EDGE_x, DRAW_PIPE_RESET_STIPPLE
+ */
 static void do_line( struct draw_context *draw,
                      ushort flags,
                      const char *v0,
@@ -139,6 +142,10 @@
 }
 
 
+/**
+ * Build primitive to render a triangle with vertices at v0, v1, v2.
+ * \param flags  bitmask of DRAW_PIPE_EDGE_x, DRAW_PIPE_RESET_STIPPLE
+ */
 static void do_triangle( struct draw_context *draw,
                          ushort flags,
                          char *v0,
@@ -157,7 +164,10 @@
 }
 
 
-
+/*
+ * Set up macros for draw_pt_decompose.h template code.
+ * This code uses vertex indexes / elements.
+ */
 #define QUAD(i0,i1,i2,i3)                       \
    do_triangle( draw,                           \
                 ( DRAW_PIPE_RESET_STIPPLE |     \
@@ -175,16 +185,16 @@ static void do_triangle( struct draw_context *draw,
 
 #define TRIANGLE(flags,i0,i1,i2)                \
    do_triangle( draw,                           \
-                elts[i0],  /* flags */          \
+                elts[i0],  /* flags */          \
                 verts + stride * (elts[i0] & ~DRAW_PIPE_FLAG_MASK), \
-                verts + stride * elts[i1],      \
-                verts + stride * elts[i2])
+                verts + stride * (elts[i1] & ~DRAW_PIPE_FLAG_MASK), \
+                verts + stride * (elts[i2] & ~DRAW_PIPE_FLAG_MASK) );
 
 #define LINE(flags,i0,i1)                       \
    do_line( draw,                               \
-            elts[i0],                           \
+            elts[i0],                           \
             verts + stride * (elts[i0] & ~DRAW_PIPE_FLAG_MASK), \
-            verts + stride * elts[i1])
+            verts + stride * (elts[i1] & ~DRAW_PIPE_FLAG_MASK) );
 
 #define POINT(i0)                               \
    do_point( draw,                              \
@@ -213,7 +223,9 @@ static void do_triangle( struct draw_context *draw,
 
 
-/* Code to run the pipeline on a fairly arbitary collection of vertices.
+/**
+ * Code to run the pipeline on a fairly arbitrary collection of vertices.
+ * For drawing indexed primitives.
  *
  * Vertex headers must be pre-initialized with the
  * UNDEFINED_VERTEX_ID, this code will cause that id to become
@@ -243,6 +255,12 @@ void draw_pipeline_run( struct draw_context *draw,
    draw->pipeline.vertex_count = 0;
 }
 
+
+
+/*
+ * Set up macros for draw_pt_decompose.h template code.
+ * This code is for non-indexed rendering (no elts).
+ */
 #define QUAD(i0,i1,i2,i3)                       \
    do_triangle( draw,                           \
                 ( DRAW_PIPE_RESET_STIPPLE |     \
@@ -293,6 +311,10 @@ void draw_pipeline_run( struct draw_context *draw,
 
 #include "draw_pt_decompose.h"
 
+
+/*
+ * For drawing non-indexed primitives.
+ */
 void draw_pipeline_run_linear( struct draw_context *draw,
                                unsigned prim,
                                struct vertex_header *vertices,
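The TRIANGLE() and LINE() macro change above fixes an indexing bug: pipeline flags travel in the top bits of each ushort element, but only the first operand used to be masked. A minimal sketch of the packing scheme; the flag values here are illustrative stand-ins for the real DRAW_PIPE_* constants in draw_private.h:

    /* Sketch: flags ride in the high bits of a ushort vertex element. */
    #include <assert.h>

    typedef unsigned short ushort;

    #define DRAW_PIPE_FLAG_MASK      0xf000   /* assumed value */
    #define DRAW_PIPE_RESET_STIPPLE  0x2000   /* assumed value */

    static ushort pack_elt(ushort flags, ushort index)
    {
       assert((index & DRAW_PIPE_FLAG_MASK) == 0);
       return flags | index;
    }

    int main(void)
    {
       ushort elt = pack_elt(DRAW_PIPE_RESET_STIPPLE, 42);
       /* Every operand must be masked before use as an index; the old
        * macros only masked elts[i0], so a flagged second or third
        * vertex element would index far out of bounds. */
       assert((ushort)(elt & ~DRAW_PIPE_FLAG_MASK) == 42);
       return 0;
    }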
diff --git a/src/gallium/auxiliary/draw/draw_pt.c b/src/gallium/auxiliary/draw/draw_pt.c
index a5ddec52863..f5ed32d0b05 100644
--- a/src/gallium/auxiliary/draw/draw_pt.c
+++ b/src/gallium/auxiliary/draw/draw_pt.c
@@ -33,7 +33,6 @@
 #include "draw/draw_context.h"
 #include "draw/draw_private.h"
 #include "draw/draw_pt.h"
-#include "draw/draw_vs.h"
 #include "tgsi/tgsi_dump.h"
 #include "util/u_math.h"
 #include "util/u_prim.h"
diff --git a/src/gallium/auxiliary/draw/draw_pt_fetch.c b/src/gallium/auxiliary/draw/draw_pt_fetch.c
index 55e7a7b81ad..252be5053e4 100644
--- a/src/gallium/auxiliary/draw/draw_pt_fetch.c
+++ b/src/gallium/auxiliary/draw/draw_pt_fetch.c
@@ -30,7 +30,6 @@
 #include "draw/draw_context.h"
 #include "draw/draw_private.h"
 #include "draw/draw_vbuf.h"
-#include "draw/draw_vertex.h"
 #include "draw/draw_pt.h"
 #include "translate/translate.h"
 #include "translate/translate_cache.h"
diff --git a/src/gallium/auxiliary/draw/draw_pt_fetch_shade_emit.c b/src/gallium/auxiliary/draw/draw_pt_fetch_shade_emit.c
index 734c05f0688..c5dfbcfa3cb 100644
--- a/src/gallium/auxiliary/draw/draw_pt_fetch_shade_emit.c
+++ b/src/gallium/auxiliary/draw/draw_pt_fetch_shade_emit.c
@@ -40,7 +40,6 @@
 #include "draw/draw_pt.h"
 #include "draw/draw_vs.h"
 
-#include "translate/translate.h"
 
 struct fetch_shade_emit;
 
diff --git a/src/gallium/auxiliary/draw/draw_pt_post_vs.c b/src/gallium/auxiliary/draw/draw_pt_post_vs.c
index 55151823a14..9728d5c2bdf 100644
--- a/src/gallium/auxiliary/draw/draw_pt_post_vs.c
+++ b/src/gallium/auxiliary/draw/draw_pt_post_vs.c
@@ -30,7 +30,6 @@
 #include "draw/draw_context.h"
 #include "draw/draw_private.h"
 #include "draw/draw_vbuf.h"
-#include "draw/draw_vertex.h"
 #include "draw/draw_pt.h"
 
 struct pt_post_vs {
diff --git a/src/gallium/auxiliary/draw/draw_vs_varient.c b/src/gallium/auxiliary/draw/draw_vs_varient.c
index 9f40030f39f..b87a465f6be 100644
--- a/src/gallium/auxiliary/draw/draw_vs_varient.c
+++ b/src/gallium/auxiliary/draw/draw_vs_varient.c
@@ -38,7 +38,6 @@
 #include "draw/draw_vertex.h"
 #include "draw/draw_vs.h"
 #include "translate/translate.h"
-#include "translate/translate_cache.h"
 
 
 /* A first pass at incorporating vertex fetch/emit functionality into */
diff --git a/src/gallium/auxiliary/pipebuffer/Makefile b/src/gallium/auxiliary/pipebuffer/Makefile
new file mode 100644
index 00000000000..21d25d24748
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/Makefile
@@ -0,0 +1,18 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = pipebuffer
+
+C_SOURCES = \
+	pb_buffer_fenced.c \
+	pb_buffer_malloc.c \
+	pb_bufmgr_alt.c \
+	pb_bufmgr_cache.c \
+	pb_bufmgr_debug.c \
+	pb_bufmgr_mm.c \
+	pb_bufmgr_ondemand.c \
+	pb_bufmgr_pool.c \
+	pb_bufmgr_slab.c \
+	pb_validate.c
+
+include ../../Makefile.template
diff --git a/src/gallium/auxiliary/pipebuffer/SConscript b/src/gallium/auxiliary/pipebuffer/SConscript
new file mode 100644
index 00000000000..a074a554717
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/SConscript
@@ -0,0 +1,18 @@
+Import('*')
+
+pipebuffer = env.ConvenienceLibrary(
+	target = 'pipebuffer',
+	source = [
+		'pb_buffer_fenced.c',
+		'pb_buffer_malloc.c',
+		'pb_bufmgr_alt.c',
+		'pb_bufmgr_cache.c',
+		'pb_bufmgr_debug.c',
+		'pb_bufmgr_mm.c',
+		'pb_bufmgr_ondemand.c',
+		'pb_bufmgr_pool.c',
+		'pb_bufmgr_slab.c',
+		'pb_validate.c',
+	])
+
+auxiliaries.insert(0, pipebuffer)
diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
index a4b78f14943..1ac424d678d 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
@@ -1,6 +1,7 @@
 /**************************************************************************
  *
  * Copyright 2007-2009 VMware, Inc.
+ * Copyright 2007-2010 VMware, Inc.
  * All Rights Reserved.
 *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -28,9 +29,9 @@
 /**
  * \file
  * Implementation of fenced buffers.
- *
- * \author Jose Fonseca <jrfonseca-at-tungstengraphics-dot-com>
- * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ *
+ * \author Jose Fonseca <jfonseca-at-vmware-dot-com>
+ * \author Thomas Hellström <thellstrom-at-vmware-dot-com>
  */
 
 
@@ -50,6 +51,7 @@
 
 #include "pb_buffer.h"
 #include "pb_buffer_fenced.h"
+#include "pb_bufmgr.h"
 
 
 
@@ -59,32 +61,79 @@
 #define SUPER(__derived) (&(__derived)->base)
 
 
-struct fenced_buffer_list
+struct fenced_manager
 {
-   pipe_mutex mutex;
-
+   struct pb_manager base;
+   struct pb_manager *provider;
    struct pb_fence_ops *ops;
-
-   pb_size numDelayed;
-   struct list_head delayed;
-
-#ifdef DEBUG
-   pb_size numUnfenced;
+
+   /**
+    * Maximum buffer size that can be safely allocated.
+    */
+   pb_size max_buffer_size;
+
+   /**
+    * Maximum cpu memory we can allocate before we start waiting for the
+    * GPU to idle.
+    */
+   pb_size max_cpu_total_size;
+
+   /**
+    * Following members are mutable and protected by this mutex.
+    */
+   pipe_mutex mutex;
+
+   /**
+    * Fenced buffer list.
+    *
+    * All fenced buffers are placed in this list, ordered from the oldest
+    * fence to the newest fence.
+    */
+   struct list_head fenced;
+   pb_size num_fenced;
+
    struct list_head unfenced;
-#endif
+   pb_size num_unfenced;
+
+   /**
+    * How much temporary CPU memory is being used to hold unvalidated buffers.
+    */
+   pb_size cpu_total_size;
 };
 
 
 /**
+ * Fenced buffer.
+ *
  * Wrapper around a pipe buffer which adds fencing and reference counting.
  */
 struct fenced_buffer
 {
+   /*
+    * Immutable members.
+    */
+
    struct pb_buffer base;
-
+   struct fenced_manager *mgr;
+
+   /*
+    * Following members are mutable and protected by fenced_manager::mutex.
+    */
+
+   struct list_head head;
+
+   /**
+    * Buffer with storage.
+    */
    struct pb_buffer *buffer;
+   pb_size size;
+   struct pb_desc desc;
 
-   /* FIXME: protect access with mutex */
+   /**
+    * Temporary CPU storage data. Used when there isn't enough GPU memory to
+    * store the buffer.
+    */
+   void *data;
 
    /**
     * A bitmask of PIPE_BUFFER_USAGE_CPU/GPU_READ/WRITE describing the current
@@ -93,15 +142,22 @@ struct fenced_buffer
    unsigned flags;
 
    unsigned mapcount;
+
    struct pb_validate *vl;
    unsigned validation_flags;
+
-   struct pipe_fence_handle *fence;
 
-   struct list_head head;
-   struct fenced_buffer_list *list;
+   struct pipe_fence_handle *fence;
 };
 
 
+static INLINE struct fenced_manager *
+fenced_manager(struct pb_manager *mgr)
+{
+   assert(mgr);
+   return (struct fenced_manager *)mgr;
+}
+
+
 static INLINE struct fenced_buffer *
 fenced_buffer(struct pb_buffer *buf)
 {
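The fenced_manager()/fenced_buffer() helpers added above use the usual gallium sub-classing idiom: the derived struct embeds its base as the first member, so a base pointer can be downcast with a plain cast. A minimal sketch with hypothetical names:

    /* Sketch of the containment/downcast idiom, hypothetical types. */
    #include <assert.h>

    struct base_manager { void (*destroy)(struct base_manager *); };

    struct derived_manager {
       struct base_manager base;   /* must stay the first member */
       int num_fenced;
    };

    static struct derived_manager *
    derived_manager(struct base_manager *mgr)
    {
       assert(mgr);
       /* Valid because offsetof(struct derived_manager, base) == 0. */
       return (struct derived_manager *)mgr;
    }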
@@ -110,221 +166,569 @@ fenced_buffer(struct pb_buffer *buf)
 }
 
 
-static INLINE void
-_fenced_buffer_add(struct fenced_buffer *fenced_buf)
-{
-   struct fenced_buffer_list *fenced_list = fenced_buf->list;
+static void
+fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf);
 
-   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
-   assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
-   assert(fenced_buf->fence);
+static enum pipe_error
+fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
+                                        struct fenced_buffer *fenced_buf);
 
+static void
+fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf);
+
+static enum pipe_error
+fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
+                                        struct fenced_buffer *fenced_buf,
+                                        boolean wait);
+
+static enum pipe_error
+fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf);
+
+static enum pipe_error
+fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf);
+
+
+/**
+ * Dump the fenced buffer list.
+ *
+ * Useful to understand failures to allocate buffers.
+ */
+static void
+fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
+{
 #ifdef DEBUG
-   LIST_DEL(&fenced_buf->head);
-   assert(fenced_list->numUnfenced);
-   --fenced_list->numUnfenced;
+   struct pb_fence_ops *ops = fenced_mgr->ops;
+   struct list_head *curr, *next;
+   struct fenced_buffer *fenced_buf;
+
+   debug_printf("%10s %7s %8s %7s %10s %s\n",
+                "buffer", "size", "refcount", "storage", "fence", "signalled");
+
+   curr = fenced_mgr->unfenced.next;
+   next = curr->next;
+   while(curr != &fenced_mgr->unfenced) {
+      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
+      assert(!fenced_buf->fence);
+      debug_printf("%10p %7u %8u %7s\n",
+                   (void *) fenced_buf,
+                   fenced_buf->base.base.size,
+                   p_atomic_read(&fenced_buf->base.base.reference.count),
+                   fenced_buf->buffer ? "gpu" : (fenced_buf->data ? "cpu" : "none"));
+      curr = next;
+      next = curr->next;
+   }
+
+   curr = fenced_mgr->fenced.next;
+   next = curr->next;
+   while(curr != &fenced_mgr->fenced) {
+      int signaled;
+      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
+      assert(fenced_buf->buffer);
+      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
+      debug_printf("%10p %7u %8u %7s %10p %s\n",
+                   (void *) fenced_buf,
+                   fenced_buf->base.base.size,
+                   p_atomic_read(&fenced_buf->base.base.reference.count),
+                   "gpu",
+                   (void *) fenced_buf->fence,
+                   signaled == 0 ? "y" : "n");
+      curr = next;
+      next = curr->next;
+   }
+#else
+   (void)fenced_mgr;
 #endif
-   LIST_ADDTAIL(&fenced_buf->head, &fenced_list->delayed);
-   ++fenced_list->numDelayed;
 }
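fenced_manager_dump_locked() and the other list walks in this file use the same pattern: latch the next pointer before visiting a node, so the current node may be unlinked mid-walk. A self-contained sketch of the intrusive-list idiom (LIST_ENTRY here mimics the u_double_list.h macro):

    /* Sketch: walking an intrusive list while nodes may be removed. */
    #include <stddef.h>

    struct list_head { struct list_head *prev, *next; };

    #define LIST_ENTRY(type, ptr, member) \
       ((type *)((char *)(ptr) - offsetof(type, member)))

    struct node { int value; struct list_head head; };

    static void walk(struct list_head *list)
    {
       struct list_head *curr = list->next;
       struct list_head *next = curr->next;

       while (curr != list) {
          struct node *n = LIST_ENTRY(struct node, curr, head);
          /* ... n may be unlinked (even freed) here; the walk survives
           * because 'next' was captured before visiting the node ... */
          (void)n;
          curr = next;
          next = curr->next;
       }
    }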
-/**
- * Actually destroy the buffer.
- */
 static INLINE void
-_fenced_buffer_destroy(struct fenced_buffer *fenced_buf)
+fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr,
+                             struct fenced_buffer *fenced_buf)
 {
-   struct fenced_buffer_list *fenced_list = fenced_buf->list;
-
    assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
 
+   assert(!fenced_buf->fence);
-#ifdef DEBUG
    assert(fenced_buf->head.prev);
    assert(fenced_buf->head.next);
    LIST_DEL(&fenced_buf->head);
-   assert(fenced_list->numUnfenced);
-   --fenced_list->numUnfenced;
-#else
-   (void)fenced_list;
-#endif
-   pb_reference(&fenced_buf->buffer, NULL);
+   assert(fenced_mgr->num_unfenced);
+   --fenced_mgr->num_unfenced;
+
+   fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
+   fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
+
    FREE(fenced_buf);
 }
 
 
+/**
+ * Add the buffer to the fenced list.
+ *
+ * Reference count should be incremented before calling this function.
+ */
 static INLINE void
-_fenced_buffer_remove(struct fenced_buffer_list *fenced_list,
-                      struct fenced_buffer *fenced_buf)
+fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
+                         struct fenced_buffer *fenced_buf)
+{
+   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
+   assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
+   assert(fenced_buf->fence);
+
+   p_atomic_inc(&fenced_buf->base.base.reference.count);
+
+   LIST_DEL(&fenced_buf->head);
+   assert(fenced_mgr->num_unfenced);
+   --fenced_mgr->num_unfenced;
+   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced);
+   ++fenced_mgr->num_fenced;
+}
+
+
+/**
+ * Remove the buffer from the fenced list, and potentially destroy the buffer
+ * if the reference count reaches zero.
+ *
+ * Returns TRUE if the buffer was destroyed.
+ */
+static INLINE boolean
+fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr,
+                            struct fenced_buffer *fenced_buf)
 {
-   struct pb_fence_ops *ops = fenced_list->ops;
+   struct pb_fence_ops *ops = fenced_mgr->ops;
 
    assert(fenced_buf->fence);
-   assert(fenced_buf->list == fenced_list);
+   assert(fenced_buf->mgr == fenced_mgr);
 
    ops->fence_reference(ops, &fenced_buf->fence, NULL);
    fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;
 
    assert(fenced_buf->head.prev);
    assert(fenced_buf->head.next);
 
    LIST_DEL(&fenced_buf->head);
-   assert(fenced_list->numDelayed);
-   --fenced_list->numDelayed;
-
-#ifdef DEBUG
-   LIST_ADDTAIL(&fenced_buf->head, &fenced_list->unfenced);
-   ++fenced_list->numUnfenced;
-#endif
-
-   /**
-    * FIXME!!!
-    */
+   assert(fenced_mgr->num_fenced);
+   --fenced_mgr->num_fenced;
+
+   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
+   ++fenced_mgr->num_unfenced;
+
+   if (p_atomic_dec_zero(&fenced_buf->base.base.reference.count)) {
+      fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
+      return TRUE;
+   }
 
-   if(!pipe_is_referenced(&fenced_buf->base.base.reference))
-      _fenced_buffer_destroy(fenced_buf);
+   return FALSE;
 }
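fenced_buffer_add_locked()/remove_locked() make the fenced list itself hold a reference: adding bumps the refcount, removing drops it and may destroy the buffer. A sketch of that ownership idiom using C11 atomics in place of Mesa's p_atomic_* wrappers:

    /* Sketch: a list that owns a reference to each member. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct obj { atomic_int refcount; };

    static void list_add_ref(struct obj *o)
    {
       atomic_fetch_add(&o->refcount, 1);   /* list now owns a reference */
    }

    static bool list_del_unref(struct obj *o)
    {
       /* fetch_sub returns the previous value; 1 means we dropped the
        * last reference, mirroring remove_locked() returning TRUE. */
       if (atomic_fetch_sub(&o->refcount, 1) == 1) {
          free(o);
          return true;
       }
       return false;
    }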
+/**
+ * Wait for the fence to expire, and remove it from the fenced list.
+ *
+ * This function will release and re-acquire the mutex, so any copy of mutable
+ * state must be discarded after calling it.
+ */
 static INLINE enum pipe_error
-_fenced_buffer_finish(struct fenced_buffer *fenced_buf)
+fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
+                            struct fenced_buffer *fenced_buf)
 {
-   struct fenced_buffer_list *fenced_list = fenced_buf->list;
-   struct pb_fence_ops *ops = fenced_list->ops;
+   struct pb_fence_ops *ops = fenced_mgr->ops;
+   enum pipe_error ret = PIPE_ERROR;
 
 #if 0
    debug_warning("waiting for GPU");
 #endif
 
+   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
    assert(fenced_buf->fence);
+
    if(fenced_buf->fence) {
-      if(ops->fence_finish(ops, fenced_buf->fence, 0) != 0) {
-         return PIPE_ERROR;
+      struct pipe_fence_handle *fence = NULL;
+      int finished;
+      boolean proceed;
+
+      ops->fence_reference(ops, &fence, fenced_buf->fence);
+
+      pipe_mutex_unlock(fenced_mgr->mutex);
+
+      finished = ops->fence_finish(ops, fenced_buf->fence, 0);
+
+      pipe_mutex_lock(fenced_mgr->mutex);
+
+      assert(pipe_is_referenced(&fenced_buf->base.base.reference));
+
+      /*
+       * Only proceed if the fence object didn't change in the meanwhile.
+       * Otherwise assume the work has been already carried out by another
+       * thread that re-acquired the lock before us.
+       */
+      proceed = fence == fenced_buf->fence ? TRUE : FALSE;
+
+      ops->fence_reference(ops, &fence, NULL);
+
+      if(proceed && finished == 0) {
+         /*
+          * Remove from the fenced list
+          */
+
+         boolean destroyed;
+
+         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
+
+         /* TODO: remove subsequent buffers with the same fence? */
+
+         assert(!destroyed);
+
+         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;
+
+         ret = PIPE_OK;
       }
-      /* Remove from the fenced list */
-      /* TODO: remove consequents */
-      _fenced_buffer_remove(fenced_list, fenced_buf);
    }
 
-   fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;
-
-   return PIPE_OK;
+   return ret;
 }
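The key concurrency pattern in fenced_buffer_finish_locked() is: take a private reference to the fence, drop the mutex for the blocking wait, then revalidate state after relocking. A minimal sketch of the same pattern, with pthreads standing in for pipe_mutex:

    /* Sketch: unlock around a blocking wait, then double-check state. */
    #include <pthread.h>

    struct waiter_state {
       pthread_mutex_t mutex;
       void *fence;   /* current fence, protected by mutex */
    };

    /* Returns nonzero if the fence we waited on is still current. */
    static int wait_locked(struct waiter_state *s, void (*fence_wait)(void *))
    {
       void *observed = s->fence;        /* snapshot under the lock */

       pthread_mutex_unlock(&s->mutex);
       fence_wait(observed);             /* blocking wait, lock released */
       pthread_mutex_lock(&s->mutex);

       /* Another thread may have retaken the lock meanwhile; only act
        * if the fence we waited on is still the current one. */
       return s->fence == observed;
    }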
 /**
- * Free as many fenced buffers from the list head as possible.
+ * Remove as many fenced buffers from the fenced list as possible.
+ *
+ * Returns TRUE if at least one buffer was removed.
  */
-static void
-_fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
-                               int wait)
+static boolean
+fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr,
+                                      boolean wait)
 {
-   struct pb_fence_ops *ops = fenced_list->ops;
+   struct pb_fence_ops *ops = fenced_mgr->ops;
    struct list_head *curr, *next;
    struct fenced_buffer *fenced_buf;
    struct pb_buffer *pb_buf;
    struct pipe_fence_handle *prev_fence = NULL;
+   boolean ret = FALSE;
 
-   curr = fenced_list->delayed.next;
+   curr = fenced_mgr->fenced.next;
    next = curr->next;
-   while(curr != &fenced_list->delayed) {
+   while(curr != &fenced_mgr->fenced) {
       fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
 
       if(fenced_buf->fence != prev_fence) {
	 int signaled;
-	 if (wait)
+
+	 if (wait) {
	    signaled = ops->fence_finish(ops, fenced_buf->fence, 0);
-	 else
+
+	    /*
+	     * Don't return just now. Instead preemptively check if the
+	     * following buffers' fences have already expired, without
+	     * further waits.
+	     */
+	    wait = FALSE;
+	 }
+	 else {
	    signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
-	 if (signaled != 0)
-	    break;
+	 }
+
+	 if (signaled != 0) {
+	    return ret;
+	 }
+
	 prev_fence = fenced_buf->fence;
       }
       else {
+	 /* This buffer's fence object is identical to the previous buffer's
+	  * fence object, so no need to check the fence again.
+	  */
	 assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
       }
 
-      _fenced_buffer_remove(fenced_list, fenced_buf);
+      fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
+
+      ret = TRUE;
 
       curr = next;
       next = curr->next;
    }
+
+   return ret;
+}
+
+
+/**
+ * Try to free some GPU memory by backing it up into CPU memory.
+ *
+ * Returns TRUE if at least one buffer was freed.
+ */
+static boolean
+fenced_manager_free_gpu_storage_locked(struct fenced_manager *fenced_mgr)
+{
+   struct list_head *curr, *next;
+   struct fenced_buffer *fenced_buf;
+
+   curr = fenced_mgr->unfenced.next;
+   next = curr->next;
+   while(curr != &fenced_mgr->unfenced) {
+      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
+
+      /*
+       * We can only move storage if the buffer is not mapped and not
+       * validated.
+       */
+      if(fenced_buf->buffer &&
+         !fenced_buf->mapcount &&
+         !fenced_buf->vl) {
+         enum pipe_error ret;
+
+         ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
+         if(ret == PIPE_OK) {
+            ret = fenced_buffer_copy_storage_to_cpu_locked(fenced_buf);
+            if(ret == PIPE_OK) {
+               fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
+               return TRUE;
+            }
+            fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
+         }
+      }
+
+      curr = next;
+      next = curr->next;
    }
+
+   return FALSE;
 }
 
 
+/**
+ * Destroy CPU storage for this buffer.
+ */
 static void
-fenced_buffer_destroy(struct pb_buffer *buf)
+fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf)
 {
-   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
-   struct fenced_buffer_list *fenced_list = fenced_buf->list;
+   if(fenced_buf->data) {
+      align_free(fenced_buf->data);
+      fenced_buf->data = NULL;
+      assert(fenced_buf->mgr->cpu_total_size >= fenced_buf->size);
+      fenced_buf->mgr->cpu_total_size -= fenced_buf->size;
+   }
+}
 
-   pipe_mutex_lock(fenced_list->mutex);
-   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
-   if (fenced_buf->fence) {
-      struct pb_fence_ops *ops = fenced_list->ops;
-      if(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0) {
-	 struct list_head *curr, *prev;
-	 curr = &fenced_buf->head;
-	 prev = curr->prev;
-	 do {
-	    fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
-	    assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
-	    _fenced_buffer_remove(fenced_list, fenced_buf);
-	    curr = prev;
-	    prev = curr->prev;
-	 } while (curr != &fenced_list->delayed);
-      }
-      else {
-	 /* delay destruction */
+
+/**
+ * Create CPU storage for this buffer.
+ */
+static enum pipe_error
+fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
+                                        struct fenced_buffer *fenced_buf)
+{
+   assert(!fenced_buf->data);
+   if(fenced_buf->data)
+      return PIPE_OK;
+
+   if (fenced_mgr->cpu_total_size + fenced_buf->size > fenced_mgr->max_cpu_total_size)
+      return PIPE_ERROR_OUT_OF_MEMORY;
+
+   fenced_buf->data = align_malloc(fenced_buf->size, fenced_buf->desc.alignment);
+   if(!fenced_buf->data)
+      return PIPE_ERROR_OUT_OF_MEMORY;
+
+   fenced_mgr->cpu_total_size += fenced_buf->size;
+
+   return PIPE_OK;
+}
+
+
+/**
+ * Destroy the GPU storage.
+ */
+static void
+fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf)
+{
+   if(fenced_buf->buffer) {
+      pb_reference(&fenced_buf->buffer, NULL);
    }
 }
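fenced_manager_free_gpu_storage_locked() is the eviction path: an idle, unmapped, unvalidated buffer is copied into a CPU shadow and its GPU storage released, to be re-created at validate time. A self-contained sketch of the two-step copy with the same unwind-on-failure ordering; the gpu_* helpers are hypothetical stand-ins for pb_map()/pb_unmap()/pb_reference():

    /* Sketch: evict a buffer's contents from GPU to CPU memory. */
    #include <stdlib.h>
    #include <string.h>

    struct buf { void *gpu, *cpu; size_t size; };

    extern void *gpu_map(void *gpu);     /* hypothetical */
    extern void  gpu_unmap(void *gpu);   /* hypothetical */
    extern void  gpu_free(void *gpu);    /* hypothetical */

    static int evict_to_cpu(struct buf *b)
    {
       void *map;

       b->cpu = malloc(b->size);
       if (!b->cpu)
          return -1;

       map = gpu_map(b->gpu);
       if (!map) {               /* unwind on failure, like the diff */
          free(b->cpu);
          b->cpu = NULL;
          return -1;
       }
       memcpy(b->cpu, map, b->size);
       gpu_unmap(b->gpu);

       gpu_free(b->gpu);         /* only after the copy succeeded */
       b->gpu = NULL;
       return 0;
    }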
+/**
+ * Try to create GPU storage for this buffer.
+ *
+ * This function is a shorthand around pb_manager::create_buffer for
+ * fenced_buffer_create_gpu_storage_locked()'s benefit.
+ */
+static INLINE boolean
+fenced_buffer_try_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
+                                            struct fenced_buffer *fenced_buf)
+{
+   struct pb_manager *provider = fenced_mgr->provider;
+
+   assert(!fenced_buf->buffer);
+
+   fenced_buf->buffer = provider->create_buffer(fenced_mgr->provider,
+                                                fenced_buf->size,
+                                                &fenced_buf->desc);
+   return fenced_buf->buffer ? TRUE : FALSE;
+}
+
+
+/**
+ * Create GPU storage for this buffer.
+ */
+static enum pipe_error
+fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
+                                        struct fenced_buffer *fenced_buf,
+                                        boolean wait)
+{
+   assert(!fenced_buf->buffer);
+
+   /*
+    * Check for signaled buffers before trying to allocate.
+    */
+   fenced_manager_check_signalled_locked(fenced_mgr, FALSE);
+
+   fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
+
+   /*
+    * Keep trying while there is some sort of progress:
+    * - fences are expiring,
+    * - or buffers are being swapped out from GPU memory into CPU memory.
+    */
+   while(!fenced_buf->buffer &&
+         (fenced_manager_check_signalled_locked(fenced_mgr, FALSE) ||
+          fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
+      fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
+   }
+
+   if(!fenced_buf->buffer && wait) {
+      /*
+       * Same as before, but this time around, wait to free buffers if
+       * necessary.
+       */
+      while(!fenced_buf->buffer &&
+            (fenced_manager_check_signalled_locked(fenced_mgr, TRUE) ||
+             fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
+         fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
+      }
+   }
+
+   if(!fenced_buf->buffer) {
+      if(0)
+         fenced_manager_dump_locked(fenced_mgr);
+
+      /* give up */
+      return PIPE_ERROR_OUT_OF_MEMORY;
+   }
+
+   return PIPE_OK;
+}
+
+
+static enum pipe_error
+fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf)
+{
+   uint8_t *map;
+
+   assert(fenced_buf->data);
+   assert(fenced_buf->buffer);
+
+   map = pb_map(fenced_buf->buffer, PIPE_BUFFER_USAGE_CPU_WRITE);
+   if(!map)
+      return PIPE_ERROR;
+
+   memcpy(map, fenced_buf->data, fenced_buf->size);
+
+   pb_unmap(fenced_buf->buffer);
+
+   return PIPE_OK;
+}
+
+
+static enum pipe_error
+fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf)
+{
+   const uint8_t *map;
+
+   assert(fenced_buf->data);
+   assert(fenced_buf->buffer);
+
+   map = pb_map(fenced_buf->buffer, PIPE_BUFFER_USAGE_CPU_READ);
+   if(!map)
+      return PIPE_ERROR;
+
+   memcpy(fenced_buf->data, map, fenced_buf->size);
+
+   pb_unmap(fenced_buf->buffer);
+
+   return PIPE_OK;
+}
+
+
+static void
+fenced_buffer_destroy(struct pb_buffer *buf)
+{
+   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
+
+   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
+
+   pipe_mutex_lock(fenced_mgr->mutex);
+
+   fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
+
+   pipe_mutex_unlock(fenced_mgr->mutex);
+}
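The allocation strategy above retries only while reclamation makes progress, which guarantees termination. A minimal sketch of that "retry while something was freed" loop; try_alloc and reclaim_some are hypothetical helpers:

    /* Sketch: allocate with progressive reclamation, then a blocking pass. */
    #include <stdbool.h>
    #include <stddef.h>

    extern void *try_alloc(size_t size);       /* hypothetical */
    extern bool  reclaim_some(bool wait);      /* hypothetical: true if progress */

    static void *alloc_with_reclaim(size_t size, bool allow_wait)
    {
       void *p = try_alloc(size);

       while (!p && reclaim_some(false))       /* cheap, non-blocking passes */
          p = try_alloc(size);

       if (!p && allow_wait) {
          while (!p && reclaim_some(true))     /* blocking passes, last resort */
             p = try_alloc(size);
       }
       return p;                               /* NULL means genuinely out of memory */
    }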
 static void *
-fenced_buffer_map(struct pb_buffer *buf, 
+fenced_buffer_map(struct pb_buffer *buf,
                   unsigned flags)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
-   struct fenced_buffer_list *fenced_list = fenced_buf->list;
-   struct pb_fence_ops *ops = fenced_list->ops;
-   void *map;
+   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
+   struct pb_fence_ops *ops = fenced_mgr->ops;
+   void *map = NULL;
+
+   pipe_mutex_lock(fenced_mgr->mutex);
 
    assert(!(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE));
-
-   /* Serialize writes */
-   if((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) ||
-      ((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ) && (flags & PIPE_BUFFER_USAGE_CPU_WRITE))) {
-      if(flags & PIPE_BUFFER_USAGE_DONTBLOCK) {
-         /* Don't wait for the GPU to finish writing */
-         if(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0)
-            _fenced_buffer_remove(fenced_list, fenced_buf);
-         else
-            return NULL;
+
+   /*
+    * Serialize writes.
+    */
+   while((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) ||
+         ((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ) &&
+          (flags & PIPE_BUFFER_USAGE_CPU_WRITE))) {
+
+      /*
+       * Don't wait for the GPU to finish accessing it, if blocking is forbidden.
+       */
+      if((flags & PIPE_BUFFER_USAGE_DONTBLOCK) &&
+         ops->fence_signalled(ops, fenced_buf->fence, 0) == 0) {
+         goto done;
       }
-      else {
-         /* Wait for the GPU to finish writing */
-         _fenced_buffer_finish(fenced_buf);
+
+      if (flags & PIPE_BUFFER_USAGE_UNSYNCHRONIZED) {
+         break;
       }
+
+      /*
+       * Wait for the GPU to finish accessing. This will release and re-acquire
+       * the mutex, so all copies of mutable state must be discarded.
+       */
+      fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
    }
 
-#if 0
-   /* Check for CPU write access (read is OK) */
-   if(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE) {
-      /* this is legal -- just for debugging */
-      debug_warning("concurrent CPU writes");
+   if(fenced_buf->buffer) {
+      map = pb_map(fenced_buf->buffer, flags);
    }
-#endif
-
-   map = pb_map(fenced_buf->buffer, flags);
+   else {
+      assert(fenced_buf->data);
+      map = fenced_buf->data;
+   }
+
    if(map) {
       ++fenced_buf->mapcount;
       fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE;
    }
 
+done:
+   pipe_mutex_unlock(fenced_mgr->mutex);
+
    return map;
 }
 
 
@@ -333,13 +737,20 @@ static void
 fenced_buffer_unmap(struct pb_buffer *buf)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
+
+   pipe_mutex_lock(fenced_mgr->mutex);
+
    assert(fenced_buf->mapcount);
    if(fenced_buf->mapcount) {
-      pb_unmap(fenced_buf->buffer);
+      if (fenced_buf->buffer)
+         pb_unmap(fenced_buf->buffer);
       --fenced_buf->mapcount;
       if(!fenced_buf->mapcount)
	 fenced_buf->flags &= ~PIPE_BUFFER_USAGE_CPU_READ_WRITE;
    }
+
+   pipe_mutex_unlock(fenced_mgr->mutex);
 }
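From the caller's side, the new map path means DONTBLOCK returns NULL instead of stalling, and UNSYNCHRONIZED skips fence waits entirely (the caller promises not to touch in-flight data). A usage sketch, assuming the pipebuffer API from pb_buffer.h:

    /* Sketch: try a non-blocking map first, fall back to a blocking one. */
    #include "pipebuffer/pb_buffer.h"   /* assumed include path */

    static void *map_try_nonblocking(struct pb_buffer *buf)
    {
       void *map = pb_map(buf, PIPE_BUFFER_USAGE_CPU_WRITE |
                               PIPE_BUFFER_USAGE_DONTBLOCK);
       if (!map) {
          /* GPU still using the buffer: accept the stall this time. */
          map = pb_map(buf, PIPE_BUFFER_USAGE_CPU_WRITE);
       }
       return map;   /* caller pairs this with pb_unmap(buf) */
    }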
 static enum pipe_error
 fenced_buffer_validate(struct pb_buffer *buf,
                        struct pb_validate *vl,
                        unsigned flags)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
    enum pipe_error ret;
-
+
+   pipe_mutex_lock(fenced_mgr->mutex);
+
    if(!vl) {
       /* invalidate */
       fenced_buf->vl = NULL;
       fenced_buf->validation_flags = 0;
-      return PIPE_OK;
+      ret = PIPE_OK;
+      goto done;
    }
-
+
    assert(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
    assert(!(flags & ~PIPE_BUFFER_USAGE_GPU_READ_WRITE));
    flags &= PIPE_BUFFER_USAGE_GPU_READ_WRITE;
 
-   /* Buffer cannot be validated in two different lists */
-   if(fenced_buf->vl && fenced_buf->vl != vl)
-      return PIPE_ERROR_RETRY;
-
-#if 0
-   /* Do not validate if buffer is still mapped */
-   if(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE) {
-      /* TODO: wait for the thread that mapped the buffer to unmap it */
-      return PIPE_ERROR_RETRY;
+   /* Buffer cannot be validated in two different lists */
+   if(fenced_buf->vl && fenced_buf->vl != vl) {
+      ret = PIPE_ERROR_RETRY;
+      goto done;
    }
-   /* Final sanity checking */
-   assert(!(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE));
-   assert(!fenced_buf->mapcount);
-#endif
 
    if(fenced_buf->vl == vl &&
       (fenced_buf->validation_flags & flags) == flags) {
       /* Nothing to do -- buffer already validated */
-      return PIPE_OK;
+      ret = PIPE_OK;
+      goto done;
+   }
+
+   /*
+    * Create and update GPU storage.
+    */
+   if(!fenced_buf->buffer) {
+      assert(!fenced_buf->mapcount);
+
+      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
+      if(ret != PIPE_OK) {
+         goto done;
+      }
+
+      ret = fenced_buffer_copy_storage_to_gpu_locked(fenced_buf);
+      if(ret != PIPE_OK) {
+         fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
+         goto done;
+      }
+
+      if(fenced_buf->mapcount) {
+         debug_printf("warning: validating a buffer while it is still mapped\n");
+      }
+      else {
+         fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
+      }
    }
-
+
    ret = pb_validate(fenced_buf->buffer, vl, flags);
    if (ret != PIPE_OK)
-      return ret;
-
+      goto done;
+
    fenced_buf->vl = vl;
    fenced_buf->validation_flags |= flags;
-
-   return PIPE_OK;
+
+done:
+   pipe_mutex_unlock(fenced_mgr->mutex);
+
+   return ret;
 }
 
 
@@ -398,36 +833,37 @@ static void
 fenced_buffer_fence(struct pb_buffer *buf,
                     struct pipe_fence_handle *fence)
 {
-   struct fenced_buffer *fenced_buf;
-   struct fenced_buffer_list *fenced_list;
-   struct pb_fence_ops *ops;
+   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
+   struct pb_fence_ops *ops = fenced_mgr->ops;
 
-   fenced_buf = fenced_buffer(buf);
-   fenced_list = fenced_buf->list;
-   ops = fenced_list->ops;
-
-   if(fence == fenced_buf->fence) {
-      /* Nothing to do */
-      return;
-   }
+   pipe_mutex_lock(fenced_mgr->mutex);
 
-   assert(fenced_buf->vl);
-   assert(fenced_buf->validation_flags);
-
-   pipe_mutex_lock(fenced_list->mutex);
-   if (fenced_buf->fence)
-      _fenced_buffer_remove(fenced_list, fenced_buf);
-   if (fence) {
-      ops->fence_reference(ops, &fenced_buf->fence, fence);
-      fenced_buf->flags |= fenced_buf->validation_flags;
-      _fenced_buffer_add(fenced_buf);
+   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
+   assert(fenced_buf->buffer);
+
+   if(fence != fenced_buf->fence) {
+      assert(fenced_buf->vl);
+      assert(fenced_buf->validation_flags);
+
+      if (fenced_buf->fence) {
+         boolean destroyed;
+         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
+         assert(!destroyed);
+      }
+      if (fence) {
+         ops->fence_reference(ops, &fenced_buf->fence, fence);
+         fenced_buf->flags |= fenced_buf->validation_flags;
+         fenced_buffer_add_locked(fenced_mgr, fenced_buf);
+      }
+
+      pb_fence(fenced_buf->buffer, fence);
+
+      fenced_buf->vl = NULL;
+      fenced_buf->validation_flags = 0;
    }
-   pipe_mutex_unlock(fenced_list->mutex);
-
-   pb_fence(fenced_buf->buffer, fence);
 
-   fenced_buf->vl = NULL;
-   fenced_buf->validation_flags = 0;
+   pipe_mutex_unlock(fenced_mgr->mutex);
 }
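Together, fenced_buffer_validate() and fenced_buffer_fence() implement the usual validate → submit → fence lifecycle driven by a winsys. A usage sketch, assuming pb_validate.h's pb_validate_add_buffer()/pb_validate_validate()/pb_validate_fence() helpers; the submission step is elided:

    /* Sketch: one submission cycle over a validate list. */
    static enum pipe_error
    submit_one(struct pb_validate *vl,
               struct pb_buffer *buf,
               struct pipe_fence_handle *fence)
    {
       enum pipe_error ret;

       /* Stage 1: the buffer joins the validate list; this is where a
        * CPU-shadowed buffer gets its GPU storage back. */
       ret = pb_validate_add_buffer(vl, buf, PIPE_BUFFER_USAGE_GPU_WRITE);
       if (ret != PIPE_OK)
          return ret;

       /* Stage 2: perform any work validation requires. */
       ret = pb_validate_validate(vl);
       if (ret != PIPE_OK)
          return ret;

       /* ... emit and submit the command stream here ... */

       /* Stage 3: associate the submission fence with every buffer. */
       pb_validate_fence(vl, fence);
       return PIPE_OK;
    }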
@@ -437,11 +873,29 @@ fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                               pb_size *offset)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
-   pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
+   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
+
+   pipe_mutex_lock(fenced_mgr->mutex);
+
+   /*
+    * This should only be called when the buffer is validated. Typically
+    * when processing relocations.
+    */
+   assert(fenced_buf->vl);
+   assert(fenced_buf->buffer);
+
+   if(fenced_buf->buffer)
+      pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
+   else {
+      *base_buf = buf;
+      *offset = 0;
+   }
+
+   pipe_mutex_unlock(fenced_mgr->mutex);
 }
 
 
-static const struct pb_vtbl
+static const struct pb_vtbl
 fenced_buffer_vtbl = {
       fenced_buffer_destroy,
       fenced_buffer_map,
@@ -452,147 +906,166 @@ fenced_buffer_vtbl = {
 };
 
 
-struct pb_buffer *
-fenced_buffer_create(struct fenced_buffer_list *fenced_list,
-                     struct pb_buffer *buffer)
+/**
+ * Wrap a buffer in a fenced buffer.
+ */
+static struct pb_buffer *
+fenced_bufmgr_create_buffer(struct pb_manager *mgr,
+                            pb_size size,
+                            const struct pb_desc *desc)
 {
-   struct fenced_buffer *buf;
-
-   if(!buffer)
-      return NULL;
-
-   buf = CALLOC_STRUCT(fenced_buffer);
-   if(!buf) {
-      pb_reference(&buffer, NULL);
-      return NULL;
+   struct fenced_manager *fenced_mgr = fenced_manager(mgr);
+   struct fenced_buffer *fenced_buf;
+   enum pipe_error ret;
+
+   /*
+    * Don't stall the GPU, waste time evicting buffers, or waste memory
+    * trying to create a buffer that will most likely never fit into the
+    * graphics aperture.
+    */
+   if(size > fenced_mgr->max_buffer_size) {
+      goto no_buffer;
    }
-
-   pipe_reference_init(&buf->base.base.reference, 1);
-   buf->base.base.alignment = buffer->base.alignment;
-   buf->base.base.usage = buffer->base.usage;
-   buf->base.base.size = buffer->base.size;
-
-   buf->base.vtbl = &fenced_buffer_vtbl;
-   buf->buffer = buffer;
-   buf->list = fenced_list;
-
-#ifdef DEBUG
-   pipe_mutex_lock(fenced_list->mutex);
-   LIST_ADDTAIL(&buf->head, &fenced_list->unfenced);
-   ++fenced_list->numUnfenced;
-   pipe_mutex_unlock(fenced_list->mutex);
-#endif
 
-   return &buf->base;
-}
+   fenced_buf = CALLOC_STRUCT(fenced_buffer);
+   if(!fenced_buf)
+      goto no_buffer;
 
+   pipe_reference_init(&fenced_buf->base.base.reference, 1);
+   fenced_buf->base.base.alignment = desc->alignment;
+   fenced_buf->base.base.usage = desc->usage;
+   fenced_buf->base.base.size = size;
+   fenced_buf->size = size;
+   fenced_buf->desc = *desc;
 
-struct fenced_buffer_list *
-fenced_buffer_list_create(struct pb_fence_ops *ops)
-{
-   struct fenced_buffer_list *fenced_list;
+   fenced_buf->base.vtbl = &fenced_buffer_vtbl;
+   fenced_buf->mgr = fenced_mgr;
 
-   fenced_list = CALLOC_STRUCT(fenced_buffer_list);
-   if (!fenced_list)
-      return NULL;
+   pipe_mutex_lock(fenced_mgr->mutex);
+
+   /*
+    * Try to create GPU storage without stalling.
+    */
+   ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, FALSE);
 
-   fenced_list->ops = ops;
+   /*
+    * Attempt to use CPU memory to avoid stalling the GPU.
+    */
+   if(ret != PIPE_OK) {
+      ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
+   }
 
-   LIST_INITHEAD(&fenced_list->delayed);
-   fenced_list->numDelayed = 0;
-
-#ifdef DEBUG
-   LIST_INITHEAD(&fenced_list->unfenced);
-   fenced_list->numUnfenced = 0;
-#endif
+   /*
+    * Create GPU storage, waiting for some to be available.
+    */
+   if(ret != PIPE_OK) {
+      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
+   }
+
+   /*
+    * Give up.
+    */
+   if(ret != PIPE_OK) {
+      goto no_storage;
+   }
 
-   pipe_mutex_init(fenced_list->mutex);
+   assert(fenced_buf->buffer || fenced_buf->data);
 
-   return fenced_list;
-}
+   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
+   ++fenced_mgr->num_unfenced;
+   pipe_mutex_unlock(fenced_mgr->mutex);
 
+   return &fenced_buf->base;
 
-void
-fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
-                              int wait)
-{
-   pipe_mutex_lock(fenced_list->mutex);
-   _fenced_buffer_list_check_free(fenced_list, wait);
-   pipe_mutex_unlock(fenced_list->mutex);
+no_storage:
+   pipe_mutex_unlock(fenced_mgr->mutex);
+   FREE(fenced_buf);
+no_buffer:
+   return NULL;
 }
"y" : "n"); - curr = next; - next = curr->next; - } - - pipe_mutex_unlock(fenced_list->mutex); + assert(fenced_mgr->provider->flush); + if(fenced_mgr->provider->flush) + fenced_mgr->provider->flush(fenced_mgr->provider); } -#endif -void -fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list) +static void +fenced_bufmgr_destroy(struct pb_manager *mgr) { - pipe_mutex_lock(fenced_list->mutex); + struct fenced_manager *fenced_mgr = fenced_manager(mgr); + + pipe_mutex_lock(fenced_mgr->mutex); /* Wait on outstanding fences */ - while (fenced_list->numDelayed) { - pipe_mutex_unlock(fenced_list->mutex); + while (fenced_mgr->num_fenced) { + pipe_mutex_unlock(fenced_mgr->mutex); #if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS) sched_yield(); #endif - _fenced_buffer_list_check_free(fenced_list, 1); - pipe_mutex_lock(fenced_list->mutex); + pipe_mutex_lock(fenced_mgr->mutex); + while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE)) + ; } #ifdef DEBUG - /*assert(!fenced_list->numUnfenced);*/ + /*assert(!fenced_mgr->num_unfenced);*/ #endif - - pipe_mutex_unlock(fenced_list->mutex); - - fenced_list->ops->destroy(fenced_list->ops); - - FREE(fenced_list); + + pipe_mutex_unlock(fenced_mgr->mutex); + pipe_mutex_destroy(fenced_mgr->mutex); + + if(fenced_mgr->provider) + fenced_mgr->provider->destroy(fenced_mgr->provider); + + fenced_mgr->ops->destroy(fenced_mgr->ops); + + FREE(fenced_mgr); } +struct pb_manager * +fenced_bufmgr_create(struct pb_manager *provider, + struct pb_fence_ops *ops, + pb_size max_buffer_size, + pb_size max_cpu_total_size) +{ + struct fenced_manager *fenced_mgr; + + if(!provider) + return NULL; + + fenced_mgr = CALLOC_STRUCT(fenced_manager); + if (!fenced_mgr) + return NULL; + + fenced_mgr->base.destroy = fenced_bufmgr_destroy; + fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer; + fenced_mgr->base.flush = fenced_bufmgr_flush; + + fenced_mgr->provider = provider; + fenced_mgr->ops = ops; + fenced_mgr->max_buffer_size = max_buffer_size; + fenced_mgr->max_cpu_total_size = max_cpu_total_size; + + LIST_INITHEAD(&fenced_mgr->fenced); + fenced_mgr->num_fenced = 0; + + LIST_INITHEAD(&fenced_mgr->unfenced); + fenced_mgr->num_unfenced = 0; + + pipe_mutex_init(fenced_mgr->mutex); + + return &fenced_mgr->base; +} diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.h b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.h index 034ca1e024a..0372f81d0a1 100644 --- a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.h +++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.h @@ -98,43 +98,6 @@ struct pb_fence_ops }; -/** - * Create a fenced buffer list. - * - * See also fenced_bufmgr_create for a more convenient way to use this. - */ -struct fenced_buffer_list * -fenced_buffer_list_create(struct pb_fence_ops *ops); - - -/** - * Walk the fenced buffer list to check and free signalled buffers. - */ -void -fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list, - int wait); - - -#ifdef DEBUG -void -fenced_buffer_list_dump(struct fenced_buffer_list *fenced_list); -#endif - - -void -fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list); - - -/** - * Wrap a buffer in a fenced buffer. - * - * NOTE: this will not increase the buffer reference count. 
diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.h b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.h
index 034ca1e024a..0372f81d0a1 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.h
+++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.h
@@ -98,43 +98,6 @@ struct pb_fence_ops
 };
 
 
-/**
- * Create a fenced buffer list.
- *
- * See also fenced_bufmgr_create for a more convenient way to use this.
- */
-struct fenced_buffer_list *
-fenced_buffer_list_create(struct pb_fence_ops *ops);
-
-
-/**
- * Walk the fenced buffer list to check and free signalled buffers.
- */
-void
-fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
-                              int wait);
-
-
-#ifdef DEBUG
-void
-fenced_buffer_list_dump(struct fenced_buffer_list *fenced_list);
-#endif
-
-
-void
-fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list);
-
-
-/**
- * Wrap a buffer in a fenced buffer.
- *
- * NOTE: this will not increase the buffer reference count.
- */
-struct pb_buffer *
-fenced_buffer_create(struct fenced_buffer_list *fenced,
-                     struct pb_buffer *buffer);
-
-
 #ifdef __cplusplus
 }
 #endif
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h b/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h
index 8c8d7130781..06669917ff6 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h
@@ -175,7 +175,9 @@ struct pb_fence_ops;
  */
 struct pb_manager *
 fenced_bufmgr_create(struct pb_manager *provider,
-                     struct pb_fence_ops *ops);
+                     struct pb_fence_ops *ops,
+                     pb_size max_buffer_size,
+                     pb_size max_cpu_total_size);
 
 
 struct pb_manager *
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c
index 6e3214ca9c9..8f74180a111 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c
@@ -371,6 +371,9 @@ pb_debug_manager_create_buffer(struct pb_manager *_mgr,
    struct pb_desc real_desc;
    pb_size real_size;
 
+   assert(size);
+   assert(desc->alignment);
+
    buf = CALLOC_STRUCT(pb_debug_buffer);
    if(!buf)
      return NULL;
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_fenced.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_fenced.c
deleted file mode 100644
index 97dd1427fda..00000000000
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_fenced.c
+++ /dev/null
@@ -1,152 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-
-/**
- * \file
- * A buffer manager that wraps buffers in fenced buffers.
- *
- * \author Jose Fonseca <[email protected]>
- */
-
-
-#include "util/u_debug.h"
-#include "util/u_memory.h"
-
-#include "pb_buffer.h"
-#include "pb_buffer_fenced.h"
-#include "pb_bufmgr.h"
-
-
-struct fenced_pb_manager
-{
-   struct pb_manager base;
-
-   struct pb_manager *provider;
-
-   struct fenced_buffer_list *fenced_list;
-};
-
-
-static INLINE struct fenced_pb_manager *
-fenced_pb_manager(struct pb_manager *mgr)
-{
-   assert(mgr);
-   return (struct fenced_pb_manager *)mgr;
-}
-
-
-static struct pb_buffer *
-fenced_bufmgr_create_buffer(struct pb_manager *mgr,
-                            pb_size size,
-                            const struct pb_desc *desc)
-{
-   struct fenced_pb_manager *fenced_mgr = fenced_pb_manager(mgr);
-   struct pb_buffer *buf;
-   struct pb_buffer *fenced_buf;
-
-   /* check for free buffers before allocating new ones */
-   fenced_buffer_list_check_free(fenced_mgr->fenced_list, 0);
-
-   buf = fenced_mgr->provider->create_buffer(fenced_mgr->provider, size, desc);
-   if(!buf) {
-      /* try harder to get a buffer */
-      fenced_buffer_list_check_free(fenced_mgr->fenced_list, 1);
-
-      buf = fenced_mgr->provider->create_buffer(fenced_mgr->provider, size, desc);
-      if(!buf) {
-#if 0
-         fenced_buffer_list_dump(fenced_mgr->fenced_list);
-#endif
-
-         /* give up */
-         return NULL;
-      }
-   }
-
-   fenced_buf = fenced_buffer_create(fenced_mgr->fenced_list, buf);
-   if(!fenced_buf) {
-      pb_reference(&buf, NULL);
-   }
-
-   return fenced_buf;
-}
-
-
-static void
-fenced_bufmgr_flush(struct pb_manager *mgr)
-{
-   struct fenced_pb_manager *fenced_mgr = fenced_pb_manager(mgr);
-
-   fenced_buffer_list_check_free(fenced_mgr->fenced_list, TRUE);
-
-   assert(fenced_mgr->provider->flush);
-   if(fenced_mgr->provider->flush)
-      fenced_mgr->provider->flush(fenced_mgr->provider);
-}
-
-
-static void
-fenced_bufmgr_destroy(struct pb_manager *mgr)
-{
-   struct fenced_pb_manager *fenced_mgr = fenced_pb_manager(mgr);
-
-   fenced_buffer_list_destroy(fenced_mgr->fenced_list);
-
-   if(fenced_mgr->provider)
-      fenced_mgr->provider->destroy(fenced_mgr->provider);
-
-   FREE(fenced_mgr);
-}
-
-
-struct pb_manager *
-fenced_bufmgr_create(struct pb_manager *provider,
-                     struct pb_fence_ops *ops)
-{
-   struct fenced_pb_manager *fenced_mgr;
-
-   if(!provider)
-      return NULL;
-
-   fenced_mgr = CALLOC_STRUCT(fenced_pb_manager);
-   if (!fenced_mgr)
-      return NULL;
-
-   fenced_mgr->base.destroy = fenced_bufmgr_destroy;
-   fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer;
-   fenced_mgr->base.flush = fenced_bufmgr_flush;
-
-   fenced_mgr->provider = provider;
-   fenced_mgr->fenced_list = fenced_buffer_list_create(ops);
-   if(!fenced_mgr->fenced_list) {
-      FREE(fenced_mgr);
-      return NULL;
-   }
-
-   return &fenced_mgr->base;
-}
diff --git a/src/gallium/auxiliary/pipebuffer/pb_validate.c b/src/gallium/auxiliary/pipebuffer/pb_validate.c
index ce40c0cf0e6..903afc749d3 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_validate.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_validate.c
@@ -39,7 +39,6 @@
 #include "util/u_debug.h"
 
 #include "pb_buffer.h"
-#include "pb_buffer_fenced.h"
 #include "pb_validate.h"
diff --git a/src/gallium/auxiliary/tgsi/tgsi_scan.c b/src/gallium/auxiliary/tgsi/tgsi_scan.c
index a6cc773003a..b9be8dc0a31 100644
--- a/src/gallium/auxiliary/tgsi/tgsi_scan.c
+++ b/src/gallium/auxiliary/tgsi/tgsi_scan.c
@@ -101,12 +101,10 @@ tgsi_scan_shader(const struct tgsi_token *tokens,
                 src->Register.File == TGSI_FILE_SYSTEM_VALUE) {
                const int ind = src->Register.Index;
                if (info->input_semantic_name[ind] == TGSI_SEMANTIC_FOG) {
-                  if (src->Register.SwizzleX == TGSI_SWIZZLE_X) {
-                     info->uses_fogcoord = TRUE;
-                  }
-                  else if (src->Register.SwizzleX == TGSI_SWIZZLE_Y) {
-                     info->uses_frontfacing = TRUE;
-                  }
+                  info->uses_fogcoord = TRUE;
+               }
+               else if (info->input_semantic_name[ind] == TGSI_SEMANTIC_FACE) {
+                  info->uses_frontfacing = TRUE;
                }
             }
          }
diff --git a/src/gallium/auxiliary/util/u_blit.c b/src/gallium/auxiliary/util/u_blit.c
index 9725890bd4a..236f1e4feee 100644
--- a/src/gallium/auxiliary/util/u_blit.c
+++ b/src/gallium/auxiliary/util/u_blit.c
@@ -226,8 +226,8 @@ setup_vertex_data_tex(struct blit_state *ctx,
 
    offset = get_next_slot( ctx );
 
-   pipe_buffer_write(ctx->pipe->screen, ctx->vbuf,
-                     offset, sizeof(ctx->vertices), ctx->vertices);
+   pipe_buffer_write_nooverlap(ctx->pipe->screen, ctx->vbuf,
+                               offset, sizeof(ctx->vertices), ctx->vertices);
 
    return offset;
 }
diff --git a/src/gallium/auxiliary/util/u_gen_mipmap.c b/src/gallium/auxiliary/util/u_gen_mipmap.c
index 76023794dcd..5426c91152d 100644
--- a/src/gallium/auxiliary/util/u_gen_mipmap.c
+++ b/src/gallium/auxiliary/util/u_gen_mipmap.c
@@ -1411,8 +1411,8 @@ set_vertex_data(struct gen_mipmap_state *ctx,
 
    offset = get_next_slot( ctx );
 
-   pipe_buffer_write(ctx->pipe->screen, ctx->vbuf,
-                     offset, sizeof(ctx->vertices), ctx->vertices);
+   pipe_buffer_write_nooverlap(ctx->pipe->screen, ctx->vbuf,
+                               offset, sizeof(ctx->vertices), ctx->vertices);
 
    return offset;
 }
diff --git a/src/gallium/auxiliary/util/u_upload_mgr.c b/src/gallium/auxiliary/util/u_upload_mgr.c
index 975ee89c455..55a65375c81 100644
--- a/src/gallium/auxiliary/util/u_upload_mgr.c
+++ b/src/gallium/auxiliary/util/u_upload_mgr.c
@@ -85,7 +85,9 @@ my_buffer_write(struct pipe_screen *screen,
 
    map = pipe_buffer_map_range(screen, buf, offset, size,
                                PIPE_BUFFER_USAGE_CPU_WRITE |
-                               PIPE_BUFFER_USAGE_FLUSH_EXPLICIT);
+                               PIPE_BUFFER_USAGE_FLUSH_EXPLICIT |
+                               PIPE_BUFFER_USAGE_DISCARD |
+                               PIPE_BUFFER_USAGE_UNSYNCHRONIZED);
    if (map == NULL)
       return PIPE_ERROR_OUT_OF_MEMORY;
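The u_blit/u_gen_mipmap hunks switch to pipe_buffer_write_nooverlap because each write goes to a fresh slot, so the caller can promise the byte range was never written before and the driver may skip fence synchronization. A rough sketch of what that helper amounts to, mirroring the u_upload_mgr flags above (the real inline lives in the gallium headers; treat this as an approximation, not its exact definition):

    /* Sketch: unsynchronized, discarding write into a fresh range. */
    static void
    buffer_write_nooverlap_sketch(struct pipe_screen *screen,
                                  struct pipe_buffer *buf,
                                  unsigned offset, unsigned size,
                                  const void *data)
    {
       void *map = pipe_buffer_map_range(screen, buf, offset, size,
                                         PIPE_BUFFER_USAGE_CPU_WRITE |
                                         PIPE_BUFFER_USAGE_FLUSH_EXPLICIT |
                                         PIPE_BUFFER_USAGE_DISCARD |
                                         PIPE_BUFFER_USAGE_UNSYNCHRONIZED);
       if (map) {
          /* map_range returns a pointer to the buffer start, as in
           * my_buffer_write() above, hence the explicit offset. */
          memcpy((uint8_t *)map + offset, data, size);
          pipe_buffer_flush_mapped_range(screen, buf, offset, size);
          pipe_buffer_unmap(screen, buf);
       }
    }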