author     Chad Versace <[email protected]>  2015-05-05 19:05:32 -0700
committer  Chad Versace <[email protected]>  2015-05-07 08:11:21 -0700
commit     2516d835b17563b097efa3a980c3b9b5e77d7f00 (patch)
tree       15ddd9ac21a2860cd683dafcaad7daaaf4bb7fb7 /src/mesa
parent     19b5a82fdafd583317265e86fd13c23e52839131 (diff)
i965/sync: Replace prefix 'intel_sync' -> 'intel_gl_sync'
I'm about to implement DRI2_Fence in intel_syncobj.c. To prevent madness, we
need to prefix functions for GL_ARB_sync with 'gl' and functions for
DRI2_Fence with 'dri'. Otherwise, the file will become a jumble of similarly
named functions.

For example:
  old-name:     intel_client_wait_sync()
  new-name:     intel_gl_client_wait_sync()
  soon-to-come: intel_dri_client_wait_sync()

I wrote this renaming commit separately from the commit that implements
DRI2_Fence because I wanted the latter diff to be reviewable.

Reviewed-by: Daniel Stone <[email protected]>
Reviewed-by: Kenneth Graunke <[email protected]>
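As an illustration of the naming split described above, here is a minimal,
self-contained C sketch (not the Mesa sources): the intel_gl_* name is the one
this commit introduces, while the intel_dri_* name is the hypothetical
"soon-to-come" DRI2_Fence counterpart mentioned in the message; the types and
bodies are stand-ins.

/* Illustrative sketch only -- not Mesa code.  It shows the naming split the
 * commit message describes: 'intel_gl_*' for GL_ARB_sync hooks, 'intel_dri_*'
 * for the DRI2_Fence hooks that are planned but not implemented here.
 */
#include <stdint.h>
#include <stdio.h>

/* GL_ARB_sync path: this commit renames intel_client_wait_sync() to this. */
static void
intel_gl_client_wait_sync(uint64_t timeout_ns)
{
   printf("GL_ARB_sync wait, timeout %llu ns\n",
          (unsigned long long) timeout_ns);
}

/* DRI2_Fence path: hypothetical 'soon-to-come' name from the commit message. */
static void
intel_dri_client_wait_sync(uint64_t timeout_ns)
{
   printf("DRI2_Fence wait, timeout %llu ns\n",
          (unsigned long long) timeout_ns);
}

int
main(void)
{
   intel_gl_client_wait_sync(1000);
   intel_dri_client_wait_sync(1000);
   return 0;
}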
Diffstat (limited to 'src/mesa')
-rw-r--r--   src/mesa/drivers/dri/i965/brw_context.h     7
-rw-r--r--   src/mesa/drivers/dri/i965/intel_syncobj.c   52
2 files changed, 31 insertions, 28 deletions
diff --git a/src/mesa/drivers/dri/i965/brw_context.h b/src/mesa/drivers/dri/i965/brw_context.h
index 2fcdcfadc9a..834aaa45737 100644
--- a/src/mesa/drivers/dri/i965/brw_context.h
+++ b/src/mesa/drivers/dri/i965/brw_context.h
@@ -869,13 +869,6 @@ struct brw_query_object {
    bool flushed;
 };
 
-struct intel_sync_object {
-   struct gl_sync_object Base;
-
-   /** Batch associated with this sync object */
-   drm_intel_bo *bo;
-};
-
 enum brw_gpu_ring {
    UNKNOWN_RING,
    RENDER_RING,
diff --git a/src/mesa/drivers/dri/i965/intel_syncobj.c b/src/mesa/drivers/dri/i965/intel_syncobj.c
index a425b9eefd7..dea6dba340f 100644
--- a/src/mesa/drivers/dri/i965/intel_syncobj.c
+++ b/src/mesa/drivers/dri/i965/intel_syncobj.c
@@ -44,12 +44,19 @@
 #include "intel_batchbuffer.h"
 #include "intel_reg.h"
 
+struct intel_gl_sync_object {
+   struct gl_sync_object Base;
+
+   /** Batch associated with this sync object */
+   drm_intel_bo *bo;
+};
+
 static struct gl_sync_object *
-intel_new_sync_object(struct gl_context *ctx, GLuint id)
+intel_gl_new_sync_object(struct gl_context *ctx, GLuint id)
 {
-   struct intel_sync_object *sync;
+   struct intel_gl_sync_object *sync;
 
-   sync = calloc(1, sizeof(struct intel_sync_object));
+   sync = calloc(1, sizeof(*sync));
    if (!sync)
       return NULL;
@@ -57,9 +64,9 @@ intel_new_sync_object(struct gl_context *ctx, GLuint id)
 }
 
 static void
-intel_delete_sync_object(struct gl_context *ctx, struct gl_sync_object *s)
+intel_gl_delete_sync_object(struct gl_context *ctx, struct gl_sync_object *s)
 {
-   struct intel_sync_object *sync = (struct intel_sync_object *)s;
+   struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;
 
    if (sync->bo)
       drm_intel_bo_unreference(sync->bo);
@@ -68,11 +75,11 @@ intel_delete_sync_object(struct gl_context *ctx, struct gl_sync_object *s)
 }
 
 static void
-intel_fence_sync(struct gl_context *ctx, struct gl_sync_object *s,
-                 GLenum condition, GLbitfield flags)
+intel_gl_fence_sync(struct gl_context *ctx, struct gl_sync_object *s,
+                    GLenum condition, GLbitfield flags)
 {
    struct brw_context *brw = brw_context(ctx);
-   struct intel_sync_object *sync = (struct intel_sync_object *)s;
+   struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;
 
    assert(condition == GL_SYNC_GPU_COMMANDS_COMPLETE);
    intel_batchbuffer_emit_mi_flush(brw);
@@ -83,10 +90,11 @@ intel_fence_sync(struct gl_context *ctx, struct gl_sync_object *s,
    intel_batchbuffer_flush(brw);
 }
 
-static void intel_client_wait_sync(struct gl_context *ctx, struct gl_sync_object *s,
-                                   GLbitfield flags, GLuint64 timeout)
+static void
+intel_gl_client_wait_sync(struct gl_context *ctx, struct gl_sync_object *s,
+                          GLbitfield flags, GLuint64 timeout)
 {
-   struct intel_sync_object *sync = (struct intel_sync_object *)s;
+   struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;
 
    /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and returns
     * immediately for timeouts <= 0. The best we can do is to clamp the
@@ -108,14 +116,15 @@ static void intel_client_wait_sync(struct gl_context *ctx, struct gl_sync_object
  * any batchbuffers coming after this waitsync will naturally not occur until
  * the previous one is done.
  */
-static void intel_server_wait_sync(struct gl_context *ctx, struct gl_sync_object *s,
-                                   GLbitfield flags, GLuint64 timeout)
+static void
+intel_gl_server_wait_sync(struct gl_context *ctx, struct gl_sync_object *s,
+                          GLbitfield flags, GLuint64 timeout)
 {
 }
 
 static void intel_check_sync(struct gl_context *ctx, struct gl_sync_object *s)
 {
-   struct intel_sync_object *sync = (struct intel_sync_object *)s;
+   struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;
 
    if (sync->bo && !drm_intel_bo_busy(sync->bo)) {
       drm_intel_bo_unreference(sync->bo);
@@ -124,12 +133,13 @@ static void intel_check_sync(struct gl_context *ctx, struct gl_sync_object *s)
    }
 }
 
-void intel_init_syncobj_functions(struct dd_function_table *functions)
+void
+intel_init_syncobj_functions(struct dd_function_table *functions)
 {
-   functions->NewSyncObject = intel_new_sync_object;
-   functions->DeleteSyncObject = intel_delete_sync_object;
-   functions->FenceSync = intel_fence_sync;
-   functions->CheckSync = intel_check_sync;
-   functions->ClientWaitSync = intel_client_wait_sync;
-   functions->ServerWaitSync = intel_server_wait_sync;
+   functions->NewSyncObject = intel_gl_new_sync_object;
+   functions->DeleteSyncObject = intel_gl_delete_sync_object;
+   functions->FenceSync = intel_gl_fence_sync;
+   functions->CheckSync = intel_gl_check_sync;
+   functions->ClientWaitSync = intel_gl_client_wait_sync;
+   functions->ServerWaitSync = intel_gl_server_wait_sync;
 }
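The comment inside intel_gl_client_wait_sync above notes that
DRM_IOCTL_I915_GEM_WAIT takes a signed 64-bit timeout, so the unsigned GL
timeout has to be clamped. The following is a minimal sketch of just that
clamp, using plain C types rather than the driver's; do_bo_wait() is a
stand-in, not a real libdrm call.

/* Sketch of the timeout clamp described in the intel_gl_client_wait_sync
 * comment: GL passes an unsigned 64-bit timeout, the kernel wait ioctl takes
 * a signed 64-bit one, so values above INT64_MAX are clamped.
 */
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the real buffer-object wait; returns 0 on success. */
static int
do_bo_wait(int64_t timeout_ns)
{
   printf("waiting up to %lld ns\n", (long long) timeout_ns);
   return 0;
}

static void
client_wait_sync(uint64_t timeout_ns)
{
   /* Unsigned GL timeout -> signed ioctl argument: clamp to INT64_MAX. */
   if (timeout_ns > INT64_MAX)
      timeout_ns = INT64_MAX;

   if (do_bo_wait((int64_t) timeout_ns) == 0)
      printf("sync object signaled\n");
}

int
main(void)
{
   client_wait_sync(UINT64_MAX); /* "wait forever" request gets clamped */
   return 0;
}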