author     behlendo <behlendo@7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c>   2008-05-15 17:10:30 +0000
committer  behlendo <behlendo@7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c>   2008-05-15 17:10:30 +0000
commit     4efd41189af62958f2aa5cf48941dd718d563d11 (patch)
tree       a3cc73bd2e187629d9f800d9a86bcb00c38f49ce /include
parent     a97df54e839fa7f823fdca2a814427c9e4db204f (diff)
Rework condition variable implementation to be consistent with
other primitive implementations. Additionally ensure that GFP_ATOMIC
is used for allocations when in interrupt context.

git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@108 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c
Diffstat (limited to 'include')
-rw-r--r--   include/sys/condvar.h | 176
1 file changed, 23 insertions(+), 153 deletions(-)
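
The allocation-flag detail mentioned in the commit message is easy to miss from the header alone, since the
function bodies now live out of line. Below is a minimal sketch of what the out-of-line __cv_init() might look
like under that rule; the body is illustrative only (not the actual spl source), with just the prototype and the
new cv_name_size field taken from this change:

    #include <linux/interrupt.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    void
    __cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
    {
            /* Sketch: GFP_ATOMIC never sleeps, so it is the only safe
             * choice when this is called from interrupt context. */
            gfp_t flags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;

            cvp->cv_magic = CV_MAGIC;
            init_waitqueue_head(&cvp->cv_event);
            spin_lock_init(&cvp->cv_lock);
            atomic_set(&cvp->cv_waiters, 0);
            cvp->cv_mutex = NULL;
            cvp->cv_name = NULL;
            cvp->cv_name_size = 0;

            if (name) {
                    cvp->cv_name_size = strlen(name) + 1;
                    cvp->cv_name = kmalloc(cvp->cv_name_size, flags);
                    if (cvp->cv_name)
                            memcpy(cvp->cv_name, name, cvp->cv_name_size);
            }
    }
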
diff --git a/include/sys/condvar.h b/include/sys/condvar.h
index 2878b68a8..ce9a19147 100644
--- a/include/sys/condvar.h
+++ b/include/sys/condvar.h
@@ -7,6 +7,7 @@ extern "C" {
#include <linux/module.h>
#include <linux/wait.h>
+#include <sys/mutex.h>
/* The kcondvar_t struct is protected by mutex taken externally before
* calling any of the wait/signal funs, and passed into the wait funs.
@@ -17,165 +18,34 @@ extern "C" {
typedef struct {
int cv_magic;
char *cv_name;
+ int cv_name_size;
wait_queue_head_t cv_event;
atomic_t cv_waiters;
- kmutex_t *cv_mutex; /* only for verification purposes */
+ kmutex_t *cv_mutex;
spinlock_t cv_lock;
} kcondvar_t;
typedef enum { CV_DEFAULT=0, CV_DRIVER } kcv_type_t;
-static __inline__ void
-cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
-{
- ENTRY;
- ASSERT(cvp);
- ASSERT(type == CV_DEFAULT);
- ASSERT(arg == NULL);
+extern void __cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg);
+extern void __cv_destroy(kcondvar_t *cvp);
+extern void __cv_wait(kcondvar_t *cvp, kmutex_t *mp);
+extern clock_t __cv_timedwait(kcondvar_t *cvp, kmutex_t *mp,
+ clock_t expire_time);
+extern void __cv_signal(kcondvar_t *cvp);
+extern void __cv_broadcast(kcondvar_t *cvp);
+
+#define cv_init(cvp, name, type, arg) \
+({ \
+ if ((name) == NULL) \
+ __cv_init(cvp, #cvp, type, arg); \
+ else \
+ __cv_init(cvp, name, type, arg); \
+})
+#define cv_destroy(cvp) __cv_destroy(cvp)
+#define cv_wait(cvp, mp) __cv_wait(cvp, mp)
+#define cv_timedwait(cvp, mp, t) __cv_timedwait(cvp, mp, t)
+#define cv_signal(cvp) __cv_signal(cvp)
+#define cv_broadcast(cvp) __cv_broadcast(cvp)
- cvp->cv_magic = CV_MAGIC;
- init_waitqueue_head(&cvp->cv_event);
- spin_lock_init(&cvp->cv_lock);
- atomic_set(&cvp->cv_waiters, 0);
- cvp->cv_mutex = NULL;
- cvp->cv_name = NULL;
-
- if (name) {
- cvp->cv_name = kmalloc(strlen(name) + 1, GFP_KERNEL);
- if (cvp->cv_name)
- strcpy(cvp->cv_name, name);
- }
-
- EXIT;
-}
-
-static __inline__ void
-cv_destroy(kcondvar_t *cvp)
-{
- ENTRY;
- ASSERT(cvp);
- ASSERT(cvp->cv_magic == CV_MAGIC);
- spin_lock(&cvp->cv_lock);
- ASSERT(atomic_read(&cvp->cv_waiters) == 0);
- ASSERT(!waitqueue_active(&cvp->cv_event));
-
- if (cvp->cv_name)
- kfree(cvp->cv_name);
-
- memset(cvp, CV_POISON, sizeof(*cvp));
- spin_unlock(&cvp->cv_lock);
- EXIT;
-}
-
-static __inline__ void
-cv_wait(kcondvar_t *cvp, kmutex_t *mtx)
-{
- DEFINE_WAIT(wait);
- ENTRY;
-
- ASSERT(cvp);
- ASSERT(mtx);
- ASSERT(cvp->cv_magic == CV_MAGIC);
- spin_lock(&cvp->cv_lock);
- ASSERT(mutex_owned(mtx));
-
- if (cvp->cv_mutex == NULL)
- cvp->cv_mutex = mtx;
-
- /* Ensure the same mutex is used by all callers */
- ASSERT(cvp->cv_mutex == mtx);
- spin_unlock(&cvp->cv_lock);
-
- prepare_to_wait_exclusive(&cvp->cv_event, &wait,
- TASK_UNINTERRUPTIBLE);
- atomic_inc(&cvp->cv_waiters);
-
- /* Mutex should be dropped after prepare_to_wait() this
- * ensures we're linked in to the waiters list and avoids the
- * race where 'cvp->cv_waiters > 0' but the list is empty. */
- mutex_exit(mtx);
- schedule();
- mutex_enter(mtx);
-
- atomic_dec(&cvp->cv_waiters);
- finish_wait(&cvp->cv_event, &wait);
- EXIT;
-}
-
-/* 'expire_time' argument is an absolute wall clock time in jiffies.
- * Return value is time left (expire_time - now) or -1 if timeout occurred.
- */
-static __inline__ clock_t
-cv_timedwait(kcondvar_t *cvp, kmutex_t *mtx, clock_t expire_time)
-{
- DEFINE_WAIT(wait);
- clock_t time_left;
- ENTRY;
-
- ASSERT(cvp);
- ASSERT(mtx);
- ASSERT(cvp->cv_magic == CV_MAGIC);
- spin_lock(&cvp->cv_lock);
- ASSERT(mutex_owned(mtx));
-
- if (cvp->cv_mutex == NULL)
- cvp->cv_mutex = mtx;
-
- /* Ensure the same mutex is used by all callers */
- ASSERT(cvp->cv_mutex == mtx);
- spin_unlock(&cvp->cv_lock);
-
- /* XXX - Does not handle jiffie wrap properly */
- time_left = expire_time - jiffies;
- if (time_left <= 0)
- RETURN(-1);
-
- prepare_to_wait_exclusive(&cvp->cv_event, &wait,
- TASK_UNINTERRUPTIBLE);
- atomic_inc(&cvp->cv_waiters);
-
- /* Mutex should be dropped after prepare_to_wait() this
- * ensures we're linked in to the waiters list and avoids the
- * race where 'cvp->cv_waiters > 0' but the list is empty. */
- mutex_exit(mtx);
- time_left = schedule_timeout(time_left);
- mutex_enter(mtx);
-
- atomic_dec(&cvp->cv_waiters);
- finish_wait(&cvp->cv_event, &wait);
-
- RETURN(time_left > 0 ? time_left : -1);
-}
-
-static __inline__ void
-cv_signal(kcondvar_t *cvp)
-{
- ENTRY;
- ASSERT(cvp);
- ASSERT(cvp->cv_magic == CV_MAGIC);
-
- /* All waiters are added with WQ_FLAG_EXCLUSIVE so only one
- * waiter will be set runable with each call to wake_up().
- * Additionally wake_up() holds a spin_lock assoicated with
- * the wait queue to ensure we don't race waking up processes. */
- if (atomic_read(&cvp->cv_waiters) > 0)
- wake_up(&cvp->cv_event);
-
- EXIT;
-}
-
-static __inline__ void
-cv_broadcast(kcondvar_t *cvp)
-{
- ASSERT(cvp);
- ASSERT(cvp->cv_magic == CV_MAGIC);
- ENTRY;
-
- /* Wake_up_all() will wake up all waiters even those which
- * have the WQ_FLAG_EXCLUSIVE flag set. */
- if (atomic_read(&cvp->cv_waiters) > 0)
- wake_up_all(&cvp->cv_event);
-
- EXIT;
-}
#endif /* _SPL_CONDVAR_H */
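
For reference, the calling convention described by the header comment ("protected by mutex taken externally
before calling any of the wait/signal funs") looks like this in practice. This is a usage sketch only; the
names example_lock, example_cv, and example_ready are illustrative and not part of this change:

    static kmutex_t example_lock;
    static kcondvar_t example_cv;
    static int example_ready;

    static void
    example_consumer(void)
    {
            mutex_enter(&example_lock);
            while (!example_ready)                  /* re-check predicate on wakeup */
                    cv_wait(&example_cv, &example_lock);
            example_ready = 0;
            mutex_exit(&example_lock);
    }

    static void
    example_producer(void)
    {
            mutex_enter(&example_lock);
            example_ready = 1;
            cv_signal(&example_cv);                 /* wakes one exclusive waiter */
            mutex_exit(&example_lock);
    }

cv_wait() drops the caller's mutex while asleep and reacquires it before returning, so the predicate is always
tested with the lock held.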