Diffstat (limited to 'patches/pthreads.patch')
-rw-r--r-- | patches/pthreads.patch | 924 |
1 files changed, 924 insertions, 0 deletions
diff --git a/patches/pthreads.patch b/patches/pthreads.patch
new file mode 100644
index 000000000..ec29eb915
--- /dev/null
+++ b/patches/pthreads.patch
@@ -0,0 +1,924 @@
+Use POSIX threads in userspace.
+
+Index: zfs+chaos4/cmd/lztest/ztest.c
+===================================================================
+--- zfs+chaos4.orig/cmd/lztest/ztest.c
++++ zfs+chaos4/cmd/lztest/ztest.c
+@@ -141,7 +141,7 @@ typedef struct ztest_args {
+ spa_t *za_spa;
+ objset_t *za_os;
+ zilog_t *za_zilog;
+- thread_t za_thread;
++ pthread_t za_thread;
+ uint64_t za_instance;
+ uint64_t za_random;
+ uint64_t za_diroff;
+@@ -224,17 +224,17 @@ ztest_info_t ztest_info[] = {
+ * Stuff we need to share writably between parent and child.
+ */
+ typedef struct ztest_shared {
+- mutex_t zs_vdev_lock;
+- rwlock_t zs_name_lock;
+- uint64_t zs_vdev_primaries;
+- uint64_t zs_enospc_count;
+- hrtime_t zs_start_time;
+- hrtime_t zs_stop_time;
+- uint64_t zs_alloc;
+- uint64_t zs_space;
+- ztest_info_t zs_info[ZTEST_FUNCS];
+- mutex_t zs_sync_lock[ZTEST_SYNC_LOCKS];
+- uint64_t zs_seq[ZTEST_SYNC_LOCKS];
++ pthread_mutex_t zs_vdev_lock;
++ pthread_rwlock_t zs_name_lock;
++ uint64_t zs_vdev_primaries;
++ uint64_t zs_enospc_count;
++ hrtime_t zs_start_time;
++ hrtime_t zs_stop_time;
++ uint64_t zs_alloc;
++ uint64_t zs_space;
++ ztest_info_t zs_info[ZTEST_FUNCS];
++ pthread_mutex_t zs_sync_lock[ZTEST_SYNC_LOCKS];
++ uint64_t zs_seq[ZTEST_SYNC_LOCKS];
+ } ztest_shared_t;
+
+ static char ztest_dev_template[] = "%s/%s.%llua";
+@@ -818,7 +818,7 @@ ztest_spa_create_destroy(ztest_args_t *z
+ * Attempt to create an existing pool. It shouldn't matter
+ * what's in the nvroot; we should fail with EEXIST.
+ */
+- (void) rw_rdlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
+ nvroot = make_vdev_root(0, 0, 0, 0, 1);
+ error = spa_create(za->za_pool, nvroot, NULL, NULL);
+ nvlist_free(nvroot);
+@@ -834,7 +834,7 @@ ztest_spa_create_destroy(ztest_args_t *z
+ fatal(0, "spa_destroy() = %d", error);
+
+ spa_close(spa, FTAG);
+- (void) rw_unlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+ }
+
+ /*
+@@ -851,7 +851,7 @@ ztest_vdev_add_remove(ztest_args_t *za)
+ if (zopt_verbose >= 6)
+ (void) printf("adding vdev\n");
+
+- (void) mutex_lock(&ztest_shared->zs_vdev_lock);
++ (void) pthread_mutex_lock(&ztest_shared->zs_vdev_lock);
+
+ spa_config_enter(spa, RW_READER, FTAG);
+
+@@ -869,7 +869,7 @@ ztest_vdev_add_remove(ztest_args_t *za)
+ error = spa_vdev_add(spa, nvroot);
+ nvlist_free(nvroot);
+
+- (void) mutex_unlock(&ztest_shared->zs_vdev_lock);
++ (void) pthread_mutex_unlock(&ztest_shared->zs_vdev_lock);
+
+ if (error == ENOSPC)
+ ztest_record_enospc("spa_vdev_add");
+@@ -927,7 +927,7 @@ ztest_vdev_attach_detach(ztest_args_t *z
+ int error, expected_error;
+ int fd;
+
+- (void) mutex_lock(&ztest_shared->zs_vdev_lock);
++ (void) pthread_mutex_lock(&ztest_shared->zs_vdev_lock);
+
+ spa_config_enter(spa, RW_READER, FTAG);
+
+@@ -1054,7 +1054,7 @@ ztest_vdev_attach_detach(ztest_args_t *z
+ oldpath, newpath, replacing, error, expected_error);
+ }
+
+- (void) mutex_unlock(&ztest_shared->zs_vdev_lock);
++ (void) pthread_mutex_unlock(&ztest_shared->zs_vdev_lock);
+ }
+
+ /*
+@@ -1071,7 +1071,7 @@ ztest_vdev_LUN_growth(ztest_args_t *za)
+ size_t fsize;
+ int fd;
+
+- (void) mutex_lock(&ztest_shared->zs_vdev_lock);
++ (void) pthread_mutex_lock(&ztest_shared->zs_vdev_lock);
+
+ /*
+ * Pick a random leaf vdev.
+@@ -1102,7 +1102,7 @@ ztest_vdev_LUN_growth(ztest_args_t *za)
+ (void) close(fd);
+ }
+
+- (void) mutex_unlock(&ztest_shared->zs_vdev_lock);
++ (void) pthread_mutex_unlock(&ztest_shared->zs_vdev_lock);
+ }
+
+ /* ARGSUSED */
+@@ -1198,7 +1198,7 @@ ztest_dmu_objset_create_destroy(ztest_ar
+ uint64_t objects;
+ ztest_replay_t zr;
+
+- (void) rw_rdlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
+ (void) snprintf(name, 100, "%s/%s_temp_%llu", za->za_pool, za->za_pool,
+ (u_longlong_t)za->za_instance);
+
+@@ -1242,7 +1242,7 @@ ztest_dmu_objset_create_destroy(ztest_ar
+ if (error) {
+ if (error == ENOSPC) {
+ ztest_record_enospc("dmu_objset_create");
+- (void) rw_unlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+ return;
+ }
+ fatal(0, "dmu_objset_create(%s) = %d", name, error);
+@@ -1321,7 +1321,7 @@ ztest_dmu_objset_create_destroy(ztest_ar
+ if (error)
+ fatal(0, "dmu_objset_destroy(%s) = %d", name, error);
+
+- (void) rw_unlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+ }
+
+ /*
+@@ -1335,7 +1335,7 @@ ztest_dmu_snapshot_create_destroy(ztest_
+ char snapname[100];
+ char osname[MAXNAMELEN];
+
+- (void) rw_rdlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
+ dmu_objset_name(os, osname);
+ (void) snprintf(snapname, 100, "%s@%llu", osname,
+ (u_longlong_t)za->za_instance);
+@@ -1348,7 +1348,7 @@ ztest_dmu_snapshot_create_destroy(ztest_
+ ztest_record_enospc("dmu_take_snapshot");
+ else if (error != 0 && error != EEXIST)
+ fatal(0, "dmu_take_snapshot() = %d", error);
+- (void) rw_unlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+ }
+
+ #define ZTEST_TRAVERSE_BLOCKS 1000
+@@ -1992,7 +1992,7 @@ ztest_dmu_write_parallel(ztest_args_t *z
+ int bs = ZTEST_DIROBJ_BLOCKSIZE;
+ int do_free = 0;
+ uint64_t off, txg_how;
+- mutex_t *lp;
++ pthread_mutex_t *lp;
+ char osname[MAXNAMELEN];
+ char iobuf[SPA_MAXBLOCKSIZE];
+ blkptr_t blk = { 0 };
+@@ -2041,7 +2041,7 @@ ztest_dmu_write_parallel(ztest_args_t *z
+ }
+
+ lp = &ztest_shared->zs_sync_lock[b];
+- (void) mutex_lock(lp);
++ (void) pthread_mutex_lock(lp);
+
+ wbt->bt_objset = dmu_objset_id(os);
+ wbt->bt_object = ZTEST_DIROBJ;
+@@ -2087,7 +2087,7 @@ ztest_dmu_write_parallel(ztest_args_t *z
+ dmu_write(os, ZTEST_DIROBJ, off, btsize, wbt, tx);
+ }
+
+- (void) mutex_unlock(lp);
++ (void) pthread_mutex_unlock(lp);
+
+ if (ztest_random(1000) == 0)
+ (void) poll(NULL, 0, 1); /* open dn_notxholds window */
+@@ -2106,7 +2106,7 @@ ztest_dmu_write_parallel(ztest_args_t *z
+ /*
+ * dmu_sync() the block we just wrote.
+ */
+- (void) mutex_lock(lp);
++ (void) pthread_mutex_lock(lp);
+
+ blkoff = P2ALIGN_TYPED(off, bs, uint64_t);
+ error = dmu_buf_hold(os, ZTEST_DIROBJ, blkoff, FTAG, &db);
+@@ -2114,7 +2114,7 @@ ztest_dmu_write_parallel(ztest_args_t *z
+ if (error) {
+ dprintf("dmu_buf_hold(%s, %d, %llx) = %d\n",
+ osname, ZTEST_DIROBJ, blkoff, error);
+- (void) mutex_unlock(lp);
++ (void) pthread_mutex_unlock(lp);
+ return;
+ }
+ blkoff = off - blkoff;
+@@ -2122,7 +2122,7 @@ ztest_dmu_write_parallel(ztest_args_t *z
+ dmu_buf_rele(db, FTAG);
+ za->za_dbuf = NULL;
+
+- (void) mutex_unlock(lp);
++ (void) pthread_mutex_unlock(lp);
+
+ if (error) {
+ dprintf("dmu_sync(%s, %d, %llx) = %d\n",
+@@ -2502,7 +2502,7 @@ ztest_dsl_prop_get_set(ztest_args_t *za)
+ char osname[MAXNAMELEN];
+ int error;
+
+- (void) rw_rdlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
+
+ dmu_objset_name(os, osname);
+
+@@ -2541,7 +2541,7 @@ ztest_dsl_prop_get_set(ztest_args_t *za)
+ }
+ }
+
+- (void) rw_unlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+ }
+
+ static void
+@@ -2693,7 +2693,7 @@ ztest_spa_rename(ztest_args_t *za)
+ int error;
+ spa_t *spa;
+
+- (void) rw_wrlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_wrlock(&ztest_shared->zs_name_lock);
+
+ oldname = za->za_pool;
+ newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
+@@ -2745,7 +2745,7 @@ ztest_spa_rename(ztest_args_t *za)
+
+ umem_free(newname, strlen(newname) + 1);
+
+- (void) rw_unlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+ }
+
+
+@@ -3090,13 +3090,13 @@ ztest_run(char *pool)
+ ztest_args_t *za;
+ spa_t *spa;
+ char name[100];
+- thread_t tid;
++ pthread_t tid;
+
+- (void) _mutex_init(&zs->zs_vdev_lock, USYNC_THREAD, NULL);
+- (void) rwlock_init(&zs->zs_name_lock, USYNC_THREAD, NULL);
++ (void) pthread_mutex_init(&zs->zs_vdev_lock, NULL);
++ (void) pthread_rwlock_init(&zs->zs_name_lock, NULL);
+
+ for (t = 0; t < ZTEST_SYNC_LOCKS; t++)
+- (void) _mutex_init(&zs->zs_sync_lock[t], USYNC_THREAD, NULL);
++ (void) pthread_mutex_init(&zs->zs_sync_lock[t], NULL);
+
+ /*
+ * Destroy one disk before we even start.
+@@ -3153,7 +3153,7 @@ ztest_run(char *pool)
+ * start the thread before setting the zio_io_fail_shift, which
+ * will indicate our failure rate.
+ */
+- error = thr_create(0, 0, ztest_suspend_monitor, NULL, THR_BOUND, &tid);
++ error = pthread_create(&tid, NULL, ztest_suspend_monitor, NULL);
+ if (error) {
+ fatal(0, "can't create suspend monitor thread: error %d",
+ t, error);
+ }
+@@ -3217,7 +3217,7 @@ ztest_run(char *pool)
+ if (t < zopt_datasets) {
+ ztest_replay_t zr;
+ int test_future = FALSE;
+- (void) rw_rdlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
+ (void) snprintf(name, 100, "%s/%s_%d", pool, pool, d);
+ error = dmu_objset_create(name, DMU_OST_OTHER, NULL, 0,
+ ztest_create_cb, NULL);
+@@ -3225,7 +3225,7 @@ ztest_run(char *pool)
+ test_future = TRUE;
+ } else if (error == ENOSPC) {
+ zs->zs_enospc_count++;
+- (void) rw_unlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+ break;
+ } else if (error != 0) {
+ fatal(0, "dmu_objset_create(%s) = %d",
+@@ -3236,7 +3236,7 @@ ztest_run(char *pool)
+ if (error)
+ fatal(0, "dmu_objset_open('%s') = %d",
+ name, error);
+- (void) rw_unlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+ if (test_future)
+ ztest_dmu_check_future_leak(&za[t]);
+ zr.zr_os = za[d].za_os;
+@@ -3245,15 +3245,15 @@ ztest_run(char *pool)
+ za[d].za_zilog = zil_open(za[d].za_os, NULL);
+ }
+
+- error = thr_create(0, 0, ztest_thread, &za[t], THR_BOUND,
+- &za[t].za_thread);
++ error = pthread_create(&za[t].za_thread, NULL, ztest_thread,
++ &za[t]);
+ if (error)
+ fatal(0, "can't create thread %d: error %d",
+ t, error);
+ }
+
+ while (--t >= 0) {
+- error = thr_join(za[t].za_thread, NULL, NULL);
++ error = pthread_join(za[t].za_thread, NULL);
+ if (error)
+ fatal(0, "thr_join(%d) = %d", t, error);
+ if (za[t].za_th)
+@@ -3276,14 +3276,14 @@ ztest_run(char *pool)
+ * If we had out-of-space errors, destroy a random objset.
+ */
+ if (zs->zs_enospc_count != 0) {
+- (void) rw_rdlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
+ d = (int)ztest_random(zopt_datasets);
+ (void) snprintf(name, 100, "%s/%s_%d", pool, pool, d);
+ if (zopt_verbose >= 3)
+ (void) printf("Destroying %s to free up space\n", name);
+ (void) dmu_objset_find(name, ztest_destroy_cb, &za[d],
+ DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
+- (void) rw_unlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+ }
+
+ txg_wait_synced(spa_get_dsl(spa), 0);
+@@ -3301,7 +3301,7 @@ ztest_run(char *pool)
+ mutex_enter(&spa->spa_zio_lock);
+ cv_broadcast(&spa->spa_zio_cv);
+ mutex_exit(&spa->spa_zio_lock);
+- error = thr_join(tid, NULL, NULL);
++ error = pthread_join(tid, NULL);
+ if (error)
+ fatal(0, "thr_join(%d) = %d", tid, error);
+
+Index: zfs+chaos4/lib/libuutil/uu_misc.c
+===================================================================
+--- zfs+chaos4.orig/lib/libuutil/uu_misc.c
++++ zfs+chaos4/lib/libuutil/uu_misc.c
+@@ -37,7 +37,6 @@
+ #include <stdlib.h>
+ #include <string.h>
+ #include <sys/debug.h>
+-#include <thread.h>
+ #include <unistd.h>
+
+ #if !defined(TEXT_DOMAIN)
+@@ -70,11 +69,12 @@ static va_list uu_panic_args;
+ static pthread_t uu_panic_thread;
+
+ static uint32_t _uu_main_error;
++static __thread int _uu_main_thread = 0;
+
+ void
+ uu_set_error(uint_t code)
+ {
+- if (thr_main() != 0) {
++ if (_uu_main_thread) {
+ _uu_main_error = code;
+ return;
+ }
+@@ -103,7 +103,7 @@ uu_set_error(uint_t code)
+ uint32_t
+ uu_error(void)
+ {
+- if (thr_main() != 0)
++ if (_uu_main_thread)
+ return (_uu_main_error);
+
+ if (uu_error_key_setup < 0) /* can't happen? */
+@@ -255,5 +255,6 @@ uu_release_child(void)
+ static void
+ uu_init(void)
+ {
++ _uu_main_thread = 1;
+ (void) pthread_atfork(uu_lockup, uu_release, uu_release_child);
+ }
+Index: zfs+chaos4/lib/libzfscommon/include/sys/zfs_context_user.h
+===================================================================
+--- zfs+chaos4.orig/lib/libzfscommon/include/sys/zfs_context_user.h
++++ zfs+chaos4/lib/libzfscommon/include/sys/zfs_context_user.h
+@@ -52,8 +52,7 @@ extern "C" {
+ #include <errno.h>
+ #include <string.h>
+ #include <strings.h>
+-#include <synch.h>
+-#include <thread.h>
++#include <pthread.h>
+ #include <assert.h>
+ #include <alloca.h>
+ #include <umem.h>
+@@ -191,13 +190,15 @@ _NOTE(CONSTCOND) } while (0)
+ /*
+ * Threads
+ */
+-#define curthread ((void *)(uintptr_t)thr_self())
++
++/* XXX: not portable */
++#define curthread ((void *)(uintptr_t)pthread_self())
+
+ typedef struct kthread kthread_t;
+
+ #define thread_create(stk, stksize, func, arg, len, pp, state, pri) \
+ zk_thread_create(func, arg)
+-#define thread_exit() thr_exit(NULL)
++#define thread_exit() pthread_exit(NULL)
+
+ extern kthread_t *zk_thread_create(void (*func)(), void *arg);
+
+@@ -207,28 +208,18 @@ extern kthread_t *zk_thread_create(void
+ /*
+ * Mutexes
+ */
++#define MTX_MAGIC 0x9522f51362a6e326ull
+ typedef struct kmutex {
+ void *m_owner;
+- boolean_t initialized;
+- mutex_t m_lock;
++ uint64_t m_magic;
++ pthread_mutex_t m_lock;
+ } kmutex_t;
+
+-#define MUTEX_DEFAULT USYNC_THREAD
+-#undef MUTEX_HELD
+-#define MUTEX_HELD(m) _mutex_held(&(m)->m_lock)
+-
+-/*
+- * Argh -- we have to get cheesy here because the kernel and userland
+- * have different signatures for the same routine.
+- */
+-extern int _mutex_init(mutex_t *mp, int type, void *arg);
+-extern int _mutex_destroy(mutex_t *mp);
+-
+-#define mutex_init(mp, b, c, d) zmutex_init((kmutex_t *)(mp))
+-#define mutex_destroy(mp) zmutex_destroy((kmutex_t *)(mp))
++#define MUTEX_DEFAULT 0
++#define MUTEX_HELD(m) ((m)->m_owner == curthread)
+
+-extern void zmutex_init(kmutex_t *mp);
+-extern void zmutex_destroy(kmutex_t *mp);
++extern void mutex_init(kmutex_t *mp, char *name, int type, void *cookie);
++extern void mutex_destroy(kmutex_t *mp);
+ extern void mutex_enter(kmutex_t *mp);
+ extern void mutex_exit(kmutex_t *mp);
+ extern int mutex_tryenter(kmutex_t *mp);
+@@ -237,23 +228,24 @@ extern void *mutex_owner(kmutex_t *mp);
+ /*
+ * RW locks
+ */
++#define RW_MAGIC 0x4d31fb123648e78aull
+ typedef struct krwlock {
+- void *rw_owner;
+- boolean_t initialized;
+- rwlock_t rw_lock;
++ void *rw_owner;
++ void *rw_wr_owner;
++ uint64_t rw_magic;
++ pthread_rwlock_t rw_lock;
++ uint_t rw_readers;
+ } krwlock_t;
+
+ typedef int krw_t;
+
+ #define RW_READER 0
+ #define RW_WRITER 1
+-#define RW_DEFAULT USYNC_THREAD
+-
+-#undef RW_READ_HELD
+-#define RW_READ_HELD(x) _rw_read_held(&(x)->rw_lock)
++#define RW_DEFAULT 0
+
+-#undef RW_WRITE_HELD
+-#define RW_WRITE_HELD(x) _rw_write_held(&(x)->rw_lock)
++#define RW_READ_HELD(x) ((x)->rw_readers > 0)
++#define RW_WRITE_HELD(x) ((x)->rw_wr_owner == curthread)
++#define RW_LOCK_HELD(x) (RW_READ_HELD(x) || RW_WRITE_HELD(x))
+
+ extern void rw_init(krwlock_t *rwlp, char *name, int type, void *arg);
+ extern void rw_destroy(krwlock_t *rwlp);
+@@ -271,9 +263,13 @@ extern gid_t *crgetgroups(cred_t *cr);
+ /*
+ * Condition variables
+ */
+-typedef cond_t kcondvar_t;
++#define CV_MAGIC 0xd31ea9a83b1b30c4ull
++typedef struct kcondvar {
++ uint64_t cv_magic;
++ pthread_cond_t cv;
++} kcondvar_t;
+
+-#define CV_DEFAULT USYNC_THREAD
++#define CV_DEFAULT 0
+
+ extern void cv_init(kcondvar_t *cv, char *name, int type, void *arg);
+ extern void cv_destroy(kcondvar_t *cv);
+@@ -444,7 +440,8 @@ extern void delay(clock_t ticks);
+ #define minclsyspri 60
+ #define maxclsyspri 99
+
+-#define CPU_SEQID (thr_self() & (max_ncpus - 1))
++/* XXX: not portable */
++#define CPU_SEQID (pthread_self() & (max_ncpus - 1))
+
+ #define kcred NULL
+ #define CRED() NULL
+Index: zfs+chaos4/lib/libzpool/kernel.c
+===================================================================
+--- zfs+chaos4.orig/lib/libzpool/kernel.c
++++ zfs+chaos4/lib/libzpool/kernel.c
+@@ -38,6 +38,7 @@
+ #include <sys/zfs_context.h>
+ #include <sys/zmod.h>
+ #include <sys/utsname.h>
++#include <sys/time.h>
+
+ /*
+ * Emulation of kernel services in userland.
+@@ -60,11 +61,15 @@ struct utsname utsname = {
+ kthread_t *
+ zk_thread_create(void (*func)(), void *arg)
+ {
+- thread_t tid;
++ pthread_t tid;
+
+- VERIFY(thr_create(0, 0, (void *(*)(void *))func, arg, THR_DETACHED,
+- &tid) == 0);
++ pthread_attr_t attr;
++ VERIFY(pthread_attr_init(&attr) == 0);
++ VERIFY(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0);
+
++ VERIFY(pthread_create(&tid, &attr, (void *(*)(void *))func, arg) == 0);
++
++ /* XXX: not portable */
+ return ((void *)(uintptr_t)tid);
+ }
+
+@@ -97,30 +102,37 @@ kstat_delete(kstat_t *ksp)
+ * =========================================================================
+ */
+ void
+-zmutex_init(kmutex_t *mp)
++mutex_init(kmutex_t *mp, char *name, int type, void *cookie)
+ {
++ ASSERT(type == MUTEX_DEFAULT);
++ ASSERT(cookie == NULL);
++
++#ifdef IM_FEELING_LUCKY
++ ASSERT(mp->m_magic != MTX_MAGIC);
++#endif
++
+ mp->m_owner = NULL;
+- mp->initialized = B_TRUE;
+- (void) _mutex_init(&mp->m_lock, USYNC_THREAD, NULL);
++ mp->m_magic = MTX_MAGIC;
++ VERIFY3S(pthread_mutex_init(&mp->m_lock, NULL), ==, 0);
+ }
+
+ void
+-zmutex_destroy(kmutex_t *mp)
++mutex_destroy(kmutex_t *mp)
+ {
+- ASSERT(mp->initialized == B_TRUE);
++ ASSERT(mp->m_magic == MTX_MAGIC);
+ ASSERT(mp->m_owner == NULL);
+- (void) _mutex_destroy(&(mp)->m_lock);
++ VERIFY3S(pthread_mutex_destroy(&(mp)->m_lock), ==, 0);
+ mp->m_owner = (void *)-1UL;
+- mp->initialized = B_FALSE;
++ mp->m_magic = 0;
+ }
+
+ void
+ mutex_enter(kmutex_t *mp)
+ {
+- ASSERT(mp->initialized == B_TRUE);
++ ASSERT(mp->m_magic == MTX_MAGIC);
+ ASSERT(mp->m_owner != (void *)-1UL);
+ ASSERT(mp->m_owner != curthread);
+- VERIFY(mutex_lock(&mp->m_lock) == 0);
++ VERIFY3S(pthread_mutex_lock(&mp->m_lock), ==, 0);
+ ASSERT(mp->m_owner == NULL);
+ mp->m_owner = curthread;
+ }
+@@ -128,9 +140,9 @@ mutex_enter(kmutex_t *mp)
+ int
+ mutex_tryenter(kmutex_t *mp)
+ {
+- ASSERT(mp->initialized == B_TRUE);
++ ASSERT(mp->m_magic == MTX_MAGIC);
+ ASSERT(mp->m_owner != (void *)-1UL);
+- if (0 == mutex_trylock(&mp->m_lock)) {
++ if (0 == pthread_mutex_trylock(&mp->m_lock)) {
+ ASSERT(mp->m_owner == NULL);
+ mp->m_owner = curthread;
+ return (1);
+@@ -142,16 +154,16 @@ mutex_tryenter(kmutex_t *mp)
+ void
+ mutex_exit(kmutex_t *mp)
+ {
+- ASSERT(mp->initialized == B_TRUE);
++ ASSERT(mp->m_magic == MTX_MAGIC);
+ ASSERT(mutex_owner(mp) == curthread);
+ mp->m_owner = NULL;
+- VERIFY(mutex_unlock(&mp->m_lock) == 0);
++ VERIFY3S(pthread_mutex_unlock(&mp->m_lock), ==, 0);
+ }
+
+ void *
+ mutex_owner(kmutex_t *mp)
+ {
+- ASSERT(mp->initialized == B_TRUE);
++ ASSERT(mp->m_magic == MTX_MAGIC);
+ return (mp->m_owner);
+ }
+
+@@ -164,31 +176,48 @@ mutex_owner(kmutex_t *mp)
+ void
+ rw_init(krwlock_t *rwlp, char *name, int type, void *arg)
+ {
+- rwlock_init(&rwlp->rw_lock, USYNC_THREAD, NULL);
++ ASSERT(type == RW_DEFAULT);
++ ASSERT(arg == NULL);
++
++#ifdef IM_FEELING_LUCKY
++ ASSERT(rwlp->rw_magic != RW_MAGIC);
++#endif
++
++ VERIFY3S(pthread_rwlock_init(&rwlp->rw_lock, NULL), ==, 0);
+ rwlp->rw_owner = NULL;
+- rwlp->initialized = B_TRUE;
++ rwlp->rw_wr_owner = NULL;
++ rwlp->rw_readers = 0;
++ rwlp->rw_magic = RW_MAGIC;
+ }
+
+ void
+ rw_destroy(krwlock_t *rwlp)
+ {
+- rwlock_destroy(&rwlp->rw_lock);
+- rwlp->rw_owner = (void *)-1UL;
+- rwlp->initialized = B_FALSE;
++ ASSERT(rwlp->rw_magic == RW_MAGIC);
++
++ VERIFY3S(pthread_rwlock_destroy(&rwlp->rw_lock), ==, 0);
++ rwlp->rw_magic = 0;
+ }
+
+ void
+ rw_enter(krwlock_t *rwlp, krw_t rw)
+ {
+- ASSERT(!RW_LOCK_HELD(rwlp));
+- ASSERT(rwlp->initialized == B_TRUE);
+- ASSERT(rwlp->rw_owner != (void *)-1UL);
++ ASSERT(rwlp->rw_magic == RW_MAGIC);
+ ASSERT(rwlp->rw_owner != curthread);
++ ASSERT(rwlp->rw_wr_owner != curthread);
+
+- if (rw == RW_READER)
+- (void) rw_rdlock(&rwlp->rw_lock);
+- else
+- (void) rw_wrlock(&rwlp->rw_lock);
++ if (rw == RW_READER) {
++ VERIFY3S(pthread_rwlock_rdlock(&rwlp->rw_lock), ==, 0);
++ ASSERT(rwlp->rw_wr_owner == NULL);
++
++ atomic_inc_uint(&rwlp->rw_readers);
++ } else {
++ VERIFY3S(pthread_rwlock_wrlock(&rwlp->rw_lock), ==, 0);
++ ASSERT(rwlp->rw_wr_owner == NULL);
++ ASSERT3U(rwlp->rw_readers, ==, 0);
++
++ rwlp->rw_wr_owner = curthread;
++ }
+
+ rwlp->rw_owner = curthread;
+ }
+@@ -196,11 +225,16 @@ rw_enter(krwlock_t *rwlp, krw_t rw)
+ void
+ rw_exit(krwlock_t *rwlp)
+ {
+- ASSERT(rwlp->initialized == B_TRUE);
+- ASSERT(rwlp->rw_owner != (void *)-1UL);
++ ASSERT(rwlp->rw_magic == RW_MAGIC);
++ ASSERT(RW_LOCK_HELD(rwlp));
++
++ if (RW_READ_HELD(rwlp))
++ atomic_dec_uint(&rwlp->rw_readers);
++ else
++ rwlp->rw_wr_owner = NULL;
+
+ rwlp->rw_owner = NULL;
+- (void) rw_unlock(&rwlp->rw_lock);
++ VERIFY3S(pthread_rwlock_unlock(&rwlp->rw_lock), ==, 0);
+ }
+
+ int
+@@ -208,19 +242,29 @@ rw_tryenter(krwlock_t *rwlp, krw_t rw)
+ {
+ int rv;
+
+- ASSERT(rwlp->initialized == B_TRUE);
+- ASSERT(rwlp->rw_owner != (void *)-1UL);
++ ASSERT(rwlp->rw_magic == RW_MAGIC);
+
+ if (rw == RW_READER)
+- rv = rw_tryrdlock(&rwlp->rw_lock);
++ rv = pthread_rwlock_tryrdlock(&rwlp->rw_lock);
+ else
+- rv = rw_trywrlock(&rwlp->rw_lock);
++ rv = pthread_rwlock_trywrlock(&rwlp->rw_lock);
+
+ if (rv == 0) {
++ ASSERT(rwlp->rw_wr_owner == NULL);
++
++ if (rw == RW_READER)
++ atomic_inc_uint(&rwlp->rw_readers);
++ else {
++ ASSERT3U(rwlp->rw_readers, ==, 0);
++ rwlp->rw_wr_owner = curthread;
++ }
++
+ rwlp->rw_owner = curthread;
+ return (1);
+ }
+
++ VERIFY3S(rv, ==, EBUSY);
++
+ return (0);
+ }
+
+@@ -228,8 +272,7 @@ rw_tryenter(krwlock_t *rwlp, krw_t rw)
+ int
+ rw_tryupgrade(krwlock_t *rwlp)
+ {
+- ASSERT(rwlp->initialized == B_TRUE);
+- ASSERT(rwlp->rw_owner != (void *)-1UL);
++ ASSERT(rwlp->rw_magic == RW_MAGIC);
+
+ return (0);
+ }
+@@ -243,22 +286,34 @@ rw_tryupgrade(krwlock_t *rwlp)
+ void
+ cv_init(kcondvar_t *cv, char *name, int type, void *arg)
+ {
+- VERIFY(cond_init(cv, type, NULL) == 0);
++ ASSERT(type == CV_DEFAULT);
++
++#ifdef IM_FEELING_LUCKY
++ ASSERT(cv->cv_magic != CV_MAGIC);
++#endif
++
++ cv->cv_magic = CV_MAGIC;
++
++ VERIFY3S(pthread_cond_init(&cv->cv, NULL), ==, 0);
+ }
+
+ void
+ cv_destroy(kcondvar_t *cv)
+ {
+- VERIFY(cond_destroy(cv) == 0);
++ ASSERT(cv->cv_magic == CV_MAGIC);
++ VERIFY3S(pthread_cond_destroy(&cv->cv), ==, 0);
++ cv->cv_magic = 0;
+ }
+
+ void
+ cv_wait(kcondvar_t *cv, kmutex_t *mp)
+ {
++ ASSERT(cv->cv_magic == CV_MAGIC);
+ ASSERT(mutex_owner(mp) == curthread);
+ mp->m_owner = NULL;
+- int ret = cond_wait(cv, &mp->m_lock);
+- VERIFY(ret == 0 || ret == EINTR);
++ int ret = pthread_cond_wait(&cv->cv, &mp->m_lock);
++ if (ret != 0)
++ VERIFY3S(ret, ==, EINTR);
+ mp->m_owner = curthread;
+ }
+
+@@ -266,29 +321,38 @@ clock_t
+ cv_timedwait(kcondvar_t *cv, kmutex_t *mp, clock_t abstime)
+ {
+ int error;
++ struct timeval tv;
+ timestruc_t ts;
+ clock_t delta;
+
++ ASSERT(cv->cv_magic == CV_MAGIC);
++
+ top:
+ delta = abstime - lbolt;
+ if (delta <= 0)
+ return (-1);
+
+- ts.tv_sec = delta / hz;
+- ts.tv_nsec = (delta % hz) * (NANOSEC / hz);
++ VERIFY(gettimeofday(&tv, NULL) == 0);
++
++ ts.tv_sec = tv.tv_sec + delta / hz;
++ ts.tv_nsec = tv.tv_usec * 1000 + (delta % hz) * (NANOSEC / hz);
++ if (ts.tv_nsec >= NANOSEC) {
++ ts.tv_sec++;
++ ts.tv_nsec -= NANOSEC;
++ }
+
+ ASSERT(mutex_owner(mp) == curthread);
+ mp->m_owner = NULL;
+- error = cond_reltimedwait(cv, &mp->m_lock, &ts);
++ error = pthread_cond_timedwait(&cv->cv, &mp->m_lock, &ts);
+ mp->m_owner = curthread;
+
+- if (error == ETIME)
++ if (error == ETIMEDOUT)
+ return (-1);
+
+ if (error == EINTR)
+ goto top;
+
+- ASSERT(error == 0);
++ VERIFY3S(error, ==, 0);
+
+ return (1);
+ }
+@@ -296,13 +360,15 @@ top:
+ void
+ cv_signal(kcondvar_t *cv)
+ {
+- VERIFY(cond_signal(cv) == 0);
++ ASSERT(cv->cv_magic == CV_MAGIC);
++ VERIFY3S(pthread_cond_signal(&cv->cv), ==, 0);
+ }
+
+ void
+ cv_broadcast(kcondvar_t *cv)
+ {
+- VERIFY(cond_broadcast(cv) == 0);
++ ASSERT(cv->cv_magic == CV_MAGIC);
++ VERIFY3S(pthread_cond_broadcast(&cv->cv), ==, 0);
+ }
+
+ /*
+@@ -549,11 +615,11 @@ __dprintf(const char *file, const char *
+ dprintf_find_string(func)) {
+ /* Print out just the function name if requested */
+ flockfile(stdout);
+- /* XXX: the following printf may not be portable */
++ /* XXX: the following 2 printfs may not be portable */
+ if (dprintf_find_string("pid"))
+ (void) printf("%llu ", (u_longlong_t) getpid());
+ if (dprintf_find_string("tid"))
+- (void) printf("%u ", (uint_t) thr_self());
++ (void) printf("%u ", (uint_t) pthread_self());
+ if (dprintf_find_string("cpu"))
+ (void) printf("%u ", getcpuid());
+ if (dprintf_find_string("time"))
+Index: zfs+chaos4/lib/libzpool/taskq.c
+===================================================================
+--- zfs+chaos4.orig/lib/libzpool/taskq.c
++++ zfs+chaos4/lib/libzpool/taskq.c
+@@ -43,7 +43,7 @@ struct taskq {
+ krwlock_t tq_threadlock;
+ kcondvar_t tq_dispatch_cv;
+ kcondvar_t tq_wait_cv;
+- thread_t *tq_threadlist;
++ pthread_t *tq_threadlist;
+ int tq_flags;
+ int tq_active;
+ int tq_nthreads;
+@@ -186,7 +186,7 @@ taskq_create(const char *name, int nthre
+ tq->tq_maxalloc = maxalloc;
+ tq->tq_task.task_next = &tq->tq_task;
+ tq->tq_task.task_prev = &tq->tq_task;
+- tq->tq_threadlist = kmem_alloc(nthreads * sizeof (thread_t), KM_SLEEP);
++ tq->tq_threadlist = kmem_alloc(nthreads * sizeof (pthread_t), KM_SLEEP);
+
+ if (flags & TASKQ_PREPOPULATE) {
+ mutex_enter(&tq->tq_lock);
+@@ -196,8 +196,8 @@ taskq_create(const char *name, int nthre
+ }
+
+ for (t = 0; t < nthreads; t++)
+- VERIFY(thr_create(0, 0, taskq_thread,
+- tq, THR_BOUND, &tq->tq_threadlist[t]) == 0);
++ VERIFY(pthread_create(&tq->tq_threadlist[t],
++ NULL, taskq_thread, tq) == 0);
+
+ return (tq);
+ }
+@@ -227,9 +227,9 @@ taskq_destroy(taskq_t *tq)
+ mutex_exit(&tq->tq_lock);
+
+ for (t = 0; t < nthreads; t++)
+- VERIFY(thr_join(tq->tq_threadlist[t], NULL, NULL) == 0);
++ VERIFY(pthread_join(tq->tq_threadlist[t], NULL) == 0);
+
+- kmem_free(tq->tq_threadlist, nthreads * sizeof (thread_t));
++ kmem_free(tq->tq_threadlist, nthreads * sizeof (pthread_t));
+
+ rw_destroy(&tq->tq_threadlock);
+ mutex_destroy(&tq->tq_lock);
+@@ -248,7 +248,7 @@ taskq_member(taskq_t *tq, void *t)
+ return (1);
+
+ for (i = 0; i < tq->tq_nthreads; i++)
+- if (tq->tq_threadlist[i] == (thread_t)(uintptr_t)t)
++ if (tq->tq_threadlist[i] == (pthread_t)(uintptr_t)t)
+ return (1);
+
+ return (0);
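The subtlest part of the patch is the cv_timedwait() change in lib/libzpool/kernel.c: Solaris cond_reltimedwait() took a timeout relative to now, while pthread_cond_timedwait() wants an absolute deadline, so the patch rebuilds the deadline from gettimeofday() and normalizes tv_nsec. The standalone sketch below shows just that conversion under stated assumptions; the names wait_ticks and HZ_USER are invented here for illustration and do not appear in the patch, where the logic lives inside the kmutex/kcondvar wrappers.

/*
 * Minimal sketch (not from the patch): convert a relative timeout in
 * "ticks" into the absolute deadline pthread_cond_timedwait() expects.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <sys/time.h>

#define HZ_USER 100                  /* illustrative tick rate (ticks/sec) */
#define NANOSEC 1000000000LL

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;

/* Wait up to 'delta' ticks on 'cond'; return -1 on timeout, 1 otherwise. */
static int
wait_ticks(long delta)
{
	struct timeval tv;
	struct timespec ts;
	int error;

	(void) gettimeofday(&tv, NULL);

	/* deadline = now + delta ticks, normalized so tv_nsec < 1 second */
	ts.tv_sec = tv.tv_sec + delta / HZ_USER;
	ts.tv_nsec = tv.tv_usec * 1000 + (delta % HZ_USER) * (NANOSEC / HZ_USER);
	if (ts.tv_nsec >= NANOSEC) {
		ts.tv_sec++;
		ts.tv_nsec -= NANOSEC;
	}

	(void) pthread_mutex_lock(&lock);
	do {
		error = pthread_cond_timedwait(&cond, &lock, &ts);
	} while (error == EINTR);	/* defensive; POSIX should not return EINTR */
	(void) pthread_mutex_unlock(&lock);

	return (error == ETIMEDOUT ? -1 : 1);
}

int
main(void)
{
	/* Nothing signals 'cond', so this should time out after ~1 second. */
	printf("wait_ticks() = %d\n", wait_ticks(HZ_USER));
	return (0);
}

Build with cc -pthread. Because the deadline is absolute, retrying the wait after a spurious wakeup reuses the same timespec, which is why the patch computes it once per pass through the loop.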
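The libuutil change replaces thr_main() with a thread-local flag: every thread's copy of a __thread variable starts at 0, and only the main thread's copy is set to 1 (the patch does this in uu_init(), which is expected to run on the main thread during library initialization). The sketch below is illustrative only and sets the flag at the top of main() instead of in a library constructor.

/*
 * Minimal sketch (assumption: GCC/Clang __thread TLS, as used by the patch)
 * of detecting "am I the main thread?" without Solaris thr_main().
 */
#include <pthread.h>
#include <stdio.h>

static __thread int is_main_thread = 0;	/* per-thread copy, defaults to 0 */

static void *
worker(void *arg)
{
	(void) arg;
	/* This thread never set its own copy of the flag, so it reads 0. */
	printf("worker: is_main_thread = %d\n", is_main_thread);
	return (NULL);
}

int
main(void)
{
	pthread_t tid;

	is_main_thread = 1;	/* only the main thread's copy becomes 1 */

	if (pthread_create(&tid, NULL, worker, NULL) != 0)
		return (1);
	(void) pthread_join(tid, NULL);

	printf("main:   is_main_thread = %d\n", is_main_thread);
	return (0);
}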