-rw-r--r--  .topmsg                                  |   8
-rw-r--r--  cmd/zdb/zdb.c                            |   1
-rw-r--r--  cmd/zinject/translate.c                  |   2
-rw-r--r--  cmd/ztest/ztest.c                        | 115
-rw-r--r--  lib/libuutil/uu_misc.c                   |   7
-rw-r--r--  lib/libzfs/libzfs_config.c               |   2
-rw-r--r--  lib/libzfs/libzfs_sendrecv.c             |   2
-rw-r--r--  lib/libzpool/include/sys/zfs_context.h   |  78
-rw-r--r--  lib/libzpool/kernel.c                    | 194
-rw-r--r--  lib/libzpool/taskq.c                     |  14
-rw-r--r--  module/zfs/arc.c                         |  13
-rw-r--r--  module/zfs/dbuf.c                        |  18
-rw-r--r--  module/zfs/dnode.c                       |   8
-rw-r--r--  module/zfs/dnode_sync.c                  |   8
-rw-r--r--  module/zfs/dsl_dataset.c                 |   3
-rw-r--r--  module/zfs/dsl_pool.c                    |   1
-rw-r--r--  module/zfs/gzip.c                        |  27
-rw-r--r--  module/zfs/include/sys/dbuf.h            |   3
-rw-r--r--  module/zfs/vdev.c                        |   2
-rw-r--r--  module/zfs/zap.c                         |   4
-rw-r--r--  module/zfs/zap_micro.c                   |   2
-rw-r--r--  module/zfs/zfs_znode.c                   |   2
-rw-r--r--  module/zfs/zil.c                         |   5
-rw-r--r--  module/zfs/zio.c                         |   2
24 files changed, 310 insertions, 211 deletions
diff --git a/.topmsg b/.topmsg
index b7a46338e..55b2a3f8f 100644
--- a/.topmsg
+++ b/.topmsg
@@ -1,8 +1,8 @@
From: Brian Behlendorf <[email protected]>
-Subject: [PATCH] fix deadcode
+Subject: [PATCH] fix clock wrap
-Remove deadcode. It's possible the code should be in use
-somewhere, but as the source code is laid out it currently
-is not.
+Fix lbolt clock wrap.
Signed-off-by: Brian Behlendorf <[email protected]>
+
+---
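
The clock wrap this subject line refers to shows up below in arc.c, where the reclaim deadline moves from a clock_t compared against lbolt to an int64_t compared against lbolt64. A minimal standalone sketch of the failure mode; the tick values and hz = 1000 are illustrative assumptions, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    /* A 32-bit signed tick counter sitting just below its maximum. */
    int32_t lbolt32 = INT32_MAX - 10;
    /* Deadline "5 seconds from now" at hz = 1000; the addition wraps
     * negative on two's-complement targets (shown for illustration). */
    int32_t growtime32 = (int32_t)((uint32_t)lbolt32 + 5 * 1000u);
    /* The same arithmetic on a 64-bit counter does not wrap. */
    int64_t lbolt64 = (int64_t)INT32_MAX - 10;
    int64_t growtime64 = lbolt64 + 5 * 1000;

    /* Prints 1: the wrapped deadline looks as if it already passed. */
    (void) printf("32-bit expired? %d\n", lbolt32 >= growtime32);
    /* Prints 0: the 64-bit deadline is still in the future. */
    (void) printf("64-bit expired? %d\n", lbolt64 >= growtime64);
    return (0);
}
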
diff --git a/cmd/zdb/zdb.c b/cmd/zdb/zdb.c
index 0ced25865..09f23c1b2 100644
--- a/cmd/zdb/zdb.c
+++ b/cmd/zdb/zdb.c
@@ -52,7 +52,6 @@
#include <sys/zfs_fuid.h>
#include <sys/arc.h>
#undef ZFS_MAXNAMELEN
-#undef verify
#include <libzfs.h>
const char cmdname[] = "zdb";
diff --git a/cmd/zinject/translate.c b/cmd/zinject/translate.c
index c85e024b6..0264aca6f 100644
--- a/cmd/zinject/translate.c
+++ b/cmd/zinject/translate.c
@@ -25,8 +25,6 @@
#include <libzfs.h>
-#undef verify /* both libzfs.h and zfs_context.h want to define this */
-
#include <sys/zfs_context.h>
#include <errno.h>
diff --git a/cmd/ztest/ztest.c b/cmd/ztest/ztest.c
index 4503a3d02..ce494fa79 100644
--- a/cmd/ztest/ztest.c
+++ b/cmd/ztest/ztest.c
@@ -138,7 +138,7 @@ typedef struct ztest_args {
spa_t *za_spa;
objset_t *za_os;
zilog_t *za_zilog;
- thread_t za_thread;
+ pthread_t za_thread;
uint64_t za_instance;
uint64_t za_random;
uint64_t za_diroff;
@@ -221,18 +221,18 @@ ztest_info_t ztest_info[] = {
* Stuff we need to share writably between parent and child.
*/
typedef struct ztest_shared {
- mutex_t zs_vdev_lock;
- rwlock_t zs_name_lock;
- uint64_t zs_vdev_primaries;
- uint64_t zs_vdev_aux;
- uint64_t zs_enospc_count;
- hrtime_t zs_start_time;
- hrtime_t zs_stop_time;
- uint64_t zs_alloc;
- uint64_t zs_space;
- ztest_info_t zs_info[ZTEST_FUNCS];
- mutex_t zs_sync_lock[ZTEST_SYNC_LOCKS];
- uint64_t zs_seq[ZTEST_SYNC_LOCKS];
+ pthread_mutex_t zs_vdev_lock;
+ pthread_rwlock_t zs_name_lock;
+ uint64_t zs_vdev_primaries;
+ uint64_t zs_vdev_aux;
+ uint64_t zs_enospc_count;
+ hrtime_t zs_start_time;
+ hrtime_t zs_stop_time;
+ uint64_t zs_alloc;
+ uint64_t zs_space;
+ ztest_info_t zs_info[ZTEST_FUNCS];
+ pthread_mutex_t zs_sync_lock[ZTEST_SYNC_LOCKS];
+ uint64_t zs_seq[ZTEST_SYNC_LOCKS];
} ztest_shared_t;
static char ztest_dev_template[] = "%s/%s.%llua";
@@ -804,7 +804,7 @@ ztest_spa_create_destroy(ztest_args_t *za)
* Attempt to create an existing pool. It shouldn't matter
* what's in the nvroot; we should fail with EEXIST.
*/
- (void) rw_rdlock(&ztest_shared->zs_name_lock);
+ (void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1);
error = spa_create(za->za_pool, nvroot, NULL, NULL, NULL);
nvlist_free(nvroot);
@@ -820,7 +820,7 @@ ztest_spa_create_destroy(ztest_args_t *za)
fatal(0, "spa_destroy() = %d", error);
spa_close(spa, FTAG);
- (void) rw_unlock(&ztest_shared->zs_name_lock);
+ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
}
static vdev_t *
@@ -850,7 +850,7 @@ ztest_vdev_add_remove(ztest_args_t *za)
nvlist_t *nvroot;
int error;
- (void) mutex_lock(&ztest_shared->zs_vdev_lock);
+ (void) pthread_mutex_lock(&ztest_shared->zs_vdev_lock);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
@@ -868,7 +868,7 @@ ztest_vdev_add_remove(ztest_args_t *za)
error = spa_vdev_add(spa, nvroot);
nvlist_free(nvroot);
- (void) mutex_unlock(&ztest_shared->zs_vdev_lock);
+ (void) pthread_mutex_unlock(&ztest_shared->zs_vdev_lock);
if (error == ENOSPC)
ztest_record_enospc("spa_vdev_add");
@@ -897,7 +897,7 @@ ztest_vdev_aux_add_remove(ztest_args_t *za)
aux = ZPOOL_CONFIG_L2CACHE;
}
- (void) mutex_lock(&ztest_shared->zs_vdev_lock);
+ (void) pthread_mutex_lock(&ztest_shared->zs_vdev_lock);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
@@ -953,7 +953,7 @@ ztest_vdev_aux_add_remove(ztest_args_t *za)
fatal(0, "spa_vdev_remove(%llu) = %d", guid, error);
}
- (void) mutex_unlock(&ztest_shared->zs_vdev_lock);
+ (void) pthread_mutex_unlock(&ztest_shared->zs_vdev_lock);
}
/*
@@ -979,7 +979,7 @@ ztest_vdev_attach_detach(ztest_args_t *za)
int oldvd_is_log;
int error, expected_error;
- (void) mutex_lock(&ztest_shared->zs_vdev_lock);
+ (void) pthread_mutex_lock(&ztest_shared->zs_vdev_lock);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
@@ -1039,7 +1039,7 @@ ztest_vdev_attach_detach(ztest_args_t *za)
if (error != 0 && error != ENODEV && error != EBUSY &&
error != ENOTSUP)
fatal(0, "detach (%s) returned %d", oldpath, error);
- (void) mutex_unlock(&ztest_shared->zs_vdev_lock);
+ (void) pthread_mutex_unlock(&ztest_shared->zs_vdev_lock);
return;
}
@@ -1132,7 +1132,7 @@ ztest_vdev_attach_detach(ztest_args_t *za)
(longlong_t)newsize, replacing, error, expected_error);
}
- (void) mutex_unlock(&ztest_shared->zs_vdev_lock);
+ (void) pthread_mutex_unlock(&ztest_shared->zs_vdev_lock);
}
/*
@@ -1148,7 +1148,7 @@ ztest_vdev_LUN_growth(ztest_args_t *za)
size_t fsize;
int fd;
- (void) mutex_lock(&ztest_shared->zs_vdev_lock);
+ (void) pthread_mutex_lock(&ztest_shared->zs_vdev_lock);
/*
* Pick a random leaf vdev.
@@ -1179,7 +1179,7 @@ ztest_vdev_LUN_growth(ztest_args_t *za)
(void) close(fd);
}
- (void) mutex_unlock(&ztest_shared->zs_vdev_lock);
+ (void) pthread_mutex_unlock(&ztest_shared->zs_vdev_lock);
}
/* ARGSUSED */
@@ -1278,7 +1278,7 @@ ztest_dmu_objset_create_destroy(ztest_args_t *za)
uint64_t seq;
uint64_t objects;
- (void) rw_rdlock(&ztest_shared->zs_name_lock);
+ (void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
(void) snprintf(name, 100, "%s/%s_temp_%llu", za->za_pool, za->za_pool,
(u_longlong_t)za->za_instance);
@@ -1321,7 +1321,7 @@ ztest_dmu_objset_create_destroy(ztest_args_t *za)
if (error) {
if (error == ENOSPC) {
ztest_record_enospc("dmu_objset_create");
- (void) rw_unlock(&ztest_shared->zs_name_lock);
+ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
return;
}
fatal(0, "dmu_objset_create(%s) = %d", name, error);
@@ -1403,7 +1403,7 @@ ztest_dmu_objset_create_destroy(ztest_args_t *za)
if (error)
fatal(0, "dmu_objset_destroy(%s) = %d", name, error);
- (void) rw_unlock(&ztest_shared->zs_name_lock);
+ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
}
/*
@@ -1417,7 +1417,7 @@ ztest_dmu_snapshot_create_destroy(ztest_args_t *za)
char snapname[100];
char osname[MAXNAMELEN];
- (void) rw_rdlock(&ztest_shared->zs_name_lock);
+ (void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
dmu_objset_name(os, osname);
(void) snprintf(snapname, 100, "%s@%llu", osname,
(u_longlong_t)za->za_instance);
@@ -1430,7 +1430,7 @@ ztest_dmu_snapshot_create_destroy(ztest_args_t *za)
ztest_record_enospc("dmu_take_snapshot");
else if (error != 0 && error != EEXIST)
fatal(0, "dmu_take_snapshot() = %d", error);
- (void) rw_unlock(&ztest_shared->zs_name_lock);
+ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
}
/*
@@ -1928,7 +1928,7 @@ ztest_dmu_write_parallel(ztest_args_t *za)
int bs = ZTEST_DIROBJ_BLOCKSIZE;
int do_free = 0;
uint64_t off, txg, txg_how;
- mutex_t *lp;
+ pthread_mutex_t *lp;
char osname[MAXNAMELEN];
char iobuf[SPA_MAXBLOCKSIZE];
blkptr_t blk = { 0 };
@@ -1978,7 +1978,7 @@ ztest_dmu_write_parallel(ztest_args_t *za)
txg = dmu_tx_get_txg(tx);
lp = &ztest_shared->zs_sync_lock[b];
- (void) mutex_lock(lp);
+ (void) pthread_mutex_lock(lp);
wbt->bt_objset = dmu_objset_id(os);
wbt->bt_object = ZTEST_DIROBJ;
@@ -2031,7 +2031,7 @@ ztest_dmu_write_parallel(ztest_args_t *za)
dmu_write(os, ZTEST_DIROBJ, off, btsize, wbt, tx);
}
- (void) mutex_unlock(lp);
+ (void) pthread_mutex_unlock(lp);
if (ztest_random(1000) == 0)
(void) poll(NULL, 0, 1); /* open dn_notxholds window */
@@ -2050,13 +2050,13 @@ ztest_dmu_write_parallel(ztest_args_t *za)
/*
* dmu_sync() the block we just wrote.
*/
- (void) mutex_lock(lp);
+ (void) pthread_mutex_lock(lp);
blkoff = P2ALIGN_TYPED(off, bs, uint64_t);
error = dmu_buf_hold(os, ZTEST_DIROBJ, blkoff, FTAG, &db);
za->za_dbuf = db;
if (error) {
- (void) mutex_unlock(lp);
+ (void) pthread_mutex_unlock(lp);
return;
}
blkoff = off - blkoff;
@@ -2064,7 +2064,7 @@ ztest_dmu_write_parallel(ztest_args_t *za)
dmu_buf_rele(db, FTAG);
za->za_dbuf = NULL;
- (void) mutex_unlock(lp);
+ (void) pthread_mutex_unlock(lp);
if (error)
return;
@@ -2444,7 +2444,7 @@ ztest_dsl_prop_get_set(ztest_args_t *za)
char osname[MAXNAMELEN];
int error;
- (void) rw_rdlock(&ztest_shared->zs_name_lock);
+ (void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
dmu_objset_name(os, osname);
@@ -2483,7 +2483,7 @@ ztest_dsl_prop_get_set(ztest_args_t *za)
}
}
- (void) rw_unlock(&ztest_shared->zs_name_lock);
+ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
}
/*
@@ -2647,7 +2647,7 @@ ztest_spa_rename(ztest_args_t *za)
int error;
spa_t *spa;
- (void) rw_wrlock(&ztest_shared->zs_name_lock);
+ (void) pthread_rwlock_wrlock(&ztest_shared->zs_name_lock);
oldname = za->za_pool;
newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
@@ -2699,7 +2699,7 @@ ztest_spa_rename(ztest_args_t *za)
umem_free(newname, strlen(newname) + 1);
- (void) rw_unlock(&ztest_shared->zs_name_lock);
+ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
}
@@ -2937,15 +2937,18 @@ ztest_spa_import_export(char *oldname, char *newname)
nvlist_free(config);
}
-static void
-ztest_resume(spa_t *spa)
+static void *
+ztest_resume(void *arg)
{
+ spa_t *spa = arg;
+
if (spa_suspended(spa)) {
spa_vdev_state_enter(spa);
vdev_clear(spa, NULL);
(void) spa_vdev_state_exit(spa, NULL, 0);
zio_resume(spa);
}
+ return (NULL);
}
static void *
@@ -3035,15 +3038,16 @@ ztest_run(char *pool)
ztest_args_t *za;
spa_t *spa;
char name[100];
- thread_t resume_tid;
+ pthread_t resume_tid;
ztest_exiting = B_FALSE;
- (void) _mutex_init(&zs->zs_vdev_lock, USYNC_THREAD, NULL);
- (void) rwlock_init(&zs->zs_name_lock, USYNC_THREAD, NULL);
+ (void) pthread_mutex_init(&zs->zs_vdev_lock, NULL);
+ (void) pthread_rwlock_init(&zs->zs_name_lock, NULL);
for (t = 0; t < ZTEST_SYNC_LOCKS; t++)
- (void) _mutex_init(&zs->zs_sync_lock[t], USYNC_THREAD, NULL);
+ (void) pthread_mutex_init(&zs->zs_sync_lock[t], NULL);
+
/*
* Destroy one disk before we even start.
@@ -3110,8 +3114,7 @@ ztest_run(char *pool)
/*
* Create a thread to periodically resume suspended I/O.
*/
- VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND,
- &resume_tid) == 0);
+ VERIFY(pthread_create(&resume_tid, NULL, ztest_resume_thread, spa) == 0);
/*
* Verify that we can safely inquire about any object,
@@ -3160,7 +3163,7 @@ ztest_run(char *pool)
if (t < zopt_datasets) {
int test_future = FALSE;
- (void) rw_rdlock(&ztest_shared->zs_name_lock);
+ (void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
(void) snprintf(name, 100, "%s/%s_%d", pool, pool, d);
error = dmu_objset_create(name, DMU_OST_OTHER, NULL, 0,
ztest_create_cb, NULL);
@@ -3168,7 +3171,7 @@ ztest_run(char *pool)
test_future = TRUE;
} else if (error == ENOSPC) {
zs->zs_enospc_count++;
- (void) rw_unlock(&ztest_shared->zs_name_lock);
+ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
break;
} else if (error != 0) {
fatal(0, "dmu_objset_create(%s) = %d",
@@ -3179,7 +3182,7 @@ ztest_run(char *pool)
if (error)
fatal(0, "dmu_objset_open('%s') = %d",
name, error);
- (void) rw_unlock(&ztest_shared->zs_name_lock);
+ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
if (test_future)
ztest_dmu_check_future_leak(&za[t]);
zil_replay(za[d].za_os, za[d].za_os,
@@ -3187,12 +3190,12 @@ ztest_run(char *pool)
za[d].za_zilog = zil_open(za[d].za_os, NULL);
}
- VERIFY(thr_create(0, 0, ztest_thread, &za[t], THR_BOUND,
- &za[t].za_thread) == 0);
+ VERIFY(pthread_create(&za[t].za_thread, NULL, ztest_thread,
+ &za[t]) == 0);
}
while (--t >= 0) {
- VERIFY(thr_join(za[t].za_thread, NULL, NULL) == 0);
+ VERIFY(pthread_join(za[t].za_thread, NULL) == 0);
if (t < zopt_datasets) {
zil_close(za[t].za_zilog);
dmu_objset_close(za[t].za_os);
@@ -3211,14 +3214,14 @@ ztest_run(char *pool)
* If we had out-of-space errors, destroy a random objset.
*/
if (zs->zs_enospc_count != 0) {
- (void) rw_rdlock(&ztest_shared->zs_name_lock);
+ (void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
d = (int)ztest_random(zopt_datasets);
(void) snprintf(name, 100, "%s/%s_%d", pool, pool, d);
if (zopt_verbose >= 3)
(void) printf("Destroying %s to free up space\n", name);
(void) dmu_objset_find(name, ztest_destroy_cb, &za[d],
DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
- (void) rw_unlock(&ztest_shared->zs_name_lock);
+ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
}
txg_wait_synced(spa_get_dsl(spa), 0);
@@ -3227,7 +3230,7 @@ ztest_run(char *pool)
/* Kill the resume thread */
ztest_exiting = B_TRUE;
- VERIFY(thr_join(resume_tid, NULL, NULL) == 0);
+ VERIFY(pthread_join(resume_tid, NULL) == 0);
ztest_resume(spa);
/*
diff --git a/lib/libuutil/uu_misc.c b/lib/libuutil/uu_misc.c
index 74ec177c1..fc57328c2 100644
--- a/lib/libuutil/uu_misc.c
+++ b/lib/libuutil/uu_misc.c
@@ -37,7 +37,6 @@
#include <stdlib.h>
#include <string.h>
#include <sys/debug.h>
-#include <thread.h>
#include <unistd.h>
#if !defined(TEXT_DOMAIN)
@@ -70,11 +69,12 @@ static va_list uu_panic_args;
static pthread_t uu_panic_thread;
static uint32_t _uu_main_error;
+static __thread int _uu_main_thread = 0;
void
uu_set_error(uint_t code)
{
- if (thr_main() != 0) {
+ if (_uu_main_thread) {
_uu_main_error = code;
return;
}
@@ -103,7 +103,7 @@ uu_set_error(uint_t code)
uint32_t
uu_error(void)
{
- if (thr_main() != 0)
+ if (_uu_main_thread)
return (_uu_main_error);
if (uu_error_key_setup < 0) /* can't happen? */
@@ -251,5 +251,6 @@ uu_release_child(void)
static void
uu_init(void)
{
+ _uu_main_thread = 1;
(void) pthread_atfork(uu_lockup, uu_release, uu_release_child);
}
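
The change above replaces Solaris' thr_main() with a thread-local flag: a __thread variable starts at zero in every new thread, and only the thread that runs the init routine sets it to one. A minimal sketch of the same idea, assuming (as with uu_init() here) an init routine that is known to run on the main thread:

#include <pthread.h>
#include <stdio.h>

static __thread int main_thread = 0;    /* zero in every newly created thread */

static void
lib_init(void)                          /* assumed to run on the main thread */
{
    main_thread = 1;
}

static void *
worker(void *arg)
{
    (void) arg;
    (void) printf("worker: main_thread = %d\n", main_thread);  /* prints 0 */
    return (NULL);
}

int
main(void)
{
    pthread_t tid;

    lib_init();
    (void) printf("main:   main_thread = %d\n", main_thread);  /* prints 1 */

    if (pthread_create(&tid, NULL, worker, NULL) == 0)
        (void) pthread_join(tid, NULL);
    return (0);
}
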
diff --git a/lib/libzfs/libzfs_config.c b/lib/libzfs/libzfs_config.c
index 94640d1b1..781153225 100644
--- a/lib/libzfs/libzfs_config.c
+++ b/lib/libzfs/libzfs_config.c
@@ -123,7 +123,7 @@ namespace_reload(libzfs_handle_t *hdl)
return (no_memory(hdl));
}
- if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
+ if (zcmd_alloc_dst_nvlist(hdl, &zc, 32768) != 0)
return (-1);
for (;;) {
diff --git a/lib/libzfs/libzfs_sendrecv.c b/lib/libzfs/libzfs_sendrecv.c
index 5a2e2aeb6..ab6977e9e 100644
--- a/lib/libzfs/libzfs_sendrecv.c
+++ b/lib/libzfs/libzfs_sendrecv.c
@@ -1642,7 +1642,7 @@ zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
* Determine name of destination snapshot, store in zc_value.
*/
(void) strcpy(zc.zc_value, tosnap);
- (void) strncat(zc.zc_value, drrb->drr_toname+choplen,
+ (void) strlcat(zc.zc_value, drrb->drr_toname+choplen,
sizeof (zc.zc_value));
if (!zfs_name_valid(zc.zc_value, ZFS_TYPE_SNAPSHOT)) {
zcmd_free_nvlists(&zc);
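
The one-line change above works because strlcat() takes the total size of the destination buffer, while strncat()'s third argument is the maximum number of bytes to append; passing sizeof (zc.zc_value) to strncat() therefore permits a write past the end once the buffer already holds the tosnap prefix. A small illustrative sketch using only standard C (the buffer and strings are made up):

#include <stdio.h>
#include <string.h>

int
main(void)
{
    char dst[16] = "pool/fs";
    const char *suffix = "@a-rather-long-snapshot-name";

    /*
     * Buggy pattern: strncat(dst, suffix, sizeof (dst)) may append up to
     * sizeof (dst) bytes beyond the current end of dst.  With strncat()
     * the bound must be the space that is actually left:
     */
    (void) strncat(dst, suffix, sizeof (dst) - strlen(dst) - 1);

    (void) printf("%s\n", dst);     /* truncated, but within bounds */
    return (0);
}
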
diff --git a/lib/libzpool/include/sys/zfs_context.h b/lib/libzpool/include/sys/zfs_context.h
index a50e4b0ac..5022d4c8e 100644
--- a/lib/libzpool/include/sys/zfs_context.h
+++ b/lib/libzpool/include/sys/zfs_context.h
@@ -50,8 +50,7 @@ extern "C" {
#include <errno.h>
#include <string.h>
#include <strings.h>
-#include <synch.h>
-#include <thread.h>
+#include <pthread.h>
#include <assert.h>
#include <alloca.h>
#include <umem.h>
@@ -75,6 +74,12 @@ extern "C" {
#include <sys/sysevent/eventdefs.h>
/*
+ * Stack
+ */
+
+#define noinline __attribute__((noinline))
+
+/*
* Debugging
*/
@@ -104,6 +109,7 @@ extern void vpanic(const char *, __va_list);
#define fm_panic panic
/* This definition is copied from assert.h. */
+#ifndef verify
#if defined(__STDC__)
#if __STDC_VERSION__ - 0 >= 199901L
#define verify(EX) (void)((EX) || \
@@ -114,7 +120,10 @@ extern void vpanic(const char *, __va_list);
#else
#define verify(EX) (void)((EX) || (_assert("EX", __FILE__, __LINE__), 0))
#endif /* __STDC__ */
+#endif
+#undef VERIFY
+#undef ASSERT
#define VERIFY verify
#define ASSERT assert
@@ -187,15 +196,18 @@ _NOTE(CONSTCOND) } while (0)
/*
* Threads
*/
-#define curthread ((void *)(uintptr_t)thr_self())
+#define curthread ((void *)(uintptr_t)pthread_self())
+#define tsd_get(key) pthread_getspecific(key)
+#define tsd_set(key, val) pthread_setspecific(key, val)
typedef struct kthread kthread_t;
+typedef void (*thread_func_t)(void *);
#define thread_create(stk, stksize, func, arg, len, pp, state, pri) \
- zk_thread_create(func, arg)
-#define thread_exit() thr_exit(NULL)
+ zk_thread_create((thread_func_t)func, arg)
+#define thread_exit() pthread_exit(NULL)
-extern kthread_t *zk_thread_create(void (*func)(), void *arg);
+extern kthread_t *zk_thread_create(thread_func_t func, void *arg);
#define issig(why) (FALSE)
#define ISSIG(thr, why) (FALSE)
@@ -203,28 +215,18 @@ extern kthread_t *zk_thread_create(void (*func)(), void *arg);
/*
* Mutexes
*/
+#define MTX_MAGIC 0x9522f51362a6e326ull
typedef struct kmutex {
void *m_owner;
- boolean_t initialized;
- mutex_t m_lock;
+ uint64_t m_magic;
+ pthread_mutex_t m_lock;
} kmutex_t;
-#define MUTEX_DEFAULT USYNC_THREAD
-#undef MUTEX_HELD
-#define MUTEX_HELD(m) _mutex_held(&(m)->m_lock)
+#define MUTEX_DEFAULT 0
+#define MUTEX_HELD(m) ((m)->m_owner == curthread)
-/*
- * Argh -- we have to get cheesy here because the kernel and userland
- * have different signatures for the same routine.
- */
-extern int _mutex_init(mutex_t *mp, int type, void *arg);
-extern int _mutex_destroy(mutex_t *mp);
-
-#define mutex_init(mp, b, c, d) zmutex_init((kmutex_t *)(mp))
-#define mutex_destroy(mp) zmutex_destroy((kmutex_t *)(mp))
-
-extern void zmutex_init(kmutex_t *mp);
-extern void zmutex_destroy(kmutex_t *mp);
+extern void mutex_init(kmutex_t *mp, char *name, int type, void *cookie);
+extern void mutex_destroy(kmutex_t *mp);
extern void mutex_enter(kmutex_t *mp);
extern void mutex_exit(kmutex_t *mp);
extern int mutex_tryenter(kmutex_t *mp);
@@ -233,23 +235,24 @@ extern void *mutex_owner(kmutex_t *mp);
/*
* RW locks
*/
+#define RW_MAGIC 0x4d31fb123648e78aull
typedef struct krwlock {
- void *rw_owner;
- boolean_t initialized;
- rwlock_t rw_lock;
+ void *rw_owner;
+ void *rw_wr_owner;
+ uint64_t rw_magic;
+ pthread_rwlock_t rw_lock;
+ uint_t rw_readers;
} krwlock_t;
typedef int krw_t;
#define RW_READER 0
#define RW_WRITER 1
-#define RW_DEFAULT USYNC_THREAD
-
-#undef RW_READ_HELD
-#define RW_READ_HELD(x) _rw_read_held(&(x)->rw_lock)
+#define RW_DEFAULT 0
-#undef RW_WRITE_HELD
-#define RW_WRITE_HELD(x) _rw_write_held(&(x)->rw_lock)
+#define RW_READ_HELD(x) ((x)->rw_readers > 0)
+#define RW_WRITE_HELD(x) ((x)->rw_wr_owner == curthread)
+#define RW_LOCK_HELD(x) (RW_READ_HELD(x) || RW_WRITE_HELD(x))
extern void rw_init(krwlock_t *rwlp, char *name, int type, void *arg);
extern void rw_destroy(krwlock_t *rwlp);
@@ -267,9 +270,13 @@ extern gid_t *crgetgroups(cred_t *cr);
/*
* Condition variables
*/
-typedef cond_t kcondvar_t;
+#define CV_MAGIC 0xd31ea9a83b1b30c4ull
+typedef struct kcondvar {
+ uint64_t cv_magic;
+ pthread_cond_t cv;
+} kcondvar_t;
-#define CV_DEFAULT USYNC_THREAD
+#define CV_DEFAULT 0
extern void cv_init(kcondvar_t *cv, char *name, int type, void *arg);
extern void cv_destroy(kcondvar_t *cv);
@@ -443,7 +450,8 @@ extern void delay(clock_t ticks);
#define minclsyspri 60
#define maxclsyspri 99
-#define CPU_SEQID (thr_self() & (max_ncpus - 1))
+/* XXX: not portable */
+#define CPU_SEQID (pthread_self() & (max_ncpus - 1))
#define kcred NULL
#define CRED() NULL
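
The kmutex_t defined in this header records its owner because POSIX offers no portable way to ask whether the calling thread holds a mutex, which is what MUTEX_HELD() needs. A reduced sketch of that bookkeeping, with abort() standing in for the VERIFY macros and the magic field omitted:

#include <assert.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct kmutex {
    void            *m_owner;
    pthread_mutex_t  m_lock;
} kmutex_t;

#define curthread     ((void *)(uintptr_t)pthread_self())
#define MUTEX_HELD(m) ((m)->m_owner == curthread)

static void
mutex_enter(kmutex_t *mp)
{
    if (pthread_mutex_lock(&mp->m_lock) != 0)   /* VERIFY3S in the patch */
        abort();
    mp->m_owner = curthread;        /* record the owner after acquiring */
}

static void
mutex_exit(kmutex_t *mp)
{
    assert(MUTEX_HELD(mp));
    mp->m_owner = NULL;             /* clear the owner before releasing */
    if (pthread_mutex_unlock(&mp->m_lock) != 0)
        abort();
}

int
main(void)
{
    kmutex_t m = { NULL, PTHREAD_MUTEX_INITIALIZER };

    mutex_enter(&m);
    assert(MUTEX_HELD(&m));
    mutex_exit(&m);
    return (0);
}
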
diff --git a/lib/libzpool/kernel.c b/lib/libzpool/kernel.c
index 89108fe5b..66ee516b3 100644
--- a/lib/libzpool/kernel.c
+++ b/lib/libzpool/kernel.c
@@ -34,8 +34,8 @@
#include <sys/stat.h>
#include <sys/processor.h>
#include <sys/zfs_context.h>
-#include <sys/zmod.h>
#include <sys/utsname.h>
+#include <sys/time.h>
#include <sys/systeminfo.h>
/*
@@ -57,13 +57,17 @@ struct utsname utsname = {
*/
/*ARGSUSED*/
kthread_t *
-zk_thread_create(void (*func)(), void *arg)
+zk_thread_create(thread_func_t func, void *arg)
{
- thread_t tid;
+ pthread_t tid;
- VERIFY(thr_create(0, 0, (void *(*)(void *))func, arg, THR_DETACHED,
- &tid) == 0);
+ pthread_attr_t attr;
+ VERIFY(pthread_attr_init(&attr) == 0);
+ VERIFY(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0);
+ VERIFY(pthread_create(&tid, &attr, (void *(*)(void *))func, arg) == 0);
+
+ /* XXX: not portable */
return ((void *)(uintptr_t)tid);
}
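
zk_thread_create() above maps thread_create() onto a detached pthread, which needs both a void *(*)(void *) entry point and an attribute object. A self-contained sketch of that pattern; the worker body is illustrative:

#include <pthread.h>

static void *
worker(void *arg)
{
    (void) arg;
    /* ... do the work the caller handed off ... */
    return (NULL);          /* detached: no one will join this thread */
}

static int
spawn_detached(void *arg)
{
    pthread_t       tid;
    pthread_attr_t  attr;
    int             err;

    (void) pthread_attr_init(&attr);
    (void) pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    err = pthread_create(&tid, &attr, worker, arg);
    (void) pthread_attr_destroy(&attr);

    return (err);           /* 0 on success, errno-style code otherwise */
}

int
main(void)
{
    return (spawn_detached(NULL));
}
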
@@ -96,30 +100,37 @@ kstat_delete(kstat_t *ksp)
* =========================================================================
*/
void
-zmutex_init(kmutex_t *mp)
+mutex_init(kmutex_t *mp, char *name, int type, void *cookie)
{
+ ASSERT(type == MUTEX_DEFAULT);
+ ASSERT(cookie == NULL);
+
+#ifdef IM_FEELING_LUCKY
+ ASSERT(mp->m_magic != MTX_MAGIC);
+#endif
+
mp->m_owner = NULL;
- mp->initialized = B_TRUE;
- (void) _mutex_init(&mp->m_lock, USYNC_THREAD, NULL);
+ mp->m_magic = MTX_MAGIC;
+ VERIFY3S(pthread_mutex_init(&mp->m_lock, NULL), ==, 0);
}
void
-zmutex_destroy(kmutex_t *mp)
+mutex_destroy(kmutex_t *mp)
{
- ASSERT(mp->initialized == B_TRUE);
+ ASSERT(mp->m_magic == MTX_MAGIC);
ASSERT(mp->m_owner == NULL);
- (void) _mutex_destroy(&(mp)->m_lock);
+ VERIFY3S(pthread_mutex_destroy(&(mp)->m_lock), ==, 0);
mp->m_owner = (void *)-1UL;
- mp->initialized = B_FALSE;
+ mp->m_magic = 0;
}
void
mutex_enter(kmutex_t *mp)
{
- ASSERT(mp->initialized == B_TRUE);
+ ASSERT(mp->m_magic == MTX_MAGIC);
ASSERT(mp->m_owner != (void *)-1UL);
ASSERT(mp->m_owner != curthread);
- VERIFY(mutex_lock(&mp->m_lock) == 0);
+ VERIFY3S(pthread_mutex_lock(&mp->m_lock), ==, 0);
ASSERT(mp->m_owner == NULL);
mp->m_owner = curthread;
}
@@ -127,9 +138,9 @@ mutex_enter(kmutex_t *mp)
int
mutex_tryenter(kmutex_t *mp)
{
- ASSERT(mp->initialized == B_TRUE);
+ ASSERT(mp->m_magic == MTX_MAGIC);
ASSERT(mp->m_owner != (void *)-1UL);
- if (0 == mutex_trylock(&mp->m_lock)) {
+ if (0 == pthread_mutex_trylock(&mp->m_lock)) {
ASSERT(mp->m_owner == NULL);
mp->m_owner = curthread;
return (1);
@@ -141,16 +152,16 @@ mutex_tryenter(kmutex_t *mp)
void
mutex_exit(kmutex_t *mp)
{
- ASSERT(mp->initialized == B_TRUE);
+ ASSERT(mp->m_magic == MTX_MAGIC);
ASSERT(mutex_owner(mp) == curthread);
mp->m_owner = NULL;
- VERIFY(mutex_unlock(&mp->m_lock) == 0);
+ VERIFY3S(pthread_mutex_unlock(&mp->m_lock), ==, 0);
}
void *
mutex_owner(kmutex_t *mp)
{
- ASSERT(mp->initialized == B_TRUE);
+ ASSERT(mp->m_magic == MTX_MAGIC);
return (mp->m_owner);
}
@@ -163,31 +174,48 @@ mutex_owner(kmutex_t *mp)
void
rw_init(krwlock_t *rwlp, char *name, int type, void *arg)
{
- rwlock_init(&rwlp->rw_lock, USYNC_THREAD, NULL);
+ ASSERT(type == RW_DEFAULT);
+ ASSERT(arg == NULL);
+
+#ifdef IM_FEELING_LUCKY
+ ASSERT(rwlp->rw_magic != RW_MAGIC);
+#endif
+
+ VERIFY3S(pthread_rwlock_init(&rwlp->rw_lock, NULL), ==, 0);
rwlp->rw_owner = NULL;
- rwlp->initialized = B_TRUE;
+ rwlp->rw_wr_owner = NULL;
+ rwlp->rw_readers = 0;
+ rwlp->rw_magic = RW_MAGIC;
}
void
rw_destroy(krwlock_t *rwlp)
{
- rwlock_destroy(&rwlp->rw_lock);
- rwlp->rw_owner = (void *)-1UL;
- rwlp->initialized = B_FALSE;
+ ASSERT(rwlp->rw_magic == RW_MAGIC);
+
+ VERIFY3S(pthread_rwlock_destroy(&rwlp->rw_lock), ==, 0);
+ rwlp->rw_magic = 0;
}
void
rw_enter(krwlock_t *rwlp, krw_t rw)
{
- ASSERT(!RW_LOCK_HELD(rwlp));
- ASSERT(rwlp->initialized == B_TRUE);
- ASSERT(rwlp->rw_owner != (void *)-1UL);
+ ASSERT(rwlp->rw_magic == RW_MAGIC);
ASSERT(rwlp->rw_owner != curthread);
+ ASSERT(rwlp->rw_wr_owner != curthread);
- if (rw == RW_READER)
- VERIFY(rw_rdlock(&rwlp->rw_lock) == 0);
- else
- VERIFY(rw_wrlock(&rwlp->rw_lock) == 0);
+ if (rw == RW_READER) {
+ VERIFY3S(pthread_rwlock_rdlock(&rwlp->rw_lock), ==, 0);
+ ASSERT(rwlp->rw_wr_owner == NULL);
+
+ atomic_inc_uint(&rwlp->rw_readers);
+ } else {
+ VERIFY3S(pthread_rwlock_wrlock(&rwlp->rw_lock), ==, 0);
+ ASSERT(rwlp->rw_wr_owner == NULL);
+ ASSERT3U(rwlp->rw_readers, ==, 0);
+
+ rwlp->rw_wr_owner = curthread;
+ }
rwlp->rw_owner = curthread;
}
@@ -195,11 +223,16 @@ rw_enter(krwlock_t *rwlp, krw_t rw)
void
rw_exit(krwlock_t *rwlp)
{
- ASSERT(rwlp->initialized == B_TRUE);
- ASSERT(rwlp->rw_owner != (void *)-1UL);
+ ASSERT(rwlp->rw_magic == RW_MAGIC);
+ ASSERT(RW_LOCK_HELD(rwlp));
+
+ if (RW_READ_HELD(rwlp))
+ atomic_dec_uint(&rwlp->rw_readers);
+ else
+ rwlp->rw_wr_owner = NULL;
rwlp->rw_owner = NULL;
- VERIFY(rw_unlock(&rwlp->rw_lock) == 0);
+ VERIFY3S(pthread_rwlock_unlock(&rwlp->rw_lock), ==, 0);
}
int
@@ -207,19 +240,29 @@ rw_tryenter(krwlock_t *rwlp, krw_t rw)
{
int rv;
- ASSERT(rwlp->initialized == B_TRUE);
- ASSERT(rwlp->rw_owner != (void *)-1UL);
+ ASSERT(rwlp->rw_magic == RW_MAGIC);
if (rw == RW_READER)
- rv = rw_tryrdlock(&rwlp->rw_lock);
+ rv = pthread_rwlock_tryrdlock(&rwlp->rw_lock);
else
- rv = rw_trywrlock(&rwlp->rw_lock);
+ rv = pthread_rwlock_trywrlock(&rwlp->rw_lock);
if (rv == 0) {
+ ASSERT(rwlp->rw_wr_owner == NULL);
+
+ if (rw == RW_READER)
+ atomic_inc_uint(&rwlp->rw_readers);
+ else {
+ ASSERT3U(rwlp->rw_readers, ==, 0);
+ rwlp->rw_wr_owner = curthread;
+ }
+
rwlp->rw_owner = curthread;
return (1);
}
+ VERIFY3S(rv, ==, EBUSY);
+
return (0);
}
@@ -227,8 +270,7 @@ rw_tryenter(krwlock_t *rwlp, krw_t rw)
int
rw_tryupgrade(krwlock_t *rwlp)
{
- ASSERT(rwlp->initialized == B_TRUE);
- ASSERT(rwlp->rw_owner != (void *)-1UL);
+ ASSERT(rwlp->rw_magic == RW_MAGIC);
return (0);
}
@@ -242,22 +284,34 @@ rw_tryupgrade(krwlock_t *rwlp)
void
cv_init(kcondvar_t *cv, char *name, int type, void *arg)
{
- VERIFY(cond_init(cv, type, NULL) == 0);
+ ASSERT(type == CV_DEFAULT);
+
+#ifdef IM_FEELING_LUCKY
+ ASSERT(cv->cv_magic != CV_MAGIC);
+#endif
+
+ cv->cv_magic = CV_MAGIC;
+
+ VERIFY3S(pthread_cond_init(&cv->cv, NULL), ==, 0);
}
void
cv_destroy(kcondvar_t *cv)
{
- VERIFY(cond_destroy(cv) == 0);
+ ASSERT(cv->cv_magic == CV_MAGIC);
+ VERIFY3S(pthread_cond_destroy(&cv->cv), ==, 0);
+ cv->cv_magic = 0;
}
void
cv_wait(kcondvar_t *cv, kmutex_t *mp)
{
+ ASSERT(cv->cv_magic == CV_MAGIC);
ASSERT(mutex_owner(mp) == curthread);
mp->m_owner = NULL;
- int ret = cond_wait(cv, &mp->m_lock);
- VERIFY(ret == 0 || ret == EINTR);
+ int ret = pthread_cond_wait(&cv->cv, &mp->m_lock);
+ if (ret != 0)
+ VERIFY3S(ret, ==, EINTR);
mp->m_owner = curthread;
}
@@ -265,29 +319,38 @@ clock_t
cv_timedwait(kcondvar_t *cv, kmutex_t *mp, clock_t abstime)
{
int error;
+ struct timeval tv;
timestruc_t ts;
clock_t delta;
+ ASSERT(cv->cv_magic == CV_MAGIC);
+
top:
delta = abstime - lbolt;
if (delta <= 0)
return (-1);
- ts.tv_sec = delta / hz;
- ts.tv_nsec = (delta % hz) * (NANOSEC / hz);
+ VERIFY(gettimeofday(&tv, NULL) == 0);
+
+ ts.tv_sec = tv.tv_sec + delta / hz;
+ ts.tv_nsec = tv.tv_usec * 1000 + (delta % hz) * (NANOSEC / hz);
+ if (ts.tv_nsec >= NANOSEC) {
+ ts.tv_sec++;
+ ts.tv_nsec -= NANOSEC;
+ }
ASSERT(mutex_owner(mp) == curthread);
mp->m_owner = NULL;
- error = cond_reltimedwait(cv, &mp->m_lock, &ts);
+ error = pthread_cond_timedwait(&cv->cv, &mp->m_lock, &ts);
mp->m_owner = curthread;
- if (error == ETIME)
+ if (error == ETIMEDOUT)
return (-1);
if (error == EINTR)
goto top;
- ASSERT(error == 0);
+ VERIFY3S(error, ==, 0);
return (1);
}
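
cv_timedwait() above has to convert a relative delay in clock ticks into the absolute wall-clock timespec that pthread_cond_timedwait() expects, carrying any nanosecond overflow into whole seconds. The same arithmetic in isolation; hz = 100 is an assumed tick rate for the sketch:

#include <sys/time.h>
#include <time.h>

#define HZ      100                 /* assumed ticks per second */
#define NANOSEC 1000000000LL

/* Return an absolute timespec "delta" ticks in the future. */
static struct timespec
abstime_after_ticks(long delta)
{
    struct timeval  tv;
    struct timespec ts;

    (void) gettimeofday(&tv, NULL);

    ts.tv_sec = tv.tv_sec + delta / HZ;
    ts.tv_nsec = tv.tv_usec * 1000 + (delta % HZ) * (NANOSEC / HZ);
    if (ts.tv_nsec >= NANOSEC) {    /* carry into whole seconds */
        ts.tv_sec++;
        ts.tv_nsec -= NANOSEC;
    }
    return (ts);
}

int
main(void)
{
    struct timespec ts = abstime_after_ticks(5 * HZ);   /* ~5 seconds out */

    return (ts.tv_nsec >= 0 ? 0 : 1);
}
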
@@ -295,13 +358,15 @@ top:
void
cv_signal(kcondvar_t *cv)
{
- VERIFY(cond_signal(cv) == 0);
+ ASSERT(cv->cv_magic == CV_MAGIC);
+ VERIFY3S(pthread_cond_signal(&cv->cv), ==, 0);
}
void
cv_broadcast(kcondvar_t *cv)
{
- VERIFY(cond_broadcast(cv) == 0);
+ ASSERT(cv->cv_magic == CV_MAGIC);
+ VERIFY3S(pthread_cond_broadcast(&cv->cv), ==, 0);
}
/*
@@ -543,7 +608,7 @@ __dprintf(const char *file, const char *func, int line, const char *fmt, ...)
if (dprintf_find_string("pid"))
(void) printf("%d ", getpid());
if (dprintf_find_string("tid"))
- (void) printf("%u ", thr_self());
+ (void) printf("%u ", (uint_t) pthread_self());
if (dprintf_find_string("cpu"))
(void) printf("%u ", getcpuid());
if (dprintf_find_string("time"))
@@ -801,31 +866,6 @@ kernel_fini(void)
urandom_fd = -1;
}
-int
-z_uncompress(void *dst, size_t *dstlen, const void *src, size_t srclen)
-{
- int ret;
- uLongf len = *dstlen;
-
- if ((ret = uncompress(dst, &len, src, srclen)) == Z_OK)
- *dstlen = (size_t)len;
-
- return (ret);
-}
-
-int
-z_compress_level(void *dst, size_t *dstlen, const void *src, size_t srclen,
- int level)
-{
- int ret;
- uLongf len = *dstlen;
-
- if ((ret = compress2(dst, &len, src, srclen, level)) == Z_OK)
- *dstlen = (size_t)len;
-
- return (ret);
-}
-
uid_t
crgetuid(cred_t *cr)
{
diff --git a/lib/libzpool/taskq.c b/lib/libzpool/taskq.c
index 93acdcf8e..d28f6024b 100644
--- a/lib/libzpool/taskq.c
+++ b/lib/libzpool/taskq.c
@@ -42,7 +42,7 @@ struct taskq {
krwlock_t tq_threadlock;
kcondvar_t tq_dispatch_cv;
kcondvar_t tq_wait_cv;
- thread_t *tq_threadlist;
+ pthread_t *tq_threadlist;
int tq_flags;
int tq_active;
int tq_nthreads;
@@ -185,7 +185,7 @@ taskq_create(const char *name, int nthreads, pri_t pri,
tq->tq_maxalloc = maxalloc;
tq->tq_task.task_next = &tq->tq_task;
tq->tq_task.task_prev = &tq->tq_task;
- tq->tq_threadlist = kmem_alloc(nthreads * sizeof (thread_t), KM_SLEEP);
+ tq->tq_threadlist = kmem_alloc(nthreads * sizeof (pthread_t), KM_SLEEP);
if (flags & TASKQ_PREPOPULATE) {
mutex_enter(&tq->tq_lock);
@@ -195,8 +195,8 @@ taskq_create(const char *name, int nthreads, pri_t pri,
}
for (t = 0; t < nthreads; t++)
- (void) thr_create(0, 0, taskq_thread,
- tq, THR_BOUND, &tq->tq_threadlist[t]);
+ VERIFY(pthread_create(&tq->tq_threadlist[t],
+ NULL, taskq_thread, tq) == 0);
return (tq);
}
@@ -226,9 +226,9 @@ taskq_destroy(taskq_t *tq)
mutex_exit(&tq->tq_lock);
for (t = 0; t < nthreads; t++)
- (void) thr_join(tq->tq_threadlist[t], NULL, NULL);
+ VERIFY(pthread_join(tq->tq_threadlist[t], NULL) == 0);
- kmem_free(tq->tq_threadlist, nthreads * sizeof (thread_t));
+ kmem_free(tq->tq_threadlist, nthreads * sizeof (pthread_t));
rw_destroy(&tq->tq_threadlock);
mutex_destroy(&tq->tq_lock);
@@ -247,7 +247,7 @@ taskq_member(taskq_t *tq, void *t)
return (1);
for (i = 0; i < tq->tq_nthreads; i++)
- if (tq->tq_threadlist[i] == (thread_t)(uintptr_t)t)
+ if (tq->tq_threadlist[i] == (pthread_t)(uintptr_t)t)
return (1);
return (0);
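
taskq_create()/taskq_destroy() above follow the usual pthread pool pattern: the pthread_t handles are stored in an array at creation time so the destroy path can join every worker. A stripped-down sketch; NTHREADS and the worker body are made up:

#include <pthread.h>
#include <stdlib.h>

#define NTHREADS 4

static void *
pool_worker(void *arg)
{
    (void) arg;
    /* ... loop pulling tasks off a shared queue until asked to exit ... */
    return (NULL);
}

int
main(void)
{
    pthread_t   *threads;
    int          t;

    threads = calloc(NTHREADS, sizeof (pthread_t));
    if (threads == NULL)
        return (1);

    for (t = 0; t < NTHREADS; t++)
        if (pthread_create(&threads[t], NULL, pool_worker, NULL) != 0)
            return (1);

    for (t = 0; t < NTHREADS; t++)
        (void) pthread_join(threads[t], NULL);

    free(threads);
    return (0);
}
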
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index 3a9598a92..5b38cf30f 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -777,6 +777,8 @@ hdr_cons(void *vbuf, void *unused, int kmflag)
refcount_create(&buf->b_refcnt);
cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
+ list_link_init(&buf->b_arc_node);
+ list_link_init(&buf->b_l2node);
arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
return (0);
@@ -1588,7 +1590,7 @@ arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
mutex_exit(&state->arcs_mtx);
if (bytes_evicted < bytes)
- dprintf("only evicted %lld bytes from %x",
+ dprintf("only evicted %lld bytes from %x\n",
(longlong_t)bytes_evicted, state);
if (skipped)
@@ -1688,7 +1690,7 @@ top:
}
if (bytes_deleted < bytes)
- dprintf("only deleted %lld bytes from %p",
+ dprintf("only deleted %lld bytes from %p\n",
(longlong_t)bytes_deleted, state);
}
@@ -1957,7 +1959,7 @@ arc_kmem_reap_now(arc_reclaim_strategy_t strat)
static void
arc_reclaim_thread(void)
{
- clock_t growtime = 0;
+ int64_t growtime = 0;
arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS;
callb_cpr_t cpr;
@@ -1980,12 +1982,12 @@ arc_reclaim_thread(void)
}
/* reset the growth delay for every reclaim */
- growtime = lbolt + (arc_grow_retry * hz);
+ growtime = lbolt64 + (arc_grow_retry * hz);
arc_kmem_reap_now(last_reclaim);
arc_warm = B_TRUE;
- } else if (arc_no_grow && lbolt >= growtime) {
+ } else if (arc_no_grow && lbolt64 >= growtime) {
arc_no_grow = FALSE;
}
@@ -4506,6 +4508,7 @@ l2arc_add_vdev(spa_t *spa, vdev_t *vd, uint64_t start, uint64_t end)
adddev->l2ad_evict = adddev->l2ad_start;
adddev->l2ad_first = B_TRUE;
adddev->l2ad_writing = B_FALSE;
+ list_link_init(&adddev->l2ad_node);
ASSERT3U(adddev->l2ad_write, >, 0);
/*
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index 113fa1f03..fa5f7634e 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -58,6 +58,7 @@ dbuf_cons(void *vdb, void *unused, int kmflag)
mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
refcount_create(&db->db_holds);
+ list_link_init(&db->db_link);
return (0);
}
@@ -1018,6 +1019,7 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
* transaction group won't leak out when we sync the older txg.
*/
dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
+ list_link_init(&dr->dr_dirty_node);
if (db->db_level == 0) {
void *data_old = db->db_buf;
@@ -1895,7 +1897,11 @@ dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
}
}
-static void
+/* dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
+ * is critical that we not allow the compiler to inline this function into
+ * dbuf_sync_list(), thereby drastically bloating the stack usage.
+ */
+noinline static void
dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
@@ -1935,7 +1941,11 @@ dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
zio_nowait(zio);
}
-static void
+/* dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
+ * critical that we not allow the compiler to inline this function into
+ * dbuf_sync_list(), thereby drastically bloating the stack usage.
+ */
+noinline static void
dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
arc_buf_t **datap = &dr->dt.dl.dr_data;
@@ -1988,6 +1998,10 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
drp = &(*drp)->dr_next;
ASSERT(dr->dr_next == NULL);
*drp = dr->dr_next;
+ if (dr->dr_dbuf->db_level != 0) {
+ mutex_destroy(&dr->dt.di.dr_mtx);
+ list_destroy(&dr->dt.di.dr_children);
+ }
kmem_free(dr, sizeof (dbuf_dirty_record_t));
ASSERT(db->db_dirtycnt > 0);
db->db_dirtycnt -= 1;
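
The noinline annotations added above to dbuf_sync_indirect() and dbuf_sync_leaf() keep their stack frames out of dbuf_sync_list(), which calls them while recursing; if the compiler inlined them, every level of recursion would carry their combined frame. A toy illustration of the attribute as the header defines it (function names and sizes are invented):

#include <stdio.h>

#define noinline __attribute__((noinline))

/*
 * If this helper were inlined into walk(), its scratch buffer would be
 * duplicated in every recursive frame of walk().
 */
noinline static void
visit(int depth)
{
    char scratch[256];

    (void) snprintf(scratch, sizeof (scratch), "visiting depth %d", depth);
    (void) puts(scratch);
}

static void
walk(int depth)
{
    if (depth == 0)
        return;
    visit(depth);
    walk(depth - 1);    /* recursion keeps only walk()'s small frame */
}

int
main(void)
{
    walk(3);
    return (0);
}
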
diff --git a/module/zfs/dnode.c b/module/zfs/dnode.c
index 538a141b0..e695ef02a 100644
--- a/module/zfs/dnode.c
+++ b/module/zfs/dnode.c
@@ -276,13 +276,19 @@ dnode_create(objset_impl_t *os, dnode_phys_t *dnp, dmu_buf_impl_t *db,
uint64_t object)
{
dnode_t *dn = kmem_cache_alloc(dnode_cache, KM_SLEEP);
- (void) dnode_cons(dn, NULL, 0); /* XXX */
dn->dn_objset = os;
dn->dn_object = object;
dn->dn_dbuf = db;
dn->dn_phys = dnp;
+ list_link_init(&dn->dn_link);
+ {
+ int i;
+ for (i = 0; i < TXG_SIZE; i++)
+ list_link_init(&dn->dn_dirty_link[i]);
+ }
+
if (dnp->dn_datablkszsec)
dnode_setdblksz(dn, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
dn->dn_indblkshift = dnp->dn_indblkshift;
diff --git a/module/zfs/dnode_sync.c b/module/zfs/dnode_sync.c
index 23dcb4c7b..7b9603fd0 100644
--- a/module/zfs/dnode_sync.c
+++ b/module/zfs/dnode_sync.c
@@ -403,9 +403,13 @@ dnode_evict_dbufs(dnode_t *dn)
if (evicting)
delay(1);
pass++;
- ASSERT(pass < 100); /* sanity check */
+ if ((pass % 100) == 0)
+ dprintf("Exceeded %d passes evicting dbufs\n", pass);
} while (progress);
+ if (pass >= 100)
+ dprintf("Required %d passes to evict dbufs\n", pass);
+
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
if (dn->dn_bonus && refcount_is_zero(&dn->dn_bonus->db_holds)) {
mutex_enter(&dn->dn_bonus->db_mtx);
@@ -438,6 +442,8 @@ dnode_undirty_dbufs(list_t *list)
} else {
mutex_exit(&db->db_mtx);
dnode_undirty_dbufs(&dr->dt.di.dr_children);
+ mutex_destroy(&dr->dt.di.dr_mtx);
+ list_destroy(&dr->dt.di.dr_children);
}
kmem_free(dr, sizeof (dbuf_dirty_record_t));
dbuf_rele(db, (void *)(uintptr_t)txg);
diff --git a/module/zfs/dsl_dataset.c b/module/zfs/dsl_dataset.c
index a68b12d33..f0070ebc9 100644
--- a/module/zfs/dsl_dataset.c
+++ b/module/zfs/dsl_dataset.c
@@ -357,12 +357,13 @@ dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
ds->ds_dbuf = dbuf;
ds->ds_object = dsobj;
ds->ds_phys = dbuf->db_data;
+ list_link_init(&ds->ds_synced_link);
mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ds->ds_deadlist.bpl_lock, NULL, MUTEX_DEFAULT,
NULL);
- rw_init(&ds->ds_rwlock, 0, 0, 0);
+ rw_init(&ds->ds_rwlock, NULL, RW_DEFAULT, NULL);
cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);
err = bplist_open(&ds->ds_deadlist,
diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c
index dacc57c81..41386b269 100644
--- a/module/zfs/dsl_pool.c
+++ b/module/zfs/dsl_pool.c
@@ -219,6 +219,7 @@ dsl_pool_close(dsl_pool_t *dp)
txg_list_destroy(&dp->dp_dirty_datasets);
txg_list_destroy(&dp->dp_dirty_dirs);
+ txg_list_destroy(&dp->dp_sync_tasks);
list_destroy(&dp->dp_synced_datasets);
arc_flush(dp->dp_spa);
diff --git a/module/zfs/gzip.c b/module/zfs/gzip.c
index b257d4af7..a60772719 100644
--- a/module/zfs/gzip.c
+++ b/module/zfs/gzip.c
@@ -28,22 +28,35 @@
#include <sys/debug.h>
#include <sys/types.h>
-#include <sys/zmod.h>
#ifdef _KERNEL
+
#include <sys/systm.h>
-#else
+#include <sys/zmod.h>
+
+typedef size_t zlen_t;
+#define compress_func z_compress_level
+#define uncompress_func z_uncompress
+
+#else /* _KERNEL */
+
#include <strings.h>
+#include <zlib.h>
+
+typedef uLongf zlen_t;
+#define compress_func compress2
+#define uncompress_func uncompress
+
#endif
size_t
gzip_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
{
- size_t dstlen = d_len;
+ zlen_t dstlen = d_len;
ASSERT(d_len <= s_len);
- if (z_compress_level(d_start, &dstlen, s_start, s_len, n) != Z_OK) {
+ if (compress_func(d_start, &dstlen, s_start, s_len, n) != Z_OK) {
if (d_len != s_len)
return (s_len);
@@ -51,18 +64,18 @@ gzip_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
return (s_len);
}
- return (dstlen);
+ return ((size_t) dstlen);
}
/*ARGSUSED*/
int
gzip_decompress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
{
- size_t dstlen = d_len;
+ zlen_t dstlen = d_len;
ASSERT(d_len >= s_len);
- if (z_uncompress(d_start, &dstlen, s_start, s_len) != Z_OK)
+ if (uncompress_func(d_start, &dstlen, s_start, s_len) != Z_OK)
return (-1);
return (0);
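
In userland the compress_func/uncompress_func macros above resolve to zlib's compress2() and uncompress(), whose in/out length parameter is a uLongf rather than a size_t, hence the zlen_t typedef. A small usage sketch (link with -lz; buffer sizes are arbitrary):

#include <stdio.h>
#include <string.h>
#include <zlib.h>

int
main(void)
{
    const char  src[] = "a compressible string, a compressible string";
    Bytef       cbuf[128], dbuf[128];
    uLongf      clen = sizeof (cbuf);   /* in: capacity, out: bytes used */
    uLongf      dlen = sizeof (dbuf);

    if (compress2(cbuf, &clen, (const Bytef *)src, sizeof (src), 6) != Z_OK)
        return (1);
    if (uncompress(dbuf, &dlen, cbuf, clen) != Z_OK)
        return (1);

    (void) printf("%zu -> %lu -> %lu bytes\n", sizeof (src),
        (unsigned long)clen, (unsigned long)dlen);
    return (0);
}
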
diff --git a/module/zfs/include/sys/dbuf.h b/module/zfs/include/sys/dbuf.h
index 75ce27264..bea71f06f 100644
--- a/module/zfs/include/sys/dbuf.h
+++ b/module/zfs/include/sys/dbuf.h
@@ -85,9 +85,6 @@ struct dmu_tx;
* etc.
*/
-#define LIST_LINK_INACTIVE(link) \
- ((link)->list_next == NULL && (link)->list_prev == NULL)
-
struct dmu_buf_impl;
typedef enum override_states {
diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c
index 2554f96a9..47a1ce89c 100644
--- a/module/zfs/vdev.c
+++ b/module/zfs/vdev.c
@@ -313,6 +313,8 @@ vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
vd->vdev_ops = ops;
vd->vdev_state = VDEV_STATE_CLOSED;
+ list_link_init(&vd->vdev_config_dirty_node);
+ list_link_init(&vd->vdev_state_dirty_node);
mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
diff --git a/module/zfs/zap.c b/module/zfs/zap.c
index ca859ec35..297984bc9 100644
--- a/module/zfs/zap.c
+++ b/module/zfs/zap.c
@@ -388,7 +388,7 @@ zap_create_leaf(zap_t *zap, dmu_tx_t *tx)
ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
- rw_init(&l->l_rwlock, 0, 0, 0);
+ rw_init(&l->l_rwlock, NULL, RW_DEFAULT, NULL);
rw_enter(&l->l_rwlock, RW_WRITER);
l->l_blkid = zap_allocate_blocks(zap, 1);
l->l_dbuf = NULL;
@@ -446,7 +446,7 @@ zap_open_leaf(uint64_t blkid, dmu_buf_t *db)
ASSERT(blkid != 0);
l = kmem_alloc(sizeof (zap_leaf_t), KM_SLEEP);
- rw_init(&l->l_rwlock, 0, 0, 0);
+ rw_init(&l->l_rwlock, NULL, RW_DEFAULT, NULL);
rw_enter(&l->l_rwlock, RW_WRITER);
l->l_blkid = blkid;
l->l_bs = highbit(db->db_size)-1;
diff --git a/module/zfs/zap_micro.c b/module/zfs/zap_micro.c
index abba42775..96964d683 100644
--- a/module/zfs/zap_micro.c
+++ b/module/zfs/zap_micro.c
@@ -286,7 +286,7 @@ mzap_open(objset_t *os, uint64_t obj, dmu_buf_t *db)
ASSERT3U(MZAP_ENT_LEN, ==, sizeof (mzap_ent_phys_t));
zap = kmem_zalloc(sizeof (zap_t), KM_SLEEP);
- rw_init(&zap->zap_rwlock, 0, 0, 0);
+ rw_init(&zap->zap_rwlock, NULL, RW_DEFAULT, NULL);
rw_enter(&zap->zap_rwlock, RW_WRITER);
zap->zap_objset = os;
zap->zap_object = obj;
diff --git a/module/zfs/zfs_znode.c b/module/zfs/zfs_znode.c
index 74983cdc5..7542ea1af 100644
--- a/module/zfs/zfs_znode.c
+++ b/module/zfs/zfs_znode.c
@@ -1569,6 +1569,8 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
if (sense == ZFS_CASE_INSENSITIVE || sense == ZFS_CASE_MIXED)
zfsvfs.z_norm |= U8_TEXTPREP_TOUPPER;
+ /* XXX - This must be destroyed, but I'm not quite sure yet, so for
+ * now I'm just noting the fact that it's an issue. -Brian */
mutex_init(&zfsvfs.z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zfsvfs.z_all_znodes, sizeof (znode_t),
offsetof(znode_t, z_link_node));
diff --git a/module/zfs/zil.c b/module/zfs/zil.c
index 83fef0d87..ab6692ab7 100644
--- a/module/zfs/zil.c
+++ b/module/zfs/zil.c
@@ -494,9 +494,14 @@ zil_rollback_destroy(zilog_t *zilog, dmu_tx_t *tx)
/*
* Ensure there's no outstanding ZIL IO. No lwbs or just the
* unused one that was allocated in advance is ok.
+ *
+ * XXX: The assertion is correct, but we need a portable version
+ * which does not rely on directly accessing the list nodes.
*/
+#if 0
ASSERT(zilog->zl_lwb_list.list_head.list_next ==
zilog->zl_lwb_list.list_head.list_prev);
+#endif
(void) zil_parse(zilog, zil_free_log_block, zil_free_log_record,
tx, zh->zh_claim_txg);
}
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index a669ad64a..a2a920034 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -970,7 +970,7 @@ zio_taskq_dispatch(zio_t *zio, enum zio_taskq_type q)
t = ZIO_TYPE_NULL;
(void) taskq_dispatch(zio->io_spa->spa_zio_taskq[t][q],
- (task_func_t *)zio_execute, zio, TQ_SLEEP);
+ (task_func_t *)zio_execute, zio, TQ_NOSLEEP);
}
static boolean_t