Diffstat (limited to 'module/zfs')
-rw-r--r--  module/zfs/arc.c     4
-rw-r--r--  module/zfs/dbuf.c    2
-rw-r--r--  module/zfs/mmp.c     5
-rw-r--r--  module/zfs/spa.c    10
-rw-r--r--  module/zfs/txg.c    10
-rw-r--r--  module/zfs/zfs_sa.c  2
6 files changed, 21 insertions, 12 deletions
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index 481c38189..157a28d4b 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -4214,7 +4214,7 @@ arc_kmem_reap_now(void)
* using mutex_tryenter() from arc_reclaim_thread().
*/
static void
-arc_reclaim_thread(void)
+arc_reclaim_thread(void *unused)
{
fstrans_cookie_t cookie = spl_fstrans_mark();
hrtime_t growtime = 0;
@@ -7515,7 +7515,7 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
* heart of the L2ARC.
*/
static void
-l2arc_feed_thread(void)
+l2arc_feed_thread(void *unused)
{
callb_cpr_t cpr;
l2arc_dev_t *dev;
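
The hunks above and below all make the same change: a kernel thread entry point gains the generic void (*)(void *) signature that a thread-creation interface expects, and the typed argument is recovered with a cast inside the body. A minimal userspace sketch of that convention follows; worker_ctx_t, worker_thread(), start_thread(), and thread_func_t are hypothetical names used only for illustration, not part of this diff.

#include <stddef.h>

typedef struct worker_ctx { int id; } worker_ctx_t;

/* Generic entry-point signature a thread-creation API would accept. */
typedef void (*thread_func_t)(void *);

/* Entry point takes void *; the typed context is recovered by a cast. */
static void
worker_thread(void *arg)
{
	worker_ctx_t *ctx = (worker_ctx_t *)arg;
	(void) ctx->id;		/* ... thread body ... */
}

/* Stand-in for a real thread spawn that only accepts the generic type. */
static void
start_thread(thread_func_t func, void *arg)
{
	func(arg);
}

int
main(void)
{
	worker_ctx_t ctx = { 0 };
	start_thread(worker_thread, &ctx);	/* no cast at the call site */
	return (0);
}
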
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index dc2c00495..625e06701 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -531,7 +531,7 @@ dbuf_evict_one(void)
* out of the cache it is destroyed and becomes eligible for arc eviction.
*/
static void
-dbuf_evict_thread(void)
+dbuf_evict_thread(void *unused)
{
callb_cpr_t cpr;
diff --git a/module/zfs/mmp.c b/module/zfs/mmp.c
index 00478a39f..a4771b677 100644
--- a/module/zfs/mmp.c
+++ b/module/zfs/mmp.c
@@ -123,7 +123,7 @@ uint_t zfs_multihost_import_intervals = MMP_DEFAULT_IMPORT_INTERVALS;
*/
uint_t zfs_multihost_fail_intervals = MMP_DEFAULT_FAIL_INTERVALS;
-static void mmp_thread(spa_t *spa);
+static void mmp_thread(void *arg);
void
mmp_init(spa_t *spa)
@@ -364,8 +364,9 @@ mmp_write_uberblock(spa_t *spa)
}
static void
-mmp_thread(spa_t *spa)
+mmp_thread(void *arg)
{
+ spa_t *spa = (spa_t *)arg;
mmp_thread_t *mmp = &spa->spa_mmp;
boolean_t last_spa_suspended = spa_suspended(spa);
boolean_t last_spa_multihost = spa_multihost(spa);
diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index f1f1444f1..cb86c6200 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -1028,6 +1028,11 @@ spa_create_zio_taskqs(spa_t *spa)
}
}
+/*
+ * Disabled until spa_thread() can be adapted for Linux.
+ */
+#undef HAVE_SPA_THREAD
+
#if defined(_KERNEL) && defined(HAVE_SPA_THREAD)
static void
spa_thread(void *arg)
@@ -3415,7 +3420,7 @@ spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
* up calling spa_open() again. The real fix is to figure out how to
* avoid dsl_dir_open() calling this in the first place.
*/
- if (mutex_owner(&spa_namespace_lock) != curthread) {
+ if (MUTEX_NOT_HELD(&spa_namespace_lock)) {
mutex_enter(&spa_namespace_lock);
locked = B_TRUE;
}
@@ -6068,8 +6073,9 @@ spa_async_autoexpand(spa_t *spa, vdev_t *vd)
}
static void
-spa_async_thread(spa_t *spa)
+spa_async_thread(void *arg)
{
+ spa_t *spa = (spa_t *)arg;
int tasks, i;
ASSERT(spa->spa_sync_on);
diff --git a/module/zfs/txg.c b/module/zfs/txg.c
index 65bd7f93a..8b1ec9c05 100644
--- a/module/zfs/txg.c
+++ b/module/zfs/txg.c
@@ -108,8 +108,8 @@
* now transition to the syncing state.
*/
-static void txg_sync_thread(dsl_pool_t *dp);
-static void txg_quiesce_thread(dsl_pool_t *dp);
+static void txg_sync_thread(void *dp);
+static void txg_quiesce_thread(void *dp);
int zfs_txg_timeout = 5; /* max seconds worth of delta per txg */
@@ -477,8 +477,9 @@ txg_wait_callbacks(dsl_pool_t *dp)
}
static void
-txg_sync_thread(dsl_pool_t *dp)
+txg_sync_thread(void *arg)
{
+ dsl_pool_t *dp = (dsl_pool_t *)arg;
spa_t *spa = dp->dp_spa;
tx_state_t *tx = &dp->dp_tx;
callb_cpr_t cpr;
@@ -561,8 +562,9 @@ txg_sync_thread(dsl_pool_t *dp)
}
static void
-txg_quiesce_thread(dsl_pool_t *dp)
+txg_quiesce_thread(void *arg)
{
+ dsl_pool_t *dp = (dsl_pool_t *)arg;
tx_state_t *tx = &dp->dp_tx;
callb_cpr_t cpr;
diff --git a/module/zfs/zfs_sa.c b/module/zfs/zfs_sa.c
index 13e99c058..08e881cc3 100644
--- a/module/zfs/zfs_sa.c
+++ b/module/zfs/zfs_sa.c
@@ -300,7 +300,7 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
* Otherwise, we know we are doing the
* sa_update() that caused us to enter this function.
*/
- if (mutex_owner(&zp->z_lock) != curthread) {
+ if (MUTEX_NOT_HELD(&zp->z_lock)) {
if (mutex_tryenter(&zp->z_lock) == 0)
return;
else
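
The spa.c and zfs_sa.c hunks above replace the mutex_owner(&lock) != curthread comparison with MUTEX_NOT_HELD(&lock), which expresses the same "this thread does not hold the lock" test without depending on mutex_owner(). Below is a hedged userspace sketch of the conditional-locking idiom those call sites use; my_mutex_t, my_mutex_enter(), my_mutex_not_held(), ns_lock, and do_work_maybe_locked() are hypothetical stand-ins, not the ZFS implementations.

#include <pthread.h>
#include <stdbool.h>

/* Hypothetical lock wrapper that records its owner, mimicking kmutex_t. */
typedef struct my_mutex {
	pthread_mutex_t	lock;
	pthread_t	owner;
	bool		held;
} my_mutex_t;

static void
my_mutex_enter(my_mutex_t *m)
{
	pthread_mutex_lock(&m->lock);
	m->owner = pthread_self();
	m->held = true;
}

static void
my_mutex_exit(my_mutex_t *m)
{
	m->held = false;
	pthread_mutex_unlock(&m->lock);
}

/*
 * True when the calling thread does not hold the lock.  Like
 * MUTEX_NOT_HELD(), this is only meaningful when asked about the
 * current thread: another thread's ownership can change at any time.
 */
static bool
my_mutex_not_held(my_mutex_t *m)
{
	return (!m->held || !pthread_equal(m->owner, pthread_self()));
}

static my_mutex_t ns_lock = { .lock = PTHREAD_MUTEX_INITIALIZER };

void
do_work_maybe_locked(void)
{
	bool locked = false;

	/* Take the lock only if this thread does not already hold it. */
	if (my_mutex_not_held(&ns_lock)) {
		my_mutex_enter(&ns_lock);
		locked = true;
	}

	/* ... work that requires ns_lock ... */

	if (locked)
		my_mutex_exit(&ns_lock);
}

As in the spa_open_common() hunk, the caller remembers whether it actually took the lock and only drops it in that case.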