| author | GeLiXin <[email protected]> | 2016-05-21 11:34:06 +0800 |
|---|---|---|
| committer | Ned Bass <[email protected]> | 2016-09-09 13:21:09 -0700 |
| commit | 74acdfc6827385a7b576c67e494bf40dc3f80db7 (patch) | |
| tree | fcf2b39eed863d4cf981acf07935208d9e90c55e /module/zfs | |
| parent | d9e1eec9a2a321295b78e74daea2b7cf5eff5f25 (diff) | |
Fix self-healing IO prior to dsl_pool_init() completion
Async writes triggered by a self-healing IO may be issued before the
pool finishes initializing. This results in a NULL dereference of
`spa->spa_dsl_pool` in vdev_queue_max_async_writes().
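Concretely, the crash comes from the unconditional dereference that this patch removes (the first `-` line in the vdev_queue.c hunk below):

```c
/*
 * Pre-patch code in vdev_queue_max_async_writes(): spa->spa_dsl_pool is
 * still NULL while dsl_pool_init() is running, so an early self-healing
 * write zio faults on this line.
 */
uint64_t dirty = spa->spa_dsl_pool->dp_dirty_total;
```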
George Wilson recommended addressing this issue by initializing the
passed `dsl_pool_t **` prior to calling dmu_objset_open_impl(). Since
the caller passes `spa->spa_dsl_pool`, this ensures it is initialized.
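For reference, here is a condensed view of dsl_pool_init() with the reordering applied. It paraphrases the dsl_pool.c hunk below; the comments are shortened and the surrounding code is unchanged:

```c
int
dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
        int err;
        dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);

        /*
         * Publish the pool to the caller (spa->spa_dsl_pool) before opening
         * the meta objset, so a self-healing write zio issued from within
         * dmu_objset_open_impl() finds an initialized dsl_pool_t.
         */
        *dpp = dp;

        err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
            &dp->dp_meta_objset);
        if (err != 0) {
                dsl_pool_close(dp);
                *dpp = NULL;    /* don't leave the caller a stale pointer */
        }

        return (err);
}
```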
However, since this depends on the caller knowing it must pass
`spa->spa_dsl_pool`, an additional NULL check was added to
vdev_queue_max_async_writes(). This guards against any future
restructuring of the code that might result in dsl_pool_init()
being called differently.
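The guard itself is only a few lines at the top of vdev_queue_max_async_writes(), excerpted here from the vdev_queue.c hunk below:

```c
        dsl_pool_t *dp = spa_get_dsl(spa);

        /*
         * A self-healing zio can reach the vdev queue before
         * dmu_objset_open_impl() completes, i.e. before spa->spa_dsl_pool
         * and its dirty-data accounting exist.  Fall back to the maximum
         * async-write limit instead of dereferencing a NULL pool.
         */
        if (dp == NULL)
                return (zfs_vdev_async_write_max_active);
```

Returning zfs_vdev_async_write_max_active here mirrors the existing behavior for pending sync tasks: with no dirty-data accounting available yet, the early self-healing write is simply allowed the maximum async-write concurrency.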
Signed-off-by: GeLiXin <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Closes #4652
Diffstat (limited to 'module/zfs')
| mode | file | lines changed |
|---|---|---|
| -rwxr-xr-x (was -rw-r--r--) | module/zfs/dsl_pool.c | 14 |
| -rw-r--r-- | module/zfs/vdev_queue.c | 15 |
2 files changed, 23 insertions, 6 deletions
diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c
index ada0eac63..cf5259acd 100644..100755
--- a/module/zfs/dsl_pool.c
+++ b/module/zfs/dsl_pool.c
@@ -182,12 +182,20 @@ dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
         int err;
         dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
 
+        /*
+         * Initialize the caller's dsl_pool_t structure before we actually open
+         * the meta objset. This is done because a self-healing write zio may
+         * be issued as part of dmu_objset_open_impl() and the spa needs its
+         * dsl_pool_t initialized in order to handle the write.
+         */
+        *dpp = dp;
+
         err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
             &dp->dp_meta_objset);
-        if (err != 0)
+        if (err != 0) {
                 dsl_pool_close(dp);
-        else
-                *dpp = dp;
+                *dpp = NULL;
+        }
 
         return (err);
 }
diff --git a/module/zfs/vdev_queue.c b/module/zfs/vdev_queue.c
index 0c62a6fa3..4ed62f963 100644
--- a/module/zfs/vdev_queue.c
+++ b/module/zfs/vdev_queue.c
@@ -249,20 +249,29 @@ static int
 vdev_queue_max_async_writes(spa_t *spa)
 {
         int writes;
-        uint64_t dirty = spa->spa_dsl_pool->dp_dirty_total;
+        uint64_t dirty = 0;
+        dsl_pool_t *dp = spa_get_dsl(spa);
         uint64_t min_bytes = zfs_dirty_data_max *
             zfs_vdev_async_write_active_min_dirty_percent / 100;
         uint64_t max_bytes = zfs_dirty_data_max *
             zfs_vdev_async_write_active_max_dirty_percent / 100;
 
         /*
+         * Async writes may occur before the assignment of the spa's
+         * dsl_pool_t if a self-healing zio is issued prior to the
+         * completion of dmu_objset_open_impl().
+         */
+        if (dp == NULL)
+                return (zfs_vdev_async_write_max_active);
+
+        /*
          * Sync tasks correspond to interactive user actions. To reduce the
          * execution time of those actions we push data out as fast as possible.
          */
-        if (spa_has_pending_synctask(spa)) {
+        if (spa_has_pending_synctask(spa))
                 return (zfs_vdev_async_write_max_active);
-        }
+        dirty = dp->dp_dirty_total;
         if (dirty < min_bytes)
                 return (zfs_vdev_async_write_min_active);
         if (dirty > max_bytes)