author     LOLi <[email protected]>                    2017-12-09 01:58:41 +0100
committer  Brian Behlendorf <[email protected]>  2017-12-08 16:58:41 -0800
commit     4e9b156960562373e005798575a3fbc6d66e32ff (patch)
tree       a554474cae9721dbb6756620e5009b1fa4f31ad6 /cmd/zed
parent     3ab3166347b84c6004002f9a6d06f7a87fe1cd4a (diff)
Various ZED fixes
* Teach ZED to handle spares using the configured ashift: if the zpool
'ashift' property is set then ZED should use its value when kicking
in a hotspare; with this change 512e disks can be used as spares
for VDEVs that were created with ashift=9, even if ZFS natively
detects them as 4K block devices (a condensed sketch of the property
lookup follows this list).
* Introduce an additional auto_spare test case which verifies that,
in the face of multiple device failures, the appropriate number of
spares is kicked in.
* Fix zed_stop() in "libtest.shlib" which did not correctly wait for
the target PID.
* Fix ZED crashing on startup caused by a race condition in libzfs
when used in a multi-threaded context.
* Convert ZED over to using the tpool library, which is already present
in the Illumos FMA code (a minimal sketch of the tpool lifecycle also
follows this list).
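The spare handling relies on the libzfs property plumbing that the
zfs_retire.c hunk below implements. Here is a condensed sketch of that
lookup; the helper name spare_add_ashift is hypothetical and not part
of the commit. The idea: read the pool's 'ashift' property and, only
when it was explicitly set, copy it into the spare's config so the
replacement vdev is opened with the configured sector size rather than
the detected one.

#include <libzfs.h>

/*
 * Hypothetical helper, condensed from the zfs_retire.c change below.
 * ZPROP_SRC_DEFAULT means "ashift" was never set on the pool, in which
 * case the spare's nvlist is left alone and ZFS detects the size.
 */
static void
spare_add_ashift(zpool_handle_t *zhp, nvlist_t *spare)
{
	zprop_source_t source;
	uint64_t ashift;

	ashift = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &source);
	if (source != ZPROP_SRC_DEFAULT)
		(void) nvlist_add_uint64(spare, ZPOOL_CONFIG_ASHIFT, ashift);
}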
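The last two items give each FMA module its own libzfs handle via
libzfs_init() and dispatch dataset enabling to a thread pool instead of
ad-hoc pthreads. Below is a minimal, self-contained sketch of the
libtpool lifecycle the new zfs_mod.c code follows; the work items and
thread counts are illustrative assumptions, and the program links
against libtpool.

#include <stdio.h>
#include <stdlib.h>
#include <thread_pool.h>

static void
enable_one(void *arg)
{
	int *id = arg;

	/* stands in for zpool_enable_datasets()/zpool_close() work */
	printf("enabling pool entry %d\n", *id);
	free(id);
}

int
main(void)
{
	/* min 1 thread, max 4, no linger time, default thread attributes */
	tpool_t *tp = tpool_create(1, 4, 0, NULL);
	if (tp == NULL)
		return (1);

	for (int i = 0; i < 8; i++) {
		int *id = malloc(sizeof (int));
		*id = i;
		/* queue work; a pool thread runs enable_one(id) */
		(void) tpool_dispatch(tp, enable_one, id);
	}

	tpool_wait(tp);		/* block until every queued job has run */
	tpool_destroy(tp);
	return (0);
}

The tpool_wait()/tpool_destroy() pair mirrors the cleanup added to
zfs_slm_fini() below, which guarantees no zfs_enable_ds() job is still
running when the pool list is torn down.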
Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: loli10K <[email protected]>
Closes #2562
Closes #6858
Diffstat (limited to 'cmd/zed')
-rw-r--r--  cmd/zed/agents/zfs_agents.c     | 16
-rw-r--r--  cmd/zed/agents/zfs_agents.h     |  7
-rw-r--r--  cmd/zed/agents/zfs_diagnosis.c  | 10
-rw-r--r--  cmd/zed/agents/zfs_mod.c        | 46
-rw-r--r--  cmd/zed/agents/zfs_retire.c     | 16
5 files changed, 36 insertions, 59 deletions
diff --git a/cmd/zed/agents/zfs_agents.c b/cmd/zed/agents/zfs_agents.c
index a40d59497..47e251a5e 100644
--- a/cmd/zed/agents/zfs_agents.c
+++ b/cmd/zed/agents/zfs_agents.c
@@ -350,19 +350,3 @@ zfs_agent_fini(void)
 
 	g_zfs_hdl = NULL;
 }
-
-/*
- * In ZED context, all the FMA agents run in the same thread
- * and do not require a unique libzfs instance. Modules should
- * use these stubs.
- */
-libzfs_handle_t *
-__libzfs_init(void)
-{
-	return (g_zfs_hdl);
-}
-
-void
-__libzfs_fini(libzfs_handle_t *hdl)
-{
-}
diff --git a/cmd/zed/agents/zfs_agents.h b/cmd/zed/agents/zfs_agents.h
index 3c9af54c9..d1a459139 100644
--- a/cmd/zed/agents/zfs_agents.h
+++ b/cmd/zed/agents/zfs_agents.h
@@ -39,13 +39,6 @@ extern int zfs_slm_init(void);
 extern void zfs_slm_fini(void);
 extern void zfs_slm_event(const char *, const char *, nvlist_t *);
 
-/*
- * In ZED context, all the FMA agents run in the same thread
- * and do not require a unique libzfs instance.
- */
-extern libzfs_handle_t *__libzfs_init(void);
-extern void __libzfs_fini(libzfs_handle_t *);
-
 #ifdef __cplusplus
 }
 #endif
diff --git a/cmd/zed/agents/zfs_diagnosis.c b/cmd/zed/agents/zfs_diagnosis.c
index 49e3e1660..6f1f4d017 100644
--- a/cmd/zed/agents/zfs_diagnosis.c
+++ b/cmd/zed/agents/zfs_diagnosis.c
@@ -919,27 +919,27 @@ _zfs_diagnosis_init(fmd_hdl_t *hdl)
 {
 	libzfs_handle_t *zhdl;
 
-	if ((zhdl = __libzfs_init()) == NULL)
+	if ((zhdl = libzfs_init()) == NULL)
 		return;
 
 	if ((zfs_case_pool = uu_list_pool_create("zfs_case_pool",
 	    sizeof (zfs_case_t), offsetof(zfs_case_t, zc_node),
 	    NULL, UU_LIST_POOL_DEBUG)) == NULL) {
-		__libzfs_fini(zhdl);
+		libzfs_fini(zhdl);
 		return;
 	}
 
 	if ((zfs_cases = uu_list_create(zfs_case_pool, NULL,
 	    UU_LIST_DEBUG)) == NULL) {
 		uu_list_pool_destroy(zfs_case_pool);
-		__libzfs_fini(zhdl);
+		libzfs_fini(zhdl);
 		return;
 	}
 
 	if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0) {
 		uu_list_destroy(zfs_cases);
 		uu_list_pool_destroy(zfs_case_pool);
-		__libzfs_fini(zhdl);
+		libzfs_fini(zhdl);
 		return;
 	}
 
@@ -975,5 +975,5 @@ _zfs_diagnosis_fini(fmd_hdl_t *hdl)
 	uu_list_pool_destroy(zfs_case_pool);
 
 	zhdl = fmd_hdl_getspecific(hdl);
-	__libzfs_fini(zhdl);
+	libzfs_fini(zhdl);
 }
diff --git a/cmd/zed/agents/zfs_mod.c b/cmd/zed/agents/zfs_mod.c
index 5fa74d0ce..54568e75a 100644
--- a/cmd/zed/agents/zfs_mod.c
+++ b/cmd/zed/agents/zfs_mod.c
@@ -64,7 +64,6 @@
  * trigger the FMA fault that we skipped earlier.
  *
  * ZFS on Linux porting notes:
- *	In lieu of a thread pool, just spawn a thread on demmand.
  *	Linux udev provides a disk insert for both the disk and the partition
  *
  */
@@ -83,6 +82,7 @@
 #include <sys/sunddi.h>
 #include <sys/sysevent/eventdefs.h>
 #include <sys/sysevent/dev.h>
+#include <thread_pool.h>
 #include <pthread.h>
 #include <unistd.h>
 #include "zfs_agents.h"
@@ -97,12 +97,12 @@ typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);
 libzfs_handle_t *g_zfshdl;
 list_t g_pool_list;	/* list of unavailable pools at initialization */
 list_t g_device_list;	/* list of disks with asynchronous label request */
+tpool_t *g_tpool;
 boolean_t g_enumeration_done;
-pthread_t g_zfs_tid;
+pthread_t g_zfs_tid;	/* zfs_enum_pools() thread */
 
 typedef struct unavailpool {
 	zpool_handle_t	*uap_zhp;
-	pthread_t	uap_enable_tid;	/* dataset enable thread if activated */
 	list_node_t	uap_node;
 } unavailpool_t;
 
@@ -135,7 +135,6 @@ zfs_unavail_pool(zpool_handle_t *zhp, void *data)
 		unavailpool_t *uap;
 		uap = malloc(sizeof (unavailpool_t));
 		uap->uap_zhp = zhp;
-		uap->uap_enable_tid = 0;
 		list_insert_tail((list_t *)data, uap);
 	} else {
 		zpool_close(zhp);
@@ -512,19 +511,14 @@ zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
 	(dp->dd_func)(zhp, nvl, dp->dd_islabeled);
 }
 
-static void *
+void
 zfs_enable_ds(void *arg)
 {
 	unavailpool_t *pool = (unavailpool_t *)arg;
 
-	assert(pool->uap_enable_tid = pthread_self());
-
 	(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0);
 	zpool_close(pool->uap_zhp);
-	pool->uap_zhp = NULL;
-
-	/* Note: zfs_slm_fini() will cleanup this pool entry on exit */
-	return (NULL);
+	free(pool);
 }
 
 static int
@@ -559,15 +553,13 @@ zfs_iter_pool(zpool_handle_t *zhp, void *data)
 
 	for (pool = list_head(&g_pool_list); pool != NULL;
 	    pool = list_next(&g_pool_list, pool)) {
-		if (pool->uap_enable_tid != 0)
-			continue;	/* entry already processed */
 		if (strcmp(zpool_get_name(zhp),
 		    zpool_get_name(pool->uap_zhp)))
 			continue;
 		if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
-			/* send to a background thread; keep on list */
-			(void) pthread_create(&pool->uap_enable_tid,
-			    NULL, zfs_enable_ds, pool);
+			list_remove(&g_pool_list, pool);
+			(void) tpool_dispatch(g_tpool, zfs_enable_ds,
+			    pool);
 			break;
 		}
 	}
@@ -857,7 +849,7 @@ zfs_enum_pools(void *arg)
 int
 zfs_slm_init()
 {
-	if ((g_zfshdl = __libzfs_init()) == NULL)
+	if ((g_zfshdl = libzfs_init()) == NULL)
 		return (-1);
 
 	/*
@@ -869,7 +861,7 @@ zfs_slm_init()
 	if (pthread_create(&g_zfs_tid, NULL, zfs_enum_pools,
 	    NULL) != 0) {
 		list_destroy(&g_pool_list);
-		__libzfs_fini(g_zfshdl);
+		libzfs_fini(g_zfshdl);
 		return (-1);
 	}
 
@@ -887,19 +879,15 @@ zfs_slm_fini()
 
 	/* wait for zfs_enum_pools thread to complete */
 	(void) pthread_join(g_zfs_tid, NULL);
+	/* destroy the thread pool */
+	if (g_tpool != NULL) {
+		tpool_wait(g_tpool);
+		tpool_destroy(g_tpool);
+	}
 
 	while ((pool = (list_head(&g_pool_list))) != NULL) {
-		/*
-		 * each pool entry has two possibilities
-		 * 1. was made available (so wait for zfs_enable_ds thread)
-		 * 2. still unavailable (just close the pool)
-		 */
-		if (pool->uap_enable_tid)
-			(void) pthread_join(pool->uap_enable_tid, NULL);
-		else if (pool->uap_zhp != NULL)
-			zpool_close(pool->uap_zhp);
-
 		list_remove(&g_pool_list, pool);
+		zpool_close(pool->uap_zhp);
 		free(pool);
 	}
 	list_destroy(&g_pool_list);
@@ -910,7 +898,7 @@ zfs_slm_fini()
 	}
 	list_destroy(&g_device_list);
 
-	__libzfs_fini(g_zfshdl);
+	libzfs_fini(g_zfshdl);
 }
 
 void
diff --git a/cmd/zed/agents/zfs_retire.c b/cmd/zed/agents/zfs_retire.c
index f69c583f0..5a090e32f 100644
--- a/cmd/zed/agents/zfs_retire.c
+++ b/cmd/zed/agents/zfs_retire.c
@@ -176,6 +176,8 @@ replace_with_spare(fmd_hdl_t *hdl, zpool_handle_t *zhp, nvlist_t *vdev)
 	nvlist_t **spares;
 	uint_t s, nspares;
 	char *dev_name;
+	zprop_source_t source;
+	int ashift;
 
 	config = zpool_get_config(zhp, NULL);
 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
@@ -189,6 +191,11 @@ replace_with_spare(fmd_hdl_t *hdl, zpool_handle_t *zhp, nvlist_t *vdev)
 	    &spares, &nspares) != 0)
 		return;
 
+	/*
+	 * lookup "ashift" pool property, we may need it for the replacement
+	 */
+	ashift = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &source);
+
 	replacement = fmd_nvl_alloc(hdl, FMD_SLEEP);
 
 	(void) nvlist_add_string(replacement, ZPOOL_CONFIG_TYPE,
@@ -207,6 +214,11 @@ replace_with_spare(fmd_hdl_t *hdl, zpool_handle_t *zhp, nvlist_t *vdev)
 		    &spare_name) != 0)
 			continue;
 
+		/* if set, add the "ashift" pool property to the spare nvlist */
+		if (source != ZPROP_SRC_DEFAULT)
+			(void) nvlist_add_uint64(spares[s],
+			    ZPOOL_CONFIG_ASHIFT, ashift);
+
 		(void) nvlist_add_nvlist_array(replacement,
 		    ZPOOL_CONFIG_CHILDREN, &spares[s], 1);
 
@@ -483,7 +495,7 @@ _zfs_retire_init(fmd_hdl_t *hdl)
 	zfs_retire_data_t *zdp;
 	libzfs_handle_t *zhdl;
 
-	if ((zhdl = __libzfs_init()) == NULL)
+	if ((zhdl = libzfs_init()) == NULL)
 		return;
 
 	if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0) {
@@ -504,7 +516,7 @@ _zfs_retire_fini(fmd_hdl_t *hdl)
 
 	if (zdp != NULL) {
 		zfs_retire_clear_data(hdl, zdp);
-		__libzfs_fini(zdp->zrd_hdl);
+		libzfs_fini(zdp->zrd_hdl);
 		fmd_hdl_free(hdl, zdp, sizeof (zfs_retire_data_t));
 	}
 }