author		Allan Jude <[email protected]>	2021-11-30 09:46:25 -0500
committer	GitHub <[email protected]>	2021-11-30 07:46:25 -0700
commit		2a673e76a928cca4df7794cdcaa02e0be149c4da
tree		016afa631bb0d98f55b01e93ab80842fe6ffda27 /cmd/zpool
parent		5f64bf7fdeebfbad50e98c6cd0c3a361a9aecabc
Vdev Properties Feature
Add properties, similar to pool properties, to each vdev.
This makes use of the existing per-vdev ZAP that was added as
part of device evacuation/removal.
A large number of read-only properties, many of them members
of struct vdev_t, are exposed to provide useful statistics.
Adds support for the read-only "removing" vdev property.
Adds the "allocating" property that defaults to "on" and
can be set to "off" to prevent future allocations from that
top-level vdev.
Supports user-defined vdev properties.
Includes support for properties.vdev in SYSFS.
Co-authored-by: Allan Jude <[email protected]>
Co-authored-by: Mark Maybee <[email protected]>
Reviewed-by: Matthew Ahrens <[email protected]>
Reviewed-by: Mark Maybee <[email protected]>
Signed-off-by: Allan Jude <[email protected]>
Closes #11711
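
For illustration, a minimal C sketch of reading one of the new vdev
properties through libzfs, following the zpool_get_vdev_prop() call
pattern used by get_callback_vdev() in this change. The pool name, the
vdev name, and the VDEV_PROP_ALLOCATING constant are placeholders and
assumptions, not taken from the patch itself.

#include <libzfs.h>
#include <stdio.h>

int
main(void)
{
	libzfs_handle_t *g_zfs = libzfs_init();
	/* "tank" and "mirror-0" are placeholder pool/vdev names */
	zpool_handle_t *zhp = zpool_open(g_zfs, "tank");
	char value[ZFS_MAXPROPLEN];
	zprop_source_t srctype;

	if (zhp != NULL && zpool_get_vdev_prop(zhp, "mirror-0",
	    VDEV_PROP_ALLOCATING,
	    vdev_prop_to_name(VDEV_PROP_ALLOCATING),
	    value, sizeof (value), &srctype, B_FALSE) == 0)
		(void) printf("allocating = %s\n", value);

	if (zhp != NULL)
		zpool_close(zhp);
	libzfs_fini(g_zfs);
	return (0);
}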
Diffstat (limited to 'cmd/zpool')
-rw-r--r--	cmd/zpool/zpool_iter.c |  17
-rw-r--r--	cmd/zpool/zpool_main.c | 447
-rw-r--r--	cmd/zpool/zpool_util.h |   5
3 files changed, 349 insertions, 120 deletions
diff --git a/cmd/zpool/zpool_iter.c b/cmd/zpool/zpool_iter.c index abfa2b7f6..8cf6bab42 100644 --- a/cmd/zpool/zpool_iter.c +++ b/cmd/zpool/zpool_iter.c @@ -60,6 +60,7 @@ struct zpool_list { uu_avl_t *zl_avl; uu_avl_pool_t *zl_pool; zprop_list_t **zl_proplist; + zfs_type_t zl_type; }; /* ARGSUSED */ @@ -90,8 +91,7 @@ add_pool(zpool_handle_t *zhp, void *data) if (uu_avl_find(zlp->zl_avl, node, NULL, &idx) == NULL) { if (zlp->zl_proplist && zpool_expand_proplist(zhp, zlp->zl_proplist, - zlp->zl_literal) - != 0) { + zlp->zl_type, zlp->zl_literal) != 0) { zpool_close(zhp); free(node); return (-1); @@ -113,7 +113,7 @@ add_pool(zpool_handle_t *zhp, void *data) * line. */ zpool_list_t * -pool_list_get(int argc, char **argv, zprop_list_t **proplist, +pool_list_get(int argc, char **argv, zprop_list_t **proplist, zfs_type_t type, boolean_t literal, int *err) { zpool_list_t *zlp; @@ -131,6 +131,7 @@ pool_list_get(int argc, char **argv, zprop_list_t **proplist, zpool_no_memory(); zlp->zl_proplist = proplist; + zlp->zl_type = type; zlp->zl_literal = literal; @@ -248,12 +249,14 @@ pool_list_count(zpool_list_t *zlp) */ int for_each_pool(int argc, char **argv, boolean_t unavail, - zprop_list_t **proplist, boolean_t literal, zpool_iter_f func, void *data) + zprop_list_t **proplist, zfs_type_t type, boolean_t literal, + zpool_iter_f func, void *data) { zpool_list_t *list; int ret = 0; - if ((list = pool_list_get(argc, argv, proplist, literal, &ret)) == NULL) + if ((list = pool_list_get(argc, argv, proplist, type, literal, + &ret)) == NULL) return (1); if (pool_list_iter(list, unavail, func, data) != 0) @@ -678,8 +681,8 @@ all_pools_for_each_vdev_run(int argc, char **argv, char *cmd, vcdl->g_zfs = g_zfs; /* Gather our list of all vdevs in all pools */ - for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE, - all_pools_for_each_vdev_gather_cb, vcdl); + for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, + B_FALSE, all_pools_for_each_vdev_gather_cb, vcdl); /* Run command on all vdevs in all pools */ all_pools_for_each_vdev_run_vcdl(vcdl); diff --git a/cmd/zpool/zpool_main.c b/cmd/zpool/zpool_main.c index 3a2caa9a8..4e2f828cb 100644 --- a/cmd/zpool/zpool_main.c +++ b/cmd/zpool/zpool_main.c @@ -32,6 +32,7 @@ * Copyright (c) 2017, Intel Corporation. * Copyright (c) 2019, loli10K <[email protected]> * Copyright (c) 2021, Colm Buckley <[email protected]> + * Copyright (c) 2021, Klara Inc. * Copyright [2021] Hewlett Packard Enterprise Development LP */ @@ -335,6 +336,7 @@ static zpool_command_t command_table[] = { #define VDEV_ALLOC_CLASS_LOGS "logs" static zpool_command_t *current_command; +static zfs_type_t current_prop_type = (ZFS_TYPE_POOL | ZFS_TYPE_VDEV); static char history_str[HIS_MAX_RECORD_LEN]; static boolean_t log_history = B_TRUE; static uint_t timestamp_fmt = NODATE; @@ -470,7 +472,7 @@ zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res) * Callback routine that will print out a pool property value. */ static int -print_prop_cb(int prop, void *cb) +print_pool_prop_cb(int prop, void *cb) { FILE *fp = cb; @@ -490,6 +492,29 @@ print_prop_cb(int prop, void *cb) } /* + * Callback routine that will print out a vdev property value. 
+ */ +static int +print_vdev_prop_cb(int prop, void *cb) +{ + FILE *fp = cb; + + (void) fprintf(fp, "\t%-19s ", vdev_prop_to_name(prop)); + + if (vdev_prop_readonly(prop)) + (void) fprintf(fp, " NO "); + else + (void) fprintf(fp, " YES "); + + if (vdev_prop_values(prop) == NULL) + (void) fprintf(fp, "-\n"); + else + (void) fprintf(fp, "%s\n", vdev_prop_values(prop)); + + return (ZPROP_CONT); +} + +/* * Display usage message. If we're inside a command, display only the usage for * that command. Otherwise, iterate over the entire command table and display * a complete usage message. @@ -519,6 +544,7 @@ usage(boolean_t requested) } if (current_command != NULL && + current_prop_type != (ZFS_TYPE_POOL | ZFS_TYPE_VDEV) && ((strcmp(current_command->name, "set") == 0) || (strcmp(current_command->name, "get") == 0) || (strcmp(current_command->name, "list") == 0))) { @@ -530,14 +556,21 @@ usage(boolean_t requested) "PROPERTY", "EDIT", "VALUES"); /* Iterate over all properties */ - (void) zprop_iter(print_prop_cb, fp, B_FALSE, B_TRUE, - ZFS_TYPE_POOL); + if (current_prop_type == ZFS_TYPE_POOL) { + (void) zprop_iter(print_pool_prop_cb, fp, B_FALSE, + B_TRUE, current_prop_type); - (void) fprintf(fp, "\t%-19s ", "feature@..."); - (void) fprintf(fp, "YES disabled | enabled | active\n"); + (void) fprintf(fp, "\t%-19s ", "feature@..."); + (void) fprintf(fp, "YES " + "disabled | enabled | active\n"); - (void) fprintf(fp, gettext("\nThe feature@ properties must be " - "appended with a feature name.\nSee zpool-features(7).\n")); + (void) fprintf(fp, gettext("\nThe feature@ properties " + "must be appended with a feature name.\n" + "See zpool-features(7).\n")); + } else if (current_prop_type == ZFS_TYPE_VDEV) { + (void) zprop_iter(print_vdev_prop_cb, fp, B_FALSE, + B_TRUE, current_prop_type); + } } /* @@ -795,9 +828,10 @@ add_prop_list(const char *propname, char *propval, nvlist_t **props, zpool_prop_to_name(ZPOOL_PROP_COMPATIBILITY); if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL && - !zpool_prop_feature(propname)) { + (!zpool_prop_feature(propname) && + !zpool_prop_vdev(propname))) { (void) fprintf(stderr, gettext("property '%s' is " - "not a valid pool property\n"), propname); + "not a valid pool or vdev property\n"), propname); return (2); } @@ -832,7 +866,7 @@ add_prop_list(const char *propname, char *propval, nvlist_t **props, return (2); } - if (zpool_prop_feature(propname)) + if (zpool_prop_feature(propname) || zpool_prop_vdev(propname)) normnm = propname; else normnm = zpool_prop_to_name(prop); @@ -1930,7 +1964,7 @@ zpool_do_export(int argc, char **argv) } return (for_each_pool(argc, argv, B_TRUE, NULL, - B_FALSE, zpool_export_one, &cb)); + ZFS_TYPE_POOL, B_FALSE, zpool_export_one, &cb)); } /* check arguments */ @@ -1939,8 +1973,8 @@ zpool_do_export(int argc, char **argv) usage(B_FALSE); } - ret = for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE, zpool_export_one, - &cb); + ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, + B_FALSE, zpool_export_one, &cb); return (ret); } @@ -2436,6 +2470,12 @@ print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name, 1 << vs->vs_configured_ashift, 1 << vs->vs_physical_ashift); } + if (vs->vs_scan_removing != 0) { + (void) printf(gettext(" (removing)")); + } else if (vs->vs_noalloc != 0) { + (void) printf(gettext(" (non-allocating)")); + } + /* The root vdev has the scrub/resilver stats */ root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL), ZPOOL_CONFIG_VDEV_TREE); @@ -3857,24 +3897,22 @@ zpool_do_sync(int argc, 
char **argv) argv += optind; /* if argc == 0 we will execute zpool_sync_one on all pools */ - ret = for_each_pool(argc, argv, B_FALSE, NULL, B_FALSE, zpool_sync_one, - &force); + ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL, + B_FALSE, zpool_sync_one, &force); return (ret); } typedef struct iostat_cbdata { uint64_t cb_flags; - int cb_name_flags; int cb_namewidth; int cb_iteration; - char **cb_vdev_names; /* Only show these vdevs */ - unsigned int cb_vdev_names_count; boolean_t cb_verbose; boolean_t cb_literal; boolean_t cb_scripted; zpool_list_t *cb_list; vdev_cmd_data_list_t *vcdl; + vdev_cbdata_t cb_vdevs; } iostat_cbdata_t; /* iostat labels */ @@ -4128,7 +4166,7 @@ print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width, if (cb->cb_flags & IOS_ANYHISTO_M) { title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]; - } else if (cb->cb_vdev_names_count) { + } else if (cb->cb_vdevs.cb_names_count) { title = "vdev"; } else { title = "pool"; @@ -4188,7 +4226,7 @@ print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width, if (cb->cb_flags & IOS_ANYHISTO_M) { title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]; - } else if (cb->cb_vdev_names_count) { + } else if (cb->cb_vdevs.cb_names_count) { title = "vdev"; } else { title = "pool"; @@ -4696,9 +4734,9 @@ print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv, } /* Do we only want to see a specific vdev? */ - for (i = 0; i < cb->cb_vdev_names_count; i++) { + for (i = 0; i < cb->cb_vdevs.cb_names_count; i++) { /* Yes we do. Is this the vdev? */ - if (strcmp(name, cb->cb_vdev_names[i]) == 0) { + if (strcmp(name, cb->cb_vdevs.cb_names[i]) == 0) { /* * This is our vdev. Since it is the only vdev we * will be displaying, make depth = 0 so that it @@ -4709,7 +4747,7 @@ print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv, } } - if (cb->cb_vdev_names_count && (i == cb->cb_vdev_names_count)) { + if (cb->cb_vdevs.cb_names_count && (i == cb->cb_vdevs.cb_names_count)) { /* Couldn't match the name */ goto children; } @@ -4816,7 +4854,7 @@ children: continue; vname = zpool_vdev_name(g_zfs, zhp, newchild[c], - cb->cb_name_flags); + cb->cb_vdevs.cb_name_flags); ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL, newchild[c], cb, depth + 2); free(vname); @@ -4850,7 +4888,8 @@ children: if (!printed) { if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && - !cb->cb_scripted && !cb->cb_vdev_names) { + !cb->cb_scripted && + !cb->cb_vdevs.cb_names) { print_iostat_dashes(cb, 0, class_name[n]); } @@ -4859,7 +4898,7 @@ children: } vname = zpool_vdev_name(g_zfs, zhp, newchild[c], - cb->cb_name_flags); + cb->cb_vdevs.cb_name_flags); ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL, newchild[c], cb, depth + 2); free(vname); @@ -4883,14 +4922,14 @@ children: if (children > 0) { if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted && - !cb->cb_vdev_names) { + !cb->cb_vdevs.cb_names) { print_iostat_dashes(cb, 0, "cache"); } printf("\n"); for (c = 0; c < children; c++) { vname = zpool_vdev_name(g_zfs, zhp, newchild[c], - cb->cb_name_flags); + cb->cb_vdevs.cb_name_flags); ret += print_vdev_stats(zhp, vname, oldnv ? 
oldchild[c] : NULL, newchild[c], cb, depth + 2); free(vname); @@ -4946,7 +4985,8 @@ print_iostat(zpool_handle_t *zhp, void *data) ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot, cb, 0); if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) && - !cb->cb_scripted && cb->cb_verbose && !cb->cb_vdev_names_count) { + !cb->cb_scripted && cb->cb_verbose && + !cb->cb_vdevs.cb_names_count) { print_iostat_separator(cb); if (cb->vcdl != NULL) { print_cmd_columns(cb->vcdl, 1); @@ -5153,27 +5193,30 @@ get_stat_flags(zpool_list_t *list) } /* - * Return 1 if cb_data->cb_vdev_names[0] is this vdev's name, 0 otherwise. + * Return 1 if cb_data->cb_names[0] is this vdev's name, 0 otherwise. */ static int is_vdev_cb(void *zhp_data, nvlist_t *nv, void *cb_data) { - iostat_cbdata_t *cb = cb_data; + vdev_cbdata_t *cb = cb_data; char *name = NULL; - int ret = 0; + int ret = 1; /* assume match */ zpool_handle_t *zhp = zhp_data; name = zpool_vdev_name(g_zfs, zhp, nv, cb->cb_name_flags); - if (strcmp(name, cb->cb_vdev_names[0]) == 0) - ret = 1; /* match */ + if (strcmp(name, cb->cb_names[0])) { + free(name); + name = zpool_vdev_name(g_zfs, zhp, nv, VDEV_NAME_GUID); + ret = (strcmp(name, cb->cb_names[0]) == 0); + } free(name); return (ret); } /* - * Returns 1 if cb_data->cb_vdev_names[0] is a vdev name, 0 otherwise. + * Returns 1 if cb_data->cb_names[0] is a vdev name, 0 otherwise. */ static int is_vdev(zpool_handle_t *zhp, void *cb_data) @@ -5189,7 +5232,7 @@ is_vdev(zpool_handle_t *zhp, void *cb_data) */ static int are_vdevs_in_pool(int argc, char **argv, char *pool_name, - iostat_cbdata_t *cb) + vdev_cbdata_t *cb) { char **tmp_name; int ret = 0; @@ -5202,23 +5245,23 @@ are_vdevs_in_pool(int argc, char **argv, char *pool_name, if (pool_name) pool_count = 1; - /* Temporarily hijack cb_vdev_names for a second... */ - tmp_name = cb->cb_vdev_names; + /* Temporarily hijack cb_names for a second... */ + tmp_name = cb->cb_names; /* Go though our list of prospective vdev names */ for (i = 0; i < argc; i++) { - cb->cb_vdev_names = argv + i; + cb->cb_names = argv + i; /* Is this name a vdev in our pools? */ ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL, - B_FALSE, is_vdev, cb); + ZFS_TYPE_POOL, B_FALSE, is_vdev, cb); if (!ret) { /* No match */ break; } } - cb->cb_vdev_names = tmp_name; + cb->cb_names = tmp_name; return (ret); } @@ -5239,8 +5282,8 @@ is_pool_cb(zpool_handle_t *zhp, void *data) static int is_pool(char *name) { - return (for_each_pool(0, NULL, B_TRUE, NULL, B_FALSE, is_pool_cb, - name)); + return (for_each_pool(0, NULL, B_TRUE, NULL, ZFS_TYPE_POOL, B_FALSE, + is_pool_cb, name)); } /* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */ @@ -5263,7 +5306,7 @@ are_all_pools(int argc, char **argv) */ static void error_list_unresolved_vdevs(int argc, char **argv, char *pool_name, - iostat_cbdata_t *cb) + vdev_cbdata_t *cb) { int i; char *name; @@ -5287,7 +5330,7 @@ error_list_unresolved_vdevs(int argc, char **argv, char *pool_name, /* * Same as get_interval_count(), but with additional checks to not misinterpret * guids as interval/count values. Assumes VDEV_NAME_GUID is set in - * cb.cb_name_flags. + * cb.cb_vdevs.cb_name_flags. */ static void get_interval_count_filter_guids(int *argc, char **argv, float *interval, @@ -5297,7 +5340,8 @@ get_interval_count_filter_guids(int *argc, char **argv, float *interval, int argc_for_interval = 0; /* Is the last arg an interval value? Or a guid? 
*/ - if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL, cb)) { + if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL, + &cb->cb_vdevs)) { /* * The last arg is not a guid, so it's probably an * interval value. @@ -5305,7 +5349,8 @@ get_interval_count_filter_guids(int *argc, char **argv, float *interval, argc_for_interval++; if (*argc >= 2 && - !are_vdevs_in_pool(1, &argv[*argc - 2], NULL, cb)) { + !are_vdevs_in_pool(1, &argv[*argc - 2], NULL, + &cb->cb_vdevs)) { /* * The 2nd to last arg is not a guid, so it's probably * an interval value. @@ -5448,7 +5493,7 @@ get_namewidth_iostat(zpool_handle_t *zhp, void *data) * get_namewidth() returns the maximum width of any name in that column * for any pool/vdev/device line that will be output. */ - width = get_namewidth(zhp, cb->cb_namewidth, cb->cb_name_flags, + width = get_namewidth(zhp, cb->cb_namewidth, cb->cb_vdevs.cb_name_flags, cb->cb_verbose); /* @@ -5626,11 +5671,11 @@ zpool_do_iostat(int argc, char **argv) cb.cb_scripted = scripted; if (guid) - cb.cb_name_flags |= VDEV_NAME_GUID; + cb.cb_vdevs.cb_name_flags |= VDEV_NAME_GUID; if (follow_links) - cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS; + cb.cb_vdevs.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS; if (full_name) - cb.cb_name_flags |= VDEV_NAME_PATH; + cb.cb_vdevs.cb_name_flags |= VDEV_NAME_PATH; cb.cb_iteration = 0; cb.cb_namewidth = 0; cb.cb_verbose = verbose; @@ -5647,17 +5692,18 @@ zpool_do_iostat(int argc, char **argv) /* No args, so just print the defaults. */ } else if (are_all_pools(argc, argv)) { /* All the args are pool names */ - } else if (are_vdevs_in_pool(argc, argv, NULL, &cb)) { + } else if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) { /* All the args are vdevs */ - cb.cb_vdev_names = argv; - cb.cb_vdev_names_count = argc; + cb.cb_vdevs.cb_names = argv; + cb.cb_vdevs.cb_names_count = argc; argc = 0; /* No pools to process */ } else if (are_all_pools(1, argv)) { /* The first arg is a pool name */ - if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0], &cb)) { + if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0], + &cb.cb_vdevs)) { /* ...and the rest are vdev names */ - cb.cb_vdev_names = argv + 1; - cb.cb_vdev_names_count = argc - 1; + cb.cb_vdevs.cb_names = argv + 1; + cb.cb_vdevs.cb_names_count = argc - 1; argc = 1; /* One pool to process */ } else { fprintf(stderr, gettext("Expected either a list of ")); @@ -5665,7 +5711,7 @@ zpool_do_iostat(int argc, char **argv) fprintf(stderr, " \"%s\", ", argv[0]); fprintf(stderr, gettext("but got:\n")); error_list_unresolved_vdevs(argc - 1, argv + 1, - argv[0], &cb); + argv[0], &cb.cb_vdevs); fprintf(stderr, "\n"); usage(B_FALSE); return (1); @@ -5680,7 +5726,7 @@ zpool_do_iostat(int argc, char **argv) return (1); } - if (cb.cb_vdev_names_count != 0) { + if (cb.cb_vdevs.cb_names_count != 0) { /* * If user specified vdevs, it implies verbose. */ @@ -5691,7 +5737,8 @@ zpool_do_iostat(int argc, char **argv) * Construct the list of all interesting pools. 
*/ ret = 0; - if ((list = pool_list_get(argc, argv, NULL, parsable, &ret)) == NULL) + if ((list = pool_list_get(argc, argv, NULL, ZFS_TYPE_POOL, parsable, + &ret)) == NULL) return (1); if (pool_list_count(list) == 0 && argc != 0) { @@ -5799,8 +5846,9 @@ zpool_do_iostat(int argc, char **argv) if (cmd != NULL && cb.cb_verbose && !(cb.cb_flags & IOS_ANYHISTO_M)) { cb.vcdl = all_pools_for_each_vdev_run(argc, - argv, cmd, g_zfs, cb.cb_vdev_names, - cb.cb_vdev_names_count, cb.cb_name_flags); + argv, cmd, g_zfs, cb.cb_vdevs.cb_names, + cb.cb_vdevs.cb_names_count, + cb.cb_vdevs.cb_name_flags); } else { cb.vcdl = NULL; } @@ -5852,7 +5900,7 @@ zpool_do_iostat(int argc, char **argv) if (((npools > 1 && !verbose && !(cb.cb_flags & IOS_ANYHISTO_M)) || (!(cb.cb_flags & IOS_ANYHISTO_M) && - cb.cb_vdev_names_count)) && + cb.cb_vdevs.cb_names_count)) && !cb.cb_scripted) { print_iostat_separator(&cb); if (cb.vcdl != NULL) @@ -6314,6 +6362,7 @@ zpool_do_list(int argc, char **argv) unsigned long count = 0; zpool_list_t *list; boolean_t first = B_TRUE; + current_prop_type = ZFS_TYPE_POOL; /* check options */ while ((c = getopt(argc, argv, ":gHLo:pPT:v")) != -1) { @@ -6365,7 +6414,7 @@ zpool_do_list(int argc, char **argv) for (;;) { if ((list = pool_list_get(argc, argv, &cb.cb_proplist, - cb.cb_literal, &ret)) == NULL) + ZFS_TYPE_POOL, cb.cb_literal, &ret)) == NULL) return (1); if (pool_list_count(list) == 0) @@ -7121,8 +7170,8 @@ zpool_do_reopen(int argc, char **argv) argv += optind; /* if argc == 0 we will execute zpool_reopen_one on all pools */ - ret = for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE, zpool_reopen_one, - &scrub_restart); + ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, + B_FALSE, zpool_reopen_one, &scrub_restart); return (ret); } @@ -7251,13 +7300,13 @@ zpool_do_scrub(int argc, char **argv) usage(B_FALSE); } - error = for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE, - scrub_callback, &cb); + error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, + B_FALSE, scrub_callback, &cb); if (wait && !error) { zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB; - error = for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE, - wait_callback, &act); + error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, + B_FALSE, wait_callback, &act); } return (error); @@ -7295,8 +7344,8 @@ zpool_do_resilver(int argc, char **argv) usage(B_FALSE); } - return (for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE, - scrub_callback, &cb)); + return (for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, + B_FALSE, scrub_callback, &cb)); } /* @@ -8719,8 +8768,8 @@ zpool_do_status(int argc, char **argv) cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd, NULL, NULL, 0, 0); - ret = for_each_pool(argc, argv, B_TRUE, NULL, cb.cb_literal, - status_callback, &cb); + ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, + cb.cb_literal, status_callback, &cb); if (cb.vcdl != NULL) free_vdev_cmd_data_list(cb.vcdl); @@ -9279,8 +9328,8 @@ zpool_do_upgrade(int argc, char **argv) (void) printf(gettext("\n")); } } else { - ret = for_each_pool(argc, argv, B_FALSE, NULL, B_FALSE, - upgrade_one, &cb); + ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL, + B_FALSE, upgrade_one, &cb); } return (ret); @@ -9476,8 +9525,8 @@ zpool_do_history(int argc, char **argv) argc -= optind; argv += optind; - ret = for_each_pool(argc, argv, B_FALSE, NULL, B_FALSE, get_history_one, - &cbdata); + ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL, + B_FALSE, get_history_one, &cbdata); if (argc == 0 && cbdata.first 
== B_TRUE) { (void) fprintf(stderr, gettext("no pools available\n")); @@ -9875,44 +9924,135 @@ zpool_do_events(int argc, char **argv) } static int -get_callback(zpool_handle_t *zhp, void *data) +get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data) { zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data; - char value[MAXNAMELEN]; + char value[ZFS_MAXPROPLEN]; zprop_source_t srctype; - zprop_list_t *pl; - - for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) { + for (zprop_list_t *pl = cbp->cb_proplist; pl != NULL; + pl = pl->pl_next) { + char *prop_name; /* - * Skip the special fake placeholder. This will also skip + * If the first property is pool name, it is a special + * placeholder that we can skip. This will also skip * over the name property when 'all' is specified. */ if (pl->pl_prop == ZPOOL_PROP_NAME && pl == cbp->cb_proplist) continue; - if (pl->pl_prop == ZPROP_INVAL && - (zpool_prop_feature(pl->pl_user_prop) || - zpool_prop_unsupported(pl->pl_user_prop))) { - srctype = ZPROP_SRC_LOCAL; + if (pl->pl_prop == ZPROP_INVAL) { + prop_name = pl->pl_user_prop; + } else { + prop_name = (char *)vdev_prop_to_name(pl->pl_prop); + } + if (zpool_get_vdev_prop(zhp, vdevname, pl->pl_prop, + prop_name, value, sizeof (value), &srctype, + cbp->cb_literal) == 0) { + zprop_print_one_property(vdevname, cbp, prop_name, + value, srctype, NULL, NULL); + } + } - if (zpool_prop_get_feature(zhp, pl->pl_user_prop, - value, sizeof (value)) == 0) { - zprop_print_one_property(zpool_get_name(zhp), - cbp, pl->pl_user_prop, value, srctype, - NULL, NULL); - } + return (0); +} + +static int +get_callback_vdev_width_cb(void *zhp_data, nvlist_t *nv, void *data) +{ + zpool_handle_t *zhp = zhp_data; + zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data; + char *vdevname = zpool_vdev_name(g_zfs, zhp, nv, + cbp->cb_vdevs.cb_name_flags); + int ret; + + /* Adjust the column widths for the vdev properties */ + ret = vdev_expand_proplist(zhp, vdevname, &cbp->cb_proplist); + + return (ret); +} + +static int +get_callback_vdev_cb(void *zhp_data, nvlist_t *nv, void *data) +{ + zpool_handle_t *zhp = zhp_data; + zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data; + char *vdevname = zpool_vdev_name(g_zfs, zhp, nv, + cbp->cb_vdevs.cb_name_flags); + int ret; + + /* Display the properties */ + ret = get_callback_vdev(zhp, vdevname, data); + + return (ret); +} + +static int +get_callback(zpool_handle_t *zhp, void *data) +{ + zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data; + char value[MAXNAMELEN]; + zprop_source_t srctype; + zprop_list_t *pl; + int vid; + + if (cbp->cb_type == ZFS_TYPE_VDEV) { + if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) { + for_each_vdev(zhp, get_callback_vdev_width_cb, data); + for_each_vdev(zhp, get_callback_vdev_cb, data); } else { - if (zpool_get_prop(zhp, pl->pl_prop, value, - sizeof (value), &srctype, cbp->cb_literal) != 0) + /* Adjust column widths for vdev properties */ + for (vid = 0; vid < cbp->cb_vdevs.cb_names_count; + vid++) { + vdev_expand_proplist(zhp, + cbp->cb_vdevs.cb_names[vid], + &cbp->cb_proplist); + } + /* Display the properties */ + for (vid = 0; vid < cbp->cb_vdevs.cb_names_count; + vid++) { + get_callback_vdev(zhp, + cbp->cb_vdevs.cb_names[vid], data); + } + } + } else { + assert(cbp->cb_type == ZFS_TYPE_POOL); + for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) { + /* + * Skip the special fake placeholder. This will also + * skip over the name property when 'all' is specified. 
+ */ + if (pl->pl_prop == ZPOOL_PROP_NAME && + pl == cbp->cb_proplist) continue; - zprop_print_one_property(zpool_get_name(zhp), cbp, - zpool_prop_to_name(pl->pl_prop), value, srctype, - NULL, NULL); + if (pl->pl_prop == ZPROP_INVAL && + (zpool_prop_feature(pl->pl_user_prop) || + zpool_prop_unsupported(pl->pl_user_prop))) { + srctype = ZPROP_SRC_LOCAL; + + if (zpool_prop_get_feature(zhp, + pl->pl_user_prop, value, + sizeof (value)) == 0) { + zprop_print_one_property( + zpool_get_name(zhp), cbp, + pl->pl_user_prop, value, srctype, + NULL, NULL); + } + } else { + if (zpool_get_prop(zhp, pl->pl_prop, value, + sizeof (value), &srctype, + cbp->cb_literal) != 0) + continue; + + zprop_print_one_property(zpool_get_name(zhp), + cbp, zpool_prop_to_name(pl->pl_prop), + value, srctype, NULL, NULL); + } } } + return (0); } @@ -9936,6 +10076,7 @@ zpool_do_get(int argc, char **argv) int ret; int c, i; char *value; + char *propstr = NULL; cb.cb_first = B_TRUE; @@ -9948,6 +10089,8 @@ zpool_do_get(int argc, char **argv) cb.cb_columns[2] = GET_COL_VALUE; cb.cb_columns[3] = GET_COL_SOURCE; cb.cb_type = ZFS_TYPE_POOL; + cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID; + current_prop_type = cb.cb_type; /* check options */ while ((c = getopt(argc, argv, ":Hpo:")) != -1) { @@ -10025,13 +10168,52 @@ zpool_do_get(int argc, char **argv) usage(B_FALSE); } - if (zprop_get_list(g_zfs, argv[0], &cb.cb_proplist, - ZFS_TYPE_POOL) != 0) - usage(B_FALSE); + /* Properties list is needed later by zprop_get_list() */ + propstr = argv[0]; argc--; argv++; + if (argc == 0) { + /* No args, so just print the defaults. */ + } else if (are_all_pools(argc, argv)) { + /* All the args are pool names */ + } else if (are_all_pools(1, argv)) { + /* The first arg is a pool name */ + if ((argc == 2 && strcmp(argv[1], "all-vdevs") == 0) || + are_vdevs_in_pool(argc - 1, argv + 1, argv[0], + &cb.cb_vdevs)) { + /* ... 
and the rest are vdev names */ + cb.cb_vdevs.cb_names = argv + 1; + cb.cb_vdevs.cb_names_count = argc - 1; + cb.cb_type = ZFS_TYPE_VDEV; + argc = 1; /* One pool to process */ + } else { + fprintf(stderr, gettext("Expected a list of vdevs in" + " \"%s\", but got:\n"), argv[0]); + error_list_unresolved_vdevs(argc - 1, argv + 1, + argv[0], &cb.cb_vdevs); + fprintf(stderr, "\n"); + usage(B_FALSE); + return (1); + } + } else { + /* + * The first arg isn't a pool name, + */ + fprintf(stderr, gettext("missing pool name.\n")); + fprintf(stderr, "\n"); + usage(B_FALSE); + return (1); + } + + if (zprop_get_list(g_zfs, propstr, &cb.cb_proplist, + cb.cb_type) != 0) { + /* Use correct list of valid properties (pool or vdev) */ + current_prop_type = cb.cb_type; + usage(B_FALSE); + } + if (cb.cb_proplist != NULL) { fake_name.pl_prop = ZPOOL_PROP_NAME; fake_name.pl_width = strlen(gettext("NAME")); @@ -10039,8 +10221,8 @@ zpool_do_get(int argc, char **argv) cb.cb_proplist = &fake_name; } - ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_literal, - get_callback, &cb); + ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_type, + cb.cb_literal, get_callback, &cb); if (cb.cb_proplist == &fake_name) zprop_free_list(fake_name.pl_next); @@ -10053,14 +10235,15 @@ zpool_do_get(int argc, char **argv) typedef struct set_cbdata { char *cb_propname; char *cb_value; + zfs_type_t cb_type; + vdev_cbdata_t cb_vdevs; boolean_t cb_any_successful; } set_cbdata_t; static int -set_callback(zpool_handle_t *zhp, void *data) +set_pool_callback(zpool_handle_t *zhp, set_cbdata_t *cb) { int error; - set_cbdata_t *cb = (set_cbdata_t *)data; /* Check if we have out-of-bounds features */ if (strcmp(cb->cb_propname, ZPOOL_CONFIG_COMPATIBILITY) == 0) { @@ -10121,9 +10304,24 @@ set_callback(zpool_handle_t *zhp, void *data) error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value); - if (!error) - cb->cb_any_successful = B_TRUE; + return (error); +} + +static int +set_callback(zpool_handle_t *zhp, void *data) +{ + int error; + set_cbdata_t *cb = (set_cbdata_t *)data; + + if (cb->cb_type == ZFS_TYPE_VDEV) { + error = zpool_set_vdev_prop(zhp, *cb->cb_vdevs.cb_names, + cb->cb_propname, cb->cb_value); + } else { + assert(cb->cb_type == ZFS_TYPE_POOL); + error = set_pool_callback(zhp, cb); + } + cb->cb_any_successful = !error; return (error); } @@ -10133,6 +10331,7 @@ zpool_do_set(int argc, char **argv) set_cbdata_t cb = { 0 }; int error; + current_prop_type = ZFS_TYPE_POOL; if (argc > 1 && argv[1][0] == '-') { (void) fprintf(stderr, gettext("invalid option '%c'\n"), argv[1][1]); @@ -10150,12 +10349,14 @@ zpool_do_set(int argc, char **argv) usage(B_FALSE); } - if (argc > 3) { + if (argc > 4) { (void) fprintf(stderr, gettext("too many pool names\n")); usage(B_FALSE); } cb.cb_propname = argv[1]; + cb.cb_type = ZFS_TYPE_POOL; + cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID; cb.cb_value = strchr(cb.cb_propname, '='); if (cb.cb_value == NULL) { (void) fprintf(stderr, gettext("missing value in " @@ -10165,9 +10366,33 @@ zpool_do_set(int argc, char **argv) *(cb.cb_value) = '\0'; cb.cb_value++; + argc -= 2; + argv += 2; + + if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) { + /* Argument is a vdev */ + cb.cb_vdevs.cb_names = argv; + cb.cb_vdevs.cb_names_count = 1; + cb.cb_type = ZFS_TYPE_VDEV; + argc = 0; /* No pools to process */ + } else if (are_all_pools(1, argv)) { + /* The first arg is a pool name */ + if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0], + &cb.cb_vdevs)) { + /* 2nd argument is a vdev */ + 
cb.cb_vdevs.cb_names = argv + 1; + cb.cb_vdevs.cb_names_count = 1; + cb.cb_type = ZFS_TYPE_VDEV; + argc = 1; /* One pool to process */ + } else if (argc > 1) { + (void) fprintf(stderr, + gettext("too many pool names\n")); + usage(B_FALSE); + } + } - error = for_each_pool(argc - 2, argv + 2, B_TRUE, NULL, B_FALSE, - set_callback, &cb); + error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, + B_FALSE, set_callback, &cb); return (error); } diff --git a/cmd/zpool/zpool_util.h b/cmd/zpool/zpool_util.h index 6665eaf0d..583f48cca 100644 --- a/cmd/zpool/zpool_util.h +++ b/cmd/zpool/zpool_util.h @@ -65,7 +65,7 @@ nvlist_t *split_mirror_vdev(zpool_handle_t *zhp, char *newname, /* * Pool list functions */ -int for_each_pool(int, char **, boolean_t unavail, zprop_list_t **, +int for_each_pool(int, char **, boolean_t unavail, zprop_list_t **, zfs_type_t, boolean_t, zpool_iter_f, void *); /* Vdev list functions */ @@ -73,7 +73,8 @@ int for_each_vdev(zpool_handle_t *zhp, pool_vdev_iter_f func, void *data); typedef struct zpool_list zpool_list_t; -zpool_list_t *pool_list_get(int, char **, zprop_list_t **, boolean_t, int *); +zpool_list_t *pool_list_get(int, char **, zprop_list_t **, zfs_type_t, + boolean_t, int *); void pool_list_update(zpool_list_t *); int pool_list_iter(zpool_list_t *, int unavail, zpool_iter_f, void *); void pool_list_free(zpool_list_t *); |
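
As a companion to the read path above, a minimal sketch of the write
path: disabling allocations on one top-level vdev via the
zpool_set_vdev_prop() call that set_callback() dispatches to for
ZFS_TYPE_VDEV. Pool and vdev names are again placeholders, and the CLI
equivalent in the comment is an assumption based on the argument
parsing added to zpool_do_set().

#include <libzfs.h>
#include <stdio.h>

int
main(void)
{
	libzfs_handle_t *g_zfs = libzfs_init();
	zpool_handle_t *zhp = zpool_open(g_zfs, "tank");
	int err = -1;

	if (zhp != NULL) {
		/* Roughly: zpool set allocating=off tank mirror-0 */
		err = zpool_set_vdev_prop(zhp, "mirror-0", "allocating",
		    "off");
		zpool_close(zhp);
	}
	libzfs_fini(g_zfs);
	return (err != 0);
}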