path: root/cmd/zpool/zpool_main.c
author:    Umer Saleem <[email protected]>        2024-05-09 16:54:47 +0500
committer: Brian Behlendorf <[email protected]>   2024-08-06 12:47:10 -0700
commit:    959e963c819f4416f3308d3bbc504d9c3d9fea98 (patch)
tree:      a4ddd7fe636b3799c4478f1dcc51385eb5f1df42 /cmd/zpool/zpool_main.c
parent:    4e6b3f7e1d424c923041acecfed8f10780766e19 (diff)
JSON output support for zpool status
This commit adds support for the zpool status command to display the status of ZFS pools in JSON format using the '-j' option. Status information is collected in an nvlist, which is later dumped to stdout in JSON format. Existing options for zpool status work with the '-j' flag. The zpool status man page is updated accordingly.
Reviewed-by: Tony Hutter <[email protected]>
Reviewed-by: Ameer Hamza <[email protected]>
Signed-off-by: Umer Saleem <[email protected]>
Closes #16217
Diffstat (limited to 'cmd/zpool/zpool_main.c')
-rw-r--r--   cmd/zpool/zpool_main.c   1704
1 file changed, 1444 insertions(+), 260 deletions(-)
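
To illustrate the mechanism the commit message describes (status data gathered into an nvlist, then dumped to stdout as JSON), here is a minimal, self-contained sketch using libnvpair. It is not part of the patch: the pool name "tank" and the keys shown are placeholders, the real schema is built by the new status_callback_json() path in the diff below, and nvlist_print_json() is used here only as one plausible way to render an nvlist as JSON.

/*
 * Hypothetical sketch, not taken from the patch: collect status data into
 * an nvlist and print it as JSON, the same collect-then-dump flow used by
 * 'zpool status -j'.  Keys and the pool name "tank" are illustrative.
 * Build (assumption): cc json_sketch.c -lnvpair
 */
#include <stdio.h>
#include <libnvpair.h>

int
main(void)
{
	nvlist_t *root = fnvlist_alloc();
	nvlist_t *pools = fnvlist_alloc();
	nvlist_t *pool = fnvlist_alloc();

	/* Pool entries are keyed by name unless --json-pool-key-guid is used. */
	fnvlist_add_string(pool, "state", "ONLINE");
	fnvlist_add_uint64(pool, "error_count", 0);
	fnvlist_add_nvlist(pools, "tank", pool);
	fnvlist_add_nvlist(root, "pools", pools);

	/* Dump the whole tree to stdout as a single JSON object. */
	(void) nvlist_print_json(stdout, root);
	(void) putchar('\n');

	fnvlist_free(pool);
	fnvlist_free(pools);
	fnvlist_free(root);
	return (0);
}

With the patch applied, the equivalent end-user command is 'zpool status -j' (optionally with --json-int, --json-flat-vdevs, or --json-pool-key-guid, per the usage strings below), which emits one JSON document on stdout.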
diff --git a/cmd/zpool/zpool_main.c b/cmd/zpool/zpool_main.c
index 311591ea0..6f8817c09 100644
--- a/cmd/zpool/zpool_main.c
+++ b/cmd/zpool/zpool_main.c
@@ -66,7 +66,7 @@
#include <sys/zfs_ioctl.h>
#include <sys/mount.h>
#include <sys/sysmacros.h>
-
+#include <string.h>
#include <math.h>
#include <libzfs.h>
@@ -141,7 +141,8 @@ enum zpool_options {
ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH,
ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH,
ZPOOL_OPTION_POOL_KEY_GUID,
- ZPOOL_OPTION_JSON_NUMS_AS_INT
+ ZPOOL_OPTION_JSON_NUMS_AS_INT,
+ ZPOOL_OPTION_JSON_FLAT_VDEVS
};
/*
@@ -276,6 +277,86 @@ static const char *vsx_type_to_nvlist[IOS_COUNT][15] = {
NULL},
};
+static const char *pool_scan_func_str[] = {
+ "NONE",
+ "SCRUB",
+ "RESILVER",
+ "ERRORSCRUB"
+};
+
+static const char *pool_scan_state_str[] = {
+ "NONE",
+ "SCANNING",
+ "FINISHED",
+ "CANCELED",
+ "ERRORSCRUBBING"
+};
+
+static const char *vdev_rebuild_state_str[] = {
+ "NONE",
+ "ACTIVE",
+ "CANCELED",
+ "COMPLETE"
+};
+
+static const char *checkpoint_state_str[] = {
+ "NONE",
+ "EXISTS",
+ "DISCARDING"
+};
+
+static const char *vdev_state_str[] = {
+ "UNKNOWN",
+ "CLOSED",
+ "OFFLINE",
+ "REMOVED",
+ "CANT_OPEN",
+ "FAULTED",
+ "DEGRADED",
+ "ONLINE"
+};
+
+static const char *vdev_aux_str[] = {
+ "NONE",
+ "OPEN_FAILED",
+ "CORRUPT_DATA",
+ "NO_REPLICAS",
+ "BAD_GUID_SUM",
+ "TOO_SMALL",
+ "BAD_LABEL",
+ "VERSION_NEWER",
+ "VERSION_OLDER",
+ "UNSUP_FEAT",
+ "SPARED",
+ "ERR_EXCEEDED",
+ "IO_FAILURE",
+ "BAD_LOG",
+ "EXTERNAL",
+ "SPLIT_POOL",
+ "BAD_ASHIFT",
+ "EXTERNAL_PERSIST",
+ "ACTIVE",
+ "CHILDREN_OFFLINE",
+ "ASHIFT_TOO_BIG"
+};
+
+static const char *vdev_init_state_str[] = {
+ "NONE",
+ "ACTIVE",
+ "CANCELED",
+ "SUSPENDED",
+ "COMPLETE"
+};
+
+static const char *vdev_trim_state_str[] = {
+ "NONE",
+ "ACTIVE",
+ "CANCELED",
+ "SUSPENDED",
+ "COMPLETE"
+};
+
+#define ZFS_NICE_TIMESTAMP 100
/*
* Given a cb->cb_flags with a histogram bit set, return the iostat_type.
@@ -402,8 +483,8 @@ get_usage(zpool_help_t idx)
case HELP_LABELCLEAR:
return (gettext("\tlabelclear [-f] <vdev>\n"));
case HELP_LIST:
- return (gettext("\tlist [-gHLpPv] [-o property[,...]] "
- "[-j [--json-int, --json-pool-key-guid]]\n"
+ return (gettext("\tlist [-gHLpPv] [-o property[,...]] [-j "
+ "[--json-int, --json-pool-key-guid]] ...\n"
"\t [-T d|u] [pool] [interval [count]]\n"));
case HELP_PREFETCH:
return (gettext("\tprefetch -t <type> [<type opts>] <pool>\n"
@@ -432,9 +513,11 @@ get_usage(zpool_help_t idx)
return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] <pool> "
"[<device> ...]\n"));
case HELP_STATUS:
- return (gettext("\tstatus [--power] [-c [script1,script2,...]] "
- "[-DegiLpPstvx] [-T d|u] [pool] ...\n"
- "\t [interval [count]]\n"));
+ return (gettext("\tstatus [--power] [-j [--json-int, "
+ "--json-flat-vdevs, ...\n"
+ "\t --json-pool-key-guid]] [-c [script1,script2,...]] "
+ "[-DegiLpPstvx] ...\n"
+ "\t [-T d|u] [pool] [interval [count]]\n"));
case HELP_UPGRADE:
return (gettext("\tupgrade\n"
"\tupgrade -v\n"
@@ -443,7 +526,8 @@ get_usage(zpool_help_t idx)
return (gettext("\tevents [-vHf [pool] | -c]\n"));
case HELP_GET:
return (gettext("\tget [-Hp] [-j [--json-int, "
- "--json-pool-key-guid]] [-o \"all\" | field[,...]] "
+ "--json-pool-key-guid]] ...\n"
+ "\t [-o \"all\" | field[,...]] "
"<\"all\" | property[,...]> <pool> ...\n"));
case HELP_SET:
return (gettext("\tset <property=value> <pool>\n"
@@ -901,6 +985,84 @@ print_spare_list(nvlist_t *nv, int indent)
}
}
+typedef struct spare_cbdata {
+ uint64_t cb_guid;
+ zpool_handle_t *cb_zhp;
+} spare_cbdata_t;
+
+static boolean_t
+find_vdev(nvlist_t *nv, uint64_t search)
+{
+ uint64_t guid;
+ nvlist_t **child;
+ uint_t c, children;
+
+ if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
+ search == guid)
+ return (B_TRUE);
+
+ if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
+ &child, &children) == 0) {
+ for (c = 0; c < children; c++)
+ if (find_vdev(child[c], search))
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
+static int
+find_spare(zpool_handle_t *zhp, void *data)
+{
+ spare_cbdata_t *cbp = data;
+ nvlist_t *config, *nvroot;
+
+ config = zpool_get_config(zhp, NULL);
+ verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
+ &nvroot) == 0);
+
+ if (find_vdev(nvroot, cbp->cb_guid)) {
+ cbp->cb_zhp = zhp;
+ return (1);
+ }
+
+ zpool_close(zhp);
+ return (0);
+}
+
+static void
+nice_num_str_nvlist(nvlist_t *item, const char *key, uint64_t value,
+ boolean_t literal, boolean_t as_int, int format)
+{
+ char buf[256];
+ if (literal) {
+ if (!as_int)
+ snprintf(buf, 256, "%llu", (u_longlong_t)value);
+ } else {
+ switch (format) {
+ case ZFS_NICENUM_1024:
+ zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_1024);
+ break;
+ case ZFS_NICENUM_BYTES:
+ zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_BYTES);
+ break;
+ case ZFS_NICENUM_TIME:
+ zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_TIME);
+ break;
+ case ZFS_NICE_TIMESTAMP:
+ format_timestamp(value, buf, 256);
+ break;
+ default:
+ fprintf(stderr, "Invalid number format");
+ exit(1);
+ }
+ }
+ if (as_int)
+ fnvlist_add_uint64(item, key, value);
+ else
+ fnvlist_add_string(item, key, buf);
+}
+
/*
* Generates an nvlist with output version for every command based on params.
* Purpose of this is to add a version of JSON output, considering the schema
@@ -967,13 +1129,36 @@ fill_pool_info(nvlist_t *list, zpool_handle_t *zhp, boolean_t addtype,
}
static void
+used_by_other(zpool_handle_t *zhp, nvlist_t *nvdev, nvlist_t *list)
+{
+ spare_cbdata_t spare_cb;
+ verify(nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_GUID,
+ &spare_cb.cb_guid) == 0);
+ if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
+ if (strcmp(zpool_get_name(spare_cb.cb_zhp),
+ zpool_get_name(zhp)) != 0) {
+ fnvlist_add_string(list, "used_by",
+ zpool_get_name(spare_cb.cb_zhp));
+ }
+ zpool_close(spare_cb.cb_zhp);
+ }
+}
+
+static void
fill_vdev_info(nvlist_t *list, zpool_handle_t *zhp, char *name,
boolean_t addtype, boolean_t as_int)
{
- boolean_t spare, l2c, log;
- const char *path, *phys, *devid;
- nvlist_t *nvdev = zpool_find_vdev(zhp, name, &spare, &l2c, &log);
+ boolean_t l2c = B_FALSE;
+ const char *path, *phys, *devid, *bias = NULL;
+ uint64_t hole = 0, log = 0, spare = 0;
+ vdev_stat_t *vs;
+ uint_t c;
+ nvlist_t *nvdev;
+ if (strcmp(name, zpool_get_name(zhp)) != 0)
+ nvdev = zpool_find_vdev(zhp, name, NULL, &l2c, NULL);
+ else
+ nvdev = zpool_find_vdev(zhp, "root-0", NULL, &l2c, NULL);
fnvlist_add_string(list, "name", name);
if (addtype)
fnvlist_add_string(list, "type", "VDEV");
@@ -993,7 +1178,6 @@ fill_vdev_info(nvlist_t *list, zpool_handle_t *zhp, char *name,
fnvlist_add_string(list, "guid", buf);
}
}
-
if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_PATH, &path) == 0)
fnvlist_add_string(list, "path", path);
if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_PHYS_PATH,
@@ -1002,6 +1186,31 @@ fill_vdev_info(nvlist_t *list, zpool_handle_t *zhp, char *name,
if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_DEVID,
&devid) == 0)
fnvlist_add_string(list, "devid", devid);
+ (void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_LOG, &log);
+ (void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_SPARE,
+ &spare);
+ (void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_HOLE, &hole);
+ if (hole)
+ fnvlist_add_string(list, "class", VDEV_TYPE_HOLE);
+ else if (l2c)
+ fnvlist_add_string(list, "class", VDEV_TYPE_L2CACHE);
+ else if (spare)
+ fnvlist_add_string(list, "class", VDEV_TYPE_SPARE);
+ else if (log)
+ fnvlist_add_string(list, "class", VDEV_TYPE_LOG);
+ else {
+ (void) nvlist_lookup_string(nvdev,
+ ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
+ if (bias != NULL)
+ fnvlist_add_string(list, "class", bias);
+ else
+ fnvlist_add_string(list, "class", "normal");
+ }
+ if (nvlist_lookup_uint64_array(nvdev, ZPOOL_CONFIG_VDEV_STATS,
+ (uint64_t **)&vs, &c) == 0) {
+ fnvlist_add_string(list, "state",
+ vdev_state_str[vs->vs_state]);
+ }
}
}
@@ -2343,51 +2552,6 @@ max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max,
return (max);
}
-typedef struct spare_cbdata {
- uint64_t cb_guid;
- zpool_handle_t *cb_zhp;
-} spare_cbdata_t;
-
-static boolean_t
-find_vdev(nvlist_t *nv, uint64_t search)
-{
- uint64_t guid;
- nvlist_t **child;
- uint_t c, children;
-
- if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
- search == guid)
- return (B_TRUE);
-
- if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
- &child, &children) == 0) {
- for (c = 0; c < children; c++)
- if (find_vdev(child[c], search))
- return (B_TRUE);
- }
-
- return (B_FALSE);
-}
-
-static int
-find_spare(zpool_handle_t *zhp, void *data)
-{
- spare_cbdata_t *cbp = data;
- nvlist_t *config, *nvroot;
-
- config = zpool_get_config(zhp, NULL);
- verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
- &nvroot) == 0);
-
- if (find_vdev(nvroot, cbp->cb_guid)) {
- cbp->cb_zhp = zhp;
- return (1);
- }
-
- zpool_close(zhp);
- return (0);
-}
-
typedef struct status_cbdata {
int cb_count;
int cb_name_flags;
@@ -2405,6 +2569,11 @@ typedef struct status_cbdata {
boolean_t cb_print_vdev_trim;
vdev_cmd_data_list_t *vcdl;
boolean_t cb_print_power;
+ boolean_t cb_json;
+ boolean_t cb_flat_vdevs;
+ nvlist_t *cb_jsobj;
+ boolean_t cb_json_as_int;
+ boolean_t cb_json_pool_key_guid;
} status_cbdata_t;
/* Return 1 if string is NULL, empty, or whitespace; return 0 otherwise. */
@@ -2417,6 +2586,46 @@ is_blank_str(const char *str)
return (B_TRUE);
}
+static void
+zpool_nvlist_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path,
+ nvlist_t *item)
+{
+ vdev_cmd_data_t *data;
+ int i, j, k = 1;
+ char tmp[256];
+ const char *val;
+
+ for (i = 0; i < vcdl->count; i++) {
+ if ((strcmp(vcdl->data[i].path, path) != 0) ||
+ (strcmp(vcdl->data[i].pool, pool) != 0))
+ continue;
+
+ data = &vcdl->data[i];
+ for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
+ val = NULL;
+ for (int k = 0; k < data->cols_cnt; k++) {
+ if (strcmp(data->cols[k],
+ vcdl->uniq_cols[j]) == 0) {
+ val = data->lines[k];
+ break;
+ }
+ }
+ if (val == NULL || is_blank_str(val))
+ val = "-";
+ fnvlist_add_string(item, vcdl->uniq_cols[j], val);
+ }
+
+ for (j = data->cols_cnt; j < data->lines_cnt; j++) {
+ if (data->lines[j]) {
+ snprintf(tmp, 256, "extra_%d", k++);
+ fnvlist_add_string(item, tmp,
+ data->lines[j]);
+ }
+ }
+ break;
+ }
+}
+
/* Print command output lines for specific vdev in a specific pool */
static void
zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path)
@@ -3132,6 +3341,7 @@ show_import(nvlist_t *config, boolean_t report_error)
uint_t vsc;
const char *comment;
const char *indent;
+ char buf[2048];
status_cbdata_t cb = { 0 };
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
@@ -3237,7 +3447,8 @@ show_import(nvlist_t *config, boolean_t report_error)
printf_color(ANSI_YELLOW, gettext("The pool uses the following "
"feature(s) not supported on this system:\n"));
color_start(ANSI_YELLOW);
- zpool_print_unsup_feat(config);
+ zpool_collect_unsup_feat(config, buf, 2048);
+ (void) printf("%s", buf);
color_end();
break;
@@ -3249,7 +3460,8 @@ show_import(nvlist_t *config, boolean_t report_error)
"\t%sfeature(s) not supported on this system:\n"),
indent, indent);
color_start(ANSI_YELLOW);
- zpool_print_unsup_feat(config);
+ zpool_collect_unsup_feat(config, buf, 2048);
+ (void) printf("%s", buf);
color_end();
break;
@@ -8825,6 +9037,807 @@ check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time)
return (rebuilding);
}
+static void
+vdev_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
+ int depth, boolean_t isspare, char *parent, nvlist_t *item)
+{
+ nvlist_t *vds, **child, *ch = NULL;
+ uint_t vsc, children;
+ vdev_stat_t *vs;
+ char *vname;
+ uint64_t notpresent;
+ const char *type, *path;
+
+ if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
+ &child, &children) != 0)
+ children = 0;
+ verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
+ (uint64_t **)&vs, &vsc) == 0);
+ verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
+ if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
+ return;
+
+ if (cb->cb_print_unhealthy && depth > 0 &&
+ for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) {
+ return;
+ }
+ vname = zpool_vdev_name(g_zfs, zhp, nv,
+ cb->cb_name_flags | VDEV_NAME_TYPE_ID);
+ vds = fnvlist_alloc();
+ fill_vdev_info(vds, zhp, vname, B_FALSE, cb->cb_json_as_int);
+ if (cb->cb_flat_vdevs && parent != NULL) {
+ fnvlist_add_string(vds, "parent", parent);
+ }
+
+ if (isspare) {
+ if (vs->vs_aux == VDEV_AUX_SPARED) {
+ fnvlist_add_string(vds, "state", "INUSE");
+ used_by_other(zhp, nv, vds);
+ } else if (vs->vs_state == VDEV_STATE_HEALTHY)
+ fnvlist_add_string(vds, "state", "AVAIL");
+ } else {
+ if (vs->vs_alloc) {
+ nice_num_str_nvlist(vds, "alloc_space", vs->vs_alloc,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_BYTES);
+ }
+ if (vs->vs_space) {
+ nice_num_str_nvlist(vds, "total_space", vs->vs_space,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_BYTES);
+ }
+ if (vs->vs_dspace) {
+ nice_num_str_nvlist(vds, "def_space", vs->vs_dspace,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_BYTES);
+ }
+ if (vs->vs_rsize) {
+ nice_num_str_nvlist(vds, "rep_dev_size", vs->vs_rsize,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_BYTES);
+ }
+ if (vs->vs_esize) {
+ nice_num_str_nvlist(vds, "ex_dev_size", vs->vs_esize,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_BYTES);
+ }
+ if (vs->vs_self_healed) {
+ nice_num_str_nvlist(vds, "self_healed",
+ vs->vs_self_healed, cb->cb_literal,
+ cb->cb_json_as_int, ZFS_NICENUM_BYTES);
+ }
+ if (vs->vs_pspace) {
+ nice_num_str_nvlist(vds, "phys_space", vs->vs_pspace,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_BYTES);
+ }
+ nice_num_str_nvlist(vds, "read_errors", vs->vs_read_errors,
+ cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
+ nice_num_str_nvlist(vds, "write_errors", vs->vs_write_errors,
+ cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
+ nice_num_str_nvlist(vds, "checksum_errors",
+ vs->vs_checksum_errors, cb->cb_literal,
+ cb->cb_json_as_int, ZFS_NICENUM_1024);
+ if (vs->vs_scan_processed) {
+ nice_num_str_nvlist(vds, "scan_processed",
+ vs->vs_scan_processed, cb->cb_literal,
+ cb->cb_json_as_int, ZFS_NICENUM_BYTES);
+ }
+ if (vs->vs_checkpoint_space) {
+ nice_num_str_nvlist(vds, "checkpoint_space",
+ vs->vs_checkpoint_space, cb->cb_literal,
+ cb->cb_json_as_int, ZFS_NICENUM_BYTES);
+ }
+ if (vs->vs_resilver_deferred) {
+ nice_num_str_nvlist(vds, "resilver_deferred",
+ vs->vs_resilver_deferred, B_TRUE,
+ cb->cb_json_as_int, ZFS_NICENUM_1024);
+ }
+ if (children == 0) {
+ nice_num_str_nvlist(vds, "slow_ios", vs->vs_slow_ios,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_1024);
+ }
+ if (cb->cb_print_power) {
+ if (children == 0) {
+ /* Only leaf vdevs have physical slots */
+ switch (zpool_power_current_state(zhp, (char *)
+ fnvlist_lookup_string(nv,
+ ZPOOL_CONFIG_PATH))) {
+ case 0:
+ fnvlist_add_string(vds, "power_state",
+ "off");
+ break;
+ case 1:
+ fnvlist_add_string(vds, "power_state",
+ "on");
+ break;
+ default:
+ fnvlist_add_string(vds, "power_state",
+ "-");
+ }
+ } else {
+ fnvlist_add_string(vds, "power_state", "-");
+ }
+ }
+ }
+
+ if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
+ &notpresent) == 0) {
+ nice_num_str_nvlist(vds, ZPOOL_CONFIG_NOT_PRESENT,
+ 1, B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
+ fnvlist_add_string(vds, "was",
+ fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH));
+ } else if (vs->vs_aux != VDEV_AUX_NONE) {
+ fnvlist_add_string(vds, "aux", vdev_aux_str[vs->vs_aux]);
+ } else if (children == 0 && !isspare &&
+ getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
+ VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
+ vs->vs_configured_ashift < vs->vs_physical_ashift) {
+ nice_num_str_nvlist(vds, "configured_ashift",
+ vs->vs_configured_ashift, B_TRUE, cb->cb_json_as_int,
+ ZFS_NICENUM_1024);
+ nice_num_str_nvlist(vds, "physical_ashift",
+ vs->vs_physical_ashift, B_TRUE, cb->cb_json_as_int,
+ ZFS_NICENUM_1024);
+ }
+ if (vs->vs_scan_removing != 0) {
+ nice_num_str_nvlist(vds, "removing", vs->vs_scan_removing,
+ B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
+ } else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {
+ nice_num_str_nvlist(vds, "noalloc", vs->vs_noalloc,
+ B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
+ }
+
+ if (cb->vcdl != NULL) {
+ if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
+ zpool_nvlist_cmd(cb->vcdl, zpool_get_name(zhp),
+ path, vds);
+ }
+ }
+
+ if (children == 0) {
+ if (cb->cb_print_vdev_init) {
+ if (vs->vs_initialize_state != 0) {
+ uint64_t st = vs->vs_initialize_state;
+ fnvlist_add_string(vds, "init_state",
+ vdev_init_state_str[st]);
+ nice_num_str_nvlist(vds, "initialized",
+ vs->vs_initialize_bytes_done,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_BYTES);
+ nice_num_str_nvlist(vds, "to_initialize",
+ vs->vs_initialize_bytes_est,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_BYTES);
+ nice_num_str_nvlist(vds, "init_time",
+ vs->vs_initialize_action_time,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICE_TIMESTAMP);
+ nice_num_str_nvlist(vds, "init_errors",
+ vs->vs_initialize_errors,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_1024);
+ } else {
+ fnvlist_add_string(vds, "init_state",
+ "UNINITIALIZED");
+ }
+ }
+ if (cb->cb_print_vdev_trim) {
+ if (vs->vs_trim_notsup == 0) {
+ if (vs->vs_trim_state != 0) {
+ uint64_t st = vs->vs_trim_state;
+ fnvlist_add_string(vds, "trim_state",
+ vdev_trim_state_str[st]);
+ nice_num_str_nvlist(vds, "trimmed",
+ vs->vs_trim_bytes_done,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_BYTES);
+ nice_num_str_nvlist(vds, "to_trim",
+ vs->vs_trim_bytes_est,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_BYTES);
+ nice_num_str_nvlist(vds, "trim_time",
+ vs->vs_trim_action_time,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICE_TIMESTAMP);
+ nice_num_str_nvlist(vds, "trim_errors",
+ vs->vs_trim_errors,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_1024);
+ } else
+ fnvlist_add_string(vds, "trim_state",
+ "UNTRIMMED");
+ }
+ nice_num_str_nvlist(vds, "trim_notsup",
+ vs->vs_trim_notsup, B_TRUE,
+ cb->cb_json_as_int, ZFS_NICENUM_1024);
+ }
+ } else {
+ ch = fnvlist_alloc();
+ }
+
+ if (cb->cb_flat_vdevs && children == 0) {
+ fnvlist_add_nvlist(item, vname, vds);
+ }
+
+ for (int c = 0; c < children; c++) {
+ uint64_t islog = B_FALSE, ishole = B_FALSE;
+ (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
+ &islog);
+ (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
+ &ishole);
+ if (islog || ishole)
+ continue;
+ if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
+ continue;
+ if (cb->cb_flat_vdevs) {
+ vdev_stats_nvlist(zhp, cb, child[c], depth + 2, isspare,
+ vname, item);
+ }
+ vdev_stats_nvlist(zhp, cb, child[c], depth + 2, isspare,
+ vname, ch);
+ }
+
+ if (ch != NULL) {
+ if (!nvlist_empty(ch))
+ fnvlist_add_nvlist(vds, "vdevs", ch);
+ fnvlist_free(ch);
+ }
+ fnvlist_add_nvlist(item, vname, vds);
+ fnvlist_free(vds);
+ free(vname);
+}
+
+static void
+class_vdevs_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
+ const char *class, nvlist_t *item)
+{
+ uint_t c, children;
+ nvlist_t **child;
+ nvlist_t *class_obj = NULL;
+
+ if (!cb->cb_flat_vdevs)
+ class_obj = fnvlist_alloc();
+
+ assert(zhp != NULL || !cb->cb_verbose);
+
+ if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
+ &children) != 0)
+ return;
+
+ for (c = 0; c < children; c++) {
+ uint64_t is_log = B_FALSE;
+ const char *bias = NULL;
+ const char *type = NULL;
+ char *name = zpool_vdev_name(g_zfs, zhp, child[c],
+ cb->cb_name_flags | VDEV_NAME_TYPE_ID);
+
+ (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
+ &is_log);
+
+ if (is_log) {
+ bias = (char *)VDEV_ALLOC_CLASS_LOGS;
+ } else {
+ (void) nvlist_lookup_string(child[c],
+ ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
+ (void) nvlist_lookup_string(child[c],
+ ZPOOL_CONFIG_TYPE, &type);
+ }
+
+ if (bias == NULL || strcmp(bias, class) != 0)
+ continue;
+ if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
+ continue;
+
+ if (cb->cb_flat_vdevs) {
+ vdev_stats_nvlist(zhp, cb, child[c], 2, B_FALSE,
+ NULL, item);
+ } else {
+ vdev_stats_nvlist(zhp, cb, child[c], 2, B_FALSE,
+ NULL, class_obj);
+ }
+ free(name);
+ }
+ if (!cb->cb_flat_vdevs) {
+ if (!nvlist_empty(class_obj))
+ fnvlist_add_nvlist(item, class, class_obj);
+ fnvlist_free(class_obj);
+ }
+}
+
+static void
+l2cache_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
+ nvlist_t *item)
+{
+ nvlist_t *l2c = NULL, **l2cache;
+ uint_t nl2cache;
+ if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
+ &l2cache, &nl2cache) == 0) {
+ if (nl2cache == 0)
+ return;
+ if (!cb->cb_flat_vdevs)
+ l2c = fnvlist_alloc();
+ for (int i = 0; i < nl2cache; i++) {
+ if (cb->cb_flat_vdevs) {
+ vdev_stats_nvlist(zhp, cb, l2cache[i], 2,
+ B_FALSE, NULL, item);
+ } else {
+ vdev_stats_nvlist(zhp, cb, l2cache[i], 2,
+ B_FALSE, NULL, l2c);
+ }
+ }
+ }
+ if (!cb->cb_flat_vdevs) {
+ if (!nvlist_empty(l2c))
+ fnvlist_add_nvlist(item, "l2cache", l2c);
+ fnvlist_free(l2c);
+ }
+}
+
+static void
+spares_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
+ nvlist_t *item)
+{
+ nvlist_t *sp = NULL, **spares;
+ uint_t nspares;
+ if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
+ &spares, &nspares) == 0) {
+ if (nspares == 0)
+ return;
+ if (!cb->cb_flat_vdevs)
+ sp = fnvlist_alloc();
+ for (int i = 0; i < nspares; i++) {
+ if (cb->cb_flat_vdevs) {
+ vdev_stats_nvlist(zhp, cb, spares[i], 2, B_TRUE,
+ NULL, item);
+ } else {
+ vdev_stats_nvlist(zhp, cb, spares[i], 2, B_TRUE,
+ NULL, sp);
+ }
+ }
+ }
+ if (!cb->cb_flat_vdevs) {
+ if (!nvlist_empty(sp))
+ fnvlist_add_nvlist(item, "spares", sp);
+ fnvlist_free(sp);
+ }
+}
+
+static void
+errors_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item)
+{
+ uint64_t nerr;
+ nvlist_t *config = zpool_get_config(zhp, NULL);
+ if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
+ &nerr) == 0) {
+ nice_num_str_nvlist(item, ZPOOL_CONFIG_ERRCOUNT, nerr,
+ cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
+ if (nerr != 0 && cb->cb_verbose) {
+ nvlist_t *nverrlist = NULL;
+ if (zpool_get_errlog(zhp, &nverrlist) == 0) {
+ int i = 0;
+ int count = 0;
+ size_t len = MAXPATHLEN * 2;
+ nvpair_t *elem = NULL;
+
+ for (nvpair_t *pair =
+ nvlist_next_nvpair(nverrlist, NULL);
+ pair != NULL;
+ pair = nvlist_next_nvpair(nverrlist, pair))
+ count++;
+ char **errl = (char **)malloc(
+ count * sizeof (char *));
+
+ while ((elem = nvlist_next_nvpair(nverrlist,
+ elem)) != NULL) {
+ nvlist_t *nv;
+ uint64_t dsobj, obj;
+
+ verify(nvpair_value_nvlist(elem,
+ &nv) == 0);
+ verify(nvlist_lookup_uint64(nv,
+ ZPOOL_ERR_DATASET, &dsobj) == 0);
+ verify(nvlist_lookup_uint64(nv,
+ ZPOOL_ERR_OBJECT, &obj) == 0);
+ errl[i] = safe_malloc(len);
+ zpool_obj_to_path(zhp, dsobj, obj,
+ errl[i++], len);
+ }
+ nvlist_free(nverrlist);
+ fnvlist_add_string_array(item, "errlist",
+ (const char **)errl, count);
+ for (int i = 0; i < count; ++i)
+ free(errl[i]);
+ free(errl);
+ } else
+ fnvlist_add_string(item, "errlist",
+ strerror(errno));
+ }
+ }
+}
+
+static void
+ddt_stats_nvlist(ddt_stat_t *dds, status_cbdata_t *cb, nvlist_t *item)
+{
+ nice_num_str_nvlist(item, "blocks", dds->dds_blocks,
+ cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
+ nice_num_str_nvlist(item, "logical_size", dds->dds_lsize,
+ cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
+ nice_num_str_nvlist(item, "physical_size", dds->dds_psize,
+ cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
+ nice_num_str_nvlist(item, "deflated_size", dds->dds_dsize,
+ cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
+ nice_num_str_nvlist(item, "ref_blocks", dds->dds_ref_blocks,
+ cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
+ nice_num_str_nvlist(item, "ref_lsize", dds->dds_ref_lsize,
+ cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
+ nice_num_str_nvlist(item, "ref_psize", dds->dds_ref_psize,
+ cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
+ nice_num_str_nvlist(item, "ref_dsize", dds->dds_ref_dsize,
+ cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
+}
+
+static void
+dedup_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item)
+{
+ nvlist_t *config;
+ if (cb->cb_dedup_stats) {
+ ddt_histogram_t *ddh;
+ ddt_stat_t *dds;
+ ddt_object_t *ddo;
+ nvlist_t *ddt_stat, *ddt_obj, *dedup;
+ uint_t c;
+ uint64_t cspace_prop;
+
+ config = zpool_get_config(zhp, NULL);
+ if (nvlist_lookup_uint64_array(config,
+ ZPOOL_CONFIG_DDT_OBJ_STATS, (uint64_t **)&ddo, &c) != 0)
+ return;
+
+ dedup = fnvlist_alloc();
+ ddt_obj = fnvlist_alloc();
+ nice_num_str_nvlist(dedup, "obj_count", ddo->ddo_count,
+ cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
+ if (ddo->ddo_count == 0) {
+ fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_OBJ_STATS,
+ ddt_obj);
+ fnvlist_add_nvlist(item, "dedup_stats", dedup);
+ fnvlist_free(ddt_obj);
+ fnvlist_free(dedup);
+ return;
+ } else {
+ nice_num_str_nvlist(dedup, "dspace", ddo->ddo_dspace,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_1024);
+ nice_num_str_nvlist(dedup, "mspace", ddo->ddo_mspace,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_1024);
+ /*
+ * Squash cached size into in-core size to handle race.
+ * Only include cached size if it is available.
+ */
+ cspace_prop = zpool_get_prop_int(zhp,
+ ZPOOL_PROP_DEDUPCACHED, NULL);
+ cspace_prop = MIN(cspace_prop, ddo->ddo_mspace);
+ nice_num_str_nvlist(dedup, "cspace", cspace_prop,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_1024);
+ }
+
+ ddt_stat = fnvlist_alloc();
+ if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
+ (uint64_t **)&dds, &c) == 0) {
+ nvlist_t *total = fnvlist_alloc();
+ if (dds->dds_blocks == 0)
+ fnvlist_add_string(total, "blocks", "0");
+ else
+ ddt_stats_nvlist(dds, cb, total);
+ fnvlist_add_nvlist(ddt_stat, "total", total);
+ fnvlist_free(total);
+ }
+ if (nvlist_lookup_uint64_array(config,
+ ZPOOL_CONFIG_DDT_HISTOGRAM, (uint64_t **)&ddh, &c) == 0) {
+ nvlist_t *hist = fnvlist_alloc();
+ nvlist_t *entry = NULL;
+ char buf[16];
+ for (int h = 0; h < 64; h++) {
+ if (ddh->ddh_stat[h].dds_blocks != 0) {
+ entry = fnvlist_alloc();
+ ddt_stats_nvlist(&ddh->ddh_stat[h], cb,
+ entry);
+ snprintf(buf, 16, "%d", h);
+ fnvlist_add_nvlist(hist, buf, entry);
+ fnvlist_free(entry);
+ }
+ }
+ if (!nvlist_empty(hist))
+ fnvlist_add_nvlist(ddt_stat, "histogram", hist);
+ fnvlist_free(hist);
+ }
+
+ if (!nvlist_empty(ddt_obj)) {
+ fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_OBJ_STATS,
+ ddt_obj);
+ }
+ fnvlist_free(ddt_obj);
+ if (!nvlist_empty(ddt_stat)) {
+ fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_STATS,
+ ddt_stat);
+ }
+ fnvlist_free(ddt_stat);
+ if (!nvlist_empty(dedup))
+ fnvlist_add_nvlist(item, "dedup_stats", dedup);
+ fnvlist_free(dedup);
+ }
+}
+
+static void
+raidz_expand_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
+ nvlist_t *nvroot, nvlist_t *item)
+{
+ uint_t c;
+ pool_raidz_expand_stat_t *pres = NULL;
+ if (nvlist_lookup_uint64_array(nvroot,
+ ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c) == 0) {
+ nvlist_t **child;
+ uint_t children;
+ nvlist_t *nv = fnvlist_alloc();
+ verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
+ &child, &children) == 0);
+ assert(pres->pres_expanding_vdev < children);
+ char *name =
+ zpool_vdev_name(g_zfs, zhp,
+ child[pres->pres_expanding_vdev], 0);
+ fill_vdev_info(nv, zhp, name, B_FALSE, cb->cb_json_as_int);
+ fnvlist_add_string(nv, "state",
+ pool_scan_state_str[pres->pres_state]);
+ nice_num_str_nvlist(nv, "expanding_vdev",
+ pres->pres_expanding_vdev, B_TRUE, cb->cb_json_as_int,
+ ZFS_NICENUM_1024);
+ nice_num_str_nvlist(nv, "start_time", pres->pres_start_time,
+ cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
+ nice_num_str_nvlist(nv, "end_time", pres->pres_end_time,
+ cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
+ nice_num_str_nvlist(nv, "to_reflow", pres->pres_to_reflow,
+ cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
+ nice_num_str_nvlist(nv, "reflowed", pres->pres_reflowed,
+ cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
+ nice_num_str_nvlist(nv, "waiting_for_resilver",
+ pres->pres_waiting_for_resilver, B_TRUE,
+ cb->cb_json_as_int, ZFS_NICENUM_1024);
+ fnvlist_add_nvlist(item, ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, nv);
+ fnvlist_free(nv);
+ free(name);
+ }
+}
+
+static void
+checkpoint_status_nvlist(nvlist_t *nvroot, status_cbdata_t *cb,
+ nvlist_t *item)
+{
+ uint_t c;
+ pool_checkpoint_stat_t *pcs = NULL;
+ if (nvlist_lookup_uint64_array(nvroot,
+ ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c) == 0) {
+ nvlist_t *nv = fnvlist_alloc();
+ fnvlist_add_string(nv, "state",
+ checkpoint_state_str[pcs->pcs_state]);
+ nice_num_str_nvlist(nv, "start_time",
+ pcs->pcs_start_time, cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICE_TIMESTAMP);
+ nice_num_str_nvlist(nv, "space",
+ pcs->pcs_space, cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_BYTES);
+ fnvlist_add_nvlist(item, ZPOOL_CONFIG_CHECKPOINT_STATS, nv);
+ fnvlist_free(nv);
+ }
+}
+
+static void
+removal_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
+ nvlist_t *nvroot, nvlist_t *item)
+{
+ uint_t c;
+ pool_removal_stat_t *prs = NULL;
+ if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_REMOVAL_STATS,
+ (uint64_t **)&prs, &c) == 0) {
+ if (prs->prs_state != DSS_NONE) {
+ nvlist_t **child;
+ uint_t children;
+ verify(nvlist_lookup_nvlist_array(nvroot,
+ ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
+ assert(prs->prs_removing_vdev < children);
+ char *vdev_name = zpool_vdev_name(g_zfs, zhp,
+ child[prs->prs_removing_vdev], B_TRUE);
+ nvlist_t *nv = fnvlist_alloc();
+ fill_vdev_info(nv, zhp, vdev_name, B_FALSE,
+ cb->cb_json_as_int);
+ fnvlist_add_string(nv, "state",
+ pool_scan_state_str[prs->prs_state]);
+ nice_num_str_nvlist(nv, "removing_vdev",
+ prs->prs_removing_vdev, B_TRUE, cb->cb_json_as_int,
+ ZFS_NICENUM_1024);
+ nice_num_str_nvlist(nv, "start_time",
+ prs->prs_start_time, cb->cb_literal,
+ cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
+ nice_num_str_nvlist(nv, "end_time", prs->prs_end_time,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICE_TIMESTAMP);
+ nice_num_str_nvlist(nv, "to_copy", prs->prs_to_copy,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_BYTES);
+ nice_num_str_nvlist(nv, "copied", prs->prs_copied,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_BYTES);
+ nice_num_str_nvlist(nv, "mapping_memory",
+ prs->prs_mapping_memory, cb->cb_literal,
+ cb->cb_json_as_int, ZFS_NICENUM_BYTES);
+ fnvlist_add_nvlist(item,
+ ZPOOL_CONFIG_REMOVAL_STATS, nv);
+ fnvlist_free(nv);
+ free(vdev_name);
+ }
+ }
+}
+
+static void
+scan_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
+ nvlist_t *nvroot, nvlist_t *item)
+{
+ pool_scan_stat_t *ps = NULL;
+ uint_t c;
+ nvlist_t *scan = fnvlist_alloc();
+ nvlist_t **child;
+ uint_t children;
+
+ if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
+ (uint64_t **)&ps, &c) == 0) {
+ fnvlist_add_string(scan, "function",
+ pool_scan_func_str[ps->pss_func]);
+ fnvlist_add_string(scan, "state",
+ pool_scan_state_str[ps->pss_state]);
+ nice_num_str_nvlist(scan, "start_time", ps->pss_start_time,
+ cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
+ nice_num_str_nvlist(scan, "end_time", ps->pss_end_time,
+ cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
+ nice_num_str_nvlist(scan, "to_examine", ps->pss_to_examine,
+ cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
+ nice_num_str_nvlist(scan, "examined", ps->pss_examined,
+ cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
+ nice_num_str_nvlist(scan, "skipped", ps->pss_skipped,
+ cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
+ nice_num_str_nvlist(scan, "processed", ps->pss_processed,
+ cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
+ nice_num_str_nvlist(scan, "errors", ps->pss_errors,
+ cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
+ nice_num_str_nvlist(scan, "bytes_per_scan", ps->pss_pass_exam,
+ cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
+ nice_num_str_nvlist(scan, "pass_start", ps->pss_pass_start,
+ B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
+ nice_num_str_nvlist(scan, "scrub_pause",
+ ps->pss_pass_scrub_pause, cb->cb_literal,
+ cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
+ nice_num_str_nvlist(scan, "scrub_spent_paused",
+ ps->pss_pass_scrub_spent_paused,
+ B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
+ nice_num_str_nvlist(scan, "issued_bytes_per_scan",
+ ps->pss_pass_issued, cb->cb_literal,
+ cb->cb_json_as_int, ZFS_NICENUM_BYTES);
+ nice_num_str_nvlist(scan, "issued", ps->pss_issued,
+ cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
+ if (ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB &&
+ ps->pss_error_scrub_start > ps->pss_start_time) {
+ fnvlist_add_string(scan, "err_scrub_func",
+ pool_scan_func_str[ps->pss_error_scrub_func]);
+ fnvlist_add_string(scan, "err_scrub_state",
+ pool_scan_state_str[ps->pss_error_scrub_state]);
+ nice_num_str_nvlist(scan, "err_scrub_start_time",
+ ps->pss_error_scrub_start,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICE_TIMESTAMP);
+ nice_num_str_nvlist(scan, "err_scrub_end_time",
+ ps->pss_error_scrub_end,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICE_TIMESTAMP);
+ nice_num_str_nvlist(scan, "err_scrub_examined",
+ ps->pss_error_scrub_examined,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_1024);
+ nice_num_str_nvlist(scan, "err_scrub_to_examine",
+ ps->pss_error_scrub_to_be_examined,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_1024);
+ nice_num_str_nvlist(scan, "err_scrub_pause",
+ ps->pss_pass_error_scrub_pause,
+ B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
+ }
+ }
+
+ if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
+ &child, &children) == 0) {
+ vdev_rebuild_stat_t *vrs;
+ uint_t i;
+ char *name;
+ nvlist_t *nv;
+ nvlist_t *rebuild = fnvlist_alloc();
+ uint64_t st;
+ for (uint_t c = 0; c < children; c++) {
+ if (nvlist_lookup_uint64_array(child[c],
+ ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs,
+ &i) == 0) {
+ if (vrs->vrs_state != VDEV_REBUILD_NONE) {
+ nv = fnvlist_alloc();
+ name = zpool_vdev_name(g_zfs, zhp,
+ child[c], VDEV_NAME_TYPE_ID);
+ fill_vdev_info(nv, zhp, name, B_FALSE,
+ cb->cb_json_as_int);
+ st = vrs->vrs_state;
+ fnvlist_add_string(nv, "state",
+ vdev_rebuild_state_str[st]);
+ nice_num_str_nvlist(nv, "start_time",
+ vrs->vrs_start_time, cb->cb_literal,
+ cb->cb_json_as_int,
+ ZFS_NICE_TIMESTAMP);
+ nice_num_str_nvlist(nv, "end_time",
+ vrs->vrs_end_time, cb->cb_literal,
+ cb->cb_json_as_int,
+ ZFS_NICE_TIMESTAMP);
+ nice_num_str_nvlist(nv, "scan_time",
+ vrs->vrs_scan_time_ms * 1000000,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_TIME);
+ nice_num_str_nvlist(nv, "scanned",
+ vrs->vrs_bytes_scanned,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_BYTES);
+ nice_num_str_nvlist(nv, "issued",
+ vrs->vrs_bytes_issued,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_BYTES);
+ nice_num_str_nvlist(nv, "rebuilt",
+ vrs->vrs_bytes_rebuilt,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_BYTES);
+ nice_num_str_nvlist(nv, "to_scan",
+ vrs->vrs_bytes_est, cb->cb_literal,
+ cb->cb_json_as_int,
+ ZFS_NICENUM_BYTES);
+ nice_num_str_nvlist(nv, "errors",
+ vrs->vrs_errors, cb->cb_literal,
+ cb->cb_json_as_int,
+ ZFS_NICENUM_1024);
+ nice_num_str_nvlist(nv, "pass_time",
+ vrs->vrs_pass_time_ms * 1000000,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_TIME);
+ nice_num_str_nvlist(nv, "pass_scanned",
+ vrs->vrs_pass_bytes_scanned,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_BYTES);
+ nice_num_str_nvlist(nv, "pass_issued",
+ vrs->vrs_pass_bytes_issued,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_BYTES);
+ nice_num_str_nvlist(nv, "pass_skipped",
+ vrs->vrs_pass_bytes_skipped,
+ cb->cb_literal, cb->cb_json_as_int,
+ ZFS_NICENUM_BYTES);
+ fnvlist_add_nvlist(rebuild, name, nv);
+ free(name);
+ }
+ }
+ }
+ if (!nvlist_empty(rebuild))
+ fnvlist_add_nvlist(scan, "rebuild_stats", rebuild);
+ fnvlist_free(rebuild);
+ }
+
+ if (!nvlist_empty(scan))
+ fnvlist_add_nvlist(item, ZPOOL_CONFIG_SCAN_STATS, scan);
+ fnvlist_free(scan);
+}
+
/*
* Print the scan status.
*/
@@ -9244,285 +10257,189 @@ print_dedup_stats(zpool_handle_t *zhp, nvlist_t *config, boolean_t literal)
zpool_dump_ddt(dds, ddh);
}
-/*
- * Display a summary of pool status. Displays a summary such as:
- *
- * pool: tank
- * status: DEGRADED
- * reason: One or more devices ...
- * see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01
- * config:
- * mirror DEGRADED
- * c1t0d0 OK
- * c2t0d0 UNAVAIL
- *
- * When given the '-v' option, we print out the complete config. If the '-e'
- * option is specified, then we print out error rate information as well.
- */
-static int
-status_callback(zpool_handle_t *zhp, void *data)
-{
- status_cbdata_t *cbp = data;
- nvlist_t *config, *nvroot;
- const char *msgid;
- zpool_status_t reason;
- zpool_errata_t errata;
- const char *health;
- uint_t c;
- vdev_stat_t *vs;
-
- /* If dedup stats were requested, also fetch dedupcached. */
- if (cbp->cb_dedup_stats > 1)
- zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);
-
- config = zpool_get_config(zhp, NULL);
- reason = zpool_get_status(zhp, &msgid, &errata);
-
- cbp->cb_count++;
-
- /*
- * If we were given 'zpool status -x', only report those pools with
- * problems.
- */
- if (cbp->cb_explain &&
- (reason == ZPOOL_STATUS_OK ||
- reason == ZPOOL_STATUS_VERSION_OLDER ||
- reason == ZPOOL_STATUS_FEAT_DISABLED ||
- reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
- reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
- if (!cbp->cb_allpools) {
- (void) printf(gettext("pool '%s' is healthy\n"),
- zpool_get_name(zhp));
- if (cbp->cb_first)
- cbp->cb_first = B_FALSE;
- }
- return (0);
- }
-
- if (cbp->cb_first)
- cbp->cb_first = B_FALSE;
- else
- (void) printf("\n");
+#define ST_SIZE 4096
+#define AC_SIZE 2048
- nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
- verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
- (uint64_t **)&vs, &c) == 0);
-
- health = zpool_get_state_str(zhp);
-
- printf(" ");
- printf_color(ANSI_BOLD, gettext("pool:"));
- printf(" %s\n", zpool_get_name(zhp));
- fputc(' ', stdout);
- printf_color(ANSI_BOLD, gettext("state: "));
-
- printf_color(health_str_to_color(health), "%s", health);
-
- fputc('\n', stdout);
+static void
+print_status_reason(zpool_handle_t *zhp, status_cbdata_t *cbp,
+ zpool_status_t reason, zpool_errata_t errata, nvlist_t *item)
+{
+ char status[ST_SIZE];
+ char action[AC_SIZE];
+ memset(status, 0, ST_SIZE);
+ memset(action, 0, AC_SIZE);
switch (reason) {
case ZPOOL_STATUS_MISSING_DEV_R:
- printf_color(ANSI_BOLD, gettext("status: "));
- printf_color(ANSI_YELLOW, gettext("One or more devices could "
+ snprintf(status, ST_SIZE, gettext("One or more devices could "
"not be opened. Sufficient replicas exist for\n\tthe pool "
"to continue functioning in a degraded state.\n"));
- printf_color(ANSI_BOLD, gettext("action: "));
- printf_color(ANSI_YELLOW, gettext("Attach the missing device "
+ snprintf(action, AC_SIZE, gettext("Attach the missing device "
"and online it using 'zpool online'.\n"));
break;
case ZPOOL_STATUS_MISSING_DEV_NR:
- printf_color(ANSI_BOLD, gettext("status: "));
- printf_color(ANSI_YELLOW, gettext("One or more devices could "
+ snprintf(status, ST_SIZE, gettext("One or more devices could "
"not be opened. There are insufficient\n\treplicas for the"
" pool to continue functioning.\n"));
- printf_color(ANSI_BOLD, gettext("action: "));
- printf_color(ANSI_YELLOW, gettext("Attach the missing device "
+ snprintf(action, AC_SIZE, gettext("Attach the missing device "
"and online it using 'zpool online'.\n"));
break;
case ZPOOL_STATUS_CORRUPT_LABEL_R:
- printf_color(ANSI_BOLD, gettext("status: "));
- printf_color(ANSI_YELLOW, gettext("One or more devices could "
+ snprintf(status, ST_SIZE, gettext("One or more devices could "
"not be used because the label is missing or\n\tinvalid. "
"Sufficient replicas exist for the pool to continue\n\t"
"functioning in a degraded state.\n"));
- printf_color(ANSI_BOLD, gettext("action: "));
- printf_color(ANSI_YELLOW, gettext("Replace the device using "
+ snprintf(action, AC_SIZE, gettext("Replace the device using "
"'zpool replace'.\n"));
break;
case ZPOOL_STATUS_CORRUPT_LABEL_NR:
- printf_color(ANSI_BOLD, gettext("status: "));
- printf_color(ANSI_YELLOW, gettext("One or more devices could "
+ snprintf(status, ST_SIZE, gettext("One or more devices could "
"not be used because the label is missing \n\tor invalid. "
"There are insufficient replicas for the pool to "
"continue\n\tfunctioning.\n"));
zpool_explain_recover(zpool_get_handle(zhp),
- zpool_get_name(zhp), reason, config);
+ zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL),
+ action, AC_SIZE);
break;
case ZPOOL_STATUS_FAILING_DEV:
- printf_color(ANSI_BOLD, gettext("status: "));
- printf_color(ANSI_YELLOW, gettext("One or more devices has "
+ snprintf(status, ST_SIZE, gettext("One or more devices has "
"experienced an unrecoverable error. An\n\tattempt was "
"made to correct the error. Applications are "
"unaffected.\n"));
- printf_color(ANSI_BOLD, gettext("action: "));
- printf_color(ANSI_YELLOW, gettext("Determine if the "
+ snprintf(action, AC_SIZE, gettext("Determine if the "
"device needs to be replaced, and clear the errors\n\tusing"
" 'zpool clear' or replace the device with 'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_OFFLINE_DEV:
- printf_color(ANSI_BOLD, gettext("status: "));
- printf_color(ANSI_YELLOW, gettext("One or more devices has "
+ snprintf(status, ST_SIZE, gettext("One or more devices has "
"been taken offline by the administrator.\n\tSufficient "
"replicas exist for the pool to continue functioning in "
"a\n\tdegraded state.\n"));
- printf_color(ANSI_BOLD, gettext("action: "));
- printf_color(ANSI_YELLOW, gettext("Online the device "
+ snprintf(action, AC_SIZE, gettext("Online the device "
"using 'zpool online' or replace the device with\n\t'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_REMOVED_DEV:
- printf_color(ANSI_BOLD, gettext("status: "));
- printf_color(ANSI_YELLOW, gettext("One or more devices has "
+ snprintf(status, ST_SIZE, gettext("One or more devices has "
"been removed by the administrator.\n\tSufficient "
"replicas exist for the pool to continue functioning in "
"a\n\tdegraded state.\n"));
- printf_color(ANSI_BOLD, gettext("action: "));
- printf_color(ANSI_YELLOW, gettext("Online the device "
+ snprintf(action, AC_SIZE, gettext("Online the device "
"using zpool online' or replace the device with\n\t'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_RESILVERING:
case ZPOOL_STATUS_REBUILDING:
- printf_color(ANSI_BOLD, gettext("status: "));
- printf_color(ANSI_YELLOW, gettext("One or more devices is "
+ snprintf(status, ST_SIZE, gettext("One or more devices is "
"currently being resilvered. The pool will\n\tcontinue "
"to function, possibly in a degraded state.\n"));
- printf_color(ANSI_BOLD, gettext("action: "));
- printf_color(ANSI_YELLOW, gettext("Wait for the resilver to "
+ snprintf(action, AC_SIZE, gettext("Wait for the resilver to "
"complete.\n"));
break;
case ZPOOL_STATUS_REBUILD_SCRUB:
- printf_color(ANSI_BOLD, gettext("status: "));
- printf_color(ANSI_YELLOW, gettext("One or more devices have "
+ snprintf(status, ST_SIZE, gettext("One or more devices have "
"been sequentially resilvered, scrubbing\n\tthe pool "
"is recommended.\n"));
- printf_color(ANSI_BOLD, gettext("action: "));
- printf_color(ANSI_YELLOW, gettext("Use 'zpool scrub' to "
+ snprintf(action, AC_SIZE, gettext("Use 'zpool scrub' to "
"verify all data checksums.\n"));
break;
case ZPOOL_STATUS_CORRUPT_DATA:
- printf_color(ANSI_BOLD, gettext("status: "));
- printf_color(ANSI_YELLOW, gettext("One or more devices has "
+ snprintf(status, ST_SIZE, gettext("One or more devices has "
"experienced an error resulting in data\n\tcorruption. "
"Applications may be affected.\n"));
- printf_color(ANSI_BOLD, gettext("action: "));
- printf_color(ANSI_YELLOW, gettext("Restore the file in question"
+ snprintf(action, AC_SIZE, gettext("Restore the file in question"
" if possible. Otherwise restore the\n\tentire pool from "
"backup.\n"));
break;
case ZPOOL_STATUS_CORRUPT_POOL:
- printf_color(ANSI_BOLD, gettext("status: "));
- printf_color(ANSI_YELLOW, gettext("The pool metadata is "
+ snprintf(status, ST_SIZE, gettext("The pool metadata is "
"corrupted and the pool cannot be opened.\n"));
zpool_explain_recover(zpool_get_handle(zhp),
- zpool_get_name(zhp), reason, config);
+ zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL),
+ action, AC_SIZE);
break;
case ZPOOL_STATUS_VERSION_OLDER:
- printf_color(ANSI_BOLD, gettext("status: "));
- printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
+ snprintf(status, ST_SIZE, gettext("The pool is formatted using "
"a legacy on-disk format. The pool can\n\tstill be used, "
"but some features are unavailable.\n"));
- printf_color(ANSI_BOLD, gettext("action: "));
- printf_color(ANSI_YELLOW, gettext("Upgrade the pool using "
+ snprintf(action, AC_SIZE, gettext("Upgrade the pool using "
"'zpool upgrade'. Once this is done, the\n\tpool will no "
"longer be accessible on software that does not support\n\t"
"feature flags.\n"));
break;
case ZPOOL_STATUS_VERSION_NEWER:
- printf_color(ANSI_BOLD, gettext("status: "));
- printf_color(ANSI_YELLOW, gettext("The pool has been upgraded "
+ snprintf(status, ST_SIZE, gettext("The pool has been upgraded "
"to a newer, incompatible on-disk version.\n\tThe pool "
"cannot be accessed on this system.\n"));
- printf_color(ANSI_BOLD, gettext("action: "));
- printf_color(ANSI_YELLOW, gettext("Access the pool from a "
+ snprintf(action, AC_SIZE, gettext("Access the pool from a "
"system running more recent software, or\n\trestore the "
"pool from backup.\n"));
break;
case ZPOOL_STATUS_FEAT_DISABLED:
- printf_color(ANSI_BOLD, gettext("status: "));
- printf_color(ANSI_YELLOW, gettext("Some supported and "
+ snprintf(status, ST_SIZE, gettext("Some supported and "
"requested features are not enabled on the pool.\n\t"
"The pool can still be used, but some features are "
"unavailable.\n"));
- printf_color(ANSI_BOLD, gettext("action: "));
- printf_color(ANSI_YELLOW, gettext("Enable all features using "
+ snprintf(action, AC_SIZE, gettext("Enable all features using "
"'zpool upgrade'. Once this is done,\n\tthe pool may no "
"longer be accessible by software that does not support\n\t"
"the features. See zpool-features(7) for details.\n"));
break;
case ZPOOL_STATUS_COMPATIBILITY_ERR:
- printf_color(ANSI_BOLD, gettext("status: "));
- printf_color(ANSI_YELLOW, gettext("This pool has a "
+ snprintf(status, ST_SIZE, gettext("This pool has a "
"compatibility list specified, but it could not be\n\t"
"read/parsed at this time. The pool can still be used, "
"but this\n\tshould be investigated.\n"));
- printf_color(ANSI_BOLD, gettext("action: "));
- printf_color(ANSI_YELLOW, gettext("Check the value of the "
+ snprintf(action, AC_SIZE, gettext("Check the value of the "
"'compatibility' property against the\n\t"
"appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or "
ZPOOL_DATA_COMPAT_D ".\n"));
break;
case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
- printf_color(ANSI_BOLD, gettext("status: "));
- printf_color(ANSI_YELLOW, gettext("One or more features "
+ snprintf(status, ST_SIZE, gettext("One or more features "
"are enabled on the pool despite not being\n\t"
"requested by the 'compatibility' property.\n"));
- printf_color(ANSI_BOLD, gettext("action: "));
- printf_color(ANSI_YELLOW, gettext("Consider setting "
+ snprintf(action, AC_SIZE, gettext("Consider setting "
"'compatibility' to an appropriate value, or\n\t"
"adding needed features to the relevant file in\n\t"
ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_READ:
- printf_color(ANSI_BOLD, gettext("status: "));
- printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed "
+ snprintf(status, ST_SIZE, gettext("The pool cannot be accessed "
"on this system because it uses the\n\tfollowing feature(s)"
" not supported on this system:\n"));
- zpool_print_unsup_feat(config);
- (void) printf("\n");
- printf_color(ANSI_BOLD, gettext("action: "));
- printf_color(ANSI_YELLOW, gettext("Access the pool from a "
+ zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status,
+ 1024);
+ snprintf(action, AC_SIZE, gettext("Access the pool from a "
"system that supports the required feature(s),\n\tor "
"restore the pool from backup.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
- printf_color(ANSI_BOLD, gettext("status: "));
- printf_color(ANSI_YELLOW, gettext("The pool can only be "
+ snprintf(status, ST_SIZE, gettext("The pool can only be "
"accessed in read-only mode on this system. It\n\tcannot be"
" accessed in read-write mode because it uses the "
"following\n\tfeature(s) not supported on this system:\n"));
- zpool_print_unsup_feat(config);
- (void) printf("\n");
- printf_color(ANSI_BOLD, gettext("action: "));
- printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed "
+ zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status,
+ 1024);
+ snprintf(action, AC_SIZE, gettext("The pool cannot be accessed "
"in read-write mode. Import the pool with\n"
"\t\"-o readonly=on\", access the pool from a system that "
"supports the\n\trequired feature(s), or restore the "
@@ -9530,106 +10447,90 @@ status_callback(zpool_handle_t *zhp, void *data)
break;
case ZPOOL_STATUS_FAULTED_DEV_R:
- printf_color(ANSI_BOLD, gettext("status: "));
- printf_color(ANSI_YELLOW, gettext("One or more devices are "
+ snprintf(status, ST_SIZE, gettext("One or more devices are "
"faulted in response to persistent errors.\n\tSufficient "
"replicas exist for the pool to continue functioning "
"in a\n\tdegraded state.\n"));
- printf_color(ANSI_BOLD, gettext("action: "));
- printf_color(ANSI_YELLOW, gettext("Replace the faulted device, "
+ snprintf(action, AC_SIZE, gettext("Replace the faulted device, "
"or use 'zpool clear' to mark the device\n\trepaired.\n"));
break;
case ZPOOL_STATUS_FAULTED_DEV_NR:
- printf_color(ANSI_BOLD, gettext("status: "));
- printf_color(ANSI_YELLOW, gettext("One or more devices are "
+ snprintf(status, ST_SIZE, gettext("One or more devices are "
"faulted in response to persistent errors. There are "
"insufficient replicas for the pool to\n\tcontinue "
"functioning.\n"));
- printf_color(ANSI_BOLD, gettext("action: "));
- printf_color(ANSI_YELLOW, gettext("Destroy and re-create the "
+ snprintf(action, AC_SIZE, gettext("Destroy and re-create the "
"pool from a backup source. Manually marking the device\n"
"\trepaired using 'zpool clear' may allow some data "
"to be recovered.\n"));
break;
case ZPOOL_STATUS_IO_FAILURE_MMP:
- printf_color(ANSI_BOLD, gettext("status: "));
- printf_color(ANSI_YELLOW, gettext("The pool is suspended "
+ snprintf(status, ST_SIZE, gettext("The pool is suspended "
"because multihost writes failed or were delayed;\n\t"
"another system could import the pool undetected.\n"));
- printf_color(ANSI_BOLD, gettext("action: "));
- printf_color(ANSI_YELLOW, gettext("Make sure the pool's devices"
+ snprintf(action, AC_SIZE, gettext("Make sure the pool's devices"
" are connected, then reboot your system and\n\timport the "
"pool or run 'zpool clear' to resume the pool.\n"));
break;
case ZPOOL_STATUS_IO_FAILURE_WAIT:
case ZPOOL_STATUS_IO_FAILURE_CONTINUE:
- printf_color(ANSI_BOLD, gettext("status: "));
- printf_color(ANSI_YELLOW, gettext("One or more devices are "
+ snprintf(status, ST_SIZE, gettext("One or more devices are "
"faulted in response to IO failures.\n"));
- printf_color(ANSI_BOLD, gettext("action: "));
- printf_color(ANSI_YELLOW, gettext("Make sure the affected "
+ snprintf(action, AC_SIZE, gettext("Make sure the affected "
"devices are connected, then run 'zpool clear'.\n"));
break;
case ZPOOL_STATUS_BAD_LOG:
- printf_color(ANSI_BOLD, gettext("status: "));
- printf_color(ANSI_YELLOW, gettext("An intent log record "
+ snprintf(status, ST_SIZE, gettext("An intent log record "
"could not be read.\n"
"\tWaiting for administrator intervention to fix the "
"faulted pool.\n"));
- printf_color(ANSI_BOLD, gettext("action: "));
- printf_color(ANSI_YELLOW, gettext("Either restore the affected "
+ snprintf(action, AC_SIZE, gettext("Either restore the affected "
"device(s) and run 'zpool online',\n"
"\tor ignore the intent log records by running "
"'zpool clear'.\n"));
break;
case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
- (void) printf(gettext("status: One or more devices are "
+ snprintf(status, ST_SIZE, gettext("One or more devices are "
"configured to use a non-native block size.\n"
"\tExpect reduced performance.\n"));
- (void) printf(gettext("action: Replace affected devices with "
- "devices that support the\n\tconfigured block size, or "
- "migrate data to a properly configured\n\tpool.\n"));
+ snprintf(action, AC_SIZE, gettext("Replace affected devices "
+ "with devices that support the\n\tconfigured block size, "
+ "or migrate data to a properly configured\n\tpool.\n"));
break;
case ZPOOL_STATUS_HOSTID_MISMATCH:
- printf_color(ANSI_BOLD, gettext("status: "));
- printf_color(ANSI_YELLOW, gettext("Mismatch between pool hostid"
+ snprintf(status, ST_SIZE, gettext("Mismatch between pool hostid"
" and system hostid on imported pool.\n\tThis pool was "
"previously imported into a system with a different "
"hostid,\n\tand then was verbatim imported into this "
"system.\n"));
- printf_color(ANSI_BOLD, gettext("action: "));
- printf_color(ANSI_YELLOW, gettext("Export this pool on all "
+ snprintf(action, AC_SIZE, gettext("Export this pool on all "
"systems on which it is imported.\n"
"\tThen import it to correct the mismatch.\n"));
break;
case ZPOOL_STATUS_ERRATA:
- printf_color(ANSI_BOLD, gettext("status: "));
- printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
+ snprintf(status, ST_SIZE, gettext("Errata #%d detected.\n"),
errata);
-
switch (errata) {
case ZPOOL_ERRATA_NONE:
break;
case ZPOOL_ERRATA_ZOL_2094_SCRUB:
- printf_color(ANSI_BOLD, gettext("action: "));
- printf_color(ANSI_YELLOW, gettext("To correct the issue"
+ snprintf(action, AC_SIZE, gettext("To correct the issue"
" run 'zpool scrub'.\n"));
break;
case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
- (void) printf(gettext("\tExisting encrypted datasets "
- "contain an on-disk incompatibility\n\twhich "
- "needs to be corrected.\n"));
- printf_color(ANSI_BOLD, gettext("action: "));
- printf_color(ANSI_YELLOW, gettext("To correct the issue"
+ (void) strlcat(status, gettext("\tExisting encrypted "
+ "datasets contain an on-disk incompatibility\n\t "
+ "which needs to be corrected.\n"), ST_SIZE);
+ snprintf(action, AC_SIZE, gettext("To correct the issue"
" backup existing encrypted datasets to new\n\t"
"encrypted datasets and destroy the old ones. "
"'zfs mount -o ro' can\n\tbe used to temporarily "
@@ -9637,12 +10538,12 @@ status_callback(zpool_handle_t *zhp, void *data)
break;
case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
- (void) printf(gettext("\tExisting encrypted snapshots "
- "and bookmarks contain an on-disk\n\tincompat"
- "ibility. This may cause on-disk corruption if "
- "they are used\n\twith 'zfs recv'.\n"));
- printf_color(ANSI_BOLD, gettext("action: "));
- printf_color(ANSI_YELLOW, gettext("To correct the"
+ (void) strlcat(status, gettext("\tExisting encrypted "
+ "snapshots and bookmarks contain an on-disk\n\t"
+ "incompatibility. This may cause on-disk "
+ "corruption if they are used\n\twith "
+ "'zfs recv'.\n"), ST_SIZE);
+ snprintf(action, AC_SIZE, gettext("To correct the"
"issue, enable the bookmark_v2 feature. No "
"additional\n\taction is needed if there are no "
"encrypted snapshots or bookmarks.\n\tIf preserving"
@@ -9668,6 +10569,210 @@ status_callback(zpool_handle_t *zhp, void *data)
assert(reason == ZPOOL_STATUS_OK);
}
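+ /*
+ * The switch above only fills the local status/action buffers. Route
+ * them into the JSON nvlist when '-j' was given, otherwise print them
+ * with the usual colored "status:" / "action:" prefixes.
+ */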
+ if (status[0] != 0) {
+ if (cbp->cb_json)
+ fnvlist_add_string(item, "status", status);
+ else {
+ printf_color(ANSI_BOLD, gettext("status: "));
+ printf_color(ANSI_YELLOW, status);
+ }
+ }
+
+ if (action[0] != 0) {
+ if (cbp->cb_json)
+ fnvlist_add_string(item, "action", action);
+ else {
+ printf_color(ANSI_BOLD, gettext("action: "));
+ printf_color(ANSI_YELLOW, action);
+ }
+ }
+}
+
+static int
+status_callback_json(zpool_handle_t *zhp, void *data)
+{
+ status_cbdata_t *cbp = data;
+ nvlist_t *config, *nvroot;
+ const char *msgid;
+ char pool_guid[256];
+ char msgbuf[256];
+ uint64_t guid;
+ zpool_status_t reason;
+ zpool_errata_t errata;
+ uint_t c;
+ vdev_stat_t *vs;
+ nvlist_t *item, *d, *load_info, *vds;
+ item = d = NULL;
+
+ /* If dedup stats were requested, also fetch dedupcached. */
+ if (cbp->cb_dedup_stats > 1)
+ zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);
+ reason = zpool_get_status(zhp, &msgid, &errata);
+ /*
+ * If we were given 'zpool status -x', only report those pools with
+ * problems.
+ */
+ if (cbp->cb_explain &&
+ (reason == ZPOOL_STATUS_OK ||
+ reason == ZPOOL_STATUS_VERSION_OLDER ||
+ reason == ZPOOL_STATUS_FEAT_DISABLED ||
+ reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
+ reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
+ return (0);
+ }
+
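+ /*
+ * 'd' is the shared "pools" object of the top-level JSON document;
+ * 'item' accumulates this pool's report and 'vds' its vdev tree.
+ */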
+ d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "pools");
+ item = fnvlist_alloc();
+ vds = fnvlist_alloc();
+ fill_pool_info(item, zhp, B_FALSE, cbp->cb_json_as_int);
+ config = zpool_get_config(zhp, NULL);
+
+ if (config != NULL) {
+ nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
+ verify(nvlist_lookup_uint64_array(nvroot,
+ ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &c) == 0);
+ if (cbp->cb_json_pool_key_guid) {
+ guid = fnvlist_lookup_uint64(config,
+ ZPOOL_CONFIG_POOL_GUID);
+ snprintf(pool_guid, 256, "%llu", (u_longlong_t)guid);
+ }
+ cbp->cb_count++;
+
+ print_status_reason(zhp, cbp, reason, errata, item);
+ if (msgid != NULL) {
+ snprintf(msgbuf, 256,
+ "https://openzfs.github.io/openzfs-docs/msg/%s",
+ msgid);
+ fnvlist_add_string(item, "msgid", msgid);
+ fnvlist_add_string(item, "moreinfo", msgbuf);
+ }
+
+ if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
+ &load_info) == 0) {
+ fnvlist_add_nvlist(item, ZPOOL_CONFIG_LOAD_INFO,
+ load_info);
+ }
+
+ scan_status_nvlist(zhp, cbp, nvroot, item);
+ removal_status_nvlist(zhp, cbp, nvroot, item);
+ checkpoint_status_nvlist(nvroot, cbp, item);
+ raidz_expand_status_nvlist(zhp, cbp, nvroot, item);
+ vdev_stats_nvlist(zhp, cbp, nvroot, 0, B_FALSE, NULL, vds);
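+ /*
+ * With --json-flat-vdevs the special/dedup/log classes, cache
+ * devices and spares are all folded into the single "vdevs"
+ * object; otherwise each class is attached to the pool item
+ * under its own key.
+ */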
+ if (cbp->cb_flat_vdevs) {
+ class_vdevs_nvlist(zhp, cbp, nvroot,
+ VDEV_ALLOC_BIAS_DEDUP, vds);
+ class_vdevs_nvlist(zhp, cbp, nvroot,
+ VDEV_ALLOC_BIAS_SPECIAL, vds);
+ class_vdevs_nvlist(zhp, cbp, nvroot,
+ VDEV_ALLOC_CLASS_LOGS, vds);
+ l2cache_nvlist(zhp, cbp, nvroot, vds);
+ spares_nvlist(zhp, cbp, nvroot, vds);
+
+ fnvlist_add_nvlist(item, "vdevs", vds);
+ fnvlist_free(vds);
+ } else {
+ fnvlist_add_nvlist(item, "vdevs", vds);
+ fnvlist_free(vds);
+
+ class_vdevs_nvlist(zhp, cbp, nvroot,
+ VDEV_ALLOC_BIAS_DEDUP, item);
+ class_vdevs_nvlist(zhp, cbp, nvroot,
+ VDEV_ALLOC_BIAS_SPECIAL, item);
+ class_vdevs_nvlist(zhp, cbp, nvroot,
+ VDEV_ALLOC_CLASS_LOGS, item);
+ l2cache_nvlist(zhp, cbp, nvroot, item);
+ spares_nvlist(zhp, cbp, nvroot, item);
+ }
+ dedup_stats_nvlist(zhp, cbp, item);
+ errors_nvlist(zhp, cbp, item);
+ }
+ if (cbp->cb_json_pool_key_guid) {
+ fnvlist_add_nvlist(d, pool_guid, item);
+ } else {
+ fnvlist_add_nvlist(d, zpool_get_name(zhp),
+ item);
+ }
+ fnvlist_free(item);
+ return (0);
+}
+
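+/*
+ * Abridged sketch of the JSON emitted above for one pool (the pool name
+ * "tank" is illustrative; fields added by fill_pool_info() and the various
+ * *_nvlist() helpers are omitted). With --json-pool-key-guid the pool is
+ * keyed by its GUID rather than its name:
+ *
+ *	{"pools": {"tank": {"status": "...", "action": "...", "msgid": "...",
+ *	    "moreinfo": "https://openzfs.github.io/openzfs-docs/msg/...",
+ *	    "vdevs": {...}}}}
+ */
+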
+/*
+ * Display a summary of pool status, such as:
+ *
+ * pool: tank
+ * state: DEGRADED
+ * status: One or more devices ...
+ * see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01
+ * config:
+ * mirror DEGRADED
+ * c1t0d0 OK
+ * c2t0d0 UNAVAIL
+ *
+ * When given the '-v' option, we print out the complete config. If the '-e'
+ * option is specified, then we print out error rate information as well.
+ */
+static int
+status_callback(zpool_handle_t *zhp, void *data)
+{
+ status_cbdata_t *cbp = data;
+ nvlist_t *config, *nvroot;
+ const char *msgid;
+ zpool_status_t reason;
+ zpool_errata_t errata;
+ const char *health;
+ uint_t c;
+ vdev_stat_t *vs;
+
+ /* If dedup stats were requested, also fetch dedupcached. */
+ if (cbp->cb_dedup_stats > 1)
+ zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);
+
+ config = zpool_get_config(zhp, NULL);
+ reason = zpool_get_status(zhp, &msgid, &errata);
+
+ cbp->cb_count++;
+
+ /*
+ * If we were given 'zpool status -x', only report those pools with
+ * problems.
+ */
+ if (cbp->cb_explain &&
+ (reason == ZPOOL_STATUS_OK ||
+ reason == ZPOOL_STATUS_VERSION_OLDER ||
+ reason == ZPOOL_STATUS_FEAT_DISABLED ||
+ reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
+ reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
+ if (!cbp->cb_allpools) {
+ (void) printf(gettext("pool '%s' is healthy\n"),
+ zpool_get_name(zhp));
+ if (cbp->cb_first)
+ cbp->cb_first = B_FALSE;
+ }
+ return (0);
+ }
+
+ if (cbp->cb_first)
+ cbp->cb_first = B_FALSE;
+ else
+ (void) printf("\n");
+
+ nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
+ verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
+ (uint64_t **)&vs, &c) == 0);
+
+ health = zpool_get_state_str(zhp);
+
+ printf(" ");
+ printf_color(ANSI_BOLD, gettext("pool:"));
+ printf(" %s\n", zpool_get_name(zhp));
+ fputc(' ', stdout);
+ printf_color(ANSI_BOLD, gettext("state: "));
+
+ printf_color(health_str_to_color(health), "%s", health);
+
+ fputc('\n', stdout);
+ print_status_reason(zhp, cbp, reason, errata, NULL);
+
if (msgid != NULL) {
printf(" ");
printf_color(ANSI_BOLD, gettext("see:"));
@@ -9782,7 +10887,11 @@ status_callback(zpool_handle_t *zhp, void *data)
* -T Display a timestamp in date(1) or Unix format
* -v Display complete error logs
* -x Display only pools with potential problems
+ * -j Display output in JSON format
* --power Display vdev enclosure slot power status
+ * --json-int Display numbers in integer format instead of strings
+ * --json-flat-vdevs Display vdevs in a flat hierarchy
+ * --json-pool-key-guid Use pool GUID as key for pool objects
*
* Describes the health status of all pools or some subset.
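+ *
+ * Example invocation combining the JSON options (the pool name 'tank'
+ * is illustrative):
+ *
+ *   zpool status -j --json-int --json-flat-vdevs tank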
*/
@@ -9794,15 +10903,21 @@ zpool_do_status(int argc, char **argv)
float interval = 0;
unsigned long count = 0;
status_cbdata_t cb = { 0 };
+ nvlist_t *data;
char *cmd = NULL;
struct option long_options[] = {
{"power", no_argument, NULL, ZPOOL_OPTION_POWER},
+ {"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
+ {"json-flat-vdevs", no_argument, NULL,
+ ZPOOL_OPTION_JSON_FLAT_VDEVS},
+ {"json-pool-key-guid", no_argument, NULL,
+ ZPOOL_OPTION_POOL_KEY_GUID},
{0, 0, 0, 0}
};
/* check options */
- while ((c = getopt_long(argc, argv, "c:DegiLpPstT:vx", long_options,
+ while ((c = getopt_long(argc, argv, "c:jDegiLpPstT:vx", long_options,
NULL)) != -1) {
switch (c) {
case 'c':
@@ -9863,12 +10978,25 @@ zpool_do_status(int argc, char **argv)
case 'v':
cb.cb_verbose = B_TRUE;
break;
+ case 'j':
+ cb.cb_json = B_TRUE;
+ break;
case 'x':
cb.cb_explain = B_TRUE;
break;
case ZPOOL_OPTION_POWER:
cb.cb_print_power = B_TRUE;
break;
+ case ZPOOL_OPTION_JSON_FLAT_VDEVS:
+ cb.cb_flat_vdevs = B_TRUE;
+ break;
+ case ZPOOL_OPTION_JSON_NUMS_AS_INT:
+ cb.cb_json_as_int = B_TRUE;
+ cb.cb_literal = B_TRUE;
+ break;
+ case ZPOOL_OPTION_POOL_KEY_GUID:
+ cb.cb_json_pool_key_guid = B_TRUE;
+ break;
case '?':
if (optopt == 'c') {
print_zpool_script_list("status");
@@ -9892,23 +11020,79 @@ zpool_do_status(int argc, char **argv)
cb.cb_first = B_TRUE;
cb.cb_print_status = B_TRUE;
+ if (cb.cb_flat_vdevs && !cb.cb_json) {
+ (void) fprintf(stderr, gettext("'--json-flat-vdevs' only "
+ "works with the '-j' option\n"));
+ usage(B_FALSE);
+ }
+
+ if (cb.cb_json_as_int && !cb.cb_json) {
+ (void) fprintf(stderr, gettext("'--json-int' only works "
+ "with the '-j' option\n"));
+ usage(B_FALSE);
+ }
+
+ if (!cb.cb_json && cb.cb_json_pool_key_guid) {
+ (void) fprintf(stderr, gettext("'--json-pool-key-guid' only "
+ "works with the '-j' option\n"));
+ usage(B_FALSE);
+ }
+
for (;;) {
- if (timestamp_fmt != NODATE)
- print_timestamp(timestamp_fmt);
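+ /*
+ * Build a fresh JSON document on every pass of the interval
+ * loop so repeated reports do not accumulate entries from
+ * earlier passes.
+ */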
+ if (cb.cb_json) {
+ cb.cb_jsobj = zpool_json_schema(0, 1);
+ data = fnvlist_alloc();
+ fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);
+ fnvlist_free(data);
+ }
+
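+ /*
+ * With -T the timestamp becomes part of the JSON document: a
+ * Unix epoch value under --json-int, otherwise the formatted
+ * date string.
+ */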
+ if (timestamp_fmt != NODATE) {
+ if (cb.cb_json) {
+ if (cb.cb_json_as_int) {
+ fnvlist_add_uint64(cb.cb_jsobj, "time",
+ time(NULL));
+ } else {
+ char ts[128];
+ get_timestamp(timestamp_fmt, ts, 128);
+ fnvlist_add_string(cb.cb_jsobj, "time",
+ ts);
+ }
+ } else
+ print_timestamp(timestamp_fmt);
+ }
if (cmd != NULL)
cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd,
NULL, NULL, 0, 0);
- ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
- cb.cb_literal, status_callback, &cb);
+ if (cb.cb_json) {
+ ret = for_each_pool(argc, argv, B_TRUE, NULL,
+ ZFS_TYPE_POOL, cb.cb_literal,
+ status_callback_json, &cb);
+ } else {
+ ret = for_each_pool(argc, argv, B_TRUE, NULL,
+ ZFS_TYPE_POOL, cb.cb_literal,
+ status_callback, &cb);
+ }
if (cb.vcdl != NULL)
free_vdev_cmd_data_list(cb.vcdl);
- if (argc == 0 && cb.cb_count == 0)
- (void) fprintf(stderr, gettext("no pools available\n"));
- else if (cb.cb_explain && cb.cb_first && cb.cb_allpools)
- (void) printf(gettext("all pools are healthy\n"));
+
+ if (cb.cb_json) {
+ if (ret == 0)
+ zcmd_print_json(cb.cb_jsobj);
+ else
+ nvlist_free(cb.cb_jsobj);
+ } else {
+ if (argc == 0 && cb.cb_count == 0) {
+ (void) fprintf(stderr, "%s",
+ gettext("no pools available\n"));
+ } else if (cb.cb_explain && cb.cb_first &&
+ cb.cb_allpools) {
+ (void) printf("%s",
+ gettext("all pools are healthy\n"));
+ }
+ }
if (ret != 0)
return (ret);