author     Brian Behlendorf <[email protected]>    2020-07-03 11:05:50 -0700
committer  GitHub <[email protected]>                2020-07-03 11:05:50 -0700
commit     9a49d3f3d3bfa26df4e5e54d574cb490f0ee284b (patch)
tree       715c2fa00e55762764cadef8460da09f919910ad /include/libzfs.h
parent     7ddb753d17f2c12f152647c0e34eb9c42ee5e4af (diff)
Add device rebuild feature
The device_rebuild feature enables sequential reconstruction when resilvering. Mirror vdevs can be rebuilt in LBA order, which may more quickly restore redundancy depending on the pool's average block size, overall fragmentation, and the performance characteristics of the devices.

However, block checksums cannot be verified as part of the rebuild, so a scrub is automatically started after the sequential resilver completes.

The new '-s' option has been added to the `zpool attach` and `zpool replace` commands to request sequential reconstruction instead of healing reconstruction when resilvering.

    zpool attach -s <pool> <existing vdev> <new vdev>
    zpool replace -s <pool> <old vdev> <new vdev>

The `zpool status` output has been updated to report the progress of sequential resilvering in the same way as healing resilvering. The one notable difference is that multiple sequential resilvers may be in progress as long as they're operating on different top-level vdevs.

The `zpool wait -t resilver` command was extended to wait on sequential resilvers. From this perspective they are no different than healing resilvers.

Sequential resilvers cannot be supported for RAIDZ, but are compatible with the dRAID feature being developed.

As part of this change the resilver_restart_* tests were moved into the functional/replacement directory. Additionally, the replacement tests were renamed and extended to verify both resilvering and rebuilding.

Original-patch-by: Isaac Huang <[email protected]>
Reviewed-by: Tony Hutter <[email protected]>
Reviewed-by: John Poduska <[email protected]>
Co-authored-by: Mark Maybee <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Closes #10349
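The diff below extends the zpool_vdev_attach() prototype with a trailing boolean_t, which per the commit message selects sequential (rebuild) reconstruction. The following is a minimal caller sketch, not part of this commit: the parameter names ("replacing", "rebuild"), the helper name, and the error-handling note are assumptions; only the types appear in the header, and nvroot (the nvlist describing the new vdev) is assumed to be built by the caller as the zpool command does.

/*
 * Illustrative only: attach new_path to existing_vdev in pool and
 * request a sequential (rebuild) resilver via the new final
 * boolean_t argument of zpool_vdev_attach().
 */
#include <libzfs.h>
#include <stdio.h>

static int
attach_sequential(libzfs_handle_t *hdl, const char *pool,
    const char *existing_vdev, const char *new_path, nvlist_t *nvroot)
{
	zpool_handle_t *zhp = zpool_open(hdl, pool);

	if (zhp == NULL)
		return (1);

	/* replacing = B_FALSE (attach, not replace); rebuild = B_TRUE */
	int err = zpool_vdev_attach(zhp, existing_vdev, new_path,
	    nvroot, B_FALSE, B_TRUE);
	if (err != 0) {
		/*
		 * libzfs_errno(hdl) may report the new EZFS_REBUILDING
		 * code (first hunk below) if a rebuild is already running.
		 */
		(void) fprintf(stderr, "attach failed: %s\n",
		    libzfs_error_description(hdl));
	}

	zpool_close(zhp);
	return (err);
}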
Diffstat (limited to 'include/libzfs.h')
-rw-r--r--  include/libzfs.h  7
1 file changed, 5 insertions, 2 deletions
diff --git a/include/libzfs.h b/include/libzfs.h
index 64a0a2035..873e8f304 100644
--- a/include/libzfs.h
+++ b/include/libzfs.h
@@ -79,7 +79,7 @@ typedef enum zfs_error {
EZFS_NODEVICE, /* no such device in pool */
EZFS_BADDEV, /* invalid device to add */
EZFS_NOREPLICAS, /* no valid replicas */
- EZFS_RESILVERING, /* currently resilvering */
+ EZFS_RESILVERING, /* resilvering (healing reconstruction) */
EZFS_BADVERSION, /* unsupported version */
EZFS_POOLUNAVAIL, /* pool is currently unavailable */
EZFS_DEVOVERFLOW, /* too many devices in one vdev */
@@ -148,6 +148,7 @@ typedef enum zfs_error {
EZFS_TRIM_NOTSUP, /* device does not support trim */
EZFS_NO_RESILVER_DEFER, /* pool doesn't support resilver_defer */
EZFS_EXPORT_IN_PROGRESS, /* currently exporting the pool */
+ EZFS_REBUILDING, /* resilvering (sequential reconstruction) */
EZFS_UNKNOWN
} zfs_error_t;
@@ -297,7 +298,7 @@ extern int zpool_vdev_online(zpool_handle_t *, const char *, int,
vdev_state_t *);
extern int zpool_vdev_offline(zpool_handle_t *, const char *, boolean_t);
extern int zpool_vdev_attach(zpool_handle_t *, const char *,
- const char *, nvlist_t *, int);
+ const char *, nvlist_t *, int, boolean_t);
extern int zpool_vdev_detach(zpool_handle_t *, const char *);
extern int zpool_vdev_remove(zpool_handle_t *, const char *);
extern int zpool_vdev_remove_cancel(zpool_handle_t *);
@@ -387,6 +388,8 @@ typedef enum {
ZPOOL_STATUS_RESILVERING, /* device being resilvered */
ZPOOL_STATUS_OFFLINE_DEV, /* device offline */
ZPOOL_STATUS_REMOVED_DEV, /* removed device */
+ ZPOOL_STATUS_REBUILDING, /* device being rebuilt */
+ ZPOOL_STATUS_REBUILD_SCRUB, /* recommend scrubbing the pool */
/*
* Finally, the following indicates a healthy pool.