-rw-r--r--  cmd/zfs/zfs_main.c                                      |  19
-rw-r--r--  include/libzfs.h                                        |   9
-rw-r--r--  include/sys/zfs_ioctl.h                                 |   3
-rw-r--r--  lib/libzfs/libzfs_sendrecv.c                            |  96
-rw-r--r--  man/man8/zfs.8                                          |  20
-rw-r--r--  module/zfs/dmu_send.c                                   |   1
-rw-r--r--  module/zfs/dsl_userhold.c                               |  24
-rw-r--r--  tests/runfiles/linux.run                                |   2
-rw-r--r--  tests/zfs-tests/tests/functional/rsend/Makefile.am      |   1
-rwxr-xr-x  tests/zfs-tests/tests/functional/rsend/send_holds.ksh  | 177
10 files changed, 324 insertions(+), 28 deletions(-)
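
At a glance, this change adds a -h (--holds) option to zfs send and a matching -h option to zfs receive. A minimal usage sketch follows, based on the man page and test additions below; the pool and dataset names (tank, backup) are placeholders:

    # Place holds on a snapshot, then send it together with those holds.
    zfs snapshot tank/fs@snap1
    zfs hold mytag tank/fs@snap1
    zfs send -h tank/fs@snap1 | zfs receive backup/fs

    # The holds travel in the stream; verify them on the received snapshot.
    zfs holds backup/fs@snap1

    # 'zfs receive -h' skips applying any holds carried in the stream.
    zfs send -h tank/fs@snap1 | zfs receive -h backup/fs2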
diff --git a/cmd/zfs/zfs_main.c b/cmd/zfs/zfs_main.c
index ab2b99b02..2017c9803 100644
--- a/cmd/zfs/zfs_main.c
+++ b/cmd/zfs/zfs_main.c
@@ -278,10 +278,10 @@ get_usage(zfs_help_t idx)
case HELP_PROMOTE:
return (gettext("\tpromote <clone-filesystem>\n"));
case HELP_RECEIVE:
- return (gettext("\treceive [-vnsFu] "
+ return (gettext("\treceive [-vnsFhu] "
"[-o <property>=<value>] ... [-x <property>] ...\n"
"\t <filesystem|volume|snapshot>\n"
- "\treceive [-vnsFu] [-o <property>=<value>] ... "
+ "\treceive [-vnsFhu] [-o <property>=<value>] ... "
"[-x <property>] ... \n"
"\t [-d | -e] <filesystem>\n"
"\treceive -A <filesystem|volume>\n"));
@@ -293,7 +293,7 @@ get_usage(zfs_help_t idx)
case HELP_ROLLBACK:
return (gettext("\trollback [-rRf] <snapshot>\n"));
case HELP_SEND:
- return (gettext("\tsend [-DnPpRvLecwb] [-[i|I] snapshot] "
+ return (gettext("\tsend [-DnPpRvLecwhb] [-[i|I] snapshot] "
"<snapshot>\n"
"\tsend [-nvPLecw] [-i snapshot|bookmark] "
"<filesystem|volume|snapshot>\n"
@@ -3981,11 +3981,12 @@ zfs_do_send(int argc, char **argv)
{"compressed", no_argument, NULL, 'c'},
{"raw", no_argument, NULL, 'w'},
{"backup", no_argument, NULL, 'b'},
+ {"holds", no_argument, NULL, 'h'},
{0, 0, 0, 0}
};
/* check options */
- while ((c = getopt_long(argc, argv, ":i:I:RDpvnPLet:cwb", long_options,
+ while ((c = getopt_long(argc, argv, ":i:I:RDpvnPLeht:cwb", long_options,
NULL)) != -1) {
switch (c) {
case 'i':
@@ -4008,6 +4009,9 @@ zfs_do_send(int argc, char **argv)
case 'b':
flags.backup = B_TRUE;
break;
+ case 'h':
+ flags.holds = B_TRUE;
+ break;
case 'P':
flags.parsable = B_TRUE;
flags.verbose = B_TRUE;
@@ -4130,7 +4134,7 @@ zfs_do_send(int argc, char **argv)
char frombuf[ZFS_MAX_DATASET_NAME_LEN];
if (flags.replicate || flags.doall || flags.props ||
- flags.backup || flags.dedup ||
+ flags.backup || flags.dedup || flags.holds ||
(strchr(argv[0], '@') == NULL &&
(flags.dryrun || flags.verbose || flags.progress))) {
(void) fprintf(stderr, gettext("Error: "
@@ -4235,7 +4239,7 @@ zfs_do_receive(int argc, char **argv)
nomem();
/* check options */
- while ((c = getopt(argc, argv, ":o:x:denuvFsA")) != -1) {
+ while ((c = getopt(argc, argv, ":o:x:dehnuvFsA")) != -1) {
switch (c) {
case 'o':
if (!parseprop(props, optarg)) {
@@ -4267,6 +4271,9 @@ zfs_do_receive(int argc, char **argv)
}
flags.istail = B_TRUE;
break;
+ case 'h':
+ flags.skipholds = B_TRUE;
+ break;
case 'n':
flags.dryrun = B_TRUE;
break;
diff --git a/include/libzfs.h b/include/libzfs.h
index 72d956b41..65b06f7a8 100644
--- a/include/libzfs.h
+++ b/include/libzfs.h
@@ -645,6 +645,9 @@ typedef struct sendflags {
/* only send received properties (ie. -b) */
boolean_t backup;
+
+ /* include snapshot holds in send stream */
+ boolean_t holds;
} sendflags_t;
typedef boolean_t (snapfilter_cb_t)(zfs_handle_t *, void *);
@@ -707,6 +710,12 @@ typedef struct recvflags {
/* do not mount file systems as they are extracted (private) */
boolean_t nomount;
+
+ /* Was holds flag set in the compound header? */
+ boolean_t holds;
+
+ /* skip receive of snapshot holds */
+ boolean_t skipholds;
} recvflags_t;
extern int zfs_receive(libzfs_handle_t *, const char *, nvlist_t *,
diff --git a/include/sys/zfs_ioctl.h b/include/sys/zfs_ioctl.h
index a552fad37..f8c65f581 100644
--- a/include/sys/zfs_ioctl.h
+++ b/include/sys/zfs_ioctl.h
@@ -106,6 +106,7 @@ typedef enum drr_headertype {
#define DMU_BACKUP_FEATURE_LARGE_DNODE (1 << 23)
#define DMU_BACKUP_FEATURE_RAW (1 << 24)
/* flag #25 is reserved for the ZSTD compression feature */
+#define DMU_BACKUP_FEATURE_HOLDS (1 << 26)
/*
* Mask of all supported backup features
@@ -115,7 +116,7 @@ typedef enum drr_headertype {
DMU_BACKUP_FEATURE_EMBED_DATA | DMU_BACKUP_FEATURE_LZ4 | \
DMU_BACKUP_FEATURE_RESUMING | DMU_BACKUP_FEATURE_LARGE_BLOCKS | \
DMU_BACKUP_FEATURE_COMPRESSED | DMU_BACKUP_FEATURE_LARGE_DNODE | \
- DMU_BACKUP_FEATURE_RAW)
+ DMU_BACKUP_FEATURE_RAW | DMU_BACKUP_FEATURE_HOLDS)
/* Are all features in the given flag word currently supported? */
#define DMU_STREAM_SUPPORTED(x) (!((x) & ~DMU_BACKUP_FEATURE_MASK))
diff --git a/lib/libzfs/libzfs_sendrecv.c b/lib/libzfs/libzfs_sendrecv.c
index 1d8292101..2aa0fd222 100644
--- a/lib/libzfs/libzfs_sendrecv.c
+++ b/lib/libzfs/libzfs_sendrecv.c
@@ -29,6 +29,7 @@
* Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
* Copyright 2016 Igor Kozhukhov <[email protected]>
* Copyright (c) 2018, loli10K <[email protected]>. All rights reserved.
+ * Copyright (c) 2018 Datto Inc.
*/
#include <assert.h>
@@ -616,6 +617,7 @@ typedef struct send_data {
nvlist_t *parent_snaps;
nvlist_t *fss;
nvlist_t *snapprops;
+ nvlist_t *snapholds; /* user holds */
/* send-receive configuration, does not change during traversal */
const char *fsname;
@@ -627,6 +629,8 @@ typedef struct send_data {
boolean_t verbose;
boolean_t seenfrom;
boolean_t seento;
+ boolean_t holds; /* were holds requested with send -h */
+ boolean_t props;
/*
* The header nvlist is of the following format:
@@ -642,6 +646,7 @@ typedef struct send_data {
* "props" -> { name -> value (only if set here) }
* "snaps" -> { name (lastname) -> number (guid) }
* "snapprops" -> { name (lastname) -> { name -> value } }
+ * "snapholds" -> { name (lastname) -> { holdname -> crtime } }
*
* "origin" -> number (guid) (if clone)
* "is_encroot" -> boolean
@@ -712,6 +717,15 @@ send_iterate_snap(zfs_handle_t *zhp, void *arg)
send_iterate_prop(zhp, sd->backup, nv);
VERIFY(0 == nvlist_add_nvlist(sd->snapprops, snapname, nv));
nvlist_free(nv);
+ if (sd->holds) {
+ nvlist_t *holds = fnvlist_alloc();
+ int err = lzc_get_holds(zhp->zfs_name, &holds);
+ if (err == 0) {
+ VERIFY(0 == nvlist_add_nvlist(sd->snapholds,
+ snapname, holds));
+ }
+ fnvlist_free(holds);
+ }
zfs_close(zhp);
return (0);
@@ -893,9 +907,10 @@ send_iterate_fs(zfs_handle_t *zhp, void *arg)
}
/* iterate over props */
- VERIFY(0 == nvlist_alloc(&nv, NV_UNIQUE_NAME, 0));
- send_iterate_prop(zhp, sd->backup, nv);
-
+ if (sd->props || sd->backup || sd->recursive) {
+ VERIFY(0 == nvlist_alloc(&nv, NV_UNIQUE_NAME, 0));
+ send_iterate_prop(zhp, sd->backup, nv);
+ }
if (zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF) {
boolean_t encroot;
@@ -925,17 +940,24 @@ send_iterate_fs(zfs_handle_t *zhp, void *arg)
}
- VERIFY(0 == nvlist_add_nvlist(nvfs, "props", nv));
+ if (nv != NULL)
+ VERIFY(0 == nvlist_add_nvlist(nvfs, "props", nv));
/* iterate over snaps, and set sd->parent_fromsnap_guid */
sd->parent_fromsnap_guid = 0;
VERIFY(0 == nvlist_alloc(&sd->parent_snaps, NV_UNIQUE_NAME, 0));
VERIFY(0 == nvlist_alloc(&sd->snapprops, NV_UNIQUE_NAME, 0));
+ if (sd->holds)
+ VERIFY(0 == nvlist_alloc(&sd->snapholds, NV_UNIQUE_NAME, 0));
(void) zfs_iter_snapshots_sorted(zhp, send_iterate_snap, sd);
VERIFY(0 == nvlist_add_nvlist(nvfs, "snaps", sd->parent_snaps));
VERIFY(0 == nvlist_add_nvlist(nvfs, "snapprops", sd->snapprops));
+ if (sd->holds)
+ VERIFY(0 == nvlist_add_nvlist(nvfs, "snapholds",
+ sd->snapholds));
nvlist_free(sd->parent_snaps);
nvlist_free(sd->snapprops);
+ nvlist_free(sd->snapholds);
/* add this fs to nvlist */
(void) snprintf(guidstring, sizeof (guidstring),
@@ -960,7 +982,8 @@ out:
static int
gather_nvlist(libzfs_handle_t *hdl, const char *fsname, const char *fromsnap,
const char *tosnap, boolean_t recursive, boolean_t raw, boolean_t verbose,
- boolean_t backup, nvlist_t **nvlp, avl_tree_t **avlp)
+ boolean_t backup, boolean_t holds, boolean_t props, nvlist_t **nvlp,
+ avl_tree_t **avlp)
{
zfs_handle_t *zhp;
send_data_t sd = { 0 };
@@ -978,6 +1001,8 @@ gather_nvlist(libzfs_handle_t *hdl, const char *fsname, const char *fromsnap,
sd.raw = raw;
sd.verbose = verbose;
sd.backup = backup;
+ sd.holds = holds;
+ sd.props = props;
if ((error = send_iterate_fs(zhp, &sd)) != 0) {
nvlist_free(sd.fss);
@@ -1008,7 +1033,7 @@ typedef struct send_dump_data {
uint64_t prevsnap_obj;
boolean_t seenfrom, seento, replicate, doall, fromorigin;
boolean_t verbose, dryrun, parsable, progress, embed_data, std_out;
- boolean_t large_block, compress, raw;
+ boolean_t large_block, compress, raw, holds;
int outfd;
boolean_t err;
nvlist_t *fss;
@@ -1864,6 +1889,9 @@ zfs_send(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap,
}
}
+ if (flags->holds)
+ featureflags |= DMU_BACKUP_FEATURE_HOLDS;
+
/*
* Start the dedup thread if this is a dedup stream. We do not bother
* doing this if this a raw send of an encrypted dataset with dedup off
@@ -1891,7 +1919,8 @@ zfs_send(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap,
}
}
- if (flags->replicate || flags->doall || flags->props || flags->backup) {
+ if (flags->replicate || flags->doall || flags->props ||
+ flags->holds || flags->backup) {
dmu_replay_record_t drr = { 0 };
char *packbuf = NULL;
size_t buflen = 0;
@@ -1899,7 +1928,8 @@ zfs_send(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap,
ZIO_SET_CHECKSUM(&zc, 0, 0, 0, 0);
- if (flags->replicate || flags->props || flags->backup) {
+ if (flags->replicate || flags->props || flags->backup ||
+ flags->holds) {
nvlist_t *hdrnv;
VERIFY(0 == nvlist_alloc(&hdrnv, NV_UNIQUE_NAME, 0));
@@ -1918,7 +1948,8 @@ zfs_send(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap,
err = gather_nvlist(zhp->zfs_hdl, zhp->zfs_name,
fromsnap, tosnap, flags->replicate, flags->raw,
- flags->verbose, flags->backup, &fss, &fsavl);
+ flags->verbose, flags->backup, flags->holds,
+ flags->props, &fss, &fsavl);
if (err)
goto err_out;
VERIFY(0 == nvlist_add_nvlist(hdrnv, "fss", fss));
@@ -1988,6 +2019,7 @@ zfs_send(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap,
sdd.embed_data = flags->embed_data;
sdd.compress = flags->compress;
sdd.raw = flags->raw;
+ sdd.holds = flags->holds;
sdd.filter_cb = filter_func;
sdd.filter_cb_arg = cb_arg;
if (debugnvp)
@@ -2020,6 +2052,7 @@ zfs_send(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap,
sdd.cleanup_fd = -1;
sdd.snapholds = NULL;
}
+
if (flags->verbose || sdd.snapholds != NULL) {
/*
* Do a verbose no-op dry run to get all the verbose output
@@ -2088,7 +2121,7 @@ zfs_send(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap,
}
if (!flags->dryrun && (flags->replicate || flags->doall ||
- flags->props || flags->backup)) {
+ flags->props || flags->backup || flags->holds)) {
/*
* write final end record. NB: want to do this even if
* there was some error, because it might not be totally
@@ -2820,7 +2853,8 @@ again:
VERIFY(0 == nvlist_alloc(&deleted, NV_UNIQUE_NAME, 0));
if ((error = gather_nvlist(hdl, tofs, fromsnap, NULL,
- recursive, B_TRUE, B_FALSE, B_FALSE, &local_nv, &local_avl)) != 0)
+ recursive, B_TRUE, B_FALSE, B_FALSE, B_FALSE, B_TRUE, &local_nv,
+ &local_avl)) != 0)
return (error);
/*
@@ -3655,6 +3689,7 @@ zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
uint64_t parent_snapguid = 0;
prop_changelist_t *clp = NULL;
nvlist_t *snapprops_nvlist = NULL;
+ nvlist_t *snapholds_nvlist = NULL;
zprop_errflags_t prop_errflags;
nvlist_t *prop_errors = NULL;
boolean_t recursive;
@@ -3683,6 +3718,9 @@ zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") ==
ENOENT);
+ /* Did the user request holds be skipped via zfs recv -h? */
+ boolean_t holds = flags->holds && !flags->skipholds;
+
if (stream_avl != NULL) {
char *keylocation = NULL;
nvlist_t *lookup = NULL;
@@ -3716,11 +3754,22 @@ zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
if (flags->canmountoff) {
VERIFY(0 == nvlist_add_uint64(rcvprops,
zfs_prop_to_name(ZFS_PROP_CANMOUNT), 0));
+ } else if (newprops) { /* nothing in rcvprops, eliminate it */
+ nvlist_free(rcvprops);
+ rcvprops = NULL;
+ newprops = B_FALSE;
}
if (0 == nvlist_lookup_nvlist(fs, "snapprops", &lookup)) {
VERIFY(0 == nvlist_lookup_nvlist(lookup,
snapname, &snapprops_nvlist));
}
+ if (holds) {
+ if (0 == nvlist_lookup_nvlist(fs, "snapholds",
+ &lookup)) {
+ VERIFY(0 == nvlist_lookup_nvlist(lookup,
+ snapname, &snapholds_nvlist));
+ }
+ }
}
cp = NULL;
@@ -4204,6 +4253,22 @@ zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
zcmd_free_nvlists(&zc);
}
}
+ if (err == 0 && snapholds_nvlist) {
+ nvpair_t *pair;
+ nvlist_t *holds, *errors = NULL;
+ int cleanup_fd = -1;
+
+ VERIFY(0 == nvlist_alloc(&holds, 0, KM_SLEEP));
+ for (pair = nvlist_next_nvpair(snapholds_nvlist, NULL);
+ pair != NULL;
+ pair = nvlist_next_nvpair(snapholds_nvlist, pair)) {
+ VERIFY(0 == nvlist_add_string(holds, destsnap,
+ nvpair_name(pair)));
+ }
+ (void) lzc_hold(holds, cleanup_fd, &errors);
+ nvlist_free(snapholds_nvlist);
+ nvlist_free(holds);
+ }
if (err && (ioctl_errno == ENOENT || ioctl_errno == EEXIST)) {
/*
@@ -4222,7 +4287,8 @@ zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
*/
*cp = '\0';
if (gather_nvlist(hdl, destsnap, NULL, NULL, B_FALSE, B_TRUE,
- B_FALSE, B_FALSE, &local_nv, &local_avl) == 0) {
+ B_FALSE, B_FALSE, B_FALSE, B_TRUE, &local_nv, &local_avl)
+ == 0) {
*cp = '@';
fs = fsavl_find(local_avl, drrb->drr_toguid, NULL);
fsavl_destroy(local_avl);
@@ -4544,6 +4610,12 @@ zfs_receive_impl(libzfs_handle_t *hdl, const char *tosnap,
return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
}
+ /* Holds feature is set once in the compound stream header. */
+ boolean_t holds = (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
+ DMU_BACKUP_FEATURE_HOLDS);
+ if (holds)
+ flags->holds = B_TRUE;
+
if (strchr(drrb->drr_toname, '@') == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
"stream (bad snapshot name)"));
diff --git a/man/man8/zfs.8 b/man/man8/zfs.8
index 4658c5b8b..551089609 100644
--- a/man/man8/zfs.8
+++ b/man/man8/zfs.8
@@ -195,7 +195,7 @@
.Ar snapshot bookmark
.Nm
.Cm send
-.Op Fl DLPRbcenpvw
+.Op Fl DLPRbcehnpvw
.Op Oo Fl I Ns | Ns Fl i Oc Ar snapshot
.Ar snapshot
.Nm
@@ -209,14 +209,14 @@
.Fl t Ar receive_resume_token
.Nm
.Cm receive
-.Op Fl Fnsuv
+.Op Fl Fhnsuv
.Op Fl o Sy origin Ns = Ns Ar snapshot
.Op Fl o Ar property Ns = Ns Ar value
.Op Fl x Ar property
.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
.Nm
.Cm receive
-.Op Fl Fnsuv
+.Op Fl Fhnsuv
.Op Fl d Ns | Ns Fl e
.Op Fl o Sy origin Ns = Ns Ar snapshot
.Op Fl o Ar property Ns = Ns Ar value
@@ -3408,7 +3408,7 @@ feature.
.It Xo
.Nm
.Cm send
-.Op Fl DLPRbcenpvw
+.Op Fl DLPRbcehnpvw
.Op Oo Fl I Ns | Ns Fl i Oc Ar snapshot
.Ar snapshot
.Xc
@@ -3548,6 +3548,12 @@ Note that if you do not use this flag for sending encrypted datasets, data will
be sent unencrypted and may be re-encrypted with a different encryption key on
the receiving system, which will disable the ability to do a raw send to that
system for incrementals.
+.It Fl h, -holds
+Generate a stream package that includes any snapshot holds (created with the
+.Sy zfs hold
+command), and indicates to
+.Sy zfs receive
+that the holds should be applied to the dataset on the receiving system.
.It Fl i Ar snapshot
Generate an incremental stream from the first
.Ar snapshot
@@ -3743,7 +3749,7 @@ for more details.
.It Xo
.Nm
.Cm receive
-.Op Fl Fnsuv
+.Op Fl Fhnsuv
.Op Fl o Sy origin Ns = Ns Ar snapshot
.Op Fl o Ar property Ns = Ns Ar value
.Op Fl x Ar property
@@ -3752,7 +3758,7 @@ for more details.
.It Xo
.Nm
.Cm receive
-.Op Fl Fnsuv
+.Op Fl Fhnsuv
.Op Fl d Ns | Ns Fl e
.Op Fl o Sy origin Ns = Ns Ar snapshot
.Op Fl o Ar property Ns = Ns Ar value
@@ -3878,6 +3884,8 @@ snapshot as described in the paragraph above.
Discard all but the last element of the sent snapshot's file system name, using
that element to determine the name of the target file system for the new
snapshot as described in the paragraph above.
+.It Fl h
+Skip the receive of snapshot holds. This option has no effect if the stream does not contain holds.
.It Fl n
Do not actually receive the stream.
This can be useful in conjunction with the
diff --git a/module/zfs/dmu_send.c b/module/zfs/dmu_send.c
index 9c0ad406b..43e19ecbc 100644
--- a/module/zfs/dmu_send.c
+++ b/module/zfs/dmu_send.c
@@ -1282,7 +1282,6 @@ dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
err = dsl_pool_hold(tosnap, FTAG, &dp);
if (err != 0)
return (err);
-
if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) {
/*
* We are sending a filesystem or volume. Ensure
diff --git a/module/zfs/dsl_userhold.c b/module/zfs/dsl_userhold.c
index c80b35d48..638805d0b 100644
--- a/module/zfs/dsl_userhold.c
+++ b/module/zfs/dsl_userhold.c
@@ -83,6 +83,7 @@ dsl_dataset_user_hold_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_user_hold_arg_t *dduha = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
+ nvlist_t *tmp_holds;
if (spa_version(dp->dp_spa) < SPA_VERSION_USERREFS)
return (SET_ERROR(ENOTSUP));
@@ -90,6 +91,26 @@ dsl_dataset_user_hold_check(void *arg, dmu_tx_t *tx)
if (!dmu_tx_is_syncing(tx))
return (0);
+ /*
+ * Ensure the list has no duplicates by copying name/values from
+ * non-unique dduha_holds to unique tmp_holds, and comparing counts.
+ */
+ tmp_holds = fnvlist_alloc();
+ for (nvpair_t *pair = nvlist_next_nvpair(dduha->dduha_holds, NULL);
+ pair != NULL; pair = nvlist_next_nvpair(dduha->dduha_holds, pair)) {
+ size_t len = strlen(nvpair_name(pair)) +
+ strlen(fnvpair_value_string(pair));
+ char *nameval = kmem_zalloc(len + 2, KM_SLEEP);
+ (void) strcpy(nameval, nvpair_name(pair));
+ (void) strcat(nameval, "@");
+ (void) strcat(nameval, fnvpair_value_string(pair));
+ fnvlist_add_string(tmp_holds, nameval, "");
+ kmem_free(nameval, len + 2);
+ }
+ size_t tmp_count = fnvlist_num_pairs(tmp_holds);
+ fnvlist_free(tmp_holds);
+ if (tmp_count != fnvlist_num_pairs(dduha->dduha_holds))
+ return (SET_ERROR(EEXIST));
for (nvpair_t *pair = nvlist_next_nvpair(dduha->dduha_holds, NULL);
pair != NULL; pair = nvlist_next_nvpair(dduha->dduha_holds, pair)) {
dsl_dataset_t *ds;
@@ -312,7 +333,8 @@ dsl_dataset_user_hold(nvlist_t *holds, minor_t cleanup_minor, nvlist_t *errlist)
return (0);
dduha.dduha_holds = holds;
- dduha.dduha_chkholds = fnvlist_alloc();
+ /* chkholds can have non-unique names */
+ VERIFY(0 == nvlist_alloc(&dduha.dduha_chkholds, 0, KM_SLEEP));
dduha.dduha_errlist = errlist;
dduha.dduha_minor = cleanup_minor;
diff --git a/tests/runfiles/linux.run b/tests/runfiles/linux.run
index 8663c24f9..d6bc77a6b 100644
--- a/tests/runfiles/linux.run
+++ b/tests/runfiles/linux.run
@@ -793,7 +793,7 @@ tests = ['rsend_001_pos', 'rsend_002_pos', 'rsend_003_pos', 'rsend_004_pos',
'send-c_embedded_blocks', 'send-c_resume', 'send-cpL_varied_recsize',
'send-c_recv_dedup', 'send_encrypted_files', 'send_encrypted_heirarchy',
'send_encrypted_props', 'send_freeobjects', 'send_realloc_dnode_size',
- 'send_hole_birth', 'send-wDR_encrypted_zvol']
+ 'send_holds', 'send_hole_birth', 'send-wDR_encrypted_zvol']
tags = ['functional', 'rsend']
[tests/functional/scrub_mirror]
diff --git a/tests/zfs-tests/tests/functional/rsend/Makefile.am b/tests/zfs-tests/tests/functional/rsend/Makefile.am
index 316bcb4e6..0b15f8d2b 100644
--- a/tests/zfs-tests/tests/functional/rsend/Makefile.am
+++ b/tests/zfs-tests/tests/functional/rsend/Makefile.am
@@ -41,6 +41,7 @@ dist_pkgdata_SCRIPTS = \
send-cpL_varied_recsize.ksh \
send_freeobjects.ksh \
send_realloc_dnode_size.ksh \
+ send_holds.ksh \
send_hole_birth.ksh \
send-wDR_encrypted_zvol.ksh
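
For convenience, the new case can be run on its own through the test suite wrapper; a sketch assuming an OpenZFS source tree with scripts/zfs-tests.sh and an installed test suite:

    # Run the whole rsend group, or only the new holds test.
    ./scripts/zfs-tests.sh -T rsend
    ./scripts/zfs-tests.sh -t tests/zfs-tests/tests/functional/rsend/send_holds.ksh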
diff --git a/tests/zfs-tests/tests/functional/rsend/send_holds.ksh b/tests/zfs-tests/tests/functional/rsend/send_holds.ksh
new file mode 100755
index 000000000..5dcf0e2a0
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/rsend/send_holds.ksh
@@ -0,0 +1,177 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2012, 2016 by Delphix. All rights reserved.
+# Copyright (c) 2018 Datto, Inc. All rights reserved.
+#
+
+. $STF_SUITE/tests/functional/rsend/rsend.kshlib
+. $STF_SUITE/tests/functional/cli_root/cli_common.kshlib
+
+#
+# DESCRIPTION:
+# Verify 'zfs send' can send dataset holds.
+# Verify 'zfs receive' can receive dataset holds.
+#
+# STRATEGY:
+# 1. Create a snapshot.
+# 2. Create a full send stream with the fs, including holds.
+# 3. Receive the send stream and verify the data integrity.
+# 4. Fill in fs with some new data.
+# 5. Create an incremental send stream with the fs, including holds.
+# 6. Receive the incremental send stream and verify the data integrity.
+# 7. Verify the holds have been received as expected.
+# 8. Create an incremental snap with no holds, and send that with -h.
+# 9. Confirm the snapshot was received as expected.
+# 10. Create an incremental snapshot and place a hold on it.
+# 11. Receive the incremental stream with -h and verify holds skipped.
+#
+
+verify_runnable "both"
+
+function cleanup
+{
+ eval "zfs holds $init_snap |grep -q hold1-1" &&
+ log_must zfs release hold1-1 $init_snap
+ eval "zfs holds $init_snap |grep -q hold1-2" &&
+ log_must zfs release hold1-2 $init_snap
+ eval "zfs holds $recv_snap |grep -q hold1-1" &&
+ log_must zfs release hold1-1 $recv_snap
+ eval "zfs holds $recv_snap |grep -q hold1-2" &&
+ log_must zfs release hold1-2 $recv_snap
+ eval "zfs holds $inc_snap |grep -q hold2-1" &&
+ log_must zfs release hold2-1 $inc_snap
+ eval "zfs holds $recv_inc_snap |grep -q hold2-1" &&
+ log_must zfs release hold2-1 $recv_inc_snap
+ eval "zfs holds $inc_snap3 |grep -q hold3-1" &&
+ log_must zfs release hold3-1 $inc_snap3
+
+ # destroy datasets
+ datasetexists $recv_root/$TESTFS1 &&
+ log_must destroy_dataset "$recv_root/$TESTFS1" "-Rf"
+ datasetexists $recv_root && log_must destroy_dataset "$recv_root" "-Rf"
+ datasetexists $TESTPOOL/$TESTFS1 && log_must destroy_dataset "$TESTPOOL/$TESTFS1" "-Rf"
+
+ [[ -e $full_bkup ]] && log_must rm -f $full_bkup
+ [[ -e $inc_bkup ]] && log_must rm -f $inc_bkup
+ [[ -e $inc_data2 ]] && log_must rm -f $inc_data2
+ [[ -d $TESTDIR1 ]] && log_must rm -rf $TESTDIR1
+
+}
+
+#
+# Check if hold exists on the specified dataset.
+#
+function check_hold #<snapshot> <hold>
+{
+ typeset dataset=$1
+ typeset hold=$2
+
+ log_note "checking $dataset for $hold"
+ eval "zfs holds $dataset |grep -q $hold"
+}
+
+log_assert "Verify 'zfs send/recv' can send and receive dataset holds."
+log_onexit cleanup
+
+init_snap=$TESTPOOL/$TESTFS1@init_snap
+inc_snap=$TESTPOOL/$TESTFS1@inc_snap
+inc_snap2=$TESTPOOL/$TESTFS1@inc_snap2
+inc_snap3=$TESTPOOL/$TESTFS1@inc_snap3
+full_bkup=$TEST_BASE_DIR/fullbkup.$$
+inc_bkup=$TEST_BASE_DIR/incbkup.$$
+init_data=$TESTDIR/$TESTFILE1
+inc_data=$TESTDIR/$TESTFILE2
+inc_data2=$TESTDIR/testfile3
+recv_root=$TESTPOOL/rst_ctr
+recv_snap=$recv_root/$TESTFS1@init_snap
+recv_inc_snap=$recv_root/$TESTFS1@inc_snap
+recv_inc_snap2=$recv_root/$TESTFS1@inc_snap2
+recv_inc_snap3=$recv_root/$TESTFS1@inc_snap3
+recv_data=$TESTDIR1/$TESTFS1/$TESTFILE1
+recv_inc_data=$TESTDIR1/$TESTFS1/$TESTFILE2
+recv_inc_data2=$TESTDIR1/$TESTFS1/testfile3
+
+log_note "Verify 'zfs send' can create full send stream."
+
+# Preparation
+if ! datasetexists $TESTPOOL/$TESTFS1; then
+ log_must zfs create $TESTPOOL/$TESTFS1
+fi
+[[ -e $init_data ]] && log_must rm -f $init_data
+log_must zfs create $recv_root
+[[ ! -d $TESTDIR1 ]] && log_must mkdir -p $TESTDIR1
+[[ ! -d $TESTDIR ]] && log_must mkdir -p $TESTDIR
+log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS1
+log_must zfs set mountpoint=$TESTDIR1 $recv_root
+
+file_write -o create -f $init_data -b $BLOCK_SIZE -c $WRITE_COUNT
+
+log_must zfs snapshot $init_snap
+log_must zfs hold hold1-1 $init_snap
+log_must zfs hold hold1-2 $init_snap
+log_must eval "zfs send -h $init_snap > $full_bkup"
+
+log_note "Verify the send stream is valid to receive."
+
+log_must zfs recv -F $recv_snap <$full_bkup
+log_must datasetexists $recv_snap
+receive_check $recv_snap ${recv_snap%%@*}
+
+log_note "Verify the holds were received."
+log_must check_hold $recv_snap hold1-1
+log_must check_hold $recv_snap hold1-2
+compare_cksum $init_data $recv_data
+
+log_note "Verify 'zfs send -i' can create incremental send stream."
+
+file_write -o create -f $inc_data -b $BLOCK_SIZE -c $WRITE_COUNT -d 0
+
+log_must zfs snapshot $inc_snap
+log_must zfs hold hold2-1 $inc_snap
+log_must eval "zfs send -h -i $init_snap $inc_snap > $inc_bkup"
+
+log_note "Verify the incremental send stream is valid to receive."
+
+log_must zfs recv -F $recv_inc_snap <$inc_bkup
+log_must datasetexists $recv_inc_snap
+log_note "Verify the hold was received from the incremental send."
+
+log_must check_hold $recv_inc_snap hold2-1
+
+compare_cksum $inc_data $recv_inc_data
+
+log_note "Verify send -h works when there are no holds."
+file_write -o create -f $inc_data2 -b $BLOCK_SIZE -c $WRITE_COUNT -d 0
+log_must zfs snapshot $inc_snap2
+log_must eval "zfs send -h -i $inc_snap $inc_snap2 > $inc_bkup"
+log_must zfs recv -F $recv_inc_snap2 <$inc_bkup
+log_must datasetexists $recv_inc_snap2
+compare_cksum $inc_data2 $recv_inc_data2
+
+log_note "Verify send -h fails properly when receive dataset already exists"
+log_must zfs recv -F $recv_inc_snap2 <$inc_bkup
+
+log_note "Verify recv -h skips the receive of holds"
+log_must zfs snapshot $inc_snap3
+log_must zfs hold hold3-1 $inc_snap3
+log_must eval "zfs send -h -i $inc_snap2 $inc_snap3 > $inc_bkup"
+log_must zfs recv -F -h $recv_inc_snap3 <$inc_bkup
+log_must datasetexists $recv_inc_snap3
+log_mustnot check_hold $recv_inc_snap3 hold3-1
+
+log_pass "'zfs send/recv' can send and receive dataset holds."