author	Tom Caputi <[email protected]>	2018-06-28 17:55:11 -0400
committer	Brian Behlendorf <[email protected]>	2018-06-28 14:55:11 -0700
commit	da2feb42fb5c7a8c1e1cc67f7a880da9d8e97bc2 (patch)
tree	b29b779bb5e70b9dfbd0bf81f512dedee17175ad /module
parent	edf60b864505497dc8c4f09d4ce2190c72f1e2c2 (diff)
Fix 'zfs recv' of non large_dnode send streams
Currently, there is a bug where older send streams without the
DMU_BACKUP_FEATURE_LARGE_DNODE flag are not handled correctly. The code
in receive_object() fails to handle cases where drro->drr_dn_slots is
set to 0, which is always the case when the sending code does not
support this feature flag. This patch fixes the issue by ensuring that
a value of 0 is treated as DNODE_MIN_SLOTS.

Tested-by: DHE <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Tom Caputi <[email protected]>
Closes #7617
Closes #7662
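In practice the fix reduces to normalizing the slot count once, before any
validation or allocation uses it. A minimal sketch of that pattern, assuming
DNODE_MIN_SLOTS has the value defined in the ZFS headers; the helper name and
standalone definitions below are illustrative, not part of the actual receive
path:

	#include <stdint.h>

	/* Assumed from the ZFS headers: the smallest legal dnode is one slot. */
	#define DNODE_MIN_SLOTS	1

	/*
	 * Hypothetical helper: map the drr_dn_slots field of a send stream's
	 * object record to an effective slot count. Streams produced without
	 * the DMU_BACKUP_FEATURE_LARGE_DNODE flag always carry 0 here, which
	 * must be treated as the minimum dnode size rather than rejected as
	 * invalid.
	 */
	static uint8_t
	effective_dn_slots(uint8_t drr_dn_slots)
	{
		return (drr_dn_slots != 0 ? drr_dn_slots : DNODE_MIN_SLOTS);
	}

The patch applies this normalization in both places that consume the slot
count: once up front in receive_object(), and defensively again in
dmu_object_reclaim_dnsize().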
Diffstat (limited to 'module')
-rw-r--r--	module/zfs/dmu_object.c	 3
-rw-r--r--	module/zfs/dmu_send.c	18
2 files changed, 13 insertions, 8 deletions
diff --git a/module/zfs/dmu_object.c b/module/zfs/dmu_object.c
index 62ddea905..586a04b16 100644
--- a/module/zfs/dmu_object.c
+++ b/module/zfs/dmu_object.c
@@ -261,6 +261,9 @@ dmu_object_reclaim_dnsize(objset_t *os, uint64_t object, dmu_object_type_t ot,
 	int dn_slots = dnodesize >> DNODE_SHIFT;
 	int err;
 
+	if (dn_slots == 0)
+		dn_slots = DNODE_MIN_SLOTS;
+
 	if (object == DMU_META_DNODE_OBJECT)
 		return (SET_ERROR(EBADF));
 
diff --git a/module/zfs/dmu_send.c b/module/zfs/dmu_send.c
index 3dfe0b73e..ded086087 100644
--- a/module/zfs/dmu_send.c
+++ b/module/zfs/dmu_send.c
@@ -2454,6 +2454,8 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 	dmu_tx_t *tx;
 	uint64_t object;
 	int err;
+	uint8_t dn_slots = drro->drr_dn_slots != 0 ?
+	    drro->drr_dn_slots : DNODE_MIN_SLOTS;
 
 	if (drro->drr_type == DMU_OT_NONE ||
 	    !DMU_OT_IS_VALID(drro->drr_type) ||
@@ -2465,7 +2467,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 	    drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
 	    drro->drr_bonuslen >
 	    DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) ||
-	    drro->drr_dn_slots >
+	    dn_slots >
 	    (spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) {
 		return (SET_ERROR(EINVAL));
 	}
@@ -2481,7 +2483,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 		    drro->drr_indblkshift > SPA_MAXBLOCKSHIFT ||
 		    drro->drr_nlevels > DN_MAX_LEVELS ||
 		    drro->drr_nblkptr > DN_MAX_NBLKPTR ||
-		    DN_SLOTS_TO_BONUSLEN(drro->drr_dn_slots) <
+		    DN_SLOTS_TO_BONUSLEN(dn_slots) <
 		    drro->drr_raw_bonuslen)
 			return (SET_ERROR(EINVAL));
 	} else {
@@ -2519,7 +2521,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 
 		if (drro->drr_blksz != doi.doi_data_block_size ||
 		    nblkptr < doi.doi_nblkptr ||
-		    drro->drr_dn_slots != doi.doi_dnodesize >> DNODE_SHIFT ||
+		    dn_slots != doi.doi_dnodesize >> DNODE_SHIFT ||
 		    (rwa->raw &&
 		    (indblksz != doi.doi_metadata_block_size ||
 		    drro->drr_nlevels < doi.doi_indirection))) {
@@ -2540,7 +2542,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 		 * instead.
 		 */
 		if ((rwa->raw && drro->drr_nlevels < doi.doi_indirection) ||
-		    drro->drr_dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) {
+		    dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) {
 			err = dmu_free_long_object(rwa->os, drro->drr_object);
 			if (err != 0)
 				return (SET_ERROR(EINVAL));
@@ -2569,11 +2571,11 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 	 * another object from the previous snapshot. We must free
 	 * these objects before we attempt to allocate the new dnode.
 	 */
-	if (drro->drr_dn_slots > 1) {
+	if (dn_slots > 1) {
 		boolean_t need_sync = B_FALSE;
 
 		for (uint64_t slot = drro->drr_object + 1;
-		    slot < drro->drr_object + drro->drr_dn_slots;
+		    slot < drro->drr_object + dn_slots;
 		    slot++) {
 			dmu_object_info_t slot_doi;
 
@@ -2609,7 +2611,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 		err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
 		    drro->drr_type, drro->drr_blksz,
 		    drro->drr_bonustype, drro->drr_bonuslen,
-		    drro->drr_dn_slots << DNODE_SHIFT, tx);
+		    dn_slots << DNODE_SHIFT, tx);
 	} else if (drro->drr_type != doi.doi_type ||
 	    drro->drr_blksz != doi.doi_data_block_size ||
 	    drro->drr_bonustype != doi.doi_bonus_type ||
@@ -2618,7 +2620,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 		err = dmu_object_reclaim_dnsize(rwa->os, drro->drr_object,
 		    drro->drr_type, drro->drr_blksz,
 		    drro->drr_bonustype, drro->drr_bonuslen,
-		    drro->drr_dn_slots << DNODE_SHIFT, tx);
+		    dn_slots << DNODE_SHIFT, tx);
 	}
 	if (err != 0) {
 		dmu_tx_commit(tx);