diff options
author | Tom Caputi <[email protected]> | 2018-02-21 15:26:51 -0500 |
---|---|---|
committer | Brian Behlendorf <[email protected]> | 2018-02-21 12:26:51 -0800 |
commit | 5121c4fb0c25a369d2e6f61e5b0a79969f0b75b5 (patch) | |
tree | 4d81c34137b5448ea084d5445f73434e6be7c15e | |
parent | 478b3150dea216d73e3d65749c8d6ccd365efef4 (diff) |
Remove unnecessary txg syncs from receive_object()
1b66810b introduced several changes which improved the reliability
of zfs sends when large dnodes were involved. However, these fixes
required adding a few calls to txg_wait_synced() in the DRR_OBJECT
handling code. Although most of them are currently necessary, this
patch allows the code to continue without waiting in some cases
where it doesn't have to.
Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Tom Caputi <[email protected]>
Closes #7197
-rw-r--r-- | module/zfs/dmu_send.c | 7 |
1 file changed, 6 insertions, 1 deletion
diff --git a/module/zfs/dmu_send.c b/module/zfs/dmu_send.c index 2c2ed8fb3..8ca77e95d 100644 --- a/module/zfs/dmu_send.c +++ b/module/zfs/dmu_send.c @@ -2546,6 +2546,8 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro, * these objects before we attempt to allocate the new dnode. */ if (drro->drr_dn_slots > 1) { + boolean_t need_sync = B_FALSE; + for (uint64_t slot = drro->drr_object + 1; slot < drro->drr_object + drro->drr_dn_slots; slot++) { @@ -2564,9 +2566,12 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro, if (err != 0) return (err); + + need_sync = B_TRUE; } - txg_wait_synced(dmu_objset_pool(rwa->os), 0); + if (need_sync) + txg_wait_synced(dmu_objset_pool(rwa->os), 0); } tx = dmu_tx_create(rwa->os); |