author		Debabrata Banerjee <[email protected]>	2017-03-24 17:28:38 -0400
committer	Brian Behlendorf <[email protected]>	2017-04-13 10:51:20 -0700
commit		66aca24730adfb2e3875e5148a03dd1fb435d438 (patch)
tree		12ce4a9e59a64c7a207703a0525238a86ba012f2 /module/zfs/dmu.c
parent		a44e7faa6c63998a4c058901a5c587706abe56ab (diff)
SEEK_HOLE should not block on txg_wait_synced()
Forced flushing of txgs can be painfully slow when competing for disk IO, since this is a process meant to execute asynchronously. Optimize this path by allowing data/hole seeking if the file is clean; if it is dirty, fall back to the old logic. This is a compromise short of disabling the feature entirely.

Reviewed-by: Giuseppe Di Natale <[email protected]>
Reviewed-by: George Melikov <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Debabrata Banerjee <[email protected]>
Closes #4306
Closes #5962
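For context, the path being optimized here is the one a userspace caller reaches through lseek(2) with SEEK_HOLE or SEEK_DATA, which arrives at dmu_offset_next() via zpl_llseek() and zfs_holey_common(). Below is a minimal sketch of such a caller, assuming a Linux system where glibc exposes both whence values under _GNU_SOURCE; this program is illustrative and not part of the commit. Before this change, running it against a file with dirty data could stall in txg_wait_synced(); afterwards (with the new tunable off) the dirty file is simply reported as hole-free.

/*
 * Illustrative only: exercise the SEEK_HOLE path this commit optimizes.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(int argc, char *argv[])
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return (1);
	}

	int fd = open(argv[1], O_RDONLY);
	if (fd == -1) {
		perror("open");
		return (1);
	}

	/*
	 * Find the first hole at or after offset 0. If the file has no
	 * holes (or is dirty, after this commit), this returns the
	 * end-of-file offset.
	 */
	off_t hole = lseek(fd, 0, SEEK_HOLE);
	if (hole == -1)
		perror("lseek(SEEK_HOLE)");
	else
		printf("first hole at offset %lld\n", (long long)hole);

	close(fd);
	return (0);
}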
Diffstat (limited to 'module/zfs/dmu.c')
-rw-r--r--	module/zfs/dmu.c	49
1 file changed, 42 insertions(+), 7 deletions(-)
diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
index 4e62e0435..4929ef9ab 100644
--- a/module/zfs/dmu.c
+++ b/module/zfs/dmu.c
@@ -67,6 +67,11 @@ int zfs_nopwrite_enabled = 1;
  */
 unsigned long zfs_per_txg_dirty_frees_percent = 30;
 
+/*
+ * Enable/disable forcing txg sync when dirty in dmu_offset_next.
+ */
+int zfs_dmu_offset_next_sync = 0;
+
 const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
 	{	DMU_BSWAP_UINT8,	TRUE,	"unallocated"		},
 	{	DMU_BSWAP_ZAP,		TRUE,	"object directory"	},
@@ -1989,24 +1994,43 @@ dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
 	zp->zp_nopwrite = nopwrite;
 }
 
+/*
+ * This function is only called from zfs_holey_common() for zpl_llseek()
+ * in order to determine the location of holes.  To accurately report
+ * holes, all dirty data must be synced to disk, which causes extremely
+ * poor performance when seeking for holes in a dirty file.  As a compromise,
+ * only provide hole data when the dnode is clean.  When a dnode is dirty,
+ * report the dnode as having no holes, which is always a safe thing to do.
+ */
 int
 dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
 {
 	dnode_t *dn;
 	int i, err;
+	boolean_t clean = B_TRUE;
 
 	err = dnode_hold(os, object, FTAG, &dn);
 	if (err)
 		return (err);
+
 	/*
-	 * Sync any current changes before
-	 * we go trundling through the block pointers.
+	 * Check if dnode is dirty
 	 */
-	for (i = 0; i < TXG_SIZE; i++) {
-		if (list_link_active(&dn->dn_dirty_link[i]))
-			break;
+	if (dn->dn_dirtyctx != DN_UNDIRTIED) {
+		for (i = 0; i < TXG_SIZE; i++) {
+			if (!list_is_empty(&dn->dn_dirty_records[i])) {
+				clean = B_FALSE;
+				break;
+			}
+		}
 	}
-	if (i != TXG_SIZE) {
+
+	/*
+	 * If the compatibility option is on, sync any current changes before
+	 * we go trundling through the block pointers.
+	 */
+	if (!clean && zfs_dmu_offset_next_sync) {
+		clean = B_TRUE;
 		dnode_rele(dn, FTAG);
 		txg_wait_synced(dmu_objset_pool(os), 0);
 		err = dnode_hold(os, object, FTAG, &dn);
@@ -2014,7 +2038,12 @@ dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
 			return (err);
 	}
 
-	err = dnode_next_offset(dn, (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
+	if (clean)
+		err = dnode_next_offset(dn,
+		    (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
+	else
+		err = SET_ERROR(EBUSY);
+
 	dnode_rele(dn, FTAG);
 
 	return (err);
@@ -2238,5 +2267,11 @@ MODULE_PARM_DESC(zfs_nopwrite_enabled, "Enable NOP writes");
 module_param(zfs_per_txg_dirty_frees_percent, ulong, 0644);
 MODULE_PARM_DESC(zfs_per_txg_dirty_frees_percent,
 	"percentage of dirtied blocks from frees in one TXG");
+
+module_param(zfs_dmu_offset_next_sync, int, 0644);
+MODULE_PARM_DESC(zfs_dmu_offset_next_sync,
+	"Enable forcing txg sync to find holes");
+
 /* END CSTYLED */
+
 #endif
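The new zfs_dmu_offset_next_sync parameter restores the old blocking behavior when set. Since it is registered with mode 0644, it should be writable at runtime through sysfs. A small sketch follows, assuming ZFS is loaded as a module so the parameter appears under the standard /sys/module/zfs/parameters/ path; the equivalent shell command would be `echo 1 > /sys/module/zfs/parameters/zfs_dmu_offset_next_sync`.

/*
 * Sketch: re-enable the old "sync before seeking" behavior at runtime.
 * Assumes a modular ZFS build exposing the parameter via sysfs.
 */
#include <stdio.h>

int
main(void)
{
	FILE *fp = fopen(
	    "/sys/module/zfs/parameters/zfs_dmu_offset_next_sync", "w");

	if (fp == NULL) {
		perror("fopen");
		return (1);
	}
	fputs("1\n", fp);
	return (fclose(fp) == 0 ? 0 : 1);
}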
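With the tunable off (the new default), dmu_offset_next() now returns EBUSY instead of blocking when the dnode is dirty. This diff is limited to dmu.c, so the consumer side is not shown; the following is only a sketch of how a caller such as zfs_holey_common() might map that error, with the helper name and the exact fallback assumed from the commit message (a dirty file is treated as having no holes before EOF) rather than taken from this diff.

/*
 * Illustrative sketch only: consuming dmu_offset_next() after this
 * change. example_holey() and its fallback logic are assumptions.
 */
static int
example_holey(objset_t *os, uint64_t object, uint64_t file_sz,
    boolean_t hole, uint64_t *off)
{
	int error = dmu_offset_next(os, object, hole, off);

	if (error == EBUSY) {
		/*
		 * Dnode was dirty and zfs_dmu_offset_next_sync is off:
		 * report the file as hole-free, i.e. the only "hole" is
		 * the virtual one at end-of-file; data is found in place.
		 */
		error = 0;
		if (hole)
			*off = file_sz;
	}
	if (error == ESRCH)
		return (SET_ERROR(ENXIO));

	return (error);
}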