Diffstat (limited to 'module')
-rw-r--r--   module/zfs/dmu.c        49
-rw-r--r--   module/zfs/zfs_vnops.c   4
2 files changed, 46 insertions, 7 deletions
diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
index 4e62e0435..4929ef9ab 100644
--- a/module/zfs/dmu.c
+++ b/module/zfs/dmu.c
@@ -67,6 +67,11 @@ int zfs_nopwrite_enabled = 1;
  */
 unsigned long zfs_per_txg_dirty_frees_percent = 30;
 
+/*
+ * Enable/disable forcing txg sync when dirty in dmu_offset_next.
+ */
+int zfs_dmu_offset_next_sync = 0;
+
 const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
 	{ DMU_BSWAP_UINT8, TRUE, "unallocated" },
 	{ DMU_BSWAP_ZAP, TRUE, "object directory" },
@@ -1989,24 +1994,43 @@ dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp,
 	zp->zp_nopwrite = nopwrite;
 }
 
+/*
+ * This function is only called from zfs_holey_common() for zpl_llseek()
+ * in order to determine the location of holes. In order to accurately
+ * report holes all dirty data must be synced to disk. This causes extremely
+ * poor performance when seeking for holes in a dirty file. As a compromise,
+ * only provide hole data when the dnode is clean. When a dnode is dirty
+ * report the dnode as having no holes which is always a safe thing to do.
+ */
 int
 dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
 {
 	dnode_t *dn;
 	int i, err;
+	boolean_t clean = B_TRUE;
 
 	err = dnode_hold(os, object, FTAG, &dn);
 	if (err)
 		return (err);
+
 	/*
-	 * Sync any current changes before
-	 * we go trundling through the block pointers.
+	 * Check if dnode is dirty
 	 */
-	for (i = 0; i < TXG_SIZE; i++) {
-		if (list_link_active(&dn->dn_dirty_link[i]))
-			break;
+	if (dn->dn_dirtyctx != DN_UNDIRTIED) {
+		for (i = 0; i < TXG_SIZE; i++) {
+			if (!list_is_empty(&dn->dn_dirty_records[i])) {
+				clean = B_FALSE;
+				break;
+			}
+		}
 	}
-	if (i != TXG_SIZE) {
+
+	/*
+	 * If compatibility option is on, sync any current changes before
+	 * we go trundling through the block pointers.
+	 */
+	if (!clean && zfs_dmu_offset_next_sync) {
+		clean = B_TRUE;
 		dnode_rele(dn, FTAG);
 		txg_wait_synced(dmu_objset_pool(os), 0);
 		err = dnode_hold(os, object, FTAG, &dn);
@@ -2014,7 +2038,12 @@ dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
 			return (err);
 	}
 
-	err = dnode_next_offset(dn, (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
+	if (clean)
+		err = dnode_next_offset(dn,
+		    (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
+	else
+		err = SET_ERROR(EBUSY);
+
 	dnode_rele(dn, FTAG);
 
 	return (err);
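
Read together, the two hunks above give dmu_offset_next() the following overall shape. This is a sketch assembled from the diff for readability, not a verbatim copy of the patched file; the few lines that fall between the hunks (such as the error check after the second dnode_hold()) are filled in as assumptions.

int
dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
{
	dnode_t *dn;
	int i, err;
	boolean_t clean = B_TRUE;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	/* Check whether the dnode has dirty records in any open txg. */
	if (dn->dn_dirtyctx != DN_UNDIRTIED) {
		for (i = 0; i < TXG_SIZE; i++) {
			if (!list_is_empty(&dn->dn_dirty_records[i])) {
				clean = B_FALSE;
				break;
			}
		}
	}

	/*
	 * Compatibility path: if zfs_dmu_offset_next_sync is set, force a
	 * txg sync (the old behavior) so the block pointers are accurate.
	 */
	if (!clean && zfs_dmu_offset_next_sync) {
		clean = B_TRUE;
		dnode_rele(dn, FTAG);
		txg_wait_synced(dmu_objset_pool(os), 0);
		err = dnode_hold(os, object, FTAG, &dn);
		if (err)	/* assumed; this line is not visible in the diff */
			return (err);
	}

	/* Only walk the block pointers when the dnode is clean. */
	if (clean)
		err = dnode_next_offset(dn,
		    (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
	else
		err = SET_ERROR(EBUSY);

	dnode_rele(dn, FTAG);

	return (err);
}
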
@@ -2238,5 +2267,11 @@ MODULE_PARM_DESC(zfs_nopwrite_enabled, "Enable NOP writes");
 module_param(zfs_per_txg_dirty_frees_percent, ulong, 0644);
 MODULE_PARM_DESC(zfs_per_txg_dirty_frees_percent,
 	"percentage of dirtied blocks from frees in one TXG");
+
+module_param(zfs_dmu_offset_next_sync, int, 0644);
+MODULE_PARM_DESC(zfs_dmu_offset_next_sync,
+	"Enable forcing txg sync to find holes");
+
 /* END CSTYLED */
+
 #endif
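
The new tunable is registered as an ordinary module parameter (mode 0644), so it can be flipped at runtime once the module is loaded. A minimal userspace sketch, assuming the module is named "zfs" and the parameter is therefore exposed at the usual sysfs path (an assumption, not something shown in this diff):

/* toggle_offset_next_sync.c - flip zfs_dmu_offset_next_sync via sysfs (sketch) */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	/* Assumed path; standard for module_param() when the module is named "zfs". */
	const char *path = "/sys/module/zfs/parameters/zfs_dmu_offset_next_sync";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return (1);
	}
	/* "1" restores the old force-a-txg-sync behavior; "0" is the new default. */
	if (write(fd, "1", 1) != 1)
		perror("write");
	(void) close(fd);
	return (0);
}
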
diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
index 4afae6c36..72a3104c7 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -278,6 +278,10 @@ zfs_holey_common(struct inode *ip, int cmd, loff_t *off)
 	if (error == ESRCH)
 		return (SET_ERROR(ENXIO));
 
+	/* file was dirty, so fall back to using file_sz logic */
+	if (error == EBUSY)
+		error = 0;
+
 	/*
 	 * We could find a hole that begins after the logical end-of-file,
 	 * because dmu_offset_next() only works on whole blocks. If the
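
This EBUSY handling is what lets the llseek path degrade gracefully: when dmu_offset_next() declines to report holes for a dirty dnode, zfs_holey_common() clears the error and continues with the existing file-size logic instead of blocking in txg_wait_synced(). From userspace the path is exercised through lseek(2) with SEEK_HOLE or SEEK_DATA; a small probe using only standard Linux interfaces (not part of this change) might look like:

/* seek_hole.c - report the first hole lseek(SEEK_HOLE) finds in a file */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return (1);
	}

	int fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return (1);
	}

	/*
	 * With zfs_dmu_offset_next_sync=0 (the new default) this returns
	 * promptly even if the file has unsynced dirty data, rather than
	 * waiting for a pool-wide txg sync.
	 */
	off_t hole = lseek(fd, 0, SEEK_HOLE);
	if (hole < 0)
		perror("lseek(SEEK_HOLE)");
	else
		printf("first hole at offset %lld\n", (long long)hole);

	(void) close(fd);
	return (0);
}
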