From dfbc86309fd8ebb70a55cafa876320dc1ea8e833 Mon Sep 17 00:00:00 2001
From: Chris Dunlop
Date: Fri, 15 Jul 2016 00:44:38 +1000
Subject: Use native inode->i_nlink instead of znode->z_links

A mostly mechanical change, taking into account i_nlink is 32 bits
vs ZFS's 64 bit on-disk link count.

We revert "xattr dir doesn't get purged during iput" (ddae16a) as this
is a more Linux-integrated fix for the same issue.

In addition, setting the initial link count on a new node has been
changed from setting one less than required in zfs_mknode() then
incrementing to the correct count in zfs_link_create() (which was
somewhat bizarre in the first place), to setting the correct count in
zfs_mknode() and not incrementing it in zfs_link_create(). This both
means we no longer set the link count in sa_bulk_update() twice (once
for the initial incorrect count then again for the correct count), as
well as adhering to the Linux requirement of not incrementing a zero
link count without I_LINKABLE (see linux commit f4e0c30c).

Signed-off-by: Chris Dunlop
Signed-off-by: Brian Behlendorf
Signed-off-by: Chunwei Chen
Closes #4838
Issue #227
---
 module/zfs/zfs_dir.c | 55 +++++++++++++++++++++++++++++++++-------------------
 1 file changed, 35 insertions(+), 20 deletions(-)

(limited to 'module/zfs/zfs_dir.c')

diff --git a/module/zfs/zfs_dir.c b/module/zfs/zfs_dir.c
index 50fa7e248..564f71596 100644
--- a/module/zfs/zfs_dir.c
+++ b/module/zfs/zfs_dir.c
@@ -478,7 +478,7 @@ zfs_unlinked_add(znode_t *zp, dmu_tx_t *tx)
 	zfs_sb_t *zsb = ZTOZSB(zp);
 
 	ASSERT(zp->z_unlinked);
-	ASSERT(zp->z_links == 0);
+	ASSERT(ZTOI(zp)->i_nlink == 0);
 
 	VERIFY3U(0, ==,
 	    zap_add_int(zsb->z_os, zsb->z_unlinkedobj, zp->z_id, tx));
@@ -594,7 +594,7 @@ zfs_purgedir(znode_t *dzp)
 		if (error)
 			skipped += 1;
 		dmu_tx_commit(tx);
-		set_nlink(ZTOI(xzp), xzp->z_links);
+
 		zfs_iput_async(ZTOI(xzp));
 	}
 	zap_cursor_fini(&zc);
@@ -612,9 +612,10 @@ zfs_rmnode(znode_t *zp)
 	dmu_tx_t *tx;
 	uint64_t acl_obj;
 	uint64_t xattr_obj;
+	uint64_t links;
 	int error;
 
-	ASSERT(zp->z_links == 0);
+	ASSERT(ZTOI(zp)->i_nlink == 0);
 	ASSERT(atomic_read(&ZTOI(zp)->i_count) == 0);
 
 	/*
@@ -694,10 +695,10 @@ zfs_rmnode(znode_t *zp)
 		ASSERT(error == 0);
 		mutex_enter(&xzp->z_lock);
 		xzp->z_unlinked = B_TRUE;	/* mark xzp for deletion */
-		xzp->z_links = 0;	/* no more links to it */
-		set_nlink(ZTOI(xzp), 0); /* this will let iput purge us */
+		clear_nlink(ZTOI(xzp));		/* no more links to it */
+		links = 0;
 		VERIFY(0 == sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zsb),
-		    &xzp->z_links, sizeof (xzp->z_links), tx));
+		    &links, sizeof (links), tx));
 		mutex_exit(&xzp->z_lock);
 		zfs_unlinked_add(xzp, tx);
 	}
@@ -736,6 +737,7 @@ zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
 	int zp_is_dir = S_ISDIR(ZTOI(zp)->i_mode);
 	sa_bulk_attr_t bulk[5];
 	uint64_t mtime[2], ctime[2];
+	uint64_t links;
 	int count = 0;
 	int error;
 
@@ -747,10 +749,16 @@ zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
 			mutex_exit(&zp->z_lock);
 			return (SET_ERROR(ENOENT));
 		}
-		zp->z_links++;
-		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL,
-		    &zp->z_links, sizeof (zp->z_links));
-
+		if (!(flag & ZNEW)) {
+			/*
+			 * ZNEW nodes come from zfs_mknode() where the link
+			 * count has already been initialised
+			 */
+			inc_nlink(ZTOI(zp));
+			links = ZTOI(zp)->i_nlink;
+			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL,
+			    &links, sizeof (links));
+		}
 	}
 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zsb), NULL,
 	    &dzp->z_id, sizeof (dzp->z_id));
@@ -770,12 +778,14 @@ zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
 
 	mutex_enter(&dzp->z_lock);
 	dzp->z_size++;
-	dzp->z_links += zp_is_dir;
+	if (zp_is_dir)
+		inc_nlink(ZTOI(dzp));
+	links = ZTOI(dzp)->i_nlink;
 	count = 0;
 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL,
 	    &dzp->z_size, sizeof (dzp->z_size));
 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL,
-	    &dzp->z_links, sizeof (dzp->z_links));
+	    &links, sizeof (links));
 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL,
 	    mtime, sizeof (mtime));
 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL,
@@ -836,6 +846,7 @@ zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
 	boolean_t unlinked = B_FALSE;
 	sa_bulk_attr_t bulk[5];
 	uint64_t mtime[2], ctime[2];
+	uint64_t links;
 	int count = 0;
 	int error;
 
@@ -862,15 +873,16 @@ zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
 			return (error);
 		}
 
-	if (zp->z_links <= zp_is_dir) {
+	if (ZTOI(zp)->i_nlink <= zp_is_dir) {
 		zfs_panic_recover("zfs: link count on %lu is %u, "
 		    "should be at least %u", zp->z_id,
-		    (int)zp->z_links, zp_is_dir + 1);
-		zp->z_links = zp_is_dir + 1;
+		    (int)ZTOI(zp)->i_nlink, zp_is_dir + 1);
+		set_nlink(ZTOI(zp), zp_is_dir + 1);
 	}
-	if (--zp->z_links == zp_is_dir) {
+	drop_nlink(ZTOI(zp));
+	if (ZTOI(zp)->i_nlink == zp_is_dir) {
 		zp->z_unlinked = B_TRUE;
-		zp->z_links = 0;
+		clear_nlink(ZTOI(zp));
 		unlinked = B_TRUE;
 	} else {
 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb),
@@ -880,8 +892,9 @@ zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
 			zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime,
 			    ctime);
 	}
+	links = ZTOI(zp)->i_nlink;
 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb),
-	    NULL, &zp->z_links, sizeof (zp->z_links));
+	    NULL, &links, sizeof (links));
 	error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
 	count = 0;
 	ASSERT(error == 0);
@@ -894,9 +907,11 @@ zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
 
 	mutex_enter(&dzp->z_lock);
 	dzp->z_size--;			/* one dirent removed */
-	dzp->z_links -= zp_is_dir;	/* ".." link from zp */
+	if (zp_is_dir)
+		drop_nlink(ZTOI(dzp));	/* ".." link from zp */
+	links = ZTOI(dzp)->i_nlink;
 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb),
-	    NULL, &dzp->z_links, sizeof (dzp->z_links));
+	    NULL, &links, sizeof (links));
 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL,
 	    &dzp->z_size, sizeof (dzp->z_size));
 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb),
--
cgit v1.2.3
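
Editor's note on the 32-bit vs 64-bit handling in the patch above: Linux keeps
i_nlink as a 32-bit count, while the SA_ZPL_LINKS attribute written through
sa_update()/sa_bulk_update() is a 64-bit on-disk value, which is why every hunk
stages the count in a local uint64_t ("links") instead of passing the address of
the 32-bit field. The sketch below is a minimal standalone userspace illustration
of that widening pattern; it is not part of the patch, and fake_inode and
fake_sa_update() are invented stand-ins for the kernel inode and the SA routines.

/*
 * Minimal sketch (not part of the patch): why a 32-bit in-memory link
 * count is copied into a local 64-bit variable before being handed to a
 * routine that expects a pointer to a full 64-bit on-disk value.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_inode {
	unsigned int i_nlink;	/* Linux stores the link count in 32 bits */
};

/* Stand-in for sa_update(): blindly copies 'len' bytes from 'buf'. */
static void
fake_sa_update(uint64_t *ondisk_links, const void *buf, size_t len)
{
	memcpy(ondisk_links, buf, len);
}

int
main(void)
{
	struct fake_inode ip = { .i_nlink = 3 };
	uint64_t ondisk = 0;
	uint64_t links;

	/*
	 * Widen the 32-bit count into a 64-bit local first, mirroring the
	 * patch's "links = ZTOI(zp)->i_nlink;".  Passing &ip.i_nlink with a
	 * length of sizeof (uint64_t) would read past the 32-bit field.
	 */
	links = ip.i_nlink;
	fake_sa_update(&ondisk, &links, sizeof (links));

	printf("on-disk link count: %llu\n", (unsigned long long)ondisk);
	return (0);
}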