path: root/module/zfs/zfs_znode.c
author	Brian Behlendorf <[email protected]>	2011-02-08 11:16:06 -0800
committer	Brian Behlendorf <[email protected]>	2011-02-10 09:27:21 -0800
commit	3558fd73b5d863304102f6745c26e0b592aca60a (patch)
tree	b22e26afbf6c494d34032876fb9be4d21d4e8ed7 /module/zfs/zfs_znode.c
parent	6149f4c45fc905761a6f636ea9e14ff76ce6c842 (diff)
Prototype/structure update for Linux
I apologize in advance for how many things ended up in this commit. While it could have been separated into a whole series of commits, teasing it all apart now would take considerable time and I'm not sure there's much merit in it. As such I'll just summarize the intent of the changes, which are all (or partly) in this commit. Broadly, the intent is to remove as much Solaris-specific code as possible and replace it with native Linux equivalents. More specifically:

1) Replace all instances of zfsvfs_t with zfs_sb_t. While the type is largely the same, calling it private super block data rather than a zfsvfs is more consistent with how Linux names this. While not critical, it makes the code easier to read when you're thinking in Linux-friendly VFS terms.

2) Replace vnode_t with struct inode. The Linux VFS doesn't have the notion of a vnode and there's no good reason to create one. There are in fact several good reasons to remove it: it adds overhead on Linux if we were to manage one, it complicates the code, and it will likely lead to bugs since there's a good chance it will get out of date. The code has been updated to remove all need for this type.

3) Replace all vtype_t's with umode types, and along with this shift all uses of types to mode bits. The Solaris code would pass a vtype which is redundant with the Linux mode. Just update all the code to use the Linux mode macros and remove this redundancy.

4) Remove use of the vn_* helpers and replace them where needed with inode helpers. The big example here is creating iput_async to replace vn_rele_async (a sketch of such a helper follows the diffstat below). Other vn helpers will be addressed as needed, but they should not be emulated. They are a Solaris VFS'ism and should simply be replaced with Linux equivalents.

5) Update the znode alloc/free code. Under Linux it's common to embed the inode-specific data with the inode itself, which removes the need for an extra memory allocation. In ZFS this information is called a znode, and it now embeds the inode (a minimal sketch of this layout follows this message). The allocators have been updated accordingly.

6) Minimal integration with the VFS flags for setting up the super block and handling mount options has been added. This code will need to be refined, but functionally it's all there. This will be the first and last of these too-large-to-review commits.
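To make point (5) concrete, here is a minimal sketch, assuming a hypothetical z_inode member name and a stripped-down znode_t; it is not the actual ZFS header, only an illustration of how embedding the inode lets ITOZ()/ZTOI() reduce to pointer arithmetic.

/*
 * Illustrative only: a cut-down znode that embeds its Linux inode so one
 * allocation backs both structures.  The z_inode field name is assumed.
 */
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/types.h>

typedef struct znode {
	uint64_t	z_id;		/* object number (other fields elided) */
	struct inode	z_inode;	/* embedded VFS inode */
} znode_t;

/* inode -> znode: step back from the embedded member to its container */
static inline znode_t *
ITOZ(struct inode *ip)
{
	return (container_of(ip, znode_t, z_inode));
}

/* znode -> inode: take the address of the embedded member */
static inline struct inode *
ZTOI(znode_t *zp)
{
	return (&zp->z_inode);
}

With a layout like this, an allocator only needs a single kmem_cache_alloc() from the znode cache and the destructor frees the same object back to it, which is what the zfs_inode_alloc()/zfs_inode_destroy() hunks below do.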
Diffstat (limited to 'module/zfs/zfs_znode.c')
-rw-r--r--	module/zfs/zfs_znode.c	726
1 file changed, 275 insertions, 451 deletions
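For point (4), the intent behind an iput_async()-style helper can be sketched as below; the taskq parameter, the TQ_SLEEP flag, and the function-pointer cast are assumptions for illustration, not the commit's actual implementation. The idea is simply that the final, possibly blocking iput() is handed off to a task queue, in the spirit of Solaris' vn_rele_async().

/*
 * Hypothetical sketch: defer a potentially blocking final iput() to a
 * caller-supplied taskq.  Everything here is illustrative.
 */
static void
iput_async(struct inode *ip, taskq_t *taskq)
{
	ASSERT(atomic_read(&ip->i_count) > 0);

	if (atomic_read(&ip->i_count) == 1)
		/* Last holder: the real teardown may block, so punt it. */
		VERIFY(taskq_dispatch(taskq, (task_func_t *)iput,
		    ip, TQ_SLEEP) != 0);
	else
		iput(ip);
}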
diff --git a/module/zfs/zfs_znode.c b/module/zfs/zfs_znode.c
index 283b4d511..024668287 100644
--- a/module/zfs/zfs_znode.c
+++ b/module/zfs/zfs_znode.c
@@ -51,9 +51,11 @@
#include <sys/zfs_ioctl.h>
#include <sys/zfs_rlock.h>
#include <sys/zfs_fuid.h>
+#include <sys/zfs_vnops.h>
#include <sys/dnode.h>
#include <sys/fs/zfs.h>
#include <sys/kidmap.h>
+#include <sys/zpl.h>
#endif /* _KERNEL */
#include <sys/dmu.h>
@@ -88,11 +90,6 @@
* (such as VFS logic) that will not compile easily in userland.
*/
#ifdef _KERNEL
-/*
- * Needed to close a small window in zfs_znode_move() that allows the zfsvfs to
- * be freed before it can be safely accessed.
- */
-krwlock_t zfsvfs_lock;
static kmem_cache_t *znode_cache = NULL;
@@ -102,14 +99,7 @@ zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
{
znode_t *zp = buf;
- ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
-
- zp->z_vnode = vn_alloc(kmflags);
- if (zp->z_vnode == NULL) {
- return (-1);
- }
- ZTOV(zp)->v_data = zp;
-
+ inode_init_once(ZTOI(zp));
list_link_init(&zp->z_link_node);
mutex_init(&zp->z_lock, NULL, MUTEX_DEFAULT, NULL);
@@ -133,9 +123,6 @@ zfs_znode_cache_destructor(void *buf, void *arg)
{
znode_t *zp = buf;
- ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
- ASSERT(ZTOV(zp)->v_data == zp);
- vn_free(ZTOV(zp));
ASSERT(!list_link_active(&zp->z_link_node));
mutex_destroy(&zp->z_lock);
rw_destroy(&zp->z_parent_lock);
@@ -154,11 +141,10 @@ zfs_znode_init(void)
/*
* Initialize zcache
*/
- rw_init(&zfsvfs_lock, NULL, RW_DEFAULT, NULL);
ASSERT(znode_cache == NULL);
znode_cache = kmem_cache_create("zfs_znode_cache",
sizeof (znode_t), 0, zfs_znode_cache_constructor,
- zfs_znode_cache_destructor, NULL, NULL, NULL, 0);
+ zfs_znode_cache_destructor, NULL, NULL, NULL, KMC_KMEM);
}
void
@@ -170,12 +156,10 @@ zfs_znode_fini(void)
if (znode_cache)
kmem_cache_destroy(znode_cache);
znode_cache = NULL;
- rw_destroy(&zfsvfs_lock);
}
-#ifdef HAVE_ZPL
int
-zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
+zfs_create_share_dir(zfs_sb_t *zsb, dmu_tx_t *tx)
{
#ifdef HAVE_SHARE
zfs_acl_ids_t acl_ids;
@@ -186,13 +170,11 @@ zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
int error;
vattr.va_mask = AT_MODE|AT_UID|AT_GID|AT_TYPE;
- vattr.va_type = VDIR;
- vattr.va_mode = S_IFDIR|0555;
+ vattr.va_mode = S_IFDIR | 0555;
vattr.va_uid = crgetuid(kcred);
vattr.va_gid = crgetgid(kcred);
sharezp = kmem_cache_alloc(znode_cache, KM_SLEEP);
- ASSERT(!POINTER_IS_VALID(sharezp->z_zfsvfs));
sharezp->z_moved = 0;
sharezp->z_unlinked = 0;
sharezp->z_atime_dirty = 0;
@@ -214,7 +196,7 @@ zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
zfsvfs->z_shares_dir = sharezp->z_id;
zfs_acl_ids_free(&acl_ids);
- ZTOV(sharezp)->v_count = 0;
+ // ZTOV(sharezp)->v_count = 0;
sa_handle_destroy(sharezp->z_sa_hdl);
kmem_cache_free(znode_cache, sharezp);
@@ -238,8 +220,6 @@ zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
#define MAXMIN64 0xffffffffUL
#endif
-#endif /* HAVE_ZPL */
-
/*
* Create special expldev for ZFS private use.
* Can't use standard expldev since it doesn't do
@@ -260,42 +240,18 @@ zfs_expldev(dev_t dev)
#endif
}
-/*
- * Special cmpldev for ZFS private use.
- * Can't use standard cmpldev since it takes
- * a long dev_t and compresses it to dev32_t in
- * LP64. We need to do a compaction of a long dev_t
- * to a dev32_t in ILP32.
- */
-dev_t
-zfs_cmpldev(uint64_t dev)
-{
-#ifndef _LP64
- minor_t minor = (minor_t)dev & MAXMIN64;
- major_t major = (major_t)(dev >> NBITSMINOR64) & MAXMAJ64;
-
- if (major > MAXMAJ32 || minor > MAXMIN32)
- return (NODEV32);
-
- return (((dev32_t)major << NBITSMINOR32) | minor);
-#else
- return (dev);
-#endif
-}
-
static void
-zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
+zfs_znode_sa_init(zfs_sb_t *zsb, znode_t *zp,
dmu_buf_t *db, dmu_object_type_t obj_type, sa_handle_t *sa_hdl)
{
- ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs) || (zfsvfs == zp->z_zfsvfs));
- ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zfsvfs, zp->z_id)));
+ ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zsb, zp->z_id)));
mutex_enter(&zp->z_lock);
ASSERT(zp->z_sa_hdl == NULL);
ASSERT(zp->z_acl_cached == NULL);
if (sa_hdl == NULL) {
- VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, zp,
+ VERIFY(0 == sa_handle_get_from_db(zsb->z_os, db, zp,
SA_HDL_SHARED, &zp->z_sa_hdl));
} else {
zp->z_sa_hdl = sa_hdl;
@@ -304,60 +260,119 @@ zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
zp->z_is_sa = (obj_type == DMU_OT_SA) ? B_TRUE : B_FALSE;
- /*
- * Slap on VROOT if we are the root znode
- */
- if (zp->z_id == zfsvfs->z_root)
- ZTOV(zp)->v_flag |= VROOT;
-
mutex_exit(&zp->z_lock);
- vn_exists(ZTOV(zp));
}
void
zfs_znode_dmu_fini(znode_t *zp)
{
- ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zp->z_zfsvfs, zp->z_id)) ||
+ ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(ZTOZSB(zp), zp->z_id)) ||
zp->z_unlinked ||
- RW_WRITE_HELD(&zp->z_zfsvfs->z_teardown_inactive_lock));
+ RW_WRITE_HELD(&ZTOZSB(zp)->z_teardown_inactive_lock));
sa_handle_destroy(zp->z_sa_hdl);
zp->z_sa_hdl = NULL;
}
/*
- * Construct a new znode+inode and initialize.
+ * Called by new_inode() to allocate a new inode.
+ */
+int
+zfs_inode_alloc(struct super_block *sb, struct inode **ip)
+{
+ znode_t *zp;
+
+ zp = kmem_cache_alloc(znode_cache, KM_SLEEP);
+ *ip = ZTOI(zp);
+
+ return (0);
+}
+
+/*
+ * Called in multiple places when an inode should be destroyed.
+ */
+void
+zfs_inode_destroy(struct inode *ip)
+{
+ znode_t *zp = ITOZ(ip);
+ zfs_sb_t *zsb = ZTOZSB(zp);
+
+ mutex_enter(&zsb->z_znodes_lock);
+ list_remove(&zsb->z_all_znodes, zp);
+ mutex_exit(&zsb->z_znodes_lock);
+
+ if (zp->z_acl_cached) {
+ zfs_acl_free(zp->z_acl_cached);
+ zp->z_acl_cached = NULL;
+ }
+
+ kmem_cache_free(znode_cache, zp);
+}
+
+static void
+zfs_inode_set_ops(zfs_sb_t *zsb, struct inode *ip)
+{
+ uint64_t rdev;
+
+ switch (ip->i_mode & S_IFMT) {
+ case S_IFREG:
+ ip->i_op = &zpl_inode_operations;
+ ip->i_fop = &zpl_file_operations;
+ ip->i_mapping->a_ops = &zpl_address_space_operations;
+ break;
+
+ case S_IFDIR:
+ ip->i_op = &zpl_dir_inode_operations;
+ ip->i_fop = &zpl_dir_file_operations;
+ ITOZ(ip)->z_zn_prefetch = B_TRUE;
+ break;
+
+ case S_IFLNK:
+ ip->i_op = &zpl_symlink_inode_operations;
+ break;
+
+ case S_IFCHR:
+ case S_IFBLK:
+ case S_IFIFO:
+ case S_IFSOCK:
+ VERIFY(sa_lookup(ITOZ(ip)->z_sa_hdl, SA_ZPL_RDEV(zsb),
+ &rdev, sizeof (rdev)) == 0);
+ init_special_inode(ip, ip->i_mode, rdev);
+ ip->i_op = &zpl_special_inode_operations;
+ break;
+
+ default:
+ printk("ZFS: Invalid mode: 0x%x\n", ip->i_mode);
+ VERIFY(0);
+ }
+}
+
+/*
+ * Construct a znode+inode and initialize.
*
* This does not do a call to dmu_set_user() that is
* up to the caller to do, in case you don't want to
* return the znode
*/
static znode_t *
-zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
- dmu_object_type_t obj_type, sa_handle_t *hdl)
+zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz,
+ dmu_object_type_t obj_type, uint64_t obj, sa_handle_t *hdl)
{
znode_t *zp;
- struct inode *inode;
+ struct inode *ip;
uint64_t parent;
sa_bulk_attr_t bulk[9];
int count = 0;
- ASSERT(zfsvfs != NULL);
- ASSERT(zfsvfs->z_vfs != NULL);
- ASSERT(zfsvfs->z_vfs->mnt_sb != NULL);
+ ASSERT(zsb != NULL);
- inode = iget_locked(zfsvfs->z_vfs->mnt_sb, db->db_object);
- zp = ITOZ(inode);
+ ip = new_inode(zsb->z_sb);
+ if (ip == NULL)
+ return (NULL);
- ASSERT(inode->i_state & I_NEW);
+ zp = ITOZ(ip);
ASSERT(zp->z_dirlocks == NULL);
- ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
zp->z_moved = 0;
-
- /*
- * Defer setting z_zfsvfs until the znode is ready to be a candidate for
- * the zfs_znode_move() callback.
- */
zp->z_sa_hdl = NULL;
zp->z_unlinked = 0;
zp->z_atime_dirty = 0;
@@ -367,59 +382,48 @@ zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
zp->z_seq = 0x7A4653;
zp->z_sync_cnt = 0;
- zfs_znode_sa_init(zfsvfs, zp, db, obj_type, hdl);
-
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
- &zp->z_mode, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
- &zp->z_gen, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
- &zp->z_size, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
- &zp->z_links, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
+ zfs_znode_sa_init(zsb, zp, db, obj_type, hdl);
+
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL, &zp->z_mode, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL, &zp->z_gen, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL, &zp->z_size, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL, &zp->z_links, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
&zp->z_pflags, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zsb), NULL,
&parent, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
&zp->z_atime, 16);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
- &zp->z_uid, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
- &zp->z_gid, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL, &zp->z_uid, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL, &zp->z_gid, 8);
if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || zp->z_gen == 0) {
if (hdl == NULL)
sa_handle_destroy(zp->z_sa_hdl);
- iput(inode);
- return (NULL);
- }
- inode->i_mode = (umode_t)zp->z_mode;
- if ((S_ISCHR(inode->i_mode)) || (S_ISBLK(inode->i_mode))) {
- uint64_t rdev;
- VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_RDEV(zfsvfs),
- &rdev, sizeof (rdev)) == 0);
- inode->i_rdev = zfs_cmpldev(rdev);
+ goto error;
}
- /* zp->z_set_ops_inode() must be set in sb->alloc_inode() */
- ASSERT(zp->z_set_ops_inode != NULL);
- zp->z_set_ops_inode(inode);
- unlock_new_inode(inode);
+ ip->i_ino = obj;
+ ip->i_mode = zp->z_mode;
+ ip->i_mtime = ip->i_atime = ip->i_ctime = CURRENT_TIME_SEC;
+ zfs_inode_set_ops(zsb, ip);
+
+ if (insert_inode_locked(ip))
+ goto error;
- mutex_enter(&zfsvfs->z_znodes_lock);
- list_insert_tail(&zfsvfs->z_all_znodes, zp);
+ mutex_enter(&zsb->z_znodes_lock);
+ list_insert_tail(&zsb->z_all_znodes, zp);
membar_producer();
- /*
- * Everything else must be valid before assigning z_zfsvfs makes the
- * znode eligible for zfs_znode_move().
- */
- zp->z_zfsvfs = zfsvfs;
- mutex_exit(&zfsvfs->z_znodes_lock);
+ mutex_exit(&zsb->z_znodes_lock);
- VFS_HOLD(zfsvfs->z_vfs);
+ unlock_new_inode(ip);
return (zp);
+
+error:
+ unlock_new_inode(ip);
+ iput(ip);
+ return NULL;
}
/*
@@ -432,35 +436,35 @@ zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
void
zfs_inode_update(znode_t *zp)
{
- zfsvfs_t *zfsvfs;
- struct inode *inode;
+ zfs_sb_t *zsb;
+ struct inode *ip;
uint32_t blksize;
uint64_t atime[2], mtime[2], ctime[2];
ASSERT(zp != NULL);
- zfsvfs = zp->z_zfsvfs;
- inode = ZTOI(zp);
-
- sa_lookup(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs), &atime, 16);
- sa_lookup(zp->z_sa_hdl, SA_ZPL_MTIME(zfsvfs), &mtime, 16);
- sa_lookup(zp->z_sa_hdl, SA_ZPL_CTIME(zfsvfs), &ctime, 16);
-
- spin_lock(&inode->i_lock);
- inode->i_generation = zp->z_gen;
- inode->i_uid = zp->z_uid;
- inode->i_gid = zp->z_gid;
- inode->i_nlink = zp->z_links;
- inode->i_mode = zp->z_mode;
- inode->i_blkbits = SPA_MINBLOCKSHIFT;
+ zsb = ZTOZSB(zp);
+ ip = ZTOI(zp);
+
+ sa_lookup(zp->z_sa_hdl, SA_ZPL_ATIME(zsb), &atime, 16);
+ sa_lookup(zp->z_sa_hdl, SA_ZPL_MTIME(zsb), &mtime, 16);
+ sa_lookup(zp->z_sa_hdl, SA_ZPL_CTIME(zsb), &ctime, 16);
+
+ spin_lock(&ip->i_lock);
+ ip->i_generation = zp->z_gen;
+ ip->i_uid = zp->z_uid;
+ ip->i_gid = zp->z_gid;
+ ip->i_nlink = zp->z_links;
+ ip->i_mode = zp->z_mode;
+ ip->i_blkbits = SPA_MINBLOCKSHIFT;
dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &blksize,
- (u_longlong_t *)&inode->i_blocks);
+ (u_longlong_t *)&ip->i_blocks);
- ZFS_TIME_DECODE(&inode->i_atime, atime);
- ZFS_TIME_DECODE(&inode->i_mtime, mtime);
- ZFS_TIME_DECODE(&inode->i_ctime, ctime);
+ ZFS_TIME_DECODE(&ip->i_atime, atime);
+ ZFS_TIME_DECODE(&ip->i_mtime, mtime);
+ ZFS_TIME_DECODE(&ip->i_ctime, ctime);
- i_size_write(inode, zp->z_size);
- spin_unlock(&inode->i_lock);
+ i_size_write(ip, zp->z_size);
+ spin_unlock(&ip->i_lock);
}
static uint64_t empty_xattr;
@@ -491,7 +495,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
uint64_t mode, size, links, parent, pflags;
uint64_t dzp_pflags = 0;
uint64_t rdev = 0;
- zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(dzp);
dmu_buf_t *db;
timestruc_t now;
uint64_t gen, obj;
@@ -503,9 +507,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
int cnt = 0;
zfs_acl_locator_cb_t locate = { 0 };
- ASSERT(vap && (vap->va_mask & (AT_TYPE|AT_MODE)) == (AT_TYPE|AT_MODE));
-
- if (zfsvfs->z_replay) {
+ if (zsb->z_replay) {
obj = vap->va_nodeid;
now = vap->va_ctime; /* see zfs_replay_create() */
gen = vap->va_nblocks; /* ditto */
@@ -515,7 +517,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
gen = dmu_tx_get_txg(tx);
}
- obj_type = zfsvfs->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE;
+ obj_type = zsb->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE;
bonuslen = (obj_type == DMU_OT_SA) ?
DN_MAX_BONUSLEN : ZFS_OLD_ZNODE_PHYS_SIZE;
@@ -528,32 +530,32 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
* that there will be an i/o error and we will fail one of the
* assertions below.
*/
- if (vap->va_type == VDIR) {
- if (zfsvfs->z_replay) {
- err = zap_create_claim_norm(zfsvfs->z_os, obj,
- zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
+ if (S_ISDIR(vap->va_mode)) {
+ if (zsb->z_replay) {
+ err = zap_create_claim_norm(zsb->z_os, obj,
+ zsb->z_norm, DMU_OT_DIRECTORY_CONTENTS,
obj_type, bonuslen, tx);
ASSERT3U(err, ==, 0);
} else {
- obj = zap_create_norm(zfsvfs->z_os,
- zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
+ obj = zap_create_norm(zsb->z_os,
+ zsb->z_norm, DMU_OT_DIRECTORY_CONTENTS,
obj_type, bonuslen, tx);
}
} else {
- if (zfsvfs->z_replay) {
- err = dmu_object_claim(zfsvfs->z_os, obj,
+ if (zsb->z_replay) {
+ err = dmu_object_claim(zsb->z_os, obj,
DMU_OT_PLAIN_FILE_CONTENTS, 0,
obj_type, bonuslen, tx);
ASSERT3U(err, ==, 0);
} else {
- obj = dmu_object_alloc(zfsvfs->z_os,
+ obj = dmu_object_alloc(zsb->z_os,
DMU_OT_PLAIN_FILE_CONTENTS, 0,
obj_type, bonuslen, tx);
}
}
- ZFS_OBJ_HOLD_ENTER(zfsvfs, obj);
- VERIFY(0 == sa_buf_hold(zfsvfs->z_os, obj, NULL, &db));
+ ZFS_OBJ_HOLD_ENTER(zsb, obj);
+ VERIFY(0 == sa_buf_hold(zsb->z_os, obj, NULL, &db));
/*
* If this is the root, fix up the half-initialized parent pointer
@@ -572,21 +574,20 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
flag |= IS_XATTR;
}
- if (zfsvfs->z_use_fuids)
+ if (zsb->z_use_fuids)
pflags = ZFS_ARCHIVE | ZFS_AV_MODIFIED;
else
pflags = 0;
- if (vap->va_type == VDIR) {
+ if (S_ISDIR(vap->va_mode)) {
size = 2; /* contents ("." and "..") */
links = (flag & (IS_ROOT_NODE | IS_XATTR)) ? 2 : 1;
} else {
size = links = 0;
}
- if (vap->va_type == VBLK || vap->va_type == VCHR) {
+ if (S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))
rdev = zfs_expldev(vap->va_rdev);
- }
parent = dzp->z_id;
mode = acl_ids->z_mode;
@@ -603,20 +604,20 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
ZFS_TIME_ENCODE(&now, crtime);
ZFS_TIME_ENCODE(&now, ctime);
- if (vap->va_mask & AT_ATIME) {
+ if (vap->va_mask & ATTR_ATIME) {
ZFS_TIME_ENCODE(&vap->va_atime, atime);
} else {
ZFS_TIME_ENCODE(&now, atime);
}
- if (vap->va_mask & AT_MTIME) {
+ if (vap->va_mask & ATTR_MTIME) {
ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
} else {
ZFS_TIME_ENCODE(&now, mtime);
}
/* Now add in all of the "SA" attributes */
- VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED,
+ VERIFY(0 == sa_handle_get_from_db(zsb->z_os, db, NULL, SA_HDL_SHARED,
&sa_hdl));
/*
@@ -628,75 +629,75 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
sa_attrs = kmem_alloc(sizeof(sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
if (obj_type == DMU_OT_ZNODE) {
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zsb),
NULL, &atime, 16);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zsb),
NULL, &mtime, 16);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zsb),
NULL, &ctime, 16);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zsb),
NULL, &crtime, 16);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zsb),
NULL, &gen, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zsb),
NULL, &mode, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zsb),
NULL, &size, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zsb),
NULL, &parent, 8);
} else {
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zsb),
NULL, &mode, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zsb),
NULL, &size, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zsb),
NULL, &gen, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL,
- &acl_ids->z_fuid, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL,
- &acl_ids->z_fgid, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zsb),
+ NULL, &acl_ids->z_fuid, 8);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zsb),
+ NULL, &acl_ids->z_fgid, 8);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zsb),
NULL, &parent, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zsb),
NULL, &pflags, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zsb),
NULL, &atime, 16);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zsb),
NULL, &mtime, 16);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zsb),
NULL, &ctime, 16);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zsb),
NULL, &crtime, 16);
}
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zsb), NULL, &links, 8);
if (obj_type == DMU_OT_ZNODE) {
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zsb), NULL,
&empty_xattr, 8);
}
if (obj_type == DMU_OT_ZNODE ||
- (vap->va_type == VBLK || vap->va_type == VCHR)) {
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zfsvfs),
+ (S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))) {
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zsb),
NULL, &rdev, 8);
}
if (obj_type == DMU_OT_ZNODE) {
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zsb),
NULL, &pflags, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zsb), NULL,
&acl_ids->z_fuid, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zsb), NULL,
&acl_ids->z_fgid, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zfsvfs), NULL, pad,
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zsb), NULL, pad,
sizeof (uint64_t) * 4);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zsb), NULL,
&acl_phys, sizeof (zfs_acl_phys_t));
} else if (acl_ids->z_aclp->z_version >= ZFS_ACL_VERSION_FUID) {
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zsb), NULL,
&acl_ids->z_aclp->z_acl_count, 8);
locate.cb_aclp = acl_ids->z_aclp;
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zsb),
zfs_acl_data_locator, &locate,
acl_ids->z_aclp->z_acl_bytes);
mode = zfs_mode_compute(mode, acl_ids->z_aclp, &pflags,
@@ -706,8 +707,11 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0);
if (!(flag & IS_ROOT_NODE)) {
- *zpp = zfs_znode_alloc(zfsvfs, db, 0, obj_type, sa_hdl);
+ *zpp = zfs_znode_alloc(zsb, db, 0, obj_type, obj, sa_hdl);
ASSERT(*zpp != NULL);
+ ASSERT(dzp != NULL);
+ err = zpl_xattr_security_init(ZTOI(*zpp), ZTOI(dzp));
+ ASSERT3S(err, ==, 0);
} else {
/*
* If we are creating the root node, the "parent" we
@@ -721,118 +725,17 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
(*zpp)->z_pflags = pflags;
(*zpp)->z_mode = mode;
- if (vap->va_mask & AT_XVATTR)
- zfs_xvattr_set(*zpp, (xvattr_t *)vap, tx);
-
if (obj_type == DMU_OT_ZNODE ||
acl_ids->z_aclp->z_version < ZFS_ACL_VERSION_FUID) {
err = zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx);
ASSERT3S(err, ==, 0);
}
kmem_free(sa_attrs, sizeof(sa_bulk_attr_t) * ZPL_END);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj);
-}
-
-/*
- * zfs_xvattr_set only updates the in-core attributes
- * it is assumed the caller will be doing an sa_bulk_update
- * to push the changes out
- */
-void
-zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
-{
-#ifdef HAVE_XVATTR
- xoptattr_t *xoap;
-
- xoap = xva_getxoptattr(xvap);
- ASSERT(xoap);
-
- if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
- uint64_t times[2];
- ZFS_TIME_ENCODE(&xoap->xoa_createtime, times);
- (void) sa_update(zp->z_sa_hdl, SA_ZPL_CRTIME(zp->z_zfsvfs),
- &times, sizeof (times), tx);
- XVA_SET_RTN(xvap, XAT_CREATETIME);
- }
- if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
- ZFS_ATTR_SET(zp, ZFS_READONLY, xoap->xoa_readonly,
- zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_READONLY);
- }
- if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
- ZFS_ATTR_SET(zp, ZFS_HIDDEN, xoap->xoa_hidden,
- zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_HIDDEN);
- }
- if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
- ZFS_ATTR_SET(zp, ZFS_SYSTEM, xoap->xoa_system,
- zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_SYSTEM);
- }
- if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
- ZFS_ATTR_SET(zp, ZFS_ARCHIVE, xoap->xoa_archive,
- zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_ARCHIVE);
- }
- if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
- ZFS_ATTR_SET(zp, ZFS_IMMUTABLE, xoap->xoa_immutable,
- zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_IMMUTABLE);
- }
- if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
- ZFS_ATTR_SET(zp, ZFS_NOUNLINK, xoap->xoa_nounlink,
- zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_NOUNLINK);
- }
- if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
- ZFS_ATTR_SET(zp, ZFS_APPENDONLY, xoap->xoa_appendonly,
- zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_APPENDONLY);
- }
- if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
- ZFS_ATTR_SET(zp, ZFS_NODUMP, xoap->xoa_nodump,
- zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_NODUMP);
- }
- if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
- ZFS_ATTR_SET(zp, ZFS_OPAQUE, xoap->xoa_opaque,
- zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_OPAQUE);
- }
- if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
- ZFS_ATTR_SET(zp, ZFS_AV_QUARANTINED,
- xoap->xoa_av_quarantined, zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
- }
- if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
- ZFS_ATTR_SET(zp, ZFS_AV_MODIFIED, xoap->xoa_av_modified,
- zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
- }
- if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
- zfs_sa_set_scanstamp(zp, xvap, tx);
- XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP);
- }
- if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
- ZFS_ATTR_SET(zp, ZFS_REPARSE, xoap->xoa_reparse,
- zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_REPARSE);
- }
- if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
- ZFS_ATTR_SET(zp, ZFS_OFFLINE, xoap->xoa_offline,
- zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_OFFLINE);
- }
- if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
- ZFS_ATTR_SET(zp, ZFS_SPARSE, xoap->xoa_sparse,
- zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_SPARSE);
- }
-#endif /* HAVE_XVATTR */
+ ZFS_OBJ_HOLD_EXIT(zsb, obj);
}
int
-zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
+zfs_zget(zfs_sb_t *zsb, uint64_t obj_num, znode_t **zpp)
{
dmu_object_info_t doi;
dmu_buf_t *db;
@@ -842,11 +745,11 @@ zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
*zpp = NULL;
- ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_ENTER(zsb, obj_num);
- err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
+ err = sa_buf_hold(zsb->z_os, obj_num, NULL, &db);
if (err) {
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (err);
}
@@ -856,7 +759,7 @@ zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t)))) {
sa_buf_rele(db, NULL);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (EINVAL);
}
@@ -878,19 +781,18 @@ zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
if (zp->z_unlinked) {
err = ENOENT;
} else {
- VN_HOLD(ZTOV(zp));
+ igrab(ZTOI(zp));
*zpp = zp;
err = 0;
}
sa_buf_rele(db, NULL);
mutex_exit(&zp->z_lock);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (err);
}
/*
- * Not found create new znode/vnode
- * but only if file exists.
+ * Not found create new znode/vnode but only if file exists.
*
* There is a small window where zfs_vget() could
* find this object while a file create is still in
@@ -899,21 +801,21 @@ zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
* if zfs_znode_alloc() fails it will drop the hold on the
* bonus buffer.
*/
- zp = zfs_znode_alloc(zfsvfs, db, doi.doi_data_block_size,
- doi.doi_bonus_type, NULL);
+ zp = zfs_znode_alloc(zsb, db, doi.doi_data_block_size,
+ doi.doi_bonus_type, obj_num, NULL);
if (zp == NULL) {
err = ENOENT;
} else {
*zpp = zp;
}
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (err);
}
int
zfs_rezget(znode_t *zp)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
dmu_object_info_t doi;
dmu_buf_t *db;
uint64_t obj_num = zp->z_id;
@@ -923,7 +825,7 @@ zfs_rezget(znode_t *zp)
int count = 0;
uint64_t gen;
- ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_ENTER(zsb, obj_num);
mutex_enter(&zp->z_acl_lock);
if (zp->z_acl_cached) {
@@ -933,9 +835,9 @@ zfs_rezget(znode_t *zp)
mutex_exit(&zp->z_acl_lock);
ASSERT(zp->z_sa_hdl == NULL);
- err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
+ err = sa_buf_hold(zsb->z_os, obj_num, NULL, &db);
if (err) {
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (err);
}
@@ -945,33 +847,33 @@ zfs_rezget(znode_t *zp)
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t)))) {
sa_buf_rele(db, NULL);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (EINVAL);
}
- zfs_znode_sa_init(zfsvfs, zp, db, doi.doi_bonus_type, NULL);
+ zfs_znode_sa_init(zsb, zp, db, doi.doi_bonus_type, NULL);
/* reload cached values */
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL,
&gen, sizeof (gen));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL,
&zp->z_size, sizeof (zp->z_size));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL,
&zp->z_links, sizeof (zp->z_links));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
&zp->z_atime, sizeof (zp->z_atime));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL,
&zp->z_uid, sizeof (zp->z_uid));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL,
&zp->z_gid, sizeof (zp->z_gid));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL,
&mode, sizeof (mode));
if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) {
zfs_znode_dmu_fini(zp);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (EIO);
}
@@ -979,14 +881,14 @@ zfs_rezget(znode_t *zp)
if (gen != zp->z_gen) {
zfs_znode_dmu_fini(zp);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (EIO);
}
zp->z_unlinked = (zp->z_links == 0);
zp->z_blksz = doi.doi_data_block_size;
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (0);
}
@@ -994,27 +896,25 @@ zfs_rezget(znode_t *zp)
void
zfs_znode_delete(znode_t *zp, dmu_tx_t *tx)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- objset_t *os = zfsvfs->z_os;
+ zfs_sb_t *zsb = ZTOZSB(zp);
+ objset_t *os = zsb->z_os;
uint64_t obj = zp->z_id;
uint64_t acl_obj = zfs_external_acl(zp);
- ZFS_OBJ_HOLD_ENTER(zfsvfs, obj);
+ ZFS_OBJ_HOLD_ENTER(zsb, obj);
if (acl_obj) {
VERIFY(!zp->z_is_sa);
VERIFY(0 == dmu_object_free(os, acl_obj, tx));
}
VERIFY(0 == dmu_object_free(os, obj, tx));
zfs_znode_dmu_fini(zp);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj);
- zfs_znode_free(zp);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj);
}
void
zfs_zinactive(znode_t *zp)
{
- vnode_t *vp = ZTOV(zp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
uint64_t z_id = zp->z_id;
ASSERT(zp->z_sa_hdl);
@@ -1022,29 +922,8 @@ zfs_zinactive(znode_t *zp)
/*
* Don't allow a zfs_zget() while were trying to release this znode
*/
- ZFS_OBJ_HOLD_ENTER(zfsvfs, z_id);
-
+ ZFS_OBJ_HOLD_ENTER(zsb, z_id);
mutex_enter(&zp->z_lock);
- mutex_enter(&vp->v_lock);
- vp->v_count--;
- if (vp->v_count > 0 || vn_has_cached_data(vp)) {
- /*
- * If the hold count is greater than zero, somebody has
- * obtained a new reference on this znode while we were
- * processing it here, so we are done. If we still have
- * mapped pages then we are also done, since we don't
- * want to inactivate the znode until the pages get pushed.
- *
- * XXX - if vn_has_cached_data(vp) is true, but count == 0,
- * this seems like it would leave the znode hanging with
- * no chance to go inactive...
- */
- mutex_exit(&vp->v_lock);
- mutex_exit(&zp->z_lock);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
- return;
- }
- mutex_exit(&vp->v_lock);
/*
* If this was the last reference to a file with no links,
@@ -1052,39 +931,14 @@ zfs_zinactive(znode_t *zp)
*/
if (zp->z_unlinked) {
mutex_exit(&zp->z_lock);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
+ ZFS_OBJ_HOLD_EXIT(zsb, z_id);
zfs_rmnode(zp);
return;
}
mutex_exit(&zp->z_lock);
zfs_znode_dmu_fini(zp);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
- zfs_znode_free(zp);
-}
-
-void
-zfs_znode_free(znode_t *zp)
-{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
-
- vn_invalid(ZTOV(zp));
-
- ASSERT(ZTOV(zp)->v_count == 0);
-
- mutex_enter(&zfsvfs->z_znodes_lock);
- POINTER_INVALIDATE(&zp->z_zfsvfs);
- list_remove(&zfsvfs->z_all_znodes, zp);
- mutex_exit(&zfsvfs->z_znodes_lock);
-
- if (zp->z_acl_cached) {
- zfs_acl_free(zp->z_acl_cached);
- zp->z_acl_cached = NULL;
- }
-
- kmem_cache_free(znode_cache, zp);
-
- VFS_RELE(zfsvfs->z_vfs);
+ ZFS_OBJ_HOLD_EXIT(zsb, z_id);
}
void
@@ -1102,21 +956,21 @@ zfs_tstamp_update_setup(znode_t *zp, uint_t flag, uint64_t mtime[2],
zp->z_atime_dirty = 1;
}
- if (flag & AT_ATIME) {
+ if (flag & ATTR_ATIME) {
ZFS_TIME_ENCODE(&now, zp->z_atime);
}
- if (flag & AT_MTIME) {
+ if (flag & ATTR_MTIME) {
ZFS_TIME_ENCODE(&now, mtime);
- if (zp->z_zfsvfs->z_use_fuids) {
+ if (ZTOZSB(zp)->z_use_fuids) {
zp->z_pflags |= (ZFS_ARCHIVE |
ZFS_AV_MODIFIED);
}
}
- if (flag & AT_CTIME) {
+ if (flag & ATTR_CTIME) {
ZFS_TIME_ENCODE(&now, ctime);
- if (zp->z_zfsvfs->z_use_fuids)
+ if (ZTOZSB(zp)->z_use_fuids)
zp->z_pflags |= ZFS_ARCHIVE;
}
}
@@ -1146,7 +1000,7 @@ zfs_grow_blocksize(znode_t *zp, uint64_t size, dmu_tx_t *tx)
if (zp->z_blksz && zp->z_size > zp->z_blksz)
return;
- error = dmu_object_set_blocksize(zp->z_zfsvfs->z_os, zp->z_id,
+ error = dmu_object_set_blocksize(ZTOZSB(zp)->z_os, zp->z_id,
size, 0, tx);
if (error == ENOTSUP)
@@ -1157,7 +1011,7 @@ zfs_grow_blocksize(znode_t *zp, uint64_t size, dmu_tx_t *tx)
dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &zp->z_blksz, &dummy);
}
-#ifdef HAVE_ZPL
+#ifdef HAVE_MMAP
/*
* This is a dummy interface used when pvn_vplist_dirty() should *not*
* be calling back into the fs for a putpage(). E.g.: when truncating
@@ -1171,7 +1025,7 @@ zfs_no_putpage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
ASSERT(0);
return (0);
}
-#endif /* HAVE_ZPL */
+#endif /* HAVE_MMAP */
/*
* Increase the file length
@@ -1185,7 +1039,7 @@ zfs_no_putpage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
static int
zfs_extend(znode_t *zp, uint64_t end)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
dmu_tx_t *tx;
rl_t *rl;
uint64_t newblksz;
@@ -1204,19 +1058,19 @@ zfs_extend(znode_t *zp, uint64_t end)
return (0);
}
top:
- tx = dmu_tx_create(zfsvfs->z_os);
+ tx = dmu_tx_create(zsb->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
if (end > zp->z_blksz &&
- (!ISP2(zp->z_blksz) || zp->z_blksz < zfsvfs->z_max_blksz)) {
+ (!ISP2(zp->z_blksz) || zp->z_blksz < zsb->z_max_blksz)) {
/*
* We are growing the file past the current block size.
*/
- if (zp->z_blksz > zp->z_zfsvfs->z_max_blksz) {
+ if (zp->z_blksz > ZTOZSB(zp)->z_max_blksz) {
ASSERT(!ISP2(zp->z_blksz));
newblksz = MIN(end, SPA_MAXBLOCKSIZE);
} else {
- newblksz = MIN(end, zp->z_zfsvfs->z_max_blksz);
+ newblksz = MIN(end, ZTOZSB(zp)->z_max_blksz);
}
dmu_tx_hold_write(tx, zp->z_id, 0, newblksz);
} else {
@@ -1240,7 +1094,7 @@ top:
zp->z_size = end;
- VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zp->z_zfsvfs),
+ VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
&zp->z_size, sizeof (zp->z_size), tx));
zfs_range_unlock(rl);
@@ -1257,13 +1111,13 @@ top:
* off - start of section to free.
* len - length of section to free.
*
- * RETURN: 0 if success
+ * RETURN: 0 if success
* error code if failure
*/
static int
zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
rl_t *rl;
int error;
@@ -1283,7 +1137,7 @@ zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
if (off + len > zp->z_size)
len = zp->z_size - off;
- error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, off, len);
+ error = dmu_free_long_range(zsb->z_os, zp->z_id, off, len);
zfs_range_unlock(rl);
@@ -1296,16 +1150,13 @@ zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
* IN: zp - znode of file to free data in.
* end - new end-of-file.
*
- * RETURN: 0 if success
+ * RETURN: 0 if success
* error code if failure
*/
static int
zfs_trunc(znode_t *zp, uint64_t end)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
-#ifdef HAVE_ZPL
- vnode_t *vp = ZTOV(zp);
-#endif /* HAVE_ZPL */
+ zfs_sb_t *zsb = ZTOZSB(zp);
dmu_tx_t *tx;
rl_t *rl;
int error;
@@ -1325,13 +1176,13 @@ zfs_trunc(znode_t *zp, uint64_t end)
return (0);
}
- error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, end, -1);
+ error = dmu_free_long_range(zsb->z_os, zp->z_id, end, -1);
if (error) {
zfs_range_unlock(rl);
return (error);
}
top:
- tx = dmu_tx_create(zfsvfs->z_os);
+ tx = dmu_tx_create(zsb->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_NOWAIT);
@@ -1347,44 +1198,18 @@ top:
}
zp->z_size = end;
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb),
NULL, &zp->z_size, sizeof (zp->z_size));
if (end == 0) {
zp->z_pflags &= ~ZFS_SPARSE;
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb),
NULL, &zp->z_pflags, 8);
}
VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);
dmu_tx_commit(tx);
-#ifdef HAVE_ZPL
- /*
- * Clear any mapped pages in the truncated region. This has to
- * happen outside of the transaction to avoid the possibility of
- * a deadlock with someone trying to push a page that we are
- * about to invalidate.
- */
- if (vn_has_cached_data(vp)) {
- page_t *pp;
- uint64_t start = end & PAGEMASK;
- int poff = end & PAGEOFFSET;
-
- if (poff != 0 && (pp = page_lookup(vp, start, SE_SHARED))) {
- /*
- * We need to zero a partial page.
- */
- pagezero(pp, poff, PAGESIZE - poff);
- start += PAGESIZE;
- page_unlock(pp);
- }
- error = pvn_vplist_dirty(vp, start, zfs_no_putpage,
- B_INVAL | B_TRUNC, NULL);
- ASSERT(error == 0);
- }
-#endif /* HAVE_ZPL */
-
zfs_range_unlock(rl);
return (0);
@@ -1399,25 +1224,25 @@ top:
* flag - current file open mode flags.
* log - TRUE if this action should be logged
*
- * RETURN: 0 if success
+ * RETURN: 0 if success
* error code if failure
*/
int
zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
{
-#ifdef HAVE_ZPL
- vnode_t *vp = ZTOV(zp);
-#endif /* HAVE_ZPL */
+#ifdef HAVE_MANDLOCKS
+ struct inode *ip = ZTOI(zp);
+#endif /* HAVE_MANDLOCKS */
dmu_tx_t *tx;
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- zilog_t *zilog = zfsvfs->z_log;
+ zfs_sb_t *zsb = ZTOZSB(zp);
+ zilog_t *zilog = zsb->z_log;
uint64_t mode;
uint64_t mtime[2], ctime[2];
sa_bulk_attr_t bulk[3];
int count = 0;
int error;
- if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs), &mode,
+ if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zsb), &mode,
sizeof (mode))) != 0)
return (error);
@@ -1429,17 +1254,17 @@ zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
return (error);
}
-#ifdef HAVE_ZPL
+#ifdef HAVE_MANDLOCKS
/*
* Check for any locks in the region to be freed.
*/
- if (MANDLOCK(vp, (mode_t)mode)) {
+ if (MANDLOCK(ip, (mode_t)mode)) {
uint64_t length = (len ? len : zp->z_size - off);
- if (error = chklock(vp, FWRITE, off, length, flag, NULL))
+ if (error = chklock(ip, FWRITE, off, length, flag, NULL))
return (error);
}
-#endif /* HAVE_ZPL */
+#endif /* HAVE_MANDLOCKS */
if (len == 0) {
error = zfs_trunc(zp, off);
@@ -1451,7 +1276,7 @@ zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
if (error || !log)
return (error);
log:
- tx = dmu_tx_create(zfsvfs->z_os);
+ tx = dmu_tx_create(zsb->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_NOWAIT);
@@ -1465,9 +1290,9 @@ log:
return (error);
}
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, mtime, 16);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, ctime, 16);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, mtime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, ctime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb),
NULL, &zp->z_pflags, 8);
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime, B_TRUE);
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
@@ -1589,7 +1414,6 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
ASSERT(error == 0);
dmu_buf_rele(db, FTAG);
-#endif /* HAVE_ZPL */
}
#endif /* _KERNEL */