author    | Brian Behlendorf <[email protected]> | 2017-03-13 15:08:40 -0700
committer | GitHub <[email protected]>            | 2017-03-13 15:08:40 -0700
commit    | 09ec770c2cfdb105e1d4a6e7470f2456d37c65e0 (patch)
tree      | 6491e5bdda80532388091def591f7ee9e59b6271
parent    | ef1bdf363c021525c1db9630647dea73498c6bfd (diff)
parent    | 1c2555ef926521671eaca918f0aaaa97dbef02af (diff)
Align mount options handling and type/function names with OpenZFS
Refactor the temporary mount option handling in a way that minimizes
differences with upstream. Additionally, replace the zfs_sb_t
type with zfsvfs_t and rename several functions to be consistent
with the upstream names.
Reviewed-by: George Melikov <[email protected]>
Reviewed-by: Tim Chase <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Closes #5876
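For readers skimming the patch below, the shape of the change is largely mechanical: every zfs_sb_t / zsb becomes zfsvfs_t / zfsvfs, and the old zfs_mntopts_t is split into a small vfs_t shim holding the parsed mount options (emulating the upstream vfs_t) plus a zfs_mnt_t carrying the raw mount request. The following is a condensed sketch of the new definitions from include/sys/zfs_vfsops.h as changed by this patch; most zfsvfs members are omitted for brevity.

/*
 * Condensed from include/sys/zfs_vfsops.h in this patch; most members of
 * struct zfsvfs are elided ("...") for brevity.
 */
typedef struct zfsvfs zfsvfs_t;

/* Emulates the upstream vfs_t; holds the parsed mount options. */
typedef struct vfs {
	struct zfsvfs	*vfs_data;	/* backing zfsvfs_t */
	char		*vfs_mntpoint;	/* primary mount point */
	boolean_t	vfs_readonly;	/* option value ... */
	boolean_t	vfs_do_readonly; /* ... and whether it was specified */
	/* ... one such value/do_value pair per mount option ... */
} vfs_t;

/* Raw mount request passed to zfs_domount()/zfs_remount(). */
typedef struct zfs_mnt {
	const char	*mnt_osname;	/* objset name */
	char		*mnt_data;	/* raw mount options */
} zfs_mnt_t;

struct zfsvfs {				/* was: typedef struct zfs_sb { ... } zfs_sb_t */
	vfs_t			*z_vfs;	/* generic fs struct */
	struct super_block	*z_sb;	/* generic super_block */
	struct zfsvfs		*z_parent; /* parent fs */
	objset_t		*z_os;	/* objset reference */
	/* ... remaining members carried over unchanged from zfs_sb_t ... */
};

Keeping each option value alongside its vfs_do_* flag in a vfs_t mirrors how the other OpenZFS platforms pass mount options, which is what lets future changes be ported with fewer Linux-specific diffs.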
-rw-r--r-- | cmd/ztest/ztest.c | 2
-rw-r--r-- | include/sys/zfs_acl.h | 6
-rw-r--r-- | include/sys/zfs_ctldir.h | 6
-rw-r--r-- | include/sys/zfs_dir.h | 2
-rw-r--r-- | include/sys/zfs_fuid.h | 19
-rw-r--r-- | include/sys/zfs_rlock.h | 2
-rw-r--r-- | include/sys/zfs_vfsops.h | 104
-rw-r--r-- | include/sys/zfs_vnops.h | 1
-rw-r--r-- | include/sys/zfs_znode.h | 26
-rw-r--r-- | module/zfs/zfs_acl.c | 134
-rw-r--r-- | module/zfs/zfs_ctldir.c | 152
-rw-r--r-- | module/zfs/zfs_dir.c | 156
-rw-r--r-- | module/zfs/zfs_fuid.c | 163
-rw-r--r-- | module/zfs/zfs_ioctl.c | 154
-rw-r--r-- | module/zfs/zfs_replay.c | 115
-rw-r--r-- | module/zfs/zfs_sa.c | 85
-rw-r--r-- | module/zfs/zfs_vfsops.c | 1028
-rw-r--r-- | module/zfs/zfs_vnops.c | 769
-rw-r--r-- | module/zfs/zfs_znode.c | 356
-rw-r--r-- | module/zfs/zpl_ctldir.c | 48
-rw-r--r-- | module/zfs/zpl_file.c | 16
-rw-r--r-- | module/zfs/zpl_inode.c | 26
-rw-r--r-- | module/zfs/zpl_super.c | 235
-rw-r--r-- | module/zfs/zpl_xattr.c | 32
24 files changed, 1810 insertions, 1827 deletions
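Most of the insertions and deletions counted above are the corresponding call-site renames. As a hypothetical minimal example (zfs_example_op() is not a function in this diff), a typical operation prologue changes only in the type and variable name used with the ZFS_ENTER/ZFS_EXIT macros:

/*
 * Hypothetical skeleton illustrating the rename pattern applied throughout
 * this patch; zfs_example_op() itself does not exist in the ZFS sources.
 */
static int
zfs_example_op(struct inode *ip)
{
	znode_t		*zp = ITOZ(ip);
	zfsvfs_t	*zfsvfs = ITOZSB(ip);	/* was: zfs_sb_t *zsb = ITOZSB(ip); */
	int		error = 0;

	ZFS_ENTER(zfsvfs);			/* was: ZFS_ENTER(zsb); */
	ZFS_VERIFY_ZP(zp);

	/* ... operation body unchanged ... */

	ZFS_EXIT(zfsvfs);			/* was: ZFS_EXIT(zsb); */
	return (error);
}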
diff --git a/cmd/ztest/ztest.c b/cmd/ztest/ztest.c index 0ef834403..ea1a3030b 100644 --- a/cmd/ztest/ztest.c +++ b/cmd/ztest/ztest.c @@ -2602,7 +2602,7 @@ ztest_zil_remount(ztest_ds_t *zd, uint64_t id) mutex_enter(&zd->zd_dirobj_lock); (void) rw_wrlock(&zd->zd_zilog_lock); - /* zfs_sb_teardown() */ + /* zfsvfs_teardown() */ zil_close(zd->zd_zilog); /* zfsvfs_setup() */ diff --git a/include/sys/zfs_acl.h b/include/sys/zfs_acl.h index 2c51f096e..2572fee86 100644 --- a/include/sys/zfs_acl.h +++ b/include/sys/zfs_acl.h @@ -202,13 +202,13 @@ typedef struct zfs_acl_ids { #define ZFS_ACL_PASSTHROUGH_X 5 struct znode; -struct zfs_sb; +struct zfsvfs; #ifdef _KERNEL int zfs_acl_ids_create(struct znode *, int, vattr_t *, cred_t *, vsecattr_t *, zfs_acl_ids_t *); void zfs_acl_ids_free(zfs_acl_ids_t *); -boolean_t zfs_acl_ids_overquota(struct zfs_sb *, zfs_acl_ids_t *); +boolean_t zfs_acl_ids_overquota(struct zfsvfs *, zfs_acl_ids_t *); int zfs_getacl(struct znode *, vsecattr_t *, boolean_t, cred_t *); int zfs_setacl(struct znode *, vsecattr_t *, boolean_t, cred_t *); void zfs_acl_rele(void *); @@ -225,7 +225,7 @@ int zfs_zaccess_delete(struct znode *, struct znode *, cred_t *); int zfs_zaccess_rename(struct znode *, struct znode *, struct znode *, struct znode *, cred_t *cr); void zfs_acl_free(zfs_acl_t *); -int zfs_vsec_2_aclp(struct zfs_sb *, umode_t, vsecattr_t *, cred_t *, +int zfs_vsec_2_aclp(struct zfsvfs *, umode_t, vsecattr_t *, cred_t *, struct zfs_fuid_info **, zfs_acl_t **); int zfs_aclset_common(struct znode *, zfs_acl_t *, cred_t *, dmu_tx_t *); uint64_t zfs_external_acl(struct znode *); diff --git a/include/sys/zfs_ctldir.h b/include/sys/zfs_ctldir.h index fb85b17f5..51933bc4f 100644 --- a/include/sys/zfs_ctldir.h +++ b/include/sys/zfs_ctldir.h @@ -50,9 +50,9 @@ extern int zfs_expire_snapshot; /* zfsctl generic functions */ -extern int zfsctl_create(zfs_sb_t *zsb); -extern void zfsctl_destroy(zfs_sb_t *zsb); -extern struct inode *zfsctl_root(znode_t *zp); +extern int zfsctl_create(zfsvfs_t *); +extern void zfsctl_destroy(zfsvfs_t *); +extern struct inode *zfsctl_root(znode_t *); extern void zfsctl_init(void); extern void zfsctl_fini(void); extern boolean_t zfsctl_is_node(struct inode *ip); diff --git a/include/sys/zfs_dir.h b/include/sys/zfs_dir.h index efda1236a..9ce3accfc 100644 --- a/include/sys/zfs_dir.h +++ b/include/sys/zfs_dir.h @@ -63,7 +63,7 @@ extern void zfs_rmnode(znode_t *); extern void zfs_dl_name_switch(zfs_dirlock_t *dl, char *new, char **old); extern boolean_t zfs_dirempty(znode_t *); extern void zfs_unlinked_add(znode_t *, dmu_tx_t *); -extern void zfs_unlinked_drain(zfs_sb_t *); +extern void zfs_unlinked_drain(zfsvfs_t *zfsvfs); extern int zfs_sticky_remove_access(znode_t *, znode_t *, cred_t *cr); extern int zfs_get_xattrdir(znode_t *, struct inode **, cred_t *, int); extern int zfs_make_xattrdir(znode_t *, vattr_t *, struct inode **, cred_t *); diff --git a/include/sys/zfs_fuid.h b/include/sys/zfs_fuid.h index deaebcc82..0feb3ce4b 100644 --- a/include/sys/zfs_fuid.h +++ b/include/sys/zfs_fuid.h @@ -33,7 +33,6 @@ #include <sys/zfs_vfsops.h> #endif #include <sys/avl.h> -#include <sys/list.h> #ifdef __cplusplus extern "C" { @@ -100,24 +99,24 @@ typedef struct zfs_fuid_info { #ifdef _KERNEL struct znode; -extern uid_t zfs_fuid_map_id(zfs_sb_t *, uint64_t, cred_t *, zfs_fuid_type_t); +extern uid_t zfs_fuid_map_id(zfsvfs_t *, uint64_t, cred_t *, zfs_fuid_type_t); extern void zfs_fuid_node_add(zfs_fuid_info_t **, const char *, uint32_t, uint64_t, uint64_t, zfs_fuid_type_t); 
-extern void zfs_fuid_destroy(zfs_sb_t *); -extern uint64_t zfs_fuid_create_cred(zfs_sb_t *, zfs_fuid_type_t, +extern void zfs_fuid_destroy(zfsvfs_t *); +extern uint64_t zfs_fuid_create_cred(zfsvfs_t *, zfs_fuid_type_t, cred_t *, zfs_fuid_info_t **); -extern uint64_t zfs_fuid_create(zfs_sb_t *, uint64_t, cred_t *, zfs_fuid_type_t, +extern uint64_t zfs_fuid_create(zfsvfs_t *, uint64_t, cred_t *, zfs_fuid_type_t, zfs_fuid_info_t **); extern void zfs_fuid_map_ids(struct znode *zp, cred_t *cr, uid_t *uid, uid_t *gid); extern zfs_fuid_info_t *zfs_fuid_info_alloc(void); extern void zfs_fuid_info_free(zfs_fuid_info_t *); -extern boolean_t zfs_groupmember(zfs_sb_t *, uint64_t, cred_t *); -void zfs_fuid_sync(zfs_sb_t *, dmu_tx_t *); -extern int zfs_fuid_find_by_domain(zfs_sb_t *, const char *domain, +extern boolean_t zfs_groupmember(zfsvfs_t *, uint64_t, cred_t *); +void zfs_fuid_sync(zfsvfs_t *, dmu_tx_t *); +extern int zfs_fuid_find_by_domain(zfsvfs_t *, const char *domain, char **retdomain, boolean_t addok); -extern const char *zfs_fuid_find_by_idx(zfs_sb_t *zsb, uint32_t idx); -extern void zfs_fuid_txhold(zfs_sb_t *zsb, dmu_tx_t *tx); +extern const char *zfs_fuid_find_by_idx(zfsvfs_t *zfsvfs, uint32_t idx); +extern void zfs_fuid_txhold(zfsvfs_t *zfsvfs, dmu_tx_t *tx); #endif char *zfs_fuid_idx_domain(avl_tree_t *, uint32_t); diff --git a/include/sys/zfs_rlock.h b/include/sys/zfs_rlock.h index 5373f0d57..8483b4e8b 100644 --- a/include/sys/zfs_rlock.h +++ b/include/sys/zfs_rlock.h @@ -50,7 +50,7 @@ typedef struct zfs_rlock { avl_tree_t zr_avl; /* avl tree of range locks */ uint64_t *zr_size; /* points to znode->z_size */ uint_t *zr_blksz; /* points to znode->z_blksz */ - uint64_t *zr_max_blksz; /* points to zsb->z_max_blksz */ + uint64_t *zr_max_blksz; /* points to zfsvfs->z_max_blksz */ } zfs_rlock_t; typedef struct rl { diff --git a/include/sys/zfs_vfsops.h b/include/sys/zfs_vfsops.h index e38cadc33..aeecc472d 100644 --- a/include/sys/zfs_vfsops.h +++ b/include/sys/zfs_vfsops.h @@ -38,36 +38,46 @@ extern "C" { #endif -struct zfs_sb; +typedef struct zfsvfs zfsvfs_t; struct znode; -typedef struct zfs_mntopts { - char *z_osname; /* Objset name */ - char *z_mntpoint; /* Primary mount point */ - uint64_t z_xattr; - boolean_t z_readonly; - boolean_t z_do_readonly; - boolean_t z_setuid; - boolean_t z_do_setuid; - boolean_t z_exec; - boolean_t z_do_exec; - boolean_t z_devices; - boolean_t z_do_devices; - boolean_t z_do_xattr; - boolean_t z_atime; - boolean_t z_do_atime; - boolean_t z_relatime; - boolean_t z_do_relatime; - boolean_t z_nbmand; - boolean_t z_do_nbmand; -} zfs_mntopts_t; - -typedef struct zfs_sb { +/* + * This structure emulates the vfs_t from other platforms. It's purpose + * is to faciliate the handling of mount options and minimize structural + * differences between the platforms. 
+ */ +typedef struct vfs { + struct zfsvfs *vfs_data; + char *vfs_mntpoint; /* Primary mount point */ + uint64_t vfs_xattr; + boolean_t vfs_readonly; + boolean_t vfs_do_readonly; + boolean_t vfs_setuid; + boolean_t vfs_do_setuid; + boolean_t vfs_exec; + boolean_t vfs_do_exec; + boolean_t vfs_devices; + boolean_t vfs_do_devices; + boolean_t vfs_do_xattr; + boolean_t vfs_atime; + boolean_t vfs_do_atime; + boolean_t vfs_relatime; + boolean_t vfs_do_relatime; + boolean_t vfs_nbmand; + boolean_t vfs_do_nbmand; +} vfs_t; + +typedef struct zfs_mnt { + const char *mnt_osname; /* Objset name */ + char *mnt_data; /* Raw mount options */ +} zfs_mnt_t; + +struct zfsvfs { + vfs_t *z_vfs; /* generic fs struct */ struct super_block *z_sb; /* generic super_block */ struct backing_dev_info z_bdi; /* generic backing dev info */ - struct zfs_sb *z_parent; /* parent fs */ + struct zfsvfs *z_parent; /* parent fs */ objset_t *z_os; /* objset reference */ - zfs_mntopts_t *z_mntopts; /* passed mount options */ uint64_t z_flags; /* super_block flags */ uint64_t z_root; /* id of root znode */ uint64_t z_unlinkedobj; /* id of unlinked zapobj */ @@ -117,7 +127,7 @@ typedef struct zfs_sb { uint64_t z_hold_size; /* znode hold array size */ avl_tree_t *z_hold_trees; /* znode hold trees */ kmutex_t *z_hold_locks; /* znode hold locks */ -} zfs_sb_t; +}; #define ZSB_XATTR 0x0001 /* Enable user xattrs */ @@ -178,44 +188,34 @@ typedef struct zfid_long { extern uint_t zfs_fsyncer_key; -extern int zfs_suspend_fs(zfs_sb_t *zsb); -extern int zfs_resume_fs(zfs_sb_t *zsb, struct dsl_dataset *ds); -extern int zfs_userspace_one(zfs_sb_t *zsb, zfs_userquota_prop_t type, +extern int zfs_suspend_fs(zfsvfs_t *zfsvfs); +extern int zfs_resume_fs(zfsvfs_t *zfsvfs, struct dsl_dataset *ds); +extern int zfs_userspace_one(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type, const char *domain, uint64_t rid, uint64_t *valuep); -extern int zfs_userspace_many(zfs_sb_t *zsb, zfs_userquota_prop_t type, +extern int zfs_userspace_many(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type, uint64_t *cookiep, void *vbuf, uint64_t *bufsizep); -extern int zfs_set_userquota(zfs_sb_t *zsb, zfs_userquota_prop_t type, +extern int zfs_set_userquota(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type, const char *domain, uint64_t rid, uint64_t quota); -extern boolean_t zfs_owner_overquota(zfs_sb_t *zsb, struct znode *, +extern boolean_t zfs_owner_overquota(zfsvfs_t *zfsvfs, struct znode *, boolean_t isgroup); -extern boolean_t zfs_fuid_overquota(zfs_sb_t *zsb, boolean_t isgroup, +extern boolean_t zfs_fuid_overquota(zfsvfs_t *zfsvfs, boolean_t isgroup, uint64_t fuid); -extern boolean_t zfs_fuid_overobjquota(zfs_sb_t *zsb, boolean_t isgroup, +extern boolean_t zfs_fuid_overobjquota(zfsvfs_t *zfsvfs, boolean_t isgroup, uint64_t fuid); -extern int zfs_set_version(zfs_sb_t *zsb, uint64_t newvers); -extern int zfs_get_zplprop(objset_t *os, zfs_prop_t prop, - uint64_t *value); -extern zfs_mntopts_t *zfs_mntopts_alloc(void); -extern void zfs_mntopts_free(zfs_mntopts_t *zmo); -extern int zfs_sb_create(const char *name, zfs_mntopts_t *zmo, - zfs_sb_t **zsbp); -extern int zfs_sb_setup(zfs_sb_t *zsb, boolean_t mounting); -extern void zfs_sb_free(zfs_sb_t *zsb); -extern int zfs_sb_prune(struct super_block *sb, unsigned long nr_to_scan, - int *objects); -extern int zfs_sb_teardown(zfs_sb_t *zsb, boolean_t unmounting); +extern int zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers); +extern int zfsvfs_create(const char *name, zfsvfs_t **zfvp); +extern void zfsvfs_free(zfsvfs_t *zfsvfs); extern int 
zfs_check_global_label(const char *dsname, const char *hexsl); -extern boolean_t zfs_is_readonly(zfs_sb_t *zsb); -extern int zfs_register_callbacks(zfs_sb_t *zsb); -extern void zfs_unregister_callbacks(zfs_sb_t *zsb); -extern int zfs_domount(struct super_block *sb, zfs_mntopts_t *zmo, int silent); +extern boolean_t zfs_is_readonly(zfsvfs_t *zfsvfs); +extern int zfs_domount(struct super_block *sb, zfs_mnt_t *zm, int silent); extern void zfs_preumount(struct super_block *sb); extern int zfs_umount(struct super_block *sb); -extern int zfs_remount(struct super_block *sb, int *flags, zfs_mntopts_t *zmo); -extern int zfs_root(zfs_sb_t *zsb, struct inode **ipp); +extern int zfs_remount(struct super_block *sb, int *flags, zfs_mnt_t *zm); extern int zfs_statvfs(struct dentry *dentry, struct kstatfs *statp); extern int zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp); +extern int zfs_prune(struct super_block *sb, unsigned long nr_to_scan, + int *objects); #ifdef __cplusplus } diff --git a/include/sys/zfs_vnops.h b/include/sys/zfs_vnops.h index c86fec18d..f2f4d13f4 100644 --- a/include/sys/zfs_vnops.h +++ b/include/sys/zfs_vnops.h @@ -63,7 +63,6 @@ extern int zfs_rename(struct inode *sdip, char *snm, struct inode *tdip, char *tnm, cred_t *cr, int flags); extern int zfs_symlink(struct inode *dip, char *name, vattr_t *vap, char *link, struct inode **ipp, cred_t *cr, int flags); -extern int zfs_follow_link(struct dentry *dentry, struct nameidata *nd); extern int zfs_readlink(struct inode *ip, uio_t *uio, cred_t *cr); extern int zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr, int flags); diff --git a/include/sys/zfs_znode.h b/include/sys/zfs_znode.h index a5ecb2842..c292f0373 100644 --- a/include/sys/zfs_znode.h +++ b/include/sys/zfs_znode.h @@ -233,25 +233,25 @@ typedef struct znode_hold { */ #define ZTOI(znode) (&((znode)->z_inode)) #define ITOZ(inode) (container_of((inode), znode_t, z_inode)) -#define ZTOZSB(znode) ((zfs_sb_t *)(ZTOI(znode)->i_sb->s_fs_info)) -#define ITOZSB(inode) ((zfs_sb_t *)((inode)->i_sb->s_fs_info)) +#define ZTOZSB(znode) ((zfsvfs_t *)(ZTOI(znode)->i_sb->s_fs_info)) +#define ITOZSB(inode) ((zfsvfs_t *)((inode)->i_sb->s_fs_info)) #define S_ISDEV(mode) (S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode)) /* Called on entry to each ZFS vnode and vfs operation */ -#define ZFS_ENTER(zsb) \ +#define ZFS_ENTER(zfsvfs) \ { \ - rrm_enter_read(&(zsb)->z_teardown_lock, FTAG); \ - if ((zsb)->z_unmounted) { \ - ZFS_EXIT(zsb); \ + rrm_enter_read(&(zfsvfs)->z_teardown_lock, FTAG); \ + if ((zfsvfs)->z_unmounted) { \ + ZFS_EXIT(zfsvfs); \ return (EIO); \ } \ } /* Must be called before exiting the vop */ -#define ZFS_EXIT(zsb) \ +#define ZFS_EXIT(zfsvfs) \ { \ - rrm_exit(&(zsb)->z_teardown_lock, FTAG); \ + rrm_exit(&(zfsvfs)->z_teardown_lock, FTAG); \ } /* Verifies the znode is valid */ @@ -266,7 +266,7 @@ typedef struct znode_hold { */ #define ZFS_OBJ_MTX_SZ 64 #define ZFS_OBJ_MTX_MAX (1024 * 1024) -#define ZFS_OBJ_HASH(zsb, obj) ((obj) & ((zsb->z_hold_size) - 1)) +#define ZFS_OBJ_HASH(zfsvfs, obj) ((obj) & ((zfsvfs->z_hold_size) - 1)) extern unsigned int zfs_object_mutex_size; @@ -291,7 +291,7 @@ extern unsigned int zfs_object_mutex_size; #define STATE_CHANGED (ATTR_CTIME) #define CONTENT_MODIFIED (ATTR_MTIME | ATTR_CTIME) -extern int zfs_init_fs(zfs_sb_t *, znode_t **); +extern int zfs_init_fs(zfsvfs_t *, znode_t **); extern void zfs_set_dataprop(objset_t *); extern void zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *, dmu_tx_t *tx); @@ -302,7 +302,7 
@@ extern int zfs_freesp(znode_t *, uint64_t, uint64_t, int, boolean_t); extern void zfs_znode_init(void); extern void zfs_znode_fini(void); extern int zfs_znode_hold_compare(const void *, const void *); -extern int zfs_zget(zfs_sb_t *, uint64_t, znode_t **); +extern int zfs_zget(zfsvfs_t *, uint64_t, znode_t **); extern int zfs_rezget(znode_t *); extern void zfs_zinactive(znode_t *); extern void zfs_znode_delete(znode_t *, dmu_tx_t *); @@ -343,8 +343,8 @@ extern void zfs_log_setattr(zilog_t *zilog, dmu_tx_t *tx, int txtype, extern void zfs_log_acl(zilog_t *zilog, dmu_tx_t *tx, znode_t *zp, vsecattr_t *vsecp, zfs_fuid_info_t *fuidp); extern void zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx); -extern void zfs_upgrade(zfs_sb_t *zsb, dmu_tx_t *tx); -extern int zfs_create_share_dir(zfs_sb_t *zsb, dmu_tx_t *tx); +extern void zfs_upgrade(zfsvfs_t *zfsvfs, dmu_tx_t *tx); +extern int zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx); #if defined(HAVE_UIO_RW) extern caddr_t zfs_map_page(page_t *, enum seg_rw); diff --git a/module/zfs/zfs_acl.c b/module/zfs/zfs_acl.c index defb8f448..0e7203ea6 100644 --- a/module/zfs/zfs_acl.c +++ b/module/zfs/zfs_acl.c @@ -371,23 +371,23 @@ static int zfs_acl_znode_info(znode_t *zp, int *aclsize, int *aclcount, zfs_acl_phys_t *aclphys) { - zfs_sb_t *zsb = ZTOZSB(zp); + zfsvfs_t *zfsvfs = ZTOZSB(zp); uint64_t acl_count; int size; int error; ASSERT(MUTEX_HELD(&zp->z_acl_lock)); if (zp->z_is_sa) { - if ((error = sa_size(zp->z_sa_hdl, SA_ZPL_DACL_ACES(zsb), + if ((error = sa_size(zp->z_sa_hdl, SA_ZPL_DACL_ACES(zfsvfs), &size)) != 0) return (error); *aclsize = size; - if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_COUNT(zsb), + if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_COUNT(zfsvfs), &acl_count, sizeof (acl_count))) != 0) return (error); *aclcount = acl_count; } else { - if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zsb), + if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs), aclphys, sizeof (*aclphys))) != 0) return (error); @@ -651,7 +651,7 @@ zfs_ace_walk(void *datap, uint64_t cookie, int aclcnt, * ACE FUIDs will be created later. */ int -zfs_copy_ace_2_fuid(zfs_sb_t *zsb, umode_t obj_mode, zfs_acl_t *aclp, +zfs_copy_ace_2_fuid(zfsvfs_t *zfsvfs, umode_t obj_mode, zfs_acl_t *aclp, void *datap, zfs_ace_t *z_acl, uint64_t aclcnt, size_t *size, zfs_fuid_info_t **fuidp, cred_t *cr) { @@ -669,7 +669,7 @@ zfs_copy_ace_2_fuid(zfs_sb_t *zsb, umode_t obj_mode, zfs_acl_t *aclp, entry_type = aceptr->z_hdr.z_flags & ACE_TYPE_FLAGS; if (entry_type != ACE_OWNER && entry_type != OWNING_GROUP && entry_type != ACE_EVERYONE) { - aceptr->z_fuid = zfs_fuid_create(zsb, acep->a_who, + aceptr->z_fuid = zfs_fuid_create(zfsvfs, acep->a_who, cr, (entry_type == 0) ? ZFS_ACE_USER : ZFS_ACE_GROUP, fuidp); } @@ -713,7 +713,7 @@ zfs_copy_ace_2_fuid(zfs_sb_t *zsb, umode_t obj_mode, zfs_acl_t *aclp, * Copy ZFS ACEs to fixed size ace_t layout */ static void -zfs_copy_fuid_2_ace(zfs_sb_t *zsb, zfs_acl_t *aclp, cred_t *cr, +zfs_copy_fuid_2_ace(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, cred_t *cr, void *datap, int filter) { uint64_t who; @@ -756,7 +756,7 @@ zfs_copy_fuid_2_ace(zfs_sb_t *zsb, zfs_acl_t *aclp, cred_t *cr, if ((entry_type != ACE_OWNER && entry_type != OWNING_GROUP && entry_type != ACE_EVERYONE)) { - acep->a_who = zfs_fuid_map_id(zsb, who, + acep->a_who = zfs_fuid_map_id(zfsvfs, who, cr, (entry_type & ACE_IDENTIFIER_GROUP) ? 
ZFS_ACE_GROUP : ZFS_ACE_USER); } else { @@ -1316,7 +1316,7 @@ int zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx) { int error; - zfs_sb_t *zsb = ZTOZSB(zp); + zfsvfs_t *zfsvfs = ZTOZSB(zp); dmu_object_type_t otype; zfs_acl_locator_cb_t locate = { 0 }; uint64_t mode; @@ -1330,11 +1330,11 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx) KUID_TO_SUID(ZTOI(zp)->i_uid), KGID_TO_SGID(ZTOI(zp)->i_gid)); zp->z_mode = ZTOI(zp)->i_mode = mode; - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, sizeof (mode)); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL, &zp->z_pflags, sizeof (zp->z_pflags)); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, sizeof (ctime)); if (zp->z_acl_cached) { @@ -1345,11 +1345,11 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx) /* * Upgrade needed? */ - if (!zsb->z_use_fuids) { + if (!zfsvfs->z_use_fuids) { otype = DMU_OT_OLDACL; } else { if ((aclp->z_version == ZFS_ACL_VERSION_INITIAL) && - (zsb->z_version >= ZPL_VERSION_FUID)) + (zfsvfs->z_version >= ZPL_VERSION_FUID)) zfs_acl_xform(zp, aclp, cr); ASSERT(aclp->z_version >= ZFS_ACL_VERSION_FUID); otype = DMU_OT_ACL; @@ -1362,9 +1362,9 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx) if (zp->z_is_sa) { /* the easy case, just update the ACL attribute */ locate.cb_aclp = aclp; - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_ACES(zsb), + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_ACES(zfsvfs), zfs_acl_data_locator, &locate, aclp->z_acl_bytes); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_COUNT(zsb), + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_COUNT(zfsvfs), NULL, &aclp->z_acl_count, sizeof (uint64_t)); } else { /* Painful legacy way */ zfs_acl_node_t *aclnode; @@ -1372,7 +1372,7 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx) zfs_acl_phys_t acl_phys; uint64_t aoid; - if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zsb), + if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs), &acl_phys, sizeof (acl_phys))) != 0) return (error); @@ -1386,20 +1386,20 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx) */ if (aoid && aclp->z_version != acl_phys.z_acl_version) { - error = dmu_object_free(zsb->z_os, aoid, tx); + error = dmu_object_free(zfsvfs->z_os, aoid, tx); if (error) return (error); aoid = 0; } if (aoid == 0) { - aoid = dmu_object_alloc(zsb->z_os, + aoid = dmu_object_alloc(zfsvfs->z_os, otype, aclp->z_acl_bytes, otype == DMU_OT_ACL ? DMU_OT_SYSACL : DMU_OT_NONE, otype == DMU_OT_ACL ? DN_OLD_MAX_BONUSLEN : 0, tx); } else { - (void) dmu_object_set_blocksize(zsb->z_os, + (void) dmu_object_set_blocksize(zfsvfs->z_os, aoid, aclp->z_acl_bytes, 0, tx); } acl_phys.z_acl_extern_obj = aoid; @@ -1407,7 +1407,7 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx) aclnode = list_next(&aclp->z_acl, aclnode)) { if (aclnode->z_ace_count == 0) continue; - dmu_write(zsb->z_os, aoid, off, + dmu_write(zfsvfs->z_os, aoid, off, aclnode->z_size, aclnode->z_acldata, tx); off += aclnode->z_size; } @@ -1417,7 +1417,7 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx) * Migrating back embedded? 
*/ if (acl_phys.z_acl_extern_obj) { - error = dmu_object_free(zsb->z_os, + error = dmu_object_free(zfsvfs->z_os, acl_phys.z_acl_extern_obj, tx); if (error) return (error); @@ -1446,7 +1446,7 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx) } acl_phys.z_acl_version = aclp->z_version; - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL, &acl_phys, sizeof (acl_phys)); } @@ -1465,7 +1465,7 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx) } static void -zfs_acl_chmod(zfs_sb_t *zsb, uint64_t mode, zfs_acl_t *aclp) +zfs_acl_chmod(zfsvfs_t *zfsvfs, uint64_t mode, zfs_acl_t *aclp) { void *acep = NULL; uint64_t who; @@ -1538,7 +1538,7 @@ zfs_acl_chmod(zfs_sb_t *zsb, uint64_t mode, zfs_acl_t *aclp) * Limit permissions to be no greater than * group permissions */ - if (zsb->z_acl_inherit == ZFS_ACL_RESTRICTED) { + if (zfsvfs->z_acl_inherit == ZFS_ACL_RESTRICTED) { if (!(mode & S_IRGRP)) access_mask &= ~ACE_READ_DATA; if (!(mode & S_IWGRP)) @@ -1590,11 +1590,11 @@ zfs_acl_chmod_setattr(znode_t *zp, zfs_acl_t **aclp, uint64_t mode) * strip off write_owner and write_acl */ static void -zfs_restricted_update(zfs_sb_t *zsb, zfs_acl_t *aclp, void *acep) +zfs_restricted_update(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, void *acep) { uint32_t mask = aclp->z_ops->ace_mask_get(acep); - if ((zsb->z_acl_inherit == ZFS_ACL_RESTRICTED) && + if ((zfsvfs->z_acl_inherit == ZFS_ACL_RESTRICTED) && (aclp->z_ops->ace_type_get(acep) == ALLOW)) { mask &= ~RESTRICTED_CLEAR; aclp->z_ops->ace_mask_set(acep, mask); @@ -1621,7 +1621,7 @@ zfs_ace_can_use(umode_t obj_mode, uint16_t acep_flags) * inherit inheritable ACEs from parent */ static zfs_acl_t * -zfs_acl_inherit(zfs_sb_t *zsb, umode_t obj_mode, zfs_acl_t *paclp, +zfs_acl_inherit(zfsvfs_t *zfsvfs, umode_t obj_mode, zfs_acl_t *paclp, uint64_t mode, boolean_t *need_chmod) { void *pacep; @@ -1639,16 +1639,16 @@ zfs_acl_inherit(zfs_sb_t *zsb, umode_t obj_mode, zfs_acl_t *paclp, boolean_t passthrough, passthrough_x, noallow; passthrough_x = - zsb->z_acl_inherit == ZFS_ACL_PASSTHROUGH_X; + zfsvfs->z_acl_inherit == ZFS_ACL_PASSTHROUGH_X; passthrough = passthrough_x || - zsb->z_acl_inherit == ZFS_ACL_PASSTHROUGH; + zfsvfs->z_acl_inherit == ZFS_ACL_PASSTHROUGH; noallow = - zsb->z_acl_inherit == ZFS_ACL_NOALLOW; + zfsvfs->z_acl_inherit == ZFS_ACL_NOALLOW; *need_chmod = B_TRUE; pacep = NULL; aclp = zfs_acl_alloc(paclp->z_version); - if (zsb->z_acl_inherit == ZFS_ACL_DISCARD || S_ISLNK(obj_mode)) + if (zfsvfs->z_acl_inherit == ZFS_ACL_DISCARD || S_ISLNK(obj_mode)) return (aclp); while ((pacep = zfs_acl_next_ace(paclp, pacep, &who, &access_mask, &iflags, &type))) { @@ -1712,7 +1712,7 @@ zfs_acl_inherit(zfs_sb_t *zsb, umode_t obj_mode, zfs_acl_t *paclp, newflags &= ~ALL_INHERIT; aclp->z_ops->ace_flags_set(acep, newflags|ACE_INHERITED_ACE); - zfs_restricted_update(zsb, aclp, acep); + zfs_restricted_update(zfsvfs, aclp, acep); continue; } @@ -1745,7 +1745,7 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr, vsecattr_t *vsecp, zfs_acl_ids_t *acl_ids) { int error; - zfs_sb_t *zsb = ZTOZSB(dzp); + zfsvfs_t *zfsvfs = ZTOZSB(dzp); zfs_acl_t *paclp; gid_t gid = vap->va_gid; boolean_t need_chmod = B_TRUE; @@ -1755,7 +1755,7 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr, acl_ids->z_mode = vap->va_mode; if (vsecp) - if ((error = zfs_vsec_2_aclp(zsb, vap->va_mode, vsecp, + if ((error = zfs_vsec_2_aclp(zfsvfs, vap->va_mode, vsecp, cr, 
&acl_ids->z_fuidp, &acl_ids->z_aclp)) != 0) return (error); @@ -1765,19 +1765,19 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr, /* * Determine uid and gid. */ - if ((flag & IS_ROOT_NODE) || zsb->z_replay || + if ((flag & IS_ROOT_NODE) || zfsvfs->z_replay || ((flag & IS_XATTR) && (S_ISDIR(vap->va_mode)))) { - acl_ids->z_fuid = zfs_fuid_create(zsb, (uint64_t)vap->va_uid, + acl_ids->z_fuid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_uid, cr, ZFS_OWNER, &acl_ids->z_fuidp); - acl_ids->z_fgid = zfs_fuid_create(zsb, (uint64_t)vap->va_gid, + acl_ids->z_fgid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid, cr, ZFS_GROUP, &acl_ids->z_fuidp); gid = vap->va_gid; } else { - acl_ids->z_fuid = zfs_fuid_create_cred(zsb, ZFS_OWNER, + acl_ids->z_fuid = zfs_fuid_create_cred(zfsvfs, ZFS_OWNER, cr, &acl_ids->z_fuidp); acl_ids->z_fgid = 0; if (vap->va_mask & AT_GID) { - acl_ids->z_fgid = zfs_fuid_create(zsb, + acl_ids->z_fgid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid, cr, ZFS_GROUP, &acl_ids->z_fuidp); gid = vap->va_gid; @@ -1793,13 +1793,13 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr, acl_ids->z_fgid = KGID_TO_SGID( ZTOI(dzp)->i_gid); - gid = zfs_fuid_map_id(zsb, acl_ids->z_fgid, + gid = zfs_fuid_map_id(zfsvfs, acl_ids->z_fgid, cr, ZFS_GROUP); - if (zsb->z_use_fuids && + if (zfsvfs->z_use_fuids && IS_EPHEMERAL(acl_ids->z_fgid)) { domain = zfs_fuid_idx_domain( - &zsb->z_fuid_idx, + &zfsvfs->z_fuid_idx, FUID_INDEX(acl_ids->z_fgid)); rid = FUID_RID(acl_ids->z_fgid); zfs_fuid_node_add(&acl_ids->z_fuidp, @@ -1808,7 +1808,7 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr, acl_ids->z_fgid, ZFS_GROUP); } } else { - acl_ids->z_fgid = zfs_fuid_create_cred(zsb, + acl_ids->z_fgid = zfs_fuid_create_cred(zfsvfs, ZFS_GROUP, cr, &acl_ids->z_fuidp); gid = crgetgid(cr); } @@ -1840,7 +1840,7 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr, !(dzp->z_pflags & ZFS_XATTR)) { VERIFY(0 == zfs_acl_node_read(dzp, B_TRUE, &paclp, B_FALSE)); - acl_ids->z_aclp = zfs_acl_inherit(zsb, + acl_ids->z_aclp = zfs_acl_inherit(zfsvfs, vap->va_mode, paclp, acl_ids->z_mode, &need_chmod); inherited = B_TRUE; } else { @@ -1853,7 +1853,7 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr, if (need_chmod) { acl_ids->z_aclp->z_hints |= S_ISDIR(vap->va_mode) ? 
ZFS_ACL_AUTO_INHERIT : 0; - zfs_acl_chmod(zsb, acl_ids->z_mode, acl_ids->z_aclp); + zfs_acl_chmod(zfsvfs, acl_ids->z_mode, acl_ids->z_aclp); } } @@ -1883,12 +1883,12 @@ zfs_acl_ids_free(zfs_acl_ids_t *acl_ids) } boolean_t -zfs_acl_ids_overquota(zfs_sb_t *zsb, zfs_acl_ids_t *acl_ids) +zfs_acl_ids_overquota(zfsvfs_t *zfsvfs, zfs_acl_ids_t *acl_ids) { - return (zfs_fuid_overquota(zsb, B_FALSE, acl_ids->z_fuid) || - zfs_fuid_overquota(zsb, B_TRUE, acl_ids->z_fgid) || - zfs_fuid_overobjquota(zsb, B_FALSE, acl_ids->z_fuid) || - zfs_fuid_overobjquota(zsb, B_TRUE, acl_ids->z_fgid)); + return (zfs_fuid_overquota(zfsvfs, B_FALSE, acl_ids->z_fuid) || + zfs_fuid_overquota(zfsvfs, B_TRUE, acl_ids->z_fgid) || + zfs_fuid_overobjquota(zfsvfs, B_FALSE, acl_ids->z_fuid) || + zfs_fuid_overobjquota(zfsvfs, B_TRUE, acl_ids->z_fgid)); } /* @@ -1992,7 +1992,7 @@ zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr) } int -zfs_vsec_2_aclp(zfs_sb_t *zsb, umode_t obj_mode, +zfs_vsec_2_aclp(zfsvfs_t *zfsvfs, umode_t obj_mode, vsecattr_t *vsecp, cred_t *cr, zfs_fuid_info_t **fuidp, zfs_acl_t **zaclp) { zfs_acl_t *aclp; @@ -2003,7 +2003,7 @@ zfs_vsec_2_aclp(zfs_sb_t *zsb, umode_t obj_mode, if (vsecp->vsa_aclcnt > MAX_ACL_ENTRIES || vsecp->vsa_aclcnt <= 0) return (SET_ERROR(EINVAL)); - aclp = zfs_acl_alloc(zfs_acl_version(zsb->z_version)); + aclp = zfs_acl_alloc(zfs_acl_version(zfsvfs->z_version)); aclp->z_hints = 0; aclnode = zfs_acl_node_alloc(aclcnt * sizeof (zfs_object_ace_t)); @@ -2016,7 +2016,7 @@ zfs_vsec_2_aclp(zfs_sb_t *zsb, umode_t obj_mode, return (error); } } else { - if ((error = zfs_copy_ace_2_fuid(zsb, obj_mode, aclp, + if ((error = zfs_copy_ace_2_fuid(zfsvfs, obj_mode, aclp, vsecp->vsa_aclentp, aclnode->z_acldata, aclcnt, &aclnode->z_size, fuidp, cr)) != 0) { zfs_acl_free(aclp); @@ -2052,8 +2052,8 @@ zfs_vsec_2_aclp(zfs_sb_t *zsb, umode_t obj_mode, int zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr) { - zfs_sb_t *zsb = ZTOZSB(zp); - zilog_t *zilog = zsb->z_log; + zfsvfs_t *zfsvfs = ZTOZSB(zp); + zilog_t *zilog = zfsvfs->z_log; ulong_t mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT); dmu_tx_t *tx; int error; @@ -2071,7 +2071,7 @@ zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr) if ((error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr))) return (error); - error = zfs_vsec_2_aclp(zsb, ZTOI(zp)->i_mode, vsecp, cr, &fuidp, + error = zfs_vsec_2_aclp(zfsvfs, ZTOI(zp)->i_mode, vsecp, cr, &fuidp, &aclp); if (error) return (error); @@ -2088,13 +2088,13 @@ top: mutex_enter(&zp->z_acl_lock); mutex_enter(&zp->z_lock); - tx = dmu_tx_create(zsb->z_os); + tx = dmu_tx_create(zfsvfs->z_os); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE); - fuid_dirtied = zsb->z_fuid_dirty; + fuid_dirtied = zfsvfs->z_fuid_dirty; if (fuid_dirtied) - zfs_fuid_txhold(zsb, tx); + zfs_fuid_txhold(zfsvfs, tx); /* * If old version and ACL won't fit in bonus and we aren't @@ -2102,7 +2102,7 @@ top: */ if ((acl_obj = zfs_external_acl(zp)) != 0) { - if (zsb->z_version >= ZPL_VERSION_FUID && + if (zfsvfs->z_version >= ZPL_VERSION_FUID && zfs_znode_acl_version(zp) <= ZFS_ACL_VERSION_INITIAL) { dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END); @@ -2137,7 +2137,7 @@ top: zp->z_acl_cached = aclp; if (fuid_dirtied) - zfs_fuid_sync(zsb, tx); + zfs_fuid_sync(zfsvfs, tx); zfs_log_acl(zilog, tx, zp, vsecp, fuidp); @@ -2218,7 +2218,7 @@ static int zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode, boolean_t anyaccess, cred_t *cr) { - zfs_sb_t *zsb = ZTOZSB(zp); + zfsvfs_t 
*zfsvfs = ZTOZSB(zp); zfs_acl_t *aclp; int error; uid_t uid = crgetuid(cr); @@ -2273,7 +2273,7 @@ zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode, who = gowner; /*FALLTHROUGH*/ case ACE_IDENTIFIER_GROUP: - checkit = zfs_groupmember(zsb, who, cr); + checkit = zfs_groupmember(zfsvfs, who, cr); break; case ACE_EVERYONE: checkit = B_TRUE; @@ -2284,7 +2284,7 @@ zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode, if (entry_type == 0) { uid_t newid; - newid = zfs_fuid_map_id(zsb, who, cr, + newid = zfs_fuid_map_id(zfsvfs, who, cr, ZFS_ACE_USER); if (newid != IDMAP_WK_CREATOR_OWNER_UID && uid == newid) @@ -2357,7 +2357,7 @@ static int zfs_zaccess_common(znode_t *zp, uint32_t v4_mode, uint32_t *working_mode, boolean_t *check_privs, boolean_t skipaclchk, cred_t *cr) { - zfs_sb_t *zsb = ZTOZSB(zp); + zfsvfs_t *zfsvfs = ZTOZSB(zp); int err; *working_mode = v4_mode; @@ -2366,7 +2366,7 @@ zfs_zaccess_common(znode_t *zp, uint32_t v4_mode, uint32_t *working_mode, /* * Short circuit empty requests */ - if (v4_mode == 0 || zsb->z_replay) { + if (v4_mode == 0 || zfsvfs->z_replay) { *working_mode = 0; return (0); } diff --git a/module/zfs/zfs_ctldir.c b/module/zfs/zfs_ctldir.c index 8847db7f1..eea1bb2e8 100644 --- a/module/zfs/zfs_ctldir.c +++ b/module/zfs/zfs_ctldir.c @@ -60,12 +60,12 @@ * * The '.zfs', '.zfs/snapshot', and all directories created under * '.zfs/snapshot' (ie: '.zfs/snapshot/<snapname>') all share the same - * share the same zfs_sb_t as the head filesystem (what '.zfs' lives under). + * share the same zfsvfs_t as the head filesystem (what '.zfs' lives under). * * File systems mounted on top of the '.zfs/snapshot/<snapname>' paths * (ie: snapshots) are complete ZFS filesystems and have their own unique - * zfs_sb_t. However, the fsid reported by these mounts will be the same - * as that used by the parent zfs_sb_t to make NFS happy. + * zfsvfs_t. However, the fsid reported by these mounts will be the same + * as that used by the parent zfsvfs_t to make NFS happy. */ #include <sys/types.h> @@ -448,14 +448,14 @@ zfsctl_is_snapdir(struct inode *ip) * Allocate a new inode with the passed id and ops. */ static struct inode * -zfsctl_inode_alloc(zfs_sb_t *zsb, uint64_t id, +zfsctl_inode_alloc(zfsvfs_t *zfsvfs, uint64_t id, const struct file_operations *fops, const struct inode_operations *ops) { - struct timespec now = current_fs_time(zsb->z_sb); + struct timespec now = current_fs_time(zfsvfs->z_sb); struct inode *ip; znode_t *zp; - ip = new_inode(zsb->z_sb); + ip = new_inode(zfsvfs->z_sb); if (ip == NULL) return (NULL); @@ -498,11 +498,11 @@ zfsctl_inode_alloc(zfs_sb_t *zsb, uint64_t id, return (NULL); } - mutex_enter(&zsb->z_znodes_lock); - list_insert_tail(&zsb->z_all_znodes, zp); - zsb->z_nr_znodes++; + mutex_enter(&zfsvfs->z_znodes_lock); + list_insert_tail(&zfsvfs->z_all_znodes, zp); + zfsvfs->z_nr_znodes++; membar_producer(); - mutex_exit(&zsb->z_znodes_lock); + mutex_exit(&zfsvfs->z_znodes_lock); unlock_new_inode(ip); @@ -513,18 +513,18 @@ zfsctl_inode_alloc(zfs_sb_t *zsb, uint64_t id, * Lookup the inode with given id, it will be allocated if needed. 
*/ static struct inode * -zfsctl_inode_lookup(zfs_sb_t *zsb, uint64_t id, +zfsctl_inode_lookup(zfsvfs_t *zfsvfs, uint64_t id, const struct file_operations *fops, const struct inode_operations *ops) { struct inode *ip = NULL; while (ip == NULL) { - ip = ilookup(zsb->z_sb, (unsigned long)id); + ip = ilookup(zfsvfs->z_sb, (unsigned long)id); if (ip) break; /* May fail due to concurrent zfsctl_inode_alloc() */ - ip = zfsctl_inode_alloc(zsb, id, fops, ops); + ip = zfsctl_inode_alloc(zfsvfs, id, fops, ops); } return (ip); @@ -532,7 +532,7 @@ zfsctl_inode_lookup(zfs_sb_t *zsb, uint64_t id, /* * Create the '.zfs' directory. This directory is cached as part of the VFS - * structure. This results in a hold on the zfs_sb_t. The code in zfs_umount() + * structure. This results in a hold on the zfsvfs_t. The code in zfs_umount() * therefore checks against a vfs_count of 2 instead of 1. This reference * is removed when the ctldir is destroyed in the unmount. All other entities * under the '.zfs' directory are created dynamically as needed. @@ -541,13 +541,13 @@ zfsctl_inode_lookup(zfs_sb_t *zsb, uint64_t id, * of 64-bit inode numbers this support must be disabled on 32-bit systems. */ int -zfsctl_create(zfs_sb_t *zsb) +zfsctl_create(zfsvfs_t *zfsvfs) { - ASSERT(zsb->z_ctldir == NULL); + ASSERT(zfsvfs->z_ctldir == NULL); - zsb->z_ctldir = zfsctl_inode_alloc(zsb, ZFSCTL_INO_ROOT, + zfsvfs->z_ctldir = zfsctl_inode_alloc(zfsvfs, ZFSCTL_INO_ROOT, &zpl_fops_root, &zpl_ops_root); - if (zsb->z_ctldir == NULL) + if (zfsvfs->z_ctldir == NULL) return (SET_ERROR(ENOENT)); return (0); @@ -558,12 +558,12 @@ zfsctl_create(zfs_sb_t *zsb) * Only called when the filesystem is unmounted. */ void -zfsctl_destroy(zfs_sb_t *zsb) +zfsctl_destroy(zfsvfs_t *zfsvfs) { - if (zsb->z_issnap) { + if (zfsvfs->z_issnap) { zfs_snapentry_t *se; - spa_t *spa = zsb->z_os->os_spa; - uint64_t objsetid = dmu_objset_id(zsb->z_os); + spa_t *spa = zfsvfs->z_os->os_spa; + uint64_t objsetid = dmu_objset_id(zfsvfs->z_os); rw_enter(&zfs_snapshot_lock, RW_WRITER); if ((se = zfsctl_snapshot_find_by_objsetid(spa, objsetid)) @@ -573,9 +573,9 @@ zfsctl_destroy(zfs_sb_t *zsb) zfsctl_snapshot_rele(se); } rw_exit(&zfs_snapshot_lock); - } else if (zsb->z_ctldir) { - iput(zsb->z_ctldir); - zsb->z_ctldir = NULL; + } else if (zfsvfs->z_ctldir) { + iput(zfsvfs->z_ctldir); + zfsvfs->z_ctldir = NULL; } } @@ -646,21 +646,21 @@ int zfsctl_fid(struct inode *ip, fid_t *fidp) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ITOZSB(ip); + zfsvfs_t *zfsvfs = ITOZSB(ip); uint64_t object = zp->z_id; zfid_short_t *zfid; int i; - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); if (zfsctl_is_snapdir(ip)) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (zfsctl_snapdir_fid(ip, fidp)); } if (fidp->fid_len < SHORT_FID_LEN) { fidp->fid_len = SHORT_FID_LEN; - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(ENOSPC)); } @@ -675,7 +675,7 @@ zfsctl_fid(struct inode *ip, fid_t *fidp) for (i = 0; i < sizeof (zfid->zf_gen); i++) zfid->zf_gen[i] = 0; - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (0); } @@ -683,10 +683,10 @@ zfsctl_fid(struct inode *ip, fid_t *fidp) * Construct a full dataset name in full_name: "pool/dataset@snap_name" */ static int -zfsctl_snapshot_name(zfs_sb_t *zsb, const char *snap_name, int len, +zfsctl_snapshot_name(zfsvfs_t *zfsvfs, const char *snap_name, int len, char *full_name) { - objset_t *os = zsb->z_os; + objset_t *os = zfsvfs->z_os; if (zfs_component_namecheck(snap_name, NULL, NULL) != 0) return (SET_ERROR(EILSEQ)); @@ -736,17 +736,17 @@ out: * Returns full path in 
full_path: "/pool/dataset/.zfs/snapshot/snap_name/" */ static int -zfsctl_snapshot_path_objset(zfs_sb_t *zsb, uint64_t objsetid, +zfsctl_snapshot_path_objset(zfsvfs_t *zfsvfs, uint64_t objsetid, int path_len, char *full_path) { - objset_t *os = zsb->z_os; + objset_t *os = zfsvfs->z_os; fstrans_cookie_t cookie; char *snapname; boolean_t case_conflict; uint64_t id, pos = 0; int error = 0; - if (zsb->z_mntopts->z_mntpoint == NULL) + if (zfsvfs->z_vfs->vfs_mntpoint == NULL) return (ENOENT); cookie = spl_fstrans_mark(); @@ -754,7 +754,7 @@ zfsctl_snapshot_path_objset(zfs_sb_t *zsb, uint64_t objsetid, while (error == 0) { dsl_pool_config_enter(dmu_objset_pool(os), FTAG); - error = dmu_snapshot_list_next(zsb->z_os, + error = dmu_snapshot_list_next(zfsvfs->z_os, ZFS_MAX_DATASET_NAME_LEN, snapname, &id, &pos, &case_conflict); dsl_pool_config_exit(dmu_objset_pool(os), FTAG); @@ -767,7 +767,7 @@ zfsctl_snapshot_path_objset(zfs_sb_t *zsb, uint64_t objsetid, memset(full_path, 0, path_len); snprintf(full_path, path_len - 1, "%s/.zfs/snapshot/%s", - zsb->z_mntopts->z_mntpoint, snapname); + zfsvfs->z_vfs->vfs_mntpoint, snapname); out: kmem_free(snapname, ZFS_MAX_DATASET_NAME_LEN); spl_fstrans_unmark(cookie); @@ -782,18 +782,18 @@ int zfsctl_root_lookup(struct inode *dip, char *name, struct inode **ipp, int flags, cred_t *cr, int *direntflags, pathname_t *realpnp) { - zfs_sb_t *zsb = ITOZSB(dip); + zfsvfs_t *zfsvfs = ITOZSB(dip); int error = 0; - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); if (strcmp(name, "..") == 0) { *ipp = dip->i_sb->s_root->d_inode; } else if (strcmp(name, ZFS_SNAPDIR_NAME) == 0) { - *ipp = zfsctl_inode_lookup(zsb, ZFSCTL_INO_SNAPDIR, + *ipp = zfsctl_inode_lookup(zfsvfs, ZFSCTL_INO_SNAPDIR, &zpl_fops_snapdir, &zpl_ops_snapdir); } else if (strcmp(name, ZFS_SHAREDIR_NAME) == 0) { - *ipp = zfsctl_inode_lookup(zsb, ZFSCTL_INO_SHARES, + *ipp = zfsctl_inode_lookup(zfsvfs, ZFSCTL_INO_SHARES, &zpl_fops_shares, &zpl_ops_shares); } else { *ipp = NULL; @@ -802,7 +802,7 @@ zfsctl_root_lookup(struct inode *dip, char *name, struct inode **ipp, if (*ipp == NULL) error = SET_ERROR(ENOENT); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } @@ -816,24 +816,24 @@ int zfsctl_snapdir_lookup(struct inode *dip, char *name, struct inode **ipp, int flags, cred_t *cr, int *direntflags, pathname_t *realpnp) { - zfs_sb_t *zsb = ITOZSB(dip); + zfsvfs_t *zfsvfs = ITOZSB(dip); uint64_t id; int error; - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); - error = dmu_snapshot_lookup(zsb->z_os, name, &id); + error = dmu_snapshot_lookup(zfsvfs->z_os, name, &id); if (error) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } - *ipp = zfsctl_inode_lookup(zsb, ZFSCTL_INO_SNAPDIRS - id, + *ipp = zfsctl_inode_lookup(zfsvfs, ZFSCTL_INO_SNAPDIRS - id, &simple_dir_operations, &simple_dir_inode_operations); if (*ipp == NULL) error = SET_ERROR(ENOENT); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } @@ -847,22 +847,22 @@ int zfsctl_snapdir_rename(struct inode *sdip, char *snm, struct inode *tdip, char *tnm, cred_t *cr, int flags) { - zfs_sb_t *zsb = ITOZSB(sdip); + zfsvfs_t *zfsvfs = ITOZSB(sdip); char *to, *from, *real, *fsname; int error; if (!zfs_admin_snapshot) return (EACCES); - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); to = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP); from = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP); real = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP); fsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP); - if (zsb->z_case == ZFS_CASE_INSENSITIVE) { - error = dmu_snapshot_realname(zsb->z_os, snm, 
real, + if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE) { + error = dmu_snapshot_realname(zfsvfs->z_os, snm, real, ZFS_MAX_DATASET_NAME_LEN, NULL); if (error == 0) { snm = real; @@ -871,7 +871,7 @@ zfsctl_snapdir_rename(struct inode *sdip, char *snm, } } - dmu_objset_name(zsb->z_os, fsname); + dmu_objset_name(zfsvfs->z_os, fsname); error = zfsctl_snapshot_name(ITOZSB(sdip), snm, ZFS_MAX_DATASET_NAME_LEN, from); @@ -912,7 +912,7 @@ out: kmem_free(real, ZFS_MAX_DATASET_NAME_LEN); kmem_free(fsname, ZFS_MAX_DATASET_NAME_LEN); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } @@ -924,20 +924,20 @@ out: int zfsctl_snapdir_remove(struct inode *dip, char *name, cred_t *cr, int flags) { - zfs_sb_t *zsb = ITOZSB(dip); + zfsvfs_t *zfsvfs = ITOZSB(dip); char *snapname, *real; int error; if (!zfs_admin_snapshot) return (EACCES); - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); snapname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP); real = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP); - if (zsb->z_case == ZFS_CASE_INSENSITIVE) { - error = dmu_snapshot_realname(zsb->z_os, name, real, + if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE) { + error = dmu_snapshot_realname(zfsvfs->z_os, name, real, ZFS_MAX_DATASET_NAME_LEN, NULL); if (error == 0) { name = real; @@ -960,7 +960,7 @@ out: kmem_free(snapname, ZFS_MAX_DATASET_NAME_LEN); kmem_free(real, ZFS_MAX_DATASET_NAME_LEN); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } @@ -973,7 +973,7 @@ int zfsctl_snapdir_mkdir(struct inode *dip, char *dirname, vattr_t *vap, struct inode **ipp, cred_t *cr, int flags) { - zfs_sb_t *zsb = ITOZSB(dip); + zfsvfs_t *zfsvfs = ITOZSB(dip); char *dsname; int error; @@ -987,7 +987,7 @@ zfsctl_snapdir_mkdir(struct inode *dip, char *dirname, vattr_t *vap, goto out; } - dmu_objset_name(zsb->z_os, dsname); + dmu_objset_name(zfsvfs->z_os, dsname); error = zfs_secpolicy_snapshot_perms(dsname, cr); if (error != 0) @@ -1055,8 +1055,8 @@ zfsctl_snapshot_mount(struct path *path, int flags) { struct dentry *dentry = path->dentry; struct inode *ip = dentry->d_inode; - zfs_sb_t *zsb; - zfs_sb_t *snap_zsb; + zfsvfs_t *zfsvfs; + zfsvfs_t *snap_zfsvfs; zfs_snapentry_t *se; char *full_name, *full_path; char *argv[] = { "/usr/bin/env", "mount", "-t", "zfs", "-n", NULL, NULL, @@ -1068,13 +1068,13 @@ zfsctl_snapshot_mount(struct path *path, int flags) if (ip == NULL) return (EISDIR); - zsb = ITOZSB(ip); - ZFS_ENTER(zsb); + zfsvfs = ITOZSB(ip); + ZFS_ENTER(zfsvfs); full_name = kmem_zalloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP); full_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP); - error = zfsctl_snapshot_name(zsb, dname(dentry), + error = zfsctl_snapshot_name(zfsvfs, dname(dentry), ZFS_MAX_DATASET_NAME_LEN, full_name); if (error) goto error; @@ -1134,14 +1134,14 @@ zfsctl_snapshot_mount(struct path *path, int flags) spath = *path; path_get(&spath); if (zpl_follow_down_one(&spath)) { - snap_zsb = ITOZSB(spath.dentry->d_inode); - snap_zsb->z_parent = zsb; + snap_zfsvfs = ITOZSB(spath.dentry->d_inode); + snap_zfsvfs->z_parent = zfsvfs; dentry = spath.dentry; spath.mnt->mnt_flags |= MNT_SHRINKABLE; rw_enter(&zfs_snapshot_lock, RW_WRITER); se = zfsctl_snapshot_alloc(full_name, full_path, - snap_zsb->z_os->os_spa, dmu_objset_id(snap_zsb->z_os), + snap_zfsvfs->z_os->os_spa, dmu_objset_id(snap_zfsvfs->z_os), dentry); zfsctl_snapshot_add(se); zfsctl_snapshot_unmount_delay_impl(se, zfs_expire_snapshot); @@ -1152,7 +1152,7 @@ error: kmem_free(full_name, ZFS_MAX_DATASET_NAME_LEN); kmem_free(full_path, MAXPATHLEN); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } @@ 
-1211,24 +1211,24 @@ int zfsctl_shares_lookup(struct inode *dip, char *name, struct inode **ipp, int flags, cred_t *cr, int *direntflags, pathname_t *realpnp) { - zfs_sb_t *zsb = ITOZSB(dip); + zfsvfs_t *zfsvfs = ITOZSB(dip); struct inode *ip; znode_t *dzp; int error; - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); - if (zsb->z_shares_dir == 0) { - ZFS_EXIT(zsb); + if (zfsvfs->z_shares_dir == 0) { + ZFS_EXIT(zfsvfs); return (SET_ERROR(ENOTSUP)); } - if ((error = zfs_zget(zsb, zsb->z_shares_dir, &dzp)) == 0) { + if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp)) == 0) { error = zfs_lookup(ZTOI(dzp), name, &ip, 0, cr, NULL, NULL); iput(ZTOI(dzp)); } - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } diff --git a/module/zfs/zfs_dir.c b/module/zfs/zfs_dir.c index ad159b1b6..1fcc69fd1 100644 --- a/module/zfs/zfs_dir.c +++ b/module/zfs/zfs_dir.c @@ -25,7 +25,6 @@ * Copyright 2017 Nexenta Systems, Inc. */ - #include <sys/types.h> #include <sys/param.h> #include <sys/time.h> @@ -66,13 +65,13 @@ * of names after deciding which is the appropriate lookup interface. */ static int -zfs_match_find(zfs_sb_t *zsb, znode_t *dzp, char *name, matchtype_t mt, +zfs_match_find(zfsvfs_t *zfsvfs, znode_t *dzp, char *name, matchtype_t mt, boolean_t update, int *deflags, pathname_t *rpnp, uint64_t *zoid) { boolean_t conflict = B_FALSE; int error; - if (zsb->z_norm) { + if (zfsvfs->z_norm) { size_t bufsz = 0; char *buf = NULL; @@ -85,10 +84,10 @@ zfs_match_find(zfs_sb_t *zsb, znode_t *dzp, char *name, matchtype_t mt, * In the non-mixed case we only expect there would ever * be one match, but we need to use the normalizing lookup. */ - error = zap_lookup_norm(zsb->z_os, dzp->z_id, name, 8, 1, + error = zap_lookup_norm(zfsvfs->z_os, dzp->z_id, name, 8, 1, zoid, mt, buf, bufsz, &conflict); } else { - error = zap_lookup(zsb->z_os, dzp->z_id, name, 8, 1, zoid); + error = zap_lookup(zfsvfs->z_os, dzp->z_id, name, 8, 1, zoid); } /* @@ -101,7 +100,7 @@ zfs_match_find(zfs_sb_t *zsb, znode_t *dzp, char *name, matchtype_t mt, if (error == EOVERFLOW) error = 0; - if (zsb->z_norm && !error && deflags) + if (zfsvfs->z_norm && !error && deflags) *deflags = conflict ? ED_CASE_CONFLICT : 0; *zoid = ZFS_DIRENT_OBJ(*zoid); @@ -153,7 +152,7 @@ int zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp, int flag, int *direntflags, pathname_t *realpnp) { - zfs_sb_t *zsb = ZTOZSB(dzp); + zfsvfs_t *zfsvfs = ZTOZSB(dzp); zfs_dirlock_t *dl; boolean_t update; matchtype_t mt = 0; @@ -178,7 +177,7 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp, /* * Case sensitivity and normalization preferences are set when * the file system is created. These are stored in the - * zsb->z_case and zsb->z_norm fields. These choices + * zfsvfs->z_case and zfsvfs->z_norm fields. These choices * affect what vnodes can be cached in the DNLC, how we * perform zap lookups, and the "width" of our dirlocks. * @@ -202,7 +201,7 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp, * * See the table above zfs_dropname(). */ - if (zsb->z_norm != 0) { + if (zfsvfs->z_norm != 0) { mt = MT_NORMALIZE; /* @@ -210,9 +209,9 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp, * lookup, and if so keep track of that so that during * normalization we don't fold case. 
*/ - if ((zsb->z_case == ZFS_CASE_INSENSITIVE && + if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE && (flag & ZCIEXACT)) || - (zsb->z_case == ZFS_CASE_MIXED && !(flag & ZCILOOK))) { + (zfsvfs->z_case == ZFS_CASE_MIXED && !(flag & ZCILOOK))) { mt |= MT_MATCH_CASE; } } @@ -227,9 +226,9 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp, * Maybe can add TO-UPPERed version of name to dnlc in ci-only * case for performance improvement? */ - update = !zsb->z_norm || - (zsb->z_case == ZFS_CASE_MIXED && - !(zsb->z_norm & ~U8_TEXTPREP_TOUPPER) && !(flag & ZCILOOK)); + update = !zfsvfs->z_norm || + (zfsvfs->z_case == ZFS_CASE_MIXED && + !(zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER) && !(flag & ZCILOOK)); /* * ZRENAMING indicates we are in a situation where we should @@ -242,7 +241,7 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp, if (flag & ZRENAMING) cmpflags = 0; else - cmpflags = zsb->z_norm; + cmpflags = zfsvfs->z_norm; /* * Wait until there are no locks on this name. @@ -322,7 +321,7 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp, * See if there's an object by this name; if so, put a hold on it. */ if (flag & ZXATTR) { - error = sa_lookup(dzp->z_sa_hdl, SA_ZPL_XATTR(zsb), &zoid, + error = sa_lookup(dzp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &zoid, sizeof (zoid)); if (error == 0) error = (zoid == 0 ? SET_ERROR(ENOENT) : 0); @@ -343,11 +342,11 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp, *zpp = VTOZ(vp); return (0); } else { - error = zfs_match_find(zsb, dzp, name, mt, + error = zfs_match_find(zfsvfs, dzp, name, mt, update, direntflags, realpnp, &zoid); } #else - error = zfs_match_find(zsb, dzp, name, mt, + error = zfs_match_find(zfsvfs, dzp, name, mt, update, direntflags, realpnp, &zoid); #endif /* HAVE_DNLC */ } @@ -361,7 +360,7 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp, zfs_dirent_unlock(dl); return (SET_ERROR(EEXIST)); } - error = zfs_zget(zsb, zoid, zpp); + error = zfs_zget(zfsvfs, zoid, zpp); if (error) { zfs_dirent_unlock(dl); return (error); @@ -430,23 +429,23 @@ zfs_dirlook(znode_t *dzp, char *name, struct inode **ipp, int flags, *ipp = ZTOI(dzp); igrab(*ipp); } else if (name[0] == '.' && name[1] == '.' && name[2] == 0) { - zfs_sb_t *zsb = ZTOZSB(dzp); + zfsvfs_t *zfsvfs = ZTOZSB(dzp); /* * If we are a snapshot mounted under .zfs, return * the inode pointer for the snapshot directory. 
*/ if ((error = sa_lookup(dzp->z_sa_hdl, - SA_ZPL_PARENT(zsb), &parent, sizeof (parent))) != 0) + SA_ZPL_PARENT(zfsvfs), &parent, sizeof (parent))) != 0) return (error); - if (parent == dzp->z_id && zsb->z_parent != zsb) { - error = zfsctl_root_lookup(zsb->z_parent->z_ctldir, + if (parent == dzp->z_id && zfsvfs->z_parent != zfsvfs) { + error = zfsctl_root_lookup(zfsvfs->z_parent->z_ctldir, "snapshot", ipp, 0, kcred, NULL, NULL); return (error); } rw_enter(&dzp->z_parent_lock, RW_READER); - error = zfs_zget(zsb, parent, &zp); + error = zfs_zget(zfsvfs, parent, &zp); if (error == 0) *ipp = ZTOI(zp); rw_exit(&dzp->z_parent_lock); @@ -491,13 +490,13 @@ zfs_dirlook(znode_t *dzp, char *name, struct inode **ipp, int flags, void zfs_unlinked_add(znode_t *zp, dmu_tx_t *tx) { - zfs_sb_t *zsb = ZTOZSB(zp); + zfsvfs_t *zfsvfs = ZTOZSB(zp); ASSERT(zp->z_unlinked); ASSERT(ZTOI(zp)->i_nlink == 0); VERIFY3U(0, ==, - zap_add_int(zsb->z_os, zsb->z_unlinkedobj, zp->z_id, tx)); + zap_add_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx)); } /* @@ -505,7 +504,7 @@ zfs_unlinked_add(znode_t *zp, dmu_tx_t *tx) * (force) umounted the file system. */ void -zfs_unlinked_drain(zfs_sb_t *zsb) +zfs_unlinked_drain(zfsvfs_t *zfsvfs) { zap_cursor_t zc; zap_attribute_t zap; @@ -516,7 +515,7 @@ zfs_unlinked_drain(zfs_sb_t *zsb) /* * Iterate over the contents of the unlinked set. */ - for (zap_cursor_init(&zc, zsb->z_os, zsb->z_unlinkedobj); + for (zap_cursor_init(&zc, zfsvfs->z_os, zfsvfs->z_unlinkedobj); zap_cursor_retrieve(&zc, &zap) == 0; zap_cursor_advance(&zc)) { @@ -524,7 +523,8 @@ zfs_unlinked_drain(zfs_sb_t *zsb) * See what kind of object we have in list */ - error = dmu_object_info(zsb->z_os, zap.za_first_integer, &doi); + error = dmu_object_info(zfsvfs->z_os, + zap.za_first_integer, &doi); if (error != 0) continue; @@ -534,7 +534,7 @@ zfs_unlinked_drain(zfs_sb_t *zsb) * We need to re-mark these list entries for deletion, * so we pull them back into core and set zp->z_unlinked. */ - error = zfs_zget(zsb, zap.za_first_integer, &zp); + error = zfs_zget(zfsvfs, zap.za_first_integer, &zp); /* * We may pick up znodes that are already marked for deletion. @@ -569,15 +569,15 @@ zfs_purgedir(znode_t *dzp) zap_attribute_t zap; znode_t *xzp; dmu_tx_t *tx; - zfs_sb_t *zsb = ZTOZSB(dzp); + zfsvfs_t *zfsvfs = ZTOZSB(dzp); zfs_dirlock_t dl; int skipped = 0; int error; - for (zap_cursor_init(&zc, zsb->z_os, dzp->z_id); + for (zap_cursor_init(&zc, zfsvfs->z_os, dzp->z_id); (error = zap_cursor_retrieve(&zc, &zap)) == 0; zap_cursor_advance(&zc)) { - error = zfs_zget(zsb, + error = zfs_zget(zfsvfs, ZFS_DIRENT_OBJ(zap.za_first_integer), &xzp); if (error) { skipped += 1; @@ -587,11 +587,11 @@ zfs_purgedir(znode_t *dzp) ASSERT(S_ISREG(ZTOI(xzp)->i_mode) || S_ISLNK(ZTOI(xzp)->i_mode)); - tx = dmu_tx_create(zsb->z_os); + tx = dmu_tx_create(zfsvfs->z_os); dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE); dmu_tx_hold_zap(tx, dzp->z_id, FALSE, zap.za_name); dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE); - dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL); + dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); /* Is this really needed ? 
*/ zfs_sa_upgrade_txholds(tx, xzp); dmu_tx_mark_netfree(tx); @@ -622,8 +622,8 @@ zfs_purgedir(znode_t *dzp) void zfs_rmnode(znode_t *zp) { - zfs_sb_t *zsb = ZTOZSB(zp); - objset_t *os = zsb->z_os; + zfsvfs_t *zfsvfs = ZTOZSB(zp); + objset_t *os = zfsvfs->z_os; znode_t *xzp = NULL; dmu_tx_t *tx; uint64_t acl_obj; @@ -672,10 +672,10 @@ zfs_rmnode(znode_t *zp) * If the file has extended attributes, we're going to unlink * the xattr dir. */ - error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb), + error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &xattr_obj, sizeof (xattr_obj)); if (error == 0 && xattr_obj) { - error = zfs_zget(zsb, xattr_obj, &xzp); + error = zfs_zget(zfsvfs, xattr_obj, &xzp); ASSERT(error == 0); } @@ -686,9 +686,9 @@ zfs_rmnode(znode_t *zp) */ tx = dmu_tx_create(os); dmu_tx_hold_free(tx, zp->z_id, 0, DMU_OBJECT_END); - dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL); + dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); if (xzp) { - dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, TRUE, NULL); + dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, TRUE, NULL); dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE); } if (acl_obj) @@ -713,7 +713,7 @@ zfs_rmnode(znode_t *zp) xzp->z_unlinked = B_TRUE; /* mark xzp for deletion */ clear_nlink(ZTOI(xzp)); /* no more links to it */ links = 0; - VERIFY(0 == sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zsb), + VERIFY(0 == sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs), &links, sizeof (links), tx)); mutex_exit(&xzp->z_lock); zfs_unlinked_add(xzp, tx); @@ -721,7 +721,7 @@ zfs_rmnode(znode_t *zp) /* Remove this znode from the unlinked set */ VERIFY3U(0, ==, - zap_remove_int(zsb->z_os, zsb->z_unlinkedobj, zp->z_id, tx)); + zap_remove_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx)); zfs_znode_delete(zp, tx); @@ -748,7 +748,7 @@ int zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag) { znode_t *dzp = dl->dl_dzp; - zfs_sb_t *zsb = ZTOZSB(zp); + zfsvfs_t *zfsvfs = ZTOZSB(zp); uint64_t value; int zp_is_dir = S_ISDIR(ZTOI(zp)->i_mode); sa_bulk_attr_t bulk[5]; @@ -772,17 +772,17 @@ zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag) */ inc_nlink(ZTOI(zp)); links = ZTOI(zp)->i_nlink; - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL, - &links, sizeof (links)); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), + NULL, &links, sizeof (links)); } } - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL, &dzp->z_id, sizeof (dzp->z_id)); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL, &zp->z_pflags, sizeof (zp->z_pflags)); if (!(flag & ZNEW)) { - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, ctime, sizeof (ctime)); zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime); @@ -798,15 +798,15 @@ zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag) inc_nlink(ZTOI(dzp)); links = ZTOI(dzp)->i_nlink; count = 0; - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL, &dzp->z_size, sizeof (dzp->z_size)); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL, &links, sizeof (links)); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, mtime, sizeof (mtime)); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, + 
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, ctime, sizeof (ctime)); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL, &dzp->z_pflags, sizeof (dzp->z_pflags)); zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime); error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx); @@ -881,7 +881,7 @@ zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag, boolean_t *unlinkedp) { znode_t *dzp = dl->dl_dzp; - zfs_sb_t *zsb = ZTOZSB(dzp); + zfsvfs_t *zfsvfs = ZTOZSB(dzp); int zp_is_dir = S_ISDIR(ZTOI(zp)->i_mode); boolean_t unlinked = B_FALSE; sa_bulk_attr_t bulk[5]; @@ -925,15 +925,15 @@ zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag, clear_nlink(ZTOI(zp)); unlinked = B_TRUE; } else { - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, sizeof (ctime)); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL, &zp->z_pflags, sizeof (zp->z_pflags)); zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime); } links = ZTOI(zp)->i_nlink; - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL, &links, sizeof (links)); error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx); count = 0; @@ -950,15 +950,15 @@ zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag, if (zp_is_dir) drop_nlink(ZTOI(dzp)); /* ".." link from zp */ links = ZTOI(dzp)->i_nlink; - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL, &links, sizeof (links)); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL, &dzp->z_size, sizeof (dzp->z_size)); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, ctime, sizeof (ctime)); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, mtime, sizeof (mtime)); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL, &dzp->z_pflags, sizeof (dzp->z_pflags)); zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime); error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx); @@ -987,7 +987,7 @@ zfs_dirempty(znode_t *dzp) int zfs_make_xattrdir(znode_t *zp, vattr_t *vap, struct inode **xipp, cred_t *cr) { - zfs_sb_t *zsb = ZTOZSB(zp); + zfsvfs_t *zfsvfs = ZTOZSB(zp); znode_t *xzp; dmu_tx_t *tx; int error; @@ -1005,19 +1005,19 @@ zfs_make_xattrdir(znode_t *zp, vattr_t *vap, struct inode **xipp, cred_t *cr) if ((error = zfs_acl_ids_create(zp, IS_XATTR, vap, cr, NULL, &acl_ids)) != 0) return (error); - if (zfs_acl_ids_overquota(zsb, &acl_ids)) { + if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) { zfs_acl_ids_free(&acl_ids); return (SET_ERROR(EDQUOT)); } - tx = dmu_tx_create(zsb->z_os); + tx = dmu_tx_create(zfsvfs->z_os); dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes + ZFS_SA_BASE_ATTR_SIZE); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE); dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL); - fuid_dirtied = zsb->z_fuid_dirty; + fuid_dirtied = zfsvfs->z_fuid_dirty; if (fuid_dirtied) - zfs_fuid_txhold(zsb, tx); + zfs_fuid_txhold(zfsvfs, tx); error = dmu_tx_assign(tx, TXG_WAIT); if (error) { zfs_acl_ids_free(&acl_ids); @@ -1027,19 +1027,19 @@ zfs_make_xattrdir(znode_t *zp, vattr_t *vap, struct inode **xipp, cred_t *cr) 
zfs_mknode(zp, vap, tx, cr, IS_XATTR, &xzp, &acl_ids); if (fuid_dirtied) - zfs_fuid_sync(zsb, tx); + zfs_fuid_sync(zfsvfs, tx); #ifdef DEBUG - error = sa_lookup(xzp->z_sa_hdl, SA_ZPL_PARENT(zsb), + error = sa_lookup(xzp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs), &parent, sizeof (parent)); ASSERT(error == 0 && parent == zp->z_id); #endif - VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_XATTR(zsb), &xzp->z_id, + VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &xzp->z_id, sizeof (xzp->z_id), tx)); if (!zp->z_unlinked) - (void) zfs_log_create(zsb->z_log, tx, TX_MKXATTR, zp, + (void) zfs_log_create(zfsvfs->z_log, tx, TX_MKXATTR, zp, xzp, "", NULL, acl_ids.z_fuidp, vap); zfs_acl_ids_free(&acl_ids); @@ -1066,7 +1066,7 @@ zfs_make_xattrdir(znode_t *zp, vattr_t *vap, struct inode **xipp, cred_t *cr) int zfs_get_xattrdir(znode_t *zp, struct inode **xipp, cred_t *cr, int flags) { - zfs_sb_t *zsb = ZTOZSB(zp); + zfsvfs_t *zfsvfs = ZTOZSB(zp); znode_t *xzp; zfs_dirlock_t *dl; vattr_t va; @@ -1087,7 +1087,7 @@ top: return (SET_ERROR(ENOENT)); } - if (zfs_is_readonly(zsb)) { + if (zfs_is_readonly(zfsvfs)) { zfs_dirent_unlock(dl); return (SET_ERROR(EROFS)); } @@ -1137,17 +1137,17 @@ zfs_sticky_remove_access(znode_t *zdp, znode_t *zp, cred_t *cr) uid_t uid; uid_t downer; uid_t fowner; - zfs_sb_t *zsb = ZTOZSB(zdp); + zfsvfs_t *zfsvfs = ZTOZSB(zdp); - if (zsb->z_replay) + if (zfsvfs->z_replay) return (0); if ((zdp->z_mode & S_ISVTX) == 0) return (0); - downer = zfs_fuid_map_id(zsb, KUID_TO_SUID(ZTOI(zdp)->i_uid), + downer = zfs_fuid_map_id(zfsvfs, KUID_TO_SUID(ZTOI(zdp)->i_uid), cr, ZFS_OWNER); - fowner = zfs_fuid_map_id(zsb, KUID_TO_SUID(ZTOI(zp)->i_uid), + fowner = zfs_fuid_map_id(zfsvfs, KUID_TO_SUID(ZTOI(zp)->i_uid), cr, ZFS_OWNER); if ((uid = crgetuid(cr)) == downer || uid == fowner || diff --git a/module/zfs/zfs_fuid.c b/module/zfs/zfs_fuid.c index 3ab1c7ba0..5cfb0c975 100644 --- a/module/zfs/zfs_fuid.c +++ b/module/zfs/zfs_fuid.c @@ -46,7 +46,7 @@ * two AVL trees are created. One tree is keyed by the index number * and the other by the domain string. Nodes are never removed from * trees, but new entries may be added. If a new entry is added then - * the zsb->z_fuid_dirty flag is set to true and the caller will then + * the zfsvfs->z_fuid_dirty flag is set to true and the caller will then * be responsible for calling zfs_fuid_sync() to sync the changes to disk. * */ @@ -191,34 +191,34 @@ zfs_fuid_idx_domain(avl_tree_t *idx_tree, uint32_t idx) * Load the fuid table(s) into memory. 
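The zfs_make_xattrdir() hunk above shows the caller-side contract for the FUID table that the functions below maintain: read z_fuid_dirty before assigning the transaction, reserve space with zfs_fuid_txhold(), and flush with zfs_fuid_sync() once the tx is assigned. A condensed sketch of that sequence (zfsvfs assumed in scope, other declarations trimmed):

	dmu_tx_t *tx;
	boolean_t fuid_dirtied;
	int error;

	tx = dmu_tx_create(zfsvfs->z_os);
	fuid_dirtied = zfsvfs->z_fuid_dirty;

	/* reserve space for the FUID table before assigning the tx */
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);

	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error != 0) {
		dmu_tx_abort(tx);
		return (error);
	}

	/* ... create the object, adding any new FUID entries ... */

	/* write the in-core AVL trees out to z_fuid_obj */
	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);

	dmu_tx_commit(tx);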
*/ static void -zfs_fuid_init(zfs_sb_t *zsb) +zfs_fuid_init(zfsvfs_t *zfsvfs) { - rw_enter(&zsb->z_fuid_lock, RW_WRITER); + rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER); - if (zsb->z_fuid_loaded) { - rw_exit(&zsb->z_fuid_lock); + if (zfsvfs->z_fuid_loaded) { + rw_exit(&zfsvfs->z_fuid_lock); return; } - zfs_fuid_avl_tree_create(&zsb->z_fuid_idx, &zsb->z_fuid_domain); + zfs_fuid_avl_tree_create(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain); - (void) zap_lookup(zsb->z_os, MASTER_NODE_OBJ, - ZFS_FUID_TABLES, 8, 1, &zsb->z_fuid_obj); - if (zsb->z_fuid_obj != 0) { - zsb->z_fuid_size = zfs_fuid_table_load(zsb->z_os, - zsb->z_fuid_obj, &zsb->z_fuid_idx, - &zsb->z_fuid_domain); + (void) zap_lookup(zfsvfs->z_os, MASTER_NODE_OBJ, + ZFS_FUID_TABLES, 8, 1, &zfsvfs->z_fuid_obj); + if (zfsvfs->z_fuid_obj != 0) { + zfsvfs->z_fuid_size = zfs_fuid_table_load(zfsvfs->z_os, + zfsvfs->z_fuid_obj, &zfsvfs->z_fuid_idx, + &zfsvfs->z_fuid_domain); } - zsb->z_fuid_loaded = B_TRUE; - rw_exit(&zsb->z_fuid_lock); + zfsvfs->z_fuid_loaded = B_TRUE; + rw_exit(&zfsvfs->z_fuid_lock); } /* * sync out AVL trees to persistent storage. */ void -zfs_fuid_sync(zfs_sb_t *zsb, dmu_tx_t *tx) +zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t *tx) { nvlist_t *nvp; nvlist_t **fuids; @@ -229,30 +229,30 @@ zfs_fuid_sync(zfs_sb_t *zsb, dmu_tx_t *tx) int numnodes; int i; - if (!zsb->z_fuid_dirty) { + if (!zfsvfs->z_fuid_dirty) { return; } - rw_enter(&zsb->z_fuid_lock, RW_WRITER); + rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER); /* * First see if table needs to be created? */ - if (zsb->z_fuid_obj == 0) { - zsb->z_fuid_obj = dmu_object_alloc(zsb->z_os, + if (zfsvfs->z_fuid_obj == 0) { + zfsvfs->z_fuid_obj = dmu_object_alloc(zfsvfs->z_os, DMU_OT_FUID, 1 << 14, DMU_OT_FUID_SIZE, sizeof (uint64_t), tx); - VERIFY(zap_add(zsb->z_os, MASTER_NODE_OBJ, + VERIFY(zap_add(zfsvfs->z_os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, sizeof (uint64_t), 1, - &zsb->z_fuid_obj, tx) == 0); + &zfsvfs->z_fuid_obj, tx) == 0); } VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0); - numnodes = avl_numnodes(&zsb->z_fuid_idx); + numnodes = avl_numnodes(&zfsvfs->z_fuid_idx); fuids = kmem_alloc(numnodes * sizeof (void *), KM_SLEEP); - for (i = 0, domnode = avl_first(&zsb->z_fuid_domain); domnode; i++, - domnode = AVL_NEXT(&zsb->z_fuid_domain, domnode)) { + for (i = 0, domnode = avl_first(&zfsvfs->z_fuid_domain); domnode; i++, + domnode = AVL_NEXT(&zfsvfs->z_fuid_domain, domnode)) { VERIFY(nvlist_alloc(&fuids[i], NV_UNIQUE_NAME, KM_SLEEP) == 0); VERIFY(nvlist_add_uint64(fuids[i], FUID_IDX, domnode->f_idx) == 0); @@ -270,29 +270,30 @@ zfs_fuid_sync(zfs_sb_t *zsb, dmu_tx_t *tx) VERIFY(nvlist_pack(nvp, &packed, &nvsize, NV_ENCODE_XDR, KM_SLEEP) == 0); nvlist_free(nvp); - zsb->z_fuid_size = nvsize; - dmu_write(zsb->z_os, zsb->z_fuid_obj, 0, zsb->z_fuid_size, packed, tx); - kmem_free(packed, zsb->z_fuid_size); - VERIFY(0 == dmu_bonus_hold(zsb->z_os, zsb->z_fuid_obj, + zfsvfs->z_fuid_size = nvsize; + dmu_write(zfsvfs->z_os, zfsvfs->z_fuid_obj, 0, + zfsvfs->z_fuid_size, packed, tx); + kmem_free(packed, zfsvfs->z_fuid_size); + VERIFY(0 == dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj, FTAG, &db)); dmu_buf_will_dirty(db, tx); - *(uint64_t *)db->db_data = zsb->z_fuid_size; + *(uint64_t *)db->db_data = zfsvfs->z_fuid_size; dmu_buf_rele(db, FTAG); - zsb->z_fuid_dirty = B_FALSE; - rw_exit(&zsb->z_fuid_lock); + zfsvfs->z_fuid_dirty = B_FALSE; + rw_exit(&zfsvfs->z_fuid_lock); } /* * Query domain table for a given domain. 
* * If domain isn't found and addok is set, it is added to AVL trees and - * the zsb->z_fuid_dirty flag will be set to TRUE. It will then be + * the zfsvfs->z_fuid_dirty flag will be set to TRUE. It will then be * necessary for the caller or another thread to detect the dirty table * and sync out the changes. */ int -zfs_fuid_find_by_domain(zfs_sb_t *zsb, const char *domain, +zfs_fuid_find_by_domain(zfsvfs_t *zfsvfs, const char *domain, char **retdomain, boolean_t addok) { fuid_domain_t searchnode, *findnode; @@ -313,23 +314,23 @@ zfs_fuid_find_by_domain(zfs_sb_t *zsb, const char *domain, searchnode.f_ksid = ksid_lookupdomain(domain); if (retdomain) *retdomain = searchnode.f_ksid->kd_name; - if (!zsb->z_fuid_loaded) - zfs_fuid_init(zsb); + if (!zfsvfs->z_fuid_loaded) + zfs_fuid_init(zfsvfs); retry: - rw_enter(&zsb->z_fuid_lock, rw); - findnode = avl_find(&zsb->z_fuid_domain, &searchnode, &loc); + rw_enter(&zfsvfs->z_fuid_lock, rw); + findnode = avl_find(&zfsvfs->z_fuid_domain, &searchnode, &loc); if (findnode) { - rw_exit(&zsb->z_fuid_lock); + rw_exit(&zfsvfs->z_fuid_lock); ksiddomain_rele(searchnode.f_ksid); return (findnode->f_idx); } else if (addok) { fuid_domain_t *domnode; uint64_t retidx; - if (rw == RW_READER && !rw_tryupgrade(&zsb->z_fuid_lock)) { - rw_exit(&zsb->z_fuid_lock); + if (rw == RW_READER && !rw_tryupgrade(&zfsvfs->z_fuid_lock)) { + rw_exit(&zfsvfs->z_fuid_lock); rw = RW_WRITER; goto retry; } @@ -337,15 +338,15 @@ retry: domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP); domnode->f_ksid = searchnode.f_ksid; - retidx = domnode->f_idx = avl_numnodes(&zsb->z_fuid_idx) + 1; + retidx = domnode->f_idx = avl_numnodes(&zfsvfs->z_fuid_idx) + 1; - avl_add(&zsb->z_fuid_domain, domnode); - avl_add(&zsb->z_fuid_idx, domnode); - zsb->z_fuid_dirty = B_TRUE; - rw_exit(&zsb->z_fuid_lock); + avl_add(&zfsvfs->z_fuid_domain, domnode); + avl_add(&zfsvfs->z_fuid_idx, domnode); + zfsvfs->z_fuid_dirty = B_TRUE; + rw_exit(&zfsvfs->z_fuid_lock); return (retidx); } else { - rw_exit(&zsb->z_fuid_lock); + rw_exit(&zfsvfs->z_fuid_lock); return (-1); } } @@ -357,23 +358,23 @@ retry: * */ const char * -zfs_fuid_find_by_idx(zfs_sb_t *zsb, uint32_t idx) +zfs_fuid_find_by_idx(zfsvfs_t *zfsvfs, uint32_t idx) { char *domain; - if (idx == 0 || !zsb->z_use_fuids) + if (idx == 0 || !zfsvfs->z_use_fuids) return (NULL); - if (!zsb->z_fuid_loaded) - zfs_fuid_init(zsb); + if (!zfsvfs->z_fuid_loaded) + zfs_fuid_init(zfsvfs); - rw_enter(&zsb->z_fuid_lock, RW_READER); + rw_enter(&zfsvfs->z_fuid_lock, RW_READER); - if (zsb->z_fuid_obj || zsb->z_fuid_dirty) - domain = zfs_fuid_idx_domain(&zsb->z_fuid_idx, idx); + if (zfsvfs->z_fuid_obj || zfsvfs->z_fuid_dirty) + domain = zfs_fuid_idx_domain(&zfsvfs->z_fuid_idx, idx); else domain = nulldomain; - rw_exit(&zsb->z_fuid_lock); + rw_exit(&zfsvfs->z_fuid_lock); ASSERT(domain); return (domain); @@ -389,7 +390,7 @@ zfs_fuid_map_ids(znode_t *zp, cred_t *cr, uid_t *uidp, uid_t *gidp) } uid_t -zfs_fuid_map_id(zfs_sb_t *zsb, uint64_t fuid, +zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid, cred_t *cr, zfs_fuid_type_t type) { #ifdef HAVE_KSID @@ -400,7 +401,7 @@ zfs_fuid_map_id(zfs_sb_t *zsb, uint64_t fuid, if (index == 0) return (fuid); - domain = zfs_fuid_find_by_idx(zsb, index); + domain = zfs_fuid_find_by_idx(zfsvfs, index); ASSERT(domain != NULL); if (type == ZFS_OWNER || type == ZFS_ACE_USER) { @@ -495,7 +496,7 @@ zfs_fuid_node_add(zfs_fuid_info_t **fuidpp, const char *domain, uint32_t rid, * be used if it exists. 
*/ uint64_t -zfs_fuid_create_cred(zfs_sb_t *zsb, zfs_fuid_type_t type, +zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type, cred_t *cr, zfs_fuid_info_t **fuidp) { uint64_t idx; @@ -509,7 +510,7 @@ zfs_fuid_create_cred(zfs_sb_t *zsb, zfs_fuid_type_t type, ksid = crgetsid(cr, (type == ZFS_OWNER) ? KSID_OWNER : KSID_GROUP); - if (!zsb->z_use_fuids || (ksid == NULL)) { + if (!zfsvfs->z_use_fuids || (ksid == NULL)) { id = (type == ZFS_OWNER) ? crgetuid(cr) : crgetgid(cr); if (IS_EPHEMERAL(id)) @@ -532,7 +533,7 @@ zfs_fuid_create_cred(zfs_sb_t *zsb, zfs_fuid_type_t type, rid = ksid_getrid(ksid); domain = ksid_getdomain(ksid); - idx = zfs_fuid_find_by_domain(zsb, domain, &kdomain, B_TRUE); + idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE); zfs_fuid_node_add(fuidp, kdomain, rid, idx, id, type); @@ -550,10 +551,10 @@ zfs_fuid_create_cred(zfs_sb_t *zsb, zfs_fuid_type_t type, * * During replay operations the domain+rid information is * found in the zfs_fuid_info_t that the replay code has - * attached to the zsb of the file system. + * attached to the zfsvfs of the file system. */ uint64_t -zfs_fuid_create(zfs_sb_t *zsb, uint64_t id, cred_t *cr, +zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr, zfs_fuid_type_t type, zfs_fuid_info_t **fuidpp) { #ifdef HAVE_KSID @@ -574,11 +575,11 @@ zfs_fuid_create(zfs_sb_t *zsb, uint64_t id, cred_t *cr, * chmod. */ - if (!zsb->z_use_fuids || !IS_EPHEMERAL(id) || fuid_idx != 0) + if (!zfsvfs->z_use_fuids || !IS_EPHEMERAL(id) || fuid_idx != 0) return (id); - if (zsb->z_replay) { - fuidp = zsb->z_fuid_replay; + if (zfsvfs->z_replay) { + fuidp = zfsvfs->z_fuid_replay; /* * If we are passed an ephemeral id, but no @@ -628,9 +629,9 @@ zfs_fuid_create(zfs_sb_t *zsb, uint64_t id, cred_t *cr, } } - idx = zfs_fuid_find_by_domain(zsb, domain, &kdomain, B_TRUE); + idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE); - if (!zsb->z_replay) + if (!zfsvfs->z_replay) zfs_fuid_node_add(fuidpp, kdomain, rid, idx, id, type); else if (zfuid != NULL) { @@ -647,15 +648,15 @@ zfs_fuid_create(zfs_sb_t *zsb, uint64_t id, cred_t *cr, } void -zfs_fuid_destroy(zfs_sb_t *zsb) +zfs_fuid_destroy(zfsvfs_t *zfsvfs) { - rw_enter(&zsb->z_fuid_lock, RW_WRITER); - if (!zsb->z_fuid_loaded) { - rw_exit(&zsb->z_fuid_lock); + rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER); + if (!zfsvfs->z_fuid_loaded) { + rw_exit(&zfsvfs->z_fuid_lock); return; } - zfs_fuid_table_destroy(&zsb->z_fuid_idx, &zsb->z_fuid_domain); - rw_exit(&zsb->z_fuid_lock); + zfs_fuid_table_destroy(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain); + rw_exit(&zfsvfs->z_fuid_lock); } /* @@ -709,7 +710,7 @@ zfs_fuid_info_free(zfs_fuid_info_t *fuidp) * Will use a straight FUID compare when possible. 
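zfs_fuid_map_id() above and zfs_groupmember() below both split a FUID into its domain-table index and rid. A tiny illustration of that packing, assuming the usual FUID_ENCODE/FUID_INDEX/FUID_RID macros from zfs_fuid.h:

	uint32_t idx = 1;			/* slot in z_fuid_idx/z_fuid_domain */
	uint32_t rid = 500;			/* rid within that domain */
	uint64_t fuid = FUID_ENCODE(idx, rid);	/* (idx << 32) | rid */

	ASSERT3U(FUID_INDEX(fuid), ==, idx);
	ASSERT3U(FUID_RID(fuid), ==, rid);

	/*
	 * Index 0 means no domain at all: the value is then a plain
	 * POSIX uid/gid, which is why the lookups above return early
	 * when FUID_INDEX() is zero.
	 */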
*/ boolean_t -zfs_groupmember(zfs_sb_t *zsb, uint64_t id, cred_t *cr) +zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr) { #ifdef HAVE_KSID ksid_t *ksid = crgetsid(cr, KSID_GROUP); @@ -733,7 +734,7 @@ zfs_groupmember(zfs_sb_t *zsb, uint64_t id, cred_t *cr) } else { const char *domain; - domain = zfs_fuid_find_by_idx(zsb, idx); + domain = zfs_fuid_find_by_idx(zfsvfs, idx); ASSERT(domain != NULL); if (strcmp(domain, @@ -751,7 +752,7 @@ zfs_groupmember(zfs_sb_t *zsb, uint64_t id, cred_t *cr) /* * Not found in ksidlist, check posix groups */ - gid = zfs_fuid_map_id(zsb, id, cr, ZFS_GROUP); + gid = zfs_fuid_map_id(zfsvfs, id, cr, ZFS_GROUP); return (groupmember(gid, cr)); #else return (B_TRUE); @@ -759,17 +760,17 @@ zfs_groupmember(zfs_sb_t *zsb, uint64_t id, cred_t *cr) } void -zfs_fuid_txhold(zfs_sb_t *zsb, dmu_tx_t *tx) +zfs_fuid_txhold(zfsvfs_t *zfsvfs, dmu_tx_t *tx) { - if (zsb->z_fuid_obj == 0) { + if (zfsvfs->z_fuid_obj == 0) { dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, - FUID_SIZE_ESTIMATE(zsb)); + FUID_SIZE_ESTIMATE(zfsvfs)); dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL); } else { - dmu_tx_hold_bonus(tx, zsb->z_fuid_obj); - dmu_tx_hold_write(tx, zsb->z_fuid_obj, 0, - FUID_SIZE_ESTIMATE(zsb)); + dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj); + dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0, + FUID_SIZE_ESTIMATE(zfsvfs)); } } #endif diff --git a/module/zfs/zfs_ioctl.c b/module/zfs/zfs_ioctl.c index 8aa69231a..6df61fecb 100644 --- a/module/zfs/zfs_ioctl.c +++ b/module/zfs/zfs_ioctl.c @@ -548,7 +548,7 @@ zfs_set_slabel_policy(const char *name, char *strval, cred_t *cr) /* * If the existing dataset label is nondefault, check if the * dataset is mounted (label cannot be changed while mounted). - * Get the zfs_sb_t; if there isn't one, then the dataset isn't + * Get the zfsvfs_t; if there isn't one, then the dataset isn't * mounted (or isn't a dataset, doesn't exist, ...). */ if (strcasecmp(ds_hexsl, ZFS_MLSLABEL_DEFAULT) != 0) { @@ -1394,7 +1394,7 @@ put_nvlist(zfs_cmd_t *zc, nvlist_t *nvl) } static int -get_zfs_sb(const char *dsname, zfs_sb_t **zsbp) +getzfsvfs(const char *dsname, zfsvfs_t **zfvp) { objset_t *os; int error; @@ -1408,10 +1408,10 @@ get_zfs_sb(const char *dsname, zfs_sb_t **zsbp) } mutex_enter(&os->os_user_ptr_lock); - *zsbp = dmu_objset_get_user(os); + *zfvp = dmu_objset_get_user(os); /* bump s_active only when non-zero to prevent umount race */ - if (*zsbp == NULL || (*zsbp)->z_sb == NULL || - !atomic_inc_not_zero(&((*zsbp)->z_sb->s_active))) { + if (*zfvp == NULL || (*zfvp)->z_sb == NULL || + !atomic_inc_not_zero(&((*zfvp)->z_sb->s_active))) { error = SET_ERROR(ESRCH); } mutex_exit(&os->os_user_ptr_lock); @@ -1420,28 +1420,28 @@ get_zfs_sb(const char *dsname, zfs_sb_t **zsbp) } /* - * Find a zfs_sb_t for a mounted filesystem, or create our own, in which + * Find a zfsvfs_t for a mounted filesystem, or create our own, in which * case its z_sb will be NULL, and it will be opened as the owner. * If 'writer' is set, the z_teardown_lock will be held for RW_WRITER, * which prevents all inode ops from running. */ static int -zfs_sb_hold(const char *name, void *tag, zfs_sb_t **zsbp, boolean_t writer) +zfsvfs_hold(const char *name, void *tag, zfsvfs_t **zfvp, boolean_t writer) { int error = 0; - if (get_zfs_sb(name, zsbp) != 0) - error = zfs_sb_create(name, NULL, zsbp); + if (getzfsvfs(name, zfvp) != 0) + error = zfsvfs_create(name, zfvp); if (error == 0) { - rrm_enter(&(*zsbp)->z_teardown_lock, (writer) ? 
RW_WRITER : + rrm_enter(&(*zfvp)->z_teardown_lock, (writer) ? RW_WRITER : RW_READER, tag); - if ((*zsbp)->z_unmounted) { + if ((*zfvp)->z_unmounted) { /* * XXX we could probably try again, since the unmounting * thread should be just about to disassociate the - * objset from the zsb. + * objset from the zfsvfs. */ - rrm_exit(&(*zsbp)->z_teardown_lock, tag); + rrm_exit(&(*zfvp)->z_teardown_lock, tag); return (SET_ERROR(EBUSY)); } } @@ -1449,15 +1449,15 @@ zfs_sb_hold(const char *name, void *tag, zfs_sb_t **zsbp, boolean_t writer) } static void -zfs_sb_rele(zfs_sb_t *zsb, void *tag) +zfsvfs_rele(zfsvfs_t *zfsvfs, void *tag) { - rrm_exit(&zsb->z_teardown_lock, tag); + rrm_exit(&zfsvfs->z_teardown_lock, tag); - if (zsb->z_sb) { - deactivate_super(zsb->z_sb); + if (zfsvfs->z_sb) { + deactivate_super(zfsvfs->z_sb); } else { - dmu_objset_disown(zsb->z_os, zsb); - zfs_sb_free(zsb); + dmu_objset_disown(zfsvfs->z_os, zfsvfs); + zfsvfs_free(zfsvfs); } } @@ -2324,7 +2324,7 @@ zfs_prop_set_userquota(const char *dsname, nvpair_t *pair) zfs_userquota_prop_t type; uint64_t rid; uint64_t quota; - zfs_sb_t *zsb; + zfsvfs_t *zfsvfs; int err; if (nvpair_type(pair) == DATA_TYPE_NVLIST) { @@ -2349,10 +2349,10 @@ zfs_prop_set_userquota(const char *dsname, nvpair_t *pair) rid = valary[1]; quota = valary[2]; - err = zfs_sb_hold(dsname, FTAG, &zsb, B_FALSE); + err = zfsvfs_hold(dsname, FTAG, &zfsvfs, B_FALSE); if (err == 0) { - err = zfs_set_userquota(zsb, type, domain, rid, quota); - zfs_sb_rele(zsb, FTAG); + err = zfs_set_userquota(zfsvfs, type, domain, rid, quota); + zfsvfs_rele(zfsvfs, FTAG); } return (err); @@ -2429,13 +2429,13 @@ zfs_prop_set_special(const char *dsname, zprop_source_t source, break; case ZFS_PROP_VERSION: { - zfs_sb_t *zsb; + zfsvfs_t *zfsvfs; - if ((err = zfs_sb_hold(dsname, FTAG, &zsb, B_TRUE)) != 0) + if ((err = zfsvfs_hold(dsname, FTAG, &zfsvfs, B_TRUE)) != 0) break; - err = zfs_set_version(zsb, intval); - zfs_sb_rele(zsb, FTAG); + err = zfs_set_version(zfsvfs, intval); + zfsvfs_rele(zfsvfs, FTAG); if (err == 0 && intval >= ZPL_VERSION_USERSPACE) { zfs_cmd_t *zc; @@ -3640,23 +3640,23 @@ zfs_ioc_destroy(zfs_cmd_t *zc) static int zfs_ioc_rollback(const char *fsname, nvlist_t *args, nvlist_t *outnvl) { - zfs_sb_t *zsb; + zfsvfs_t *zfsvfs; zvol_state_t *zv; int error; - if (get_zfs_sb(fsname, &zsb) == 0) { + if (getzfsvfs(fsname, &zfsvfs) == 0) { dsl_dataset_t *ds; - ds = dmu_objset_ds(zsb->z_os); - error = zfs_suspend_fs(zsb); + ds = dmu_objset_ds(zfsvfs->z_os); + error = zfs_suspend_fs(zfsvfs); if (error == 0) { int resume_err; - error = dsl_dataset_rollback(fsname, zsb, outnvl); - resume_err = zfs_resume_fs(zsb, ds); + error = dsl_dataset_rollback(fsname, zfsvfs, outnvl); + resume_err = zfs_resume_fs(zfsvfs, ds); error = error ? error : resume_err; } - deactivate_super(zsb->z_sb); + deactivate_super(zfsvfs->z_sb); } else if ((zv = zvol_suspend(fsname)) != NULL) { error = dsl_dataset_rollback(fsname, zvol_tag(zv), outnvl); zvol_resume(zv); @@ -4246,25 +4246,25 @@ zfs_ioc_recv_impl(char *tofs, char *tosnap, char *origin, action_handle); if (error == 0) { - zfs_sb_t *zsb = NULL; + zfsvfs_t *zfsvfs = NULL; zvol_state_t *zv = NULL; - if (get_zfs_sb(tofs, &zsb) == 0) { + if (getzfsvfs(tofs, &zfsvfs) == 0) { /* online recv */ dsl_dataset_t *ds; int end_err; - ds = dmu_objset_ds(zsb->z_os); - error = zfs_suspend_fs(zsb); + ds = dmu_objset_ds(zfsvfs->z_os); + error = zfs_suspend_fs(zfsvfs); /* * If the suspend fails, then the recv_end will * likely also fail, and clean up after itself. 
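The online-receive path here follows the same quiesce shape used by zfs_ioc_rollback() above and the userspace upgrade later in this file: suspend the mounted filesystem, run the dataset-level operation, then resume and fold the resume status into the result. A condensed sketch of that pattern (fsname is a placeholder, error handling abbreviated):

	zfsvfs_t *zfsvfs;
	int error;

	if (getzfsvfs(fsname, &zfsvfs) == 0) {
		/* mounted: quiesce the filesystem around the operation */
		dsl_dataset_t *ds = dmu_objset_ds(zfsvfs->z_os);

		error = zfs_suspend_fs(zfsvfs);
		if (error == 0) {
			int resume_err;

			/* ... run the dataset-level operation, setting error ... */

			resume_err = zfs_resume_fs(zfsvfs, ds);
			error = error ? error : resume_err;
		}
		/* drop the s_active reference taken by getzfsvfs() */
		deactivate_super(zfsvfs->z_sb);
	}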
*/ - end_err = dmu_recv_end(&drc, zsb); + end_err = dmu_recv_end(&drc, zfsvfs); if (error == 0) - error = zfs_resume_fs(zsb, ds); + error = zfs_resume_fs(zfsvfs, ds); error = error ? error : end_err; - deactivate_super(zsb->z_sb); + deactivate_super(zfsvfs->z_sb); } else if ((zv = zvol_suspend(tofs)) != NULL) { error = dmu_recv_end(&drc, zvol_tag(zv)); zvol_resume(zv); @@ -4869,19 +4869,19 @@ zfs_ioc_promote(zfs_cmd_t *zc) static int zfs_ioc_userspace_one(zfs_cmd_t *zc) { - zfs_sb_t *zsb; + zfsvfs_t *zfsvfs; int error; if (zc->zc_objset_type >= ZFS_NUM_USERQUOTA_PROPS) return (SET_ERROR(EINVAL)); - error = zfs_sb_hold(zc->zc_name, FTAG, &zsb, B_FALSE); + error = zfsvfs_hold(zc->zc_name, FTAG, &zfsvfs, B_FALSE); if (error != 0) return (error); - error = zfs_userspace_one(zsb, + error = zfs_userspace_one(zfsvfs, zc->zc_objset_type, zc->zc_value, zc->zc_guid, &zc->zc_cookie); - zfs_sb_rele(zsb, FTAG); + zfsvfs_rele(zfsvfs, FTAG); return (error); } @@ -4900,7 +4900,7 @@ zfs_ioc_userspace_one(zfs_cmd_t *zc) static int zfs_ioc_userspace_many(zfs_cmd_t *zc) { - zfs_sb_t *zsb; + zfsvfs_t *zfsvfs; int bufsize = zc->zc_nvlist_dst_size; int error; void *buf; @@ -4908,13 +4908,13 @@ zfs_ioc_userspace_many(zfs_cmd_t *zc) if (bufsize <= 0) return (SET_ERROR(ENOMEM)); - error = zfs_sb_hold(zc->zc_name, FTAG, &zsb, B_FALSE); + error = zfsvfs_hold(zc->zc_name, FTAG, &zfsvfs, B_FALSE); if (error != 0) return (error); buf = vmem_alloc(bufsize, KM_SLEEP); - error = zfs_userspace_many(zsb, zc->zc_objset_type, &zc->zc_cookie, + error = zfs_userspace_many(zfsvfs, zc->zc_objset_type, &zc->zc_cookie, buf, &zc->zc_nvlist_dst_size); if (error == 0) { @@ -4923,7 +4923,7 @@ zfs_ioc_userspace_many(zfs_cmd_t *zc) zc->zc_nvlist_dst_size); } vmem_free(buf, bufsize); - zfs_sb_rele(zsb, FTAG); + zfsvfs_rele(zfsvfs, FTAG); return (error); } @@ -4940,10 +4940,10 @@ zfs_ioc_userspace_upgrade(zfs_cmd_t *zc) { objset_t *os; int error = 0; - zfs_sb_t *zsb; + zfsvfs_t *zfsvfs; - if (get_zfs_sb(zc->zc_name, &zsb) == 0) { - if (!dmu_objset_userused_enabled(zsb->z_os)) { + if (getzfsvfs(zc->zc_name, &zfsvfs) == 0) { + if (!dmu_objset_userused_enabled(zfsvfs->z_os)) { /* * If userused is not enabled, it may be because the * objset needs to be closed & reopened (to grow the @@ -4951,17 +4951,17 @@ zfs_ioc_userspace_upgrade(zfs_cmd_t *zc) */ dsl_dataset_t *ds; - ds = dmu_objset_ds(zsb->z_os); - error = zfs_suspend_fs(zsb); + ds = dmu_objset_ds(zfsvfs->z_os); + error = zfs_suspend_fs(zfsvfs); if (error == 0) { - dmu_objset_refresh_ownership(zsb->z_os, - zsb); - error = zfs_resume_fs(zsb, ds); + dmu_objset_refresh_ownership(zfsvfs->z_os, + zfsvfs); + error = zfs_resume_fs(zfsvfs, ds); } } if (error == 0) - error = dmu_objset_userspace_upgrade(zsb->z_os); - deactivate_super(zsb->z_sb); + error = dmu_objset_userspace_upgrade(zfsvfs->z_os); + deactivate_super(zfsvfs->z_sb); } else { /* XXX kind of reading contents without owning */ error = dmu_objset_hold(zc->zc_name, FTAG, &os); @@ -5127,10 +5127,10 @@ zfs_smb_acl_purge(znode_t *dzp) { zap_cursor_t zc; zap_attribute_t zap; - zfs_sb_t *zsb = ZTOZSB(dzp); + zfsvfs_t *zfsvfs = ZTOZSB(dzp); int error; - for (zap_cursor_init(&zc, zsb->z_os, dzp->z_id); + for (zap_cursor_init(&zc, zfsvfs->z_os, dzp->z_id); (error = zap_cursor_retrieve(&zc, &zap)) == 0; zap_cursor_advance(&zc)) { if ((error = VOP_REMOVE(ZTOV(dzp), zap.za_name, kcred, @@ -5150,7 +5150,7 @@ zfs_ioc_smb_acl(zfs_cmd_t *zc) znode_t *dzp; vnode_t *resourcevp = NULL; znode_t *sharedir; - zfs_sb_t *zsb; + zfsvfs_t *zfsvfs; nvlist_t 
*nvlist; char *src, *target; vattr_t vattr; @@ -5171,17 +5171,17 @@ zfs_ioc_smb_acl(zfs_cmd_t *zc) } dzp = VTOZ(vp); - zsb = ZTOZSB(dzp); - ZFS_ENTER(zsb); + zfsvfs = ZTOZSB(dzp); + ZFS_ENTER(zfsvfs); /* * Create share dir if its missing. */ - mutex_enter(&zsb->z_lock); - if (zsb->z_shares_dir == 0) { + mutex_enter(&zfsvfs->z_lock); + if (zfsvfs->z_shares_dir == 0) { dmu_tx_t *tx; - tx = dmu_tx_create(zsb->z_os); + tx = dmu_tx_create(zfsvfs->z_os); dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, TRUE, ZFS_SHARES_DIR); dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL); @@ -5189,22 +5189,22 @@ zfs_ioc_smb_acl(zfs_cmd_t *zc) if (error != 0) { dmu_tx_abort(tx); } else { - error = zfs_create_share_dir(zsb, tx); + error = zfs_create_share_dir(zfsvfs, tx); dmu_tx_commit(tx); } if (error != 0) { - mutex_exit(&zsb->z_lock); + mutex_exit(&zfsvfs->z_lock); VN_RELE(vp); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } } - mutex_exit(&zsb->z_lock); + mutex_exit(&zfsvfs->z_lock); - ASSERT(zsb->z_shares_dir); - if ((error = zfs_zget(zsb, zsb->z_shares_dir, &sharedir)) != 0) { + ASSERT(zfsvfs->z_shares_dir); + if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &sharedir)) != 0) { VN_RELE(vp); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } @@ -5236,7 +5236,7 @@ zfs_ioc_smb_acl(zfs_cmd_t *zc) zc->zc_nvlist_src_size, zc->zc_iflags, &nvlist)) != 0) { VN_RELE(vp); VN_RELE(ZTOV(sharedir)); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } if (nvlist_lookup_string(nvlist, ZFS_SMB_ACL_SRC, &src) || @@ -5244,7 +5244,7 @@ zfs_ioc_smb_acl(zfs_cmd_t *zc) &target)) { VN_RELE(vp); VN_RELE(ZTOV(sharedir)); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); nvlist_free(nvlist); return (error); } @@ -5265,7 +5265,7 @@ zfs_ioc_smb_acl(zfs_cmd_t *zc) VN_RELE(vp); VN_RELE(ZTOV(sharedir)); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); #else diff --git a/module/zfs/zfs_replay.c b/module/zfs/zfs_replay.c index 1e72745f0..30efb4b57 100644 --- a/module/zfs/zfs_replay.c +++ b/module/zfs/zfs_replay.c @@ -72,7 +72,7 @@ zfs_init_vattr(vattr_t *vap, uint64_t mask, uint64_t mode, /* ARGSUSED */ static int -zfs_replay_error(zfs_sb_t *zsb, lr_t *lr, boolean_t byteswap) +zfs_replay_error(zfsvfs_t *zfsvfs, lr_t *lr, boolean_t byteswap) { return (SET_ERROR(ENOTSUP)); } @@ -265,7 +265,8 @@ zfs_replay_swap_attrs(lr_attr_t *lrattr) * as option FUID information. 
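The replay callbacks changed below all share the same preamble before redoing their logged operation. A minimal skeleton of that shape, using lr_remove_t as the example record (zfs_replay_example is a made-up name; the fields are the ones used by the real callbacks):

static int
zfs_replay_example(zfsvfs_t *zfsvfs, lr_remove_t *lr, boolean_t byteswap)
{
	znode_t *dzp;
	int error;

	/* log records may come from a foreign-endian pool; swap if needed */
	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	/* look up the directory the record applies to */
	if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0)
		return (error);

	/* ... redo the logged operation against dzp ... */

	iput(ZTOI(dzp));
	return (error);
}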
*/ static int -zfs_replay_create_acl(zfs_sb_t *zsb, lr_acl_create_t *lracl, boolean_t byteswap) +zfs_replay_create_acl(zfsvfs_t *zfsvfs, + lr_acl_create_t *lracl, boolean_t byteswap) { char *name = NULL; /* location determined later */ lr_create_t *lr = (lr_create_t *)lracl; @@ -303,7 +304,7 @@ zfs_replay_create_acl(zfs_sb_t *zsb, lr_acl_create_t *lracl, boolean_t byteswap) } } - if ((error = zfs_zget(zsb, lr->lr_doid, &dzp)) != 0) + if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0) return (error); objid = LR_FOID_GET_OBJ(lr->lr_foid); @@ -325,7 +326,7 @@ zfs_replay_create_acl(zfs_sb_t *zsb, lr_acl_create_t *lracl, boolean_t byteswap) xva.xva_vattr.va_nblocks = lr->lr_gen; xva.xva_vattr.va_fsid = dnodesize; - error = dmu_object_info(zsb->z_os, lr->lr_foid, NULL); + error = dmu_object_info(zfsvfs->z_os, lr->lr_foid, NULL); if (error != ENOENT) goto bail; @@ -336,7 +337,7 @@ zfs_replay_create_acl(zfs_sb_t *zsb, lr_acl_create_t *lracl, boolean_t byteswap) aclstart = (caddr_t)(lracl + 1); fuidstart = (caddr_t)aclstart + ZIL_ACE_LENGTH(lracl->lr_acl_bytes); - zsb->z_fuid_replay = zfs_replay_fuids(fuidstart, + zfsvfs->z_fuid_replay = zfs_replay_fuids(fuidstart, (void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt, lr->lr_uid, lr->lr_gid); /*FALLTHROUGH*/ @@ -352,10 +353,10 @@ zfs_replay_create_acl(zfs_sb_t *zsb, lr_acl_create_t *lracl, boolean_t byteswap) vsec.vsa_aclcnt = lracl->lr_aclcnt; vsec.vsa_aclentsz = lracl->lr_acl_bytes; vsec.vsa_aclflags = lracl->lr_acl_flags; - if (zsb->z_fuid_replay == NULL) { + if (zfsvfs->z_fuid_replay == NULL) { fuidstart = (caddr_t)(lracl + 1) + xvatlen + ZIL_ACE_LENGTH(lracl->lr_acl_bytes); - zsb->z_fuid_replay = + zfsvfs->z_fuid_replay = zfs_replay_fuids(fuidstart, (void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt, lr->lr_uid, lr->lr_gid); @@ -368,7 +369,7 @@ zfs_replay_create_acl(zfs_sb_t *zsb, lr_acl_create_t *lracl, boolean_t byteswap) aclstart = (caddr_t)(lracl + 1); fuidstart = (caddr_t)aclstart + ZIL_ACE_LENGTH(lracl->lr_acl_bytes); - zsb->z_fuid_replay = zfs_replay_fuids(fuidstart, + zfsvfs->z_fuid_replay = zfs_replay_fuids(fuidstart, (void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt, lr->lr_uid, lr->lr_gid); /*FALLTHROUGH*/ @@ -383,10 +384,10 @@ zfs_replay_create_acl(zfs_sb_t *zsb, lr_acl_create_t *lracl, boolean_t byteswap) vsec.vsa_aclcnt = lracl->lr_aclcnt; vsec.vsa_aclentsz = lracl->lr_acl_bytes; vsec.vsa_aclflags = lracl->lr_acl_flags; - if (zsb->z_fuid_replay == NULL) { + if (zfsvfs->z_fuid_replay == NULL) { fuidstart = (caddr_t)(lracl + 1) + xvatlen + ZIL_ACE_LENGTH(lracl->lr_acl_bytes); - zsb->z_fuid_replay = + zfsvfs->z_fuid_replay = zfs_replay_fuids(fuidstart, (void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt, lr->lr_uid, lr->lr_gid); @@ -404,15 +405,15 @@ bail: iput(ZTOI(dzp)); - if (zsb->z_fuid_replay) - zfs_fuid_info_free(zsb->z_fuid_replay); - zsb->z_fuid_replay = NULL; + if (zfsvfs->z_fuid_replay) + zfs_fuid_info_free(zfsvfs->z_fuid_replay); + zfsvfs->z_fuid_replay = NULL; return (error); } static int -zfs_replay_create(zfs_sb_t *zsb, lr_create_t *lr, boolean_t byteswap) +zfs_replay_create(zfsvfs_t *zfsvfs, lr_create_t *lr, boolean_t byteswap) { char *name = NULL; /* location determined later */ char *link; /* symlink content follows name */ @@ -437,7 +438,7 @@ zfs_replay_create(zfs_sb_t *zsb, lr_create_t *lr, boolean_t byteswap) } - if ((error = zfs_zget(zsb, lr->lr_doid, &dzp)) != 0) + if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0) return (error); objid = LR_FOID_GET_OBJ(lr->lr_foid); @@ -459,7 +460,7 @@ 
zfs_replay_create(zfs_sb_t *zsb, lr_create_t *lr, boolean_t byteswap) xva.xva_vattr.va_nblocks = lr->lr_gen; xva.xva_vattr.va_fsid = dnodesize; - error = dmu_object_info(zsb->z_os, objid, NULL); + error = dmu_object_info(zfsvfs->z_os, objid, NULL); if (error != ENOENT) goto out; @@ -476,7 +477,7 @@ zfs_replay_create(zfs_sb_t *zsb, lr_create_t *lr, boolean_t byteswap) (int)lr->lr_common.lrc_txtype != TX_MKDIR_ATTR && (int)lr->lr_common.lrc_txtype != TX_CREATE_ATTR) { start = (lr + 1); - zsb->z_fuid_replay = + zfsvfs->z_fuid_replay = zfs_replay_fuid_domain(start, &start, lr->lr_uid, lr->lr_gid); } @@ -487,7 +488,7 @@ zfs_replay_create(zfs_sb_t *zsb, lr_create_t *lr, boolean_t byteswap) xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize); zfs_replay_xvattr((lr_attr_t *)((caddr_t)lr + lrsize), &xva); start = (caddr_t)(lr + 1) + xvatlen; - zsb->z_fuid_replay = + zfsvfs->z_fuid_replay = zfs_replay_fuid_domain(start, &start, lr->lr_uid, lr->lr_gid); name = (char *)start; @@ -505,7 +506,7 @@ zfs_replay_create(zfs_sb_t *zsb, lr_create_t *lr, boolean_t byteswap) xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize); zfs_replay_xvattr((lr_attr_t *)((caddr_t)lr + lrsize), &xva); start = (caddr_t)(lr + 1) + xvatlen; - zsb->z_fuid_replay = + zfsvfs->z_fuid_replay = zfs_replay_fuid_domain(start, &start, lr->lr_uid, lr->lr_gid); name = (char *)start; @@ -537,14 +538,14 @@ out: iput(ZTOI(dzp)); - if (zsb->z_fuid_replay) - zfs_fuid_info_free(zsb->z_fuid_replay); - zsb->z_fuid_replay = NULL; + if (zfsvfs->z_fuid_replay) + zfs_fuid_info_free(zfsvfs->z_fuid_replay); + zfsvfs->z_fuid_replay = NULL; return (error); } static int -zfs_replay_remove(zfs_sb_t *zsb, lr_remove_t *lr, boolean_t byteswap) +zfs_replay_remove(zfsvfs_t *zfsvfs, lr_remove_t *lr, boolean_t byteswap) { char *name = (char *)(lr + 1); /* name follows lr_remove_t */ znode_t *dzp; @@ -554,7 +555,7 @@ zfs_replay_remove(zfs_sb_t *zsb, lr_remove_t *lr, boolean_t byteswap) if (byteswap) byteswap_uint64_array(lr, sizeof (*lr)); - if ((error = zfs_zget(zsb, lr->lr_doid, &dzp)) != 0) + if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0) return (error); if (lr->lr_common.lrc_txtype & TX_CI) @@ -577,7 +578,7 @@ zfs_replay_remove(zfs_sb_t *zsb, lr_remove_t *lr, boolean_t byteswap) } static int -zfs_replay_link(zfs_sb_t *zsb, lr_link_t *lr, boolean_t byteswap) +zfs_replay_link(zfsvfs_t *zfsvfs, lr_link_t *lr, boolean_t byteswap) { char *name = (char *)(lr + 1); /* name follows lr_link_t */ znode_t *dzp, *zp; @@ -587,10 +588,10 @@ zfs_replay_link(zfs_sb_t *zsb, lr_link_t *lr, boolean_t byteswap) if (byteswap) byteswap_uint64_array(lr, sizeof (*lr)); - if ((error = zfs_zget(zsb, lr->lr_doid, &dzp)) != 0) + if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0) return (error); - if ((error = zfs_zget(zsb, lr->lr_link_obj, &zp)) != 0) { + if ((error = zfs_zget(zfsvfs, lr->lr_link_obj, &zp)) != 0) { iput(ZTOI(dzp)); return (error); } @@ -607,7 +608,7 @@ zfs_replay_link(zfs_sb_t *zsb, lr_link_t *lr, boolean_t byteswap) } static int -zfs_replay_rename(zfs_sb_t *zsb, lr_rename_t *lr, boolean_t byteswap) +zfs_replay_rename(zfsvfs_t *zfsvfs, lr_rename_t *lr, boolean_t byteswap) { char *sname = (char *)(lr + 1); /* sname and tname follow lr_rename_t */ char *tname = sname + strlen(sname) + 1; @@ -618,10 +619,10 @@ zfs_replay_rename(zfs_sb_t *zsb, lr_rename_t *lr, boolean_t byteswap) if (byteswap) byteswap_uint64_array(lr, sizeof (*lr)); - if ((error = zfs_zget(zsb, lr->lr_sdoid, &sdzp)) != 0) + if ((error = zfs_zget(zfsvfs, lr->lr_sdoid, &sdzp)) != 0) return (error); - 
if ((error = zfs_zget(zsb, lr->lr_tdoid, &tdzp)) != 0) { + if ((error = zfs_zget(zfsvfs, lr->lr_tdoid, &tdzp)) != 0) { iput(ZTOI(sdzp)); return (error); } @@ -638,7 +639,7 @@ zfs_replay_rename(zfs_sb_t *zsb, lr_rename_t *lr, boolean_t byteswap) } static int -zfs_replay_write(zfs_sb_t *zsb, lr_write_t *lr, boolean_t byteswap) +zfs_replay_write(zfsvfs_t *zfsvfs, lr_write_t *lr, boolean_t byteswap) { char *data = (char *)(lr + 1); /* data follows lr_write_t */ znode_t *zp; @@ -648,7 +649,7 @@ zfs_replay_write(zfs_sb_t *zsb, lr_write_t *lr, boolean_t byteswap) if (byteswap) byteswap_uint64_array(lr, sizeof (*lr)); - if ((error = zfs_zget(zsb, lr->lr_foid, &zp)) != 0) { + if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0) { /* * As we can log writes out of order, it's possible the * file has been removed. In this case just drop the write @@ -671,10 +672,10 @@ zfs_replay_write(zfs_sb_t *zsb, lr_write_t *lr, boolean_t byteswap) * write needs to be there. So we write the whole block and * reduce the eof. This needs to be done within the single dmu * transaction created within vn_rdwr -> zfs_write. So a possible - * new end of file is passed through in zsb->z_replay_eof + * new end of file is passed through in zfsvfs->z_replay_eof */ - zsb->z_replay_eof = 0; /* 0 means don't change end of file */ + zfsvfs->z_replay_eof = 0; /* 0 means don't change end of file */ /* If it's a dmu_sync() block, write the whole block */ if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) { @@ -684,7 +685,7 @@ zfs_replay_write(zfs_sb_t *zsb, lr_write_t *lr, boolean_t byteswap) length = blocksize; } if (zp->z_size < eod) - zsb->z_replay_eof = eod; + zfsvfs->z_replay_eof = eod; } written = zpl_write_common(ZTOI(zp), data, length, &offset, @@ -695,7 +696,7 @@ zfs_replay_write(zfs_sb_t *zsb, lr_write_t *lr, boolean_t byteswap) error = SET_ERROR(EIO); /* short write */ iput(ZTOI(zp)); - zsb->z_replay_eof = 0; /* safety */ + zfsvfs->z_replay_eof = 0; /* safety */ return (error); } @@ -707,7 +708,7 @@ zfs_replay_write(zfs_sb_t *zsb, lr_write_t *lr, boolean_t byteswap) * the file is grown. 
*/ static int -zfs_replay_write2(zfs_sb_t *zsb, lr_write_t *lr, boolean_t byteswap) +zfs_replay_write2(zfsvfs_t *zfsvfs, lr_write_t *lr, boolean_t byteswap) { znode_t *zp; int error; @@ -716,13 +717,13 @@ zfs_replay_write2(zfs_sb_t *zsb, lr_write_t *lr, boolean_t byteswap) if (byteswap) byteswap_uint64_array(lr, sizeof (*lr)); - if ((error = zfs_zget(zsb, lr->lr_foid, &zp)) != 0) + if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0) return (error); top: end = lr->lr_offset + lr->lr_length; if (end > zp->z_size) { - dmu_tx_t *tx = dmu_tx_create(zsb->z_os); + dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os); zp->z_size = end; dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); @@ -737,11 +738,11 @@ top: dmu_tx_abort(tx); return (error); } - (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zsb), + (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs), (void *)&zp->z_size, sizeof (uint64_t), tx); /* Ensure the replayed seq is updated */ - (void) zil_replaying(zsb->z_log, tx); + (void) zil_replaying(zfsvfs->z_log, tx); dmu_tx_commit(tx); } @@ -752,7 +753,7 @@ top: } static int -zfs_replay_truncate(zfs_sb_t *zsb, lr_truncate_t *lr, boolean_t byteswap) +zfs_replay_truncate(zfsvfs_t *zfsvfs, lr_truncate_t *lr, boolean_t byteswap) { znode_t *zp; flock64_t fl; @@ -761,7 +762,7 @@ zfs_replay_truncate(zfs_sb_t *zsb, lr_truncate_t *lr, boolean_t byteswap) if (byteswap) byteswap_uint64_array(lr, sizeof (*lr)); - if ((error = zfs_zget(zsb, lr->lr_foid, &zp)) != 0) + if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0) return (error); bzero(&fl, sizeof (fl)); @@ -779,7 +780,7 @@ zfs_replay_truncate(zfs_sb_t *zsb, lr_truncate_t *lr, boolean_t byteswap) } static int -zfs_replay_setattr(zfs_sb_t *zsb, lr_setattr_t *lr, boolean_t byteswap) +zfs_replay_setattr(zfsvfs_t *zfsvfs, lr_setattr_t *lr, boolean_t byteswap) { znode_t *zp; xvattr_t xva; @@ -792,11 +793,11 @@ zfs_replay_setattr(zfs_sb_t *zsb, lr_setattr_t *lr, boolean_t byteswap) byteswap_uint64_array(lr, sizeof (*lr)); if ((lr->lr_mask & ATTR_XVATTR) && - zsb->z_version >= ZPL_VERSION_INITIAL) + zfsvfs->z_version >= ZPL_VERSION_INITIAL) zfs_replay_swap_attrs((lr_attr_t *)(lr + 1)); } - if ((error = zfs_zget(zsb, lr->lr_foid, &zp)) != 0) + if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0) return (error); zfs_init_vattr(vap, lr->lr_mask, lr->lr_mode, @@ -820,20 +821,20 @@ zfs_replay_setattr(zfs_sb_t *zsb, lr_setattr_t *lr, boolean_t byteswap) } else xva.xva_vattr.va_mask &= ~ATTR_XVATTR; - zsb->z_fuid_replay = zfs_replay_fuid_domain(start, &start, + zfsvfs->z_fuid_replay = zfs_replay_fuid_domain(start, &start, lr->lr_uid, lr->lr_gid); error = zfs_setattr(ZTOI(zp), vap, 0, kcred); - zfs_fuid_info_free(zsb->z_fuid_replay); - zsb->z_fuid_replay = NULL; + zfs_fuid_info_free(zfsvfs->z_fuid_replay); + zfsvfs->z_fuid_replay = NULL; iput(ZTOI(zp)); return (error); } static int -zfs_replay_acl_v0(zfs_sb_t *zsb, lr_acl_v0_t *lr, boolean_t byteswap) +zfs_replay_acl_v0(zfsvfs_t *zfsvfs, lr_acl_v0_t *lr, boolean_t byteswap) { ace_t *ace = (ace_t *)(lr + 1); /* ace array follows lr_acl_t */ vsecattr_t vsa; @@ -845,7 +846,7 @@ zfs_replay_acl_v0(zfs_sb_t *zsb, lr_acl_v0_t *lr, boolean_t byteswap) zfs_oldace_byteswap(ace, lr->lr_aclcnt); } - if ((error = zfs_zget(zsb, lr->lr_foid, &zp)) != 0) + if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0) return (error); bzero(&vsa, sizeof (vsa)); @@ -877,7 +878,7 @@ zfs_replay_acl_v0(zfs_sb_t *zsb, lr_acl_v0_t *lr, boolean_t byteswap) * */ static int -zfs_replay_acl(zfs_sb_t *zsb, lr_acl_t *lr, boolean_t byteswap) +zfs_replay_acl(zfsvfs_t 
*zfsvfs, lr_acl_t *lr, boolean_t byteswap) { ace_t *ace = (ace_t *)(lr + 1); vsecattr_t vsa; @@ -894,7 +895,7 @@ zfs_replay_acl(zfs_sb_t *zsb, lr_acl_t *lr, boolean_t byteswap) } } - if ((error = zfs_zget(zsb, lr->lr_foid, &zp)) != 0) + if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0) return (error); bzero(&vsa, sizeof (vsa)); @@ -908,17 +909,17 @@ zfs_replay_acl(zfs_sb_t *zsb, lr_acl_t *lr, boolean_t byteswap) void *fuidstart = (caddr_t)ace + ZIL_ACE_LENGTH(lr->lr_acl_bytes); - zsb->z_fuid_replay = + zfsvfs->z_fuid_replay = zfs_replay_fuids(fuidstart, &fuidstart, lr->lr_fuidcnt, lr->lr_domcnt, 0, 0); } error = zfs_setsecattr(ZTOI(zp), &vsa, 0, kcred); - if (zsb->z_fuid_replay) - zfs_fuid_info_free(zsb->z_fuid_replay); + if (zfsvfs->z_fuid_replay) + zfs_fuid_info_free(zfsvfs->z_fuid_replay); - zsb->z_fuid_replay = NULL; + zfsvfs->z_fuid_replay = NULL; iput(ZTOI(zp)); return (error); diff --git a/module/zfs/zfs_sa.c b/module/zfs/zfs_sa.c index e32786902..7d9970cb8 100644 --- a/module/zfs/zfs_sa.c +++ b/module/zfs/zfs_sa.c @@ -120,13 +120,13 @@ zfs_sa_symlink(znode_t *zp, char *link, int len, dmu_tx_t *tx) void zfs_sa_get_scanstamp(znode_t *zp, xvattr_t *xvap) { - zfs_sb_t *zsb = ZTOZSB(zp); + zfsvfs_t *zfsvfs = ZTOZSB(zp); xoptattr_t *xoap; ASSERT(MUTEX_HELD(&zp->z_lock)); VERIFY((xoap = xva_getxoptattr(xvap)) != NULL); if (zp->z_is_sa) { - if (sa_lookup(zp->z_sa_hdl, SA_ZPL_SCANSTAMP(zsb), + if (sa_lookup(zp->z_sa_hdl, SA_ZPL_SCANSTAMP(zfsvfs), &xoap->xoa_av_scanstamp, sizeof (xoap->xoa_av_scanstamp)) != 0) return; @@ -154,13 +154,13 @@ zfs_sa_get_scanstamp(znode_t *zp, xvattr_t *xvap) void zfs_sa_set_scanstamp(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx) { - zfs_sb_t *zsb = ZTOZSB(zp); + zfsvfs_t *zfsvfs = ZTOZSB(zp); xoptattr_t *xoap; ASSERT(MUTEX_HELD(&zp->z_lock)); VERIFY((xoap = xva_getxoptattr(xvap)) != NULL); if (zp->z_is_sa) - VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SCANSTAMP(zsb), + VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SCANSTAMP(zfsvfs), &xoap->xoa_av_scanstamp, sizeof (xoap->xoa_av_scanstamp), tx)); else { @@ -177,7 +177,7 @@ zfs_sa_set_scanstamp(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx) xoap->xoa_av_scanstamp, sizeof (xoap->xoa_av_scanstamp)); zp->z_pflags |= ZFS_BONUS_SCANSTAMP; - VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_FLAGS(zsb), + VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs), &zp->z_pflags, sizeof (uint64_t), tx)); } } @@ -185,7 +185,7 @@ zfs_sa_set_scanstamp(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx) int zfs_sa_get_xattr(znode_t *zp) { - zfs_sb_t *zsb = ZTOZSB(zp); + zfsvfs_t *zfsvfs = ZTOZSB(zp); char *obj; int size; int error; @@ -194,7 +194,7 @@ zfs_sa_get_xattr(znode_t *zp) ASSERT(!zp->z_xattr_cached); ASSERT(zp->z_is_sa); - error = sa_size(zp->z_sa_hdl, SA_ZPL_DXATTR(zsb), &size); + error = sa_size(zp->z_sa_hdl, SA_ZPL_DXATTR(zfsvfs), &size); if (error) { if (error == ENOENT) return nvlist_alloc(&zp->z_xattr_cached, @@ -205,7 +205,7 @@ zfs_sa_get_xattr(znode_t *zp) obj = vmem_alloc(size, KM_SLEEP); - error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DXATTR(zsb), obj, size); + error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DXATTR(zfsvfs), obj, size); if (error == 0) error = nvlist_unpack(obj, size, &zp->z_xattr_cached, KM_SLEEP); @@ -217,7 +217,7 @@ zfs_sa_get_xattr(znode_t *zp) int zfs_sa_set_xattr(znode_t *zp) { - zfs_sb_t *zsb = ZTOZSB(zp); + zfsvfs_t *zfsvfs = ZTOZSB(zp); dmu_tx_t *tx; char *obj; size_t size; @@ -240,7 +240,7 @@ zfs_sa_set_xattr(znode_t *zp) if (error) goto out_free; - tx = dmu_tx_create(zsb->z_os); + tx = dmu_tx_create(zfsvfs->z_os); 
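	/*
	 * Typical DMU transaction sequence, as in the surrounding hunks:
	 * create the tx, declare every hold, then dmu_tx_assign(); a tx
	 * whose assignment fails must be aborted, and only a successfully
	 * assigned tx may be committed.
	 */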
dmu_tx_hold_sa_create(tx, size); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE); @@ -248,7 +248,7 @@ zfs_sa_set_xattr(znode_t *zp) if (error) { dmu_tx_abort(tx); } else { - VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_DXATTR(zsb), + VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_DXATTR(zfsvfs), obj, size, tx)); dmu_tx_commit(tx); } @@ -271,7 +271,7 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx) { dmu_buf_t *db = sa_get_db(hdl); znode_t *zp = sa_get_userdata(hdl); - zfs_sb_t *zsb = ZTOZSB(zp); + zfsvfs_t *zfsvfs = ZTOZSB(zp); int count = 0; sa_bulk_attr_t *bulk, *sa_attrs; zfs_acl_locator_cb_t locate = { 0 }; @@ -309,18 +309,18 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx) /* First do a bulk query of the attributes that aren't cached */ bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * 20, KM_SLEEP); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL, &atime, 16); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zsb), NULL, &crtime, 16); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL, &mode, 8); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zsb), NULL, &parent, 8); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_XATTR(zsb), NULL, &xattr, 8); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zsb), NULL, &rdev, 8); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL, &uid, 8); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL, &gid, 8); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL, &tmp_gen, 8); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &crtime, 16); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_XATTR(zfsvfs), NULL, &xattr, 8); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL, &rdev, 8); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL, &uid, 8); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL, &gid, 8); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL, &tmp_gen, 8); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL, &znode_acl, 88); if (sa_bulk_lookup_locked(hdl, bulk, count) != 0) { @@ -334,42 +334,43 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx) */ count = 0; sa_attrs = kmem_zalloc(sizeof (sa_bulk_attr_t) * 20, KM_SLEEP); - SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MODE(zsb), NULL, &mode, 8); - SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SIZE(zsb), NULL, + SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8); + SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SIZE(zfsvfs), NULL, &zp->z_size, 8); - SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_GEN(zsb), NULL, &tmp_gen, 8); - SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_UID(zsb), NULL, &uid, 8); - SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_GID(zsb), NULL, &gid, 8); - SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_PARENT(zsb), + SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_GEN(zfsvfs), + NULL, &tmp_gen, 8); + SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_UID(zfsvfs), NULL, &uid, 8); + SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_GID(zfsvfs), NULL, &gid, 8); + SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8); - 
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_FLAGS(zsb), NULL, + SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_FLAGS(zfsvfs), NULL, &zp->z_pflags, 8); - SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_ATIME(zsb), NULL, + SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16); - SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MTIME(zsb), NULL, + SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16); - SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_CTIME(zsb), NULL, + SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16); - SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_CRTIME(zsb), NULL, + SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_CRTIME(zfsvfs), NULL, &crtime, 16); links = ZTOI(zp)->i_nlink; - SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_LINKS(zsb), NULL, + SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8); if (S_ISBLK(ZTOI(zp)->i_mode) || S_ISCHR(ZTOI(zp)->i_mode)) - SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_RDEV(zsb), NULL, + SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_RDEV(zfsvfs), NULL, &rdev, 8); - SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_DACL_COUNT(zsb), NULL, + SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_DACL_COUNT(zfsvfs), NULL, &zp->z_acl_cached->z_acl_count, 8); if (zp->z_acl_cached->z_version < ZFS_ACL_VERSION_FUID) zfs_acl_xform(zp, zp->z_acl_cached, CRED()); locate.cb_aclp = zp->z_acl_cached; - SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_DACL_ACES(zsb), + SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_DACL_ACES(zfsvfs), zfs_acl_data_locator, &locate, zp->z_acl_cached->z_acl_bytes); if (xattr) - SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_XATTR(zsb), + SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_XATTR(zfsvfs), NULL, &xattr, 8); /* if scanstamp then add scanstamp */ @@ -377,7 +378,7 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx) if (zp->z_pflags & ZFS_BONUS_SCANSTAMP) { bcopy((caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE, scanstamp, AV_SCANSTAMP_SZ); - SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SCANSTAMP(zsb), + SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SCANSTAMP(zfsvfs), NULL, scanstamp, AV_SCANSTAMP_SZ); zp->z_pflags &= ~ZFS_BONUS_SCANSTAMP; } @@ -386,7 +387,7 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx) VERIFY(sa_replace_all_by_template_locked(hdl, sa_attrs, count, tx) == 0); if (znode_acl.z_acl_extern_obj) - VERIFY(0 == dmu_object_free(zsb->z_os, + VERIFY(0 == dmu_object_free(zfsvfs->z_os, znode_acl.z_acl_extern_obj, tx)); zp->z_is_sa = B_TRUE; diff --git a/module/zfs/zfs_vfsops.c b/module/zfs/zfs_vfsops.c index 3135319cb..9f01d30d3 100644 --- a/module/zfs/zfs_vfsops.c +++ b/module/zfs/zfs_vfsops.c @@ -68,11 +68,205 @@ #include <sys/zpl.h> #include "zfs_comutil.h" +enum { + TOKEN_RO, + TOKEN_RW, + TOKEN_SETUID, + TOKEN_NOSETUID, + TOKEN_EXEC, + TOKEN_NOEXEC, + TOKEN_DEVICES, + TOKEN_NODEVICES, + TOKEN_DIRXATTR, + TOKEN_SAXATTR, + TOKEN_XATTR, + TOKEN_NOXATTR, + TOKEN_ATIME, + TOKEN_NOATIME, + TOKEN_RELATIME, + TOKEN_NORELATIME, + TOKEN_NBMAND, + TOKEN_NONBMAND, + TOKEN_MNTPOINT, + TOKEN_LAST, +}; + +static const match_table_t zpl_tokens = { + { TOKEN_RO, MNTOPT_RO }, + { TOKEN_RW, MNTOPT_RW }, + { TOKEN_SETUID, MNTOPT_SETUID }, + { TOKEN_NOSETUID, MNTOPT_NOSETUID }, + { TOKEN_EXEC, MNTOPT_EXEC }, + { TOKEN_NOEXEC, MNTOPT_NOEXEC }, + { TOKEN_DEVICES, MNTOPT_DEVICES }, + { TOKEN_NODEVICES, MNTOPT_NODEVICES }, + { TOKEN_DIRXATTR, MNTOPT_DIRXATTR }, + { TOKEN_SAXATTR, MNTOPT_SAXATTR }, + { TOKEN_XATTR, MNTOPT_XATTR }, + { TOKEN_NOXATTR, MNTOPT_NOXATTR }, + { TOKEN_ATIME, MNTOPT_ATIME }, + { TOKEN_NOATIME, MNTOPT_NOATIME }, + { TOKEN_RELATIME, 
MNTOPT_RELATIME }, + { TOKEN_NORELATIME, MNTOPT_NORELATIME }, + { TOKEN_NBMAND, MNTOPT_NBMAND }, + { TOKEN_NONBMAND, MNTOPT_NONBMAND }, + { TOKEN_MNTPOINT, MNTOPT_MNTPOINT "=%s" }, + { TOKEN_LAST, NULL }, +}; + +static void +zfsvfs_vfs_free(vfs_t *vfsp) +{ + if (vfsp != NULL) { + if (vfsp->vfs_mntpoint != NULL) + strfree(vfsp->vfs_mntpoint); + + kmem_free(vfsp, sizeof (vfs_t)); + } +} + +static int +zfsvfs_parse_option(char *option, int token, substring_t *args, vfs_t *vfsp) +{ + switch (token) { + case TOKEN_RO: + vfsp->vfs_readonly = B_TRUE; + vfsp->vfs_do_readonly = B_TRUE; + break; + case TOKEN_RW: + vfsp->vfs_readonly = B_FALSE; + vfsp->vfs_do_readonly = B_TRUE; + break; + case TOKEN_SETUID: + vfsp->vfs_setuid = B_TRUE; + vfsp->vfs_do_setuid = B_TRUE; + break; + case TOKEN_NOSETUID: + vfsp->vfs_setuid = B_FALSE; + vfsp->vfs_do_setuid = B_TRUE; + break; + case TOKEN_EXEC: + vfsp->vfs_exec = B_TRUE; + vfsp->vfs_do_exec = B_TRUE; + break; + case TOKEN_NOEXEC: + vfsp->vfs_exec = B_FALSE; + vfsp->vfs_do_exec = B_TRUE; + break; + case TOKEN_DEVICES: + vfsp->vfs_devices = B_TRUE; + vfsp->vfs_do_devices = B_TRUE; + break; + case TOKEN_NODEVICES: + vfsp->vfs_devices = B_FALSE; + vfsp->vfs_do_devices = B_TRUE; + break; + case TOKEN_DIRXATTR: + vfsp->vfs_xattr = ZFS_XATTR_DIR; + vfsp->vfs_do_xattr = B_TRUE; + break; + case TOKEN_SAXATTR: + vfsp->vfs_xattr = ZFS_XATTR_SA; + vfsp->vfs_do_xattr = B_TRUE; + break; + case TOKEN_XATTR: + vfsp->vfs_xattr = ZFS_XATTR_DIR; + vfsp->vfs_do_xattr = B_TRUE; + break; + case TOKEN_NOXATTR: + vfsp->vfs_xattr = ZFS_XATTR_OFF; + vfsp->vfs_do_xattr = B_TRUE; + break; + case TOKEN_ATIME: + vfsp->vfs_atime = B_TRUE; + vfsp->vfs_do_atime = B_TRUE; + break; + case TOKEN_NOATIME: + vfsp->vfs_atime = B_FALSE; + vfsp->vfs_do_atime = B_TRUE; + break; + case TOKEN_RELATIME: + vfsp->vfs_relatime = B_TRUE; + vfsp->vfs_do_relatime = B_TRUE; + break; + case TOKEN_NORELATIME: + vfsp->vfs_relatime = B_FALSE; + vfsp->vfs_do_relatime = B_TRUE; + break; + case TOKEN_NBMAND: + vfsp->vfs_nbmand = B_TRUE; + vfsp->vfs_do_nbmand = B_TRUE; + break; + case TOKEN_NONBMAND: + vfsp->vfs_nbmand = B_FALSE; + vfsp->vfs_do_nbmand = B_TRUE; + break; + case TOKEN_MNTPOINT: + vfsp->vfs_mntpoint = match_strdup(&args[0]); + if (vfsp->vfs_mntpoint == NULL) + return (SET_ERROR(ENOMEM)); + + break; + default: + break; + } + + return (0); +} + +/* + * Parse the raw mntopts and return a vfs_t describing the options. + */ +static int +zfsvfs_parse_options(char *mntopts, vfs_t **vfsp) +{ + vfs_t *tmp_vfsp; + int error; + + tmp_vfsp = kmem_zalloc(sizeof (vfs_t), KM_SLEEP); + + if (mntopts != NULL) { + substring_t args[MAX_OPT_ARGS]; + char *tmp_mntopts, *p, *t; + int token; + + tmp_mntopts = t = strdup(mntopts); + if (tmp_mntopts == NULL) + return (SET_ERROR(ENOMEM)); + + while ((p = strsep(&t, ",")) != NULL) { + if (!*p) + continue; + + args[0].to = args[0].from = NULL; + token = match_token(p, zpl_tokens, args); + error = zfsvfs_parse_option(p, token, args, tmp_vfsp); + if (error) { + strfree(tmp_mntopts); + zfsvfs_vfs_free(tmp_vfsp); + return (error); + } + } + + strfree(tmp_mntopts); + } + + *vfsp = tmp_vfsp; + + return (0); +} + +boolean_t +zfs_is_readonly(zfsvfs_t *zfsvfs) +{ + return (!!(zfsvfs->z_sb->s_flags & MS_RDONLY)); +} + /*ARGSUSED*/ int zfs_sync(struct super_block *sb, int wait, cred_t *cr) { - zfs_sb_t *zsb = sb->s_fs_info; + zfsvfs_t *zfsvfs = sb->s_fs_info; /* * Data integrity is job one. 
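The zfsvfs_parse_options()/zfsvfs_parse_option() pair added above turns the raw comma-separated mount string into a heap-allocated vfs_t of vfs_do_*/vfs_* pairs that the property callbacks later apply. A small usage sketch of that interface (the option string is only an example; real callers pass the data handed in by mount(2), and the literal names come from the MNTOPT_* defines):

	char opts[] = "ro,noatime,saxattr";
	vfs_t *vfsp = NULL;
	int error;

	error = zfsvfs_parse_options(opts, &vfsp);
	if (error == 0) {
		/* each recognized token sets both the value and its do_ flag */
		ASSERT(vfsp->vfs_do_readonly && vfsp->vfs_readonly);
		ASSERT(vfsp->vfs_do_atime && !vfsp->vfs_atime);
		ASSERT(vfsp->vfs_do_xattr && vfsp->vfs_xattr == ZFS_XATTR_SA);

		/* the caller owns the returned vfs_t */
		zfsvfs_vfs_free(vfsp);
	}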
We don't want a compromised kernel @@ -88,28 +282,28 @@ zfs_sync(struct super_block *sb, int wait, cred_t *cr) if (!wait) return (0); - if (zsb != NULL) { + if (zfsvfs != NULL) { /* * Sync a specific filesystem. */ dsl_pool_t *dp; - ZFS_ENTER(zsb); - dp = dmu_objset_pool(zsb->z_os); + ZFS_ENTER(zfsvfs); + dp = dmu_objset_pool(zfsvfs->z_os); /* * If the system is shutting down, then skip any * filesystems which may exist on a suspended pool. */ if (spa_suspended(dp->dp_spa)) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (0); } - if (zsb->z_log != NULL) - zil_commit(zsb->z_log, 0); + if (zfsvfs->z_log != NULL) + zil_commit(zfsvfs->z_log, 0); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); } else { /* * Sync all ZFS filesystems. This is what happens when you @@ -121,61 +315,53 @@ zfs_sync(struct super_block *sb, int wait, cred_t *cr) return (0); } -EXPORT_SYMBOL(zfs_sync); - -boolean_t -zfs_is_readonly(zfs_sb_t *zsb) -{ - return (!!(zsb->z_sb->s_flags & MS_RDONLY)); -} -EXPORT_SYMBOL(zfs_is_readonly); static void atime_changed_cb(void *arg, uint64_t newval) { - ((zfs_sb_t *)arg)->z_atime = newval; + ((zfsvfs_t *)arg)->z_atime = newval; } static void relatime_changed_cb(void *arg, uint64_t newval) { - ((zfs_sb_t *)arg)->z_relatime = newval; + ((zfsvfs_t *)arg)->z_relatime = newval; } static void xattr_changed_cb(void *arg, uint64_t newval) { - zfs_sb_t *zsb = arg; + zfsvfs_t *zfsvfs = arg; if (newval == ZFS_XATTR_OFF) { - zsb->z_flags &= ~ZSB_XATTR; + zfsvfs->z_flags &= ~ZSB_XATTR; } else { - zsb->z_flags |= ZSB_XATTR; + zfsvfs->z_flags |= ZSB_XATTR; if (newval == ZFS_XATTR_SA) - zsb->z_xattr_sa = B_TRUE; + zfsvfs->z_xattr_sa = B_TRUE; else - zsb->z_xattr_sa = B_FALSE; + zfsvfs->z_xattr_sa = B_FALSE; } } static void acltype_changed_cb(void *arg, uint64_t newval) { - zfs_sb_t *zsb = arg; + zfsvfs_t *zfsvfs = arg; switch (newval) { case ZFS_ACLTYPE_OFF: - zsb->z_acl_type = ZFS_ACLTYPE_OFF; - zsb->z_sb->s_flags &= ~MS_POSIXACL; + zfsvfs->z_acl_type = ZFS_ACLTYPE_OFF; + zfsvfs->z_sb->s_flags &= ~MS_POSIXACL; break; case ZFS_ACLTYPE_POSIXACL: #ifdef CONFIG_FS_POSIX_ACL - zsb->z_acl_type = ZFS_ACLTYPE_POSIXACL; - zsb->z_sb->s_flags |= MS_POSIXACL; + zfsvfs->z_acl_type = ZFS_ACLTYPE_POSIXACL; + zfsvfs->z_sb->s_flags |= MS_POSIXACL; #else - zsb->z_acl_type = ZFS_ACLTYPE_OFF; - zsb->z_sb->s_flags &= ~MS_POSIXACL; + zfsvfs->z_acl_type = ZFS_ACLTYPE_OFF; + zfsvfs->z_sb->s_flags &= ~MS_POSIXACL; #endif /* CONFIG_FS_POSIX_ACL */ break; default: @@ -186,19 +372,19 @@ acltype_changed_cb(void *arg, uint64_t newval) static void blksz_changed_cb(void *arg, uint64_t newval) { - zfs_sb_t *zsb = arg; - ASSERT3U(newval, <=, spa_maxblocksize(dmu_objset_spa(zsb->z_os))); + zfsvfs_t *zfsvfs = arg; + ASSERT3U(newval, <=, spa_maxblocksize(dmu_objset_spa(zfsvfs->z_os))); ASSERT3U(newval, >=, SPA_MINBLOCKSIZE); ASSERT(ISP2(newval)); - zsb->z_max_blksz = newval; + zfsvfs->z_max_blksz = newval; } static void readonly_changed_cb(void *arg, uint64_t newval) { - zfs_sb_t *zsb = arg; - struct super_block *sb = zsb->z_sb; + zfsvfs_t *zfsvfs = arg; + struct super_block *sb = zfsvfs->z_sb; if (sb == NULL) return; @@ -227,8 +413,8 @@ exec_changed_cb(void *arg, uint64_t newval) static void nbmand_changed_cb(void *arg, uint64_t newval) { - zfs_sb_t *zsb = arg; - struct super_block *sb = zsb->z_sb; + zfsvfs_t *zfsvfs = arg; + struct super_block *sb = zfsvfs->z_sb; if (sb == NULL) return; @@ -242,31 +428,33 @@ nbmand_changed_cb(void *arg, uint64_t newval) static void snapdir_changed_cb(void *arg, uint64_t newval) { - ((zfs_sb_t 
*)arg)->z_show_ctldir = newval; + ((zfsvfs_t *)arg)->z_show_ctldir = newval; } static void vscan_changed_cb(void *arg, uint64_t newval) { - ((zfs_sb_t *)arg)->z_vscan = newval; + ((zfsvfs_t *)arg)->z_vscan = newval; } static void acl_inherit_changed_cb(void *arg, uint64_t newval) { - ((zfs_sb_t *)arg)->z_acl_inherit = newval; + ((zfsvfs_t *)arg)->z_acl_inherit = newval; } -int -zfs_register_callbacks(zfs_sb_t *zsb) +static int +zfs_register_callbacks(vfs_t *vfsp) { struct dsl_dataset *ds = NULL; - objset_t *os = zsb->z_os; - zfs_mntopts_t *zmo = zsb->z_mntopts; + objset_t *os = NULL; + zfsvfs_t *zfsvfs = NULL; int error = 0; - ASSERT(zsb); - ASSERT(zmo); + ASSERT(vfsp); + zfsvfs = vfsp->vfs_data; + ASSERT(zfsvfs); + os = zfsvfs->z_os; /* * The act of registering our callbacks will destroy any mount @@ -274,9 +462,9 @@ zfs_register_callbacks(zfs_sb_t *zsb) * of mount options, we stash away the current values and * restore them after we register the callbacks. */ - if (zfs_is_readonly(zsb) || !spa_writeable(dmu_objset_spa(os))) { - zmo->z_do_readonly = B_TRUE; - zmo->z_readonly = B_TRUE; + if (zfs_is_readonly(zfsvfs) || !spa_writeable(dmu_objset_spa(os))) { + vfsp->vfs_do_readonly = B_TRUE; + vfsp->vfs_readonly = B_TRUE; } /* @@ -289,31 +477,32 @@ zfs_register_callbacks(zfs_sb_t *zsb) ds = dmu_objset_ds(os); dsl_pool_config_enter(dmu_objset_pool(os), FTAG); error = dsl_prop_register(ds, - zfs_prop_to_name(ZFS_PROP_ATIME), atime_changed_cb, zsb); + zfs_prop_to_name(ZFS_PROP_ATIME), atime_changed_cb, zfsvfs); error = error ? error : dsl_prop_register(ds, - zfs_prop_to_name(ZFS_PROP_RELATIME), relatime_changed_cb, zsb); + zfs_prop_to_name(ZFS_PROP_RELATIME), relatime_changed_cb, zfsvfs); error = error ? error : dsl_prop_register(ds, - zfs_prop_to_name(ZFS_PROP_XATTR), xattr_changed_cb, zsb); + zfs_prop_to_name(ZFS_PROP_XATTR), xattr_changed_cb, zfsvfs); error = error ? error : dsl_prop_register(ds, - zfs_prop_to_name(ZFS_PROP_RECORDSIZE), blksz_changed_cb, zsb); + zfs_prop_to_name(ZFS_PROP_RECORDSIZE), blksz_changed_cb, zfsvfs); error = error ? error : dsl_prop_register(ds, - zfs_prop_to_name(ZFS_PROP_READONLY), readonly_changed_cb, zsb); + zfs_prop_to_name(ZFS_PROP_READONLY), readonly_changed_cb, zfsvfs); error = error ? error : dsl_prop_register(ds, - zfs_prop_to_name(ZFS_PROP_DEVICES), devices_changed_cb, zsb); + zfs_prop_to_name(ZFS_PROP_DEVICES), devices_changed_cb, zfsvfs); error = error ? error : dsl_prop_register(ds, - zfs_prop_to_name(ZFS_PROP_SETUID), setuid_changed_cb, zsb); + zfs_prop_to_name(ZFS_PROP_SETUID), setuid_changed_cb, zfsvfs); error = error ? error : dsl_prop_register(ds, - zfs_prop_to_name(ZFS_PROP_EXEC), exec_changed_cb, zsb); + zfs_prop_to_name(ZFS_PROP_EXEC), exec_changed_cb, zfsvfs); error = error ? error : dsl_prop_register(ds, - zfs_prop_to_name(ZFS_PROP_SNAPDIR), snapdir_changed_cb, zsb); + zfs_prop_to_name(ZFS_PROP_SNAPDIR), snapdir_changed_cb, zfsvfs); error = error ? error : dsl_prop_register(ds, - zfs_prop_to_name(ZFS_PROP_ACLTYPE), acltype_changed_cb, zsb); + zfs_prop_to_name(ZFS_PROP_ACLTYPE), acltype_changed_cb, zfsvfs); error = error ? error : dsl_prop_register(ds, - zfs_prop_to_name(ZFS_PROP_ACLINHERIT), acl_inherit_changed_cb, zsb); + zfs_prop_to_name(ZFS_PROP_ACLINHERIT), acl_inherit_changed_cb, + zfsvfs); error = error ? error : dsl_prop_register(ds, - zfs_prop_to_name(ZFS_PROP_VSCAN), vscan_changed_cb, zsb); + zfs_prop_to_name(ZFS_PROP_VSCAN), vscan_changed_cb, zfsvfs); error = error ? 
error : dsl_prop_register(ds, - zfs_prop_to_name(ZFS_PROP_NBMAND), nbmand_changed_cb, zsb); + zfs_prop_to_name(ZFS_PROP_NBMAND), nbmand_changed_cb, zfsvfs); dsl_pool_config_exit(dmu_objset_pool(os), FTAG); if (error) goto unregister; @@ -321,30 +510,29 @@ zfs_register_callbacks(zfs_sb_t *zsb) /* * Invoke our callbacks to restore temporary mount options. */ - if (zmo->z_do_readonly) - readonly_changed_cb(zsb, zmo->z_readonly); - if (zmo->z_do_setuid) - setuid_changed_cb(zsb, zmo->z_setuid); - if (zmo->z_do_exec) - exec_changed_cb(zsb, zmo->z_exec); - if (zmo->z_do_devices) - devices_changed_cb(zsb, zmo->z_devices); - if (zmo->z_do_xattr) - xattr_changed_cb(zsb, zmo->z_xattr); - if (zmo->z_do_atime) - atime_changed_cb(zsb, zmo->z_atime); - if (zmo->z_do_relatime) - relatime_changed_cb(zsb, zmo->z_relatime); - if (zmo->z_do_nbmand) - nbmand_changed_cb(zsb, zmo->z_nbmand); + if (vfsp->vfs_do_readonly) + readonly_changed_cb(zfsvfs, vfsp->vfs_readonly); + if (vfsp->vfs_do_setuid) + setuid_changed_cb(zfsvfs, vfsp->vfs_setuid); + if (vfsp->vfs_do_exec) + exec_changed_cb(zfsvfs, vfsp->vfs_exec); + if (vfsp->vfs_do_devices) + devices_changed_cb(zfsvfs, vfsp->vfs_devices); + if (vfsp->vfs_do_xattr) + xattr_changed_cb(zfsvfs, vfsp->vfs_xattr); + if (vfsp->vfs_do_atime) + atime_changed_cb(zfsvfs, vfsp->vfs_atime); + if (vfsp->vfs_do_relatime) + relatime_changed_cb(zfsvfs, vfsp->vfs_relatime); + if (vfsp->vfs_do_nbmand) + nbmand_changed_cb(zfsvfs, vfsp->vfs_nbmand); return (0); unregister: - dsl_prop_unregister_all(ds, zsb); + dsl_prop_unregister_all(ds, zfsvfs); return (error); } -EXPORT_SYMBOL(zfs_register_callbacks); static int zfs_space_delta_cb(dmu_object_type_t bonustype, void *data, @@ -410,7 +598,7 @@ zfs_space_delta_cb(dmu_object_type_t bonustype, void *data, } static void -fuidstr_to_sid(zfs_sb_t *zsb, const char *fuidstr, +fuidstr_to_sid(zfsvfs_t *zfsvfs, const char *fuidstr, char *domainbuf, int buflen, uid_t *ridp) { uint64_t fuid; @@ -418,7 +606,7 @@ fuidstr_to_sid(zfs_sb_t *zsb, const char *fuidstr, fuid = strtonum(fuidstr, NULL); - domain = zfs_fuid_find_by_idx(zsb, FUID_INDEX(fuid)); + domain = zfs_fuid_find_by_idx(zfsvfs, FUID_INDEX(fuid)); if (domain) (void) strlcpy(domainbuf, domain, buflen); else @@ -427,7 +615,7 @@ fuidstr_to_sid(zfs_sb_t *zsb, const char *fuidstr, } static uint64_t -zfs_userquota_prop_to_obj(zfs_sb_t *zsb, zfs_userquota_prop_t type) +zfs_userquota_prop_to_obj(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type) { switch (type) { case ZFS_PROP_USERUSED: @@ -437,20 +625,20 @@ zfs_userquota_prop_to_obj(zfs_sb_t *zsb, zfs_userquota_prop_t type) case ZFS_PROP_GROUPOBJUSED: return (DMU_GROUPUSED_OBJECT); case ZFS_PROP_USERQUOTA: - return (zsb->z_userquota_obj); + return (zfsvfs->z_userquota_obj); case ZFS_PROP_GROUPQUOTA: - return (zsb->z_groupquota_obj); + return (zfsvfs->z_groupquota_obj); case ZFS_PROP_USEROBJQUOTA: - return (zsb->z_userobjquota_obj); + return (zfsvfs->z_userobjquota_obj); case ZFS_PROP_GROUPOBJQUOTA: - return (zsb->z_groupobjquota_obj); + return (zfsvfs->z_groupobjquota_obj); default: return (ZFS_NO_OBJECT); } } int -zfs_userspace_many(zfs_sb_t *zsb, zfs_userquota_prop_t type, +zfs_userspace_many(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type, uint64_t *cookiep, void *vbuf, uint64_t *bufsizep) { int error; @@ -460,15 +648,15 @@ zfs_userspace_many(zfs_sb_t *zsb, zfs_userquota_prop_t type, uint64_t obj; int offset = 0; - if (!dmu_objset_userspace_present(zsb->z_os)) + if (!dmu_objset_userspace_present(zfsvfs->z_os)) return (SET_ERROR(ENOTSUP)); if ((type == 
ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED || type == ZFS_PROP_USEROBJQUOTA || type == ZFS_PROP_GROUPOBJQUOTA) && - !dmu_objset_userobjspace_present(zsb->z_os)) + !dmu_objset_userobjspace_present(zfsvfs->z_os)) return (SET_ERROR(ENOTSUP)); - obj = zfs_userquota_prop_to_obj(zsb, type); + obj = zfs_userquota_prop_to_obj(zfsvfs, type); if (obj == ZFS_NO_OBJECT) { *bufsizep = 0; return (0); @@ -477,7 +665,7 @@ zfs_userspace_many(zfs_sb_t *zsb, zfs_userquota_prop_t type, if (type == ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED) offset = DMU_OBJACCT_PREFIX_LEN; - for (zap_cursor_init_serialized(&zc, zsb->z_os, obj, *cookiep); + for (zap_cursor_init_serialized(&zc, zfsvfs->z_os, obj, *cookiep); (error = zap_cursor_retrieve(&zc, &za)) == 0; zap_cursor_advance(&zc)) { if ((uintptr_t)buf - (uintptr_t)vbuf + sizeof (zfs_useracct_t) > @@ -492,7 +680,7 @@ zfs_userspace_many(zfs_sb_t *zsb, zfs_userquota_prop_t type, DMU_OBJACCT_PREFIX_LEN) == 0)) continue; - fuidstr_to_sid(zsb, za.za_name + offset, + fuidstr_to_sid(zfsvfs, za.za_name + offset, buf->zu_domain, sizeof (buf->zu_domain), &buf->zu_rid); buf->zu_space = za.za_first_integer; @@ -507,20 +695,19 @@ zfs_userspace_many(zfs_sb_t *zsb, zfs_userquota_prop_t type, zap_cursor_fini(&zc); return (error); } -EXPORT_SYMBOL(zfs_userspace_many); /* * buf must be big enough (eg, 32 bytes) */ static int -id_to_fuidstr(zfs_sb_t *zsb, const char *domain, uid_t rid, +id_to_fuidstr(zfsvfs_t *zfsvfs, const char *domain, uid_t rid, char *buf, boolean_t addok) { uint64_t fuid; int domainid = 0; if (domain && domain[0]) { - domainid = zfs_fuid_find_by_domain(zsb, domain, NULL, addok); + domainid = zfs_fuid_find_by_domain(zfsvfs, domain, NULL, addok); if (domainid == -1) return (SET_ERROR(ENOENT)); } @@ -530,7 +717,7 @@ id_to_fuidstr(zfs_sb_t *zsb, const char *domain, uid_t rid, } int -zfs_userspace_one(zfs_sb_t *zsb, zfs_userquota_prop_t type, +zfs_userspace_one(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type, const char *domain, uint64_t rid, uint64_t *valp) { char buf[20 + DMU_OBJACCT_PREFIX_LEN]; @@ -540,15 +727,15 @@ zfs_userspace_one(zfs_sb_t *zsb, zfs_userquota_prop_t type, *valp = 0; - if (!dmu_objset_userspace_present(zsb->z_os)) + if (!dmu_objset_userspace_present(zfsvfs->z_os)) return (SET_ERROR(ENOTSUP)); if ((type == ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED || type == ZFS_PROP_USEROBJQUOTA || type == ZFS_PROP_GROUPOBJQUOTA) && - !dmu_objset_userobjspace_present(zsb->z_os)) + !dmu_objset_userobjspace_present(zfsvfs->z_os)) return (SET_ERROR(ENOTSUP)); - obj = zfs_userquota_prop_to_obj(zsb, type); + obj = zfs_userquota_prop_to_obj(zfsvfs, type); if (obj == ZFS_NO_OBJECT) return (0); @@ -557,19 +744,18 @@ zfs_userspace_one(zfs_sb_t *zsb, zfs_userquota_prop_t type, offset = DMU_OBJACCT_PREFIX_LEN; } - err = id_to_fuidstr(zsb, domain, rid, buf + offset, B_FALSE); + err = id_to_fuidstr(zfsvfs, domain, rid, buf + offset, B_FALSE); if (err) return (err); - err = zap_lookup(zsb->z_os, obj, buf, 8, 1, valp); + err = zap_lookup(zfsvfs->z_os, obj, buf, 8, 1, valp); if (err == ENOENT) err = 0; return (err); } -EXPORT_SYMBOL(zfs_userspace_one); int -zfs_set_userquota(zfs_sb_t *zsb, zfs_userquota_prop_t type, +zfs_set_userquota(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type, const char *domain, uint64_t rid, uint64_t quota) { char buf[32]; @@ -578,229 +764,207 @@ zfs_set_userquota(zfs_sb_t *zsb, zfs_userquota_prop_t type, uint64_t *objp; boolean_t fuid_dirtied; - if (zsb->z_version < ZPL_VERSION_USERSPACE) + if (zfsvfs->z_version < 
ZPL_VERSION_USERSPACE) return (SET_ERROR(ENOTSUP)); switch (type) { case ZFS_PROP_USERQUOTA: - objp = &zsb->z_userquota_obj; + objp = &zfsvfs->z_userquota_obj; break; case ZFS_PROP_GROUPQUOTA: - objp = &zsb->z_groupquota_obj; + objp = &zfsvfs->z_groupquota_obj; break; case ZFS_PROP_USEROBJQUOTA: - objp = &zsb->z_userobjquota_obj; + objp = &zfsvfs->z_userobjquota_obj; break; case ZFS_PROP_GROUPOBJQUOTA: - objp = &zsb->z_groupobjquota_obj; + objp = &zfsvfs->z_groupobjquota_obj; break; default: return (SET_ERROR(EINVAL)); } - err = id_to_fuidstr(zsb, domain, rid, buf, B_TRUE); + err = id_to_fuidstr(zfsvfs, domain, rid, buf, B_TRUE); if (err) return (err); - fuid_dirtied = zsb->z_fuid_dirty; + fuid_dirtied = zfsvfs->z_fuid_dirty; - tx = dmu_tx_create(zsb->z_os); + tx = dmu_tx_create(zfsvfs->z_os); dmu_tx_hold_zap(tx, *objp ? *objp : DMU_NEW_OBJECT, B_TRUE, NULL); if (*objp == 0) { dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE, zfs_userquota_prop_prefixes[type]); } if (fuid_dirtied) - zfs_fuid_txhold(zsb, tx); + zfs_fuid_txhold(zfsvfs, tx); err = dmu_tx_assign(tx, TXG_WAIT); if (err) { dmu_tx_abort(tx); return (err); } - mutex_enter(&zsb->z_lock); + mutex_enter(&zfsvfs->z_lock); if (*objp == 0) { - *objp = zap_create(zsb->z_os, DMU_OT_USERGROUP_QUOTA, + *objp = zap_create(zfsvfs->z_os, DMU_OT_USERGROUP_QUOTA, DMU_OT_NONE, 0, tx); - VERIFY(0 == zap_add(zsb->z_os, MASTER_NODE_OBJ, + VERIFY(0 == zap_add(zfsvfs->z_os, MASTER_NODE_OBJ, zfs_userquota_prop_prefixes[type], 8, 1, objp, tx)); } - mutex_exit(&zsb->z_lock); + mutex_exit(&zfsvfs->z_lock); if (quota == 0) { - err = zap_remove(zsb->z_os, *objp, buf, tx); + err = zap_remove(zfsvfs->z_os, *objp, buf, tx); if (err == ENOENT) err = 0; } else { - err = zap_update(zsb->z_os, *objp, buf, 8, 1, "a, tx); + err = zap_update(zfsvfs->z_os, *objp, buf, 8, 1, "a, tx); } ASSERT(err == 0); if (fuid_dirtied) - zfs_fuid_sync(zsb, tx); + zfs_fuid_sync(zfsvfs, tx); dmu_tx_commit(tx); return (err); } -EXPORT_SYMBOL(zfs_set_userquota); boolean_t -zfs_fuid_overobjquota(zfs_sb_t *zsb, boolean_t isgroup, uint64_t fuid) +zfs_fuid_overobjquota(zfsvfs_t *zfsvfs, boolean_t isgroup, uint64_t fuid) { char buf[20 + DMU_OBJACCT_PREFIX_LEN]; uint64_t used, quota, usedobj, quotaobj; int err; - if (!dmu_objset_userobjspace_present(zsb->z_os)) { - if (dmu_objset_userobjspace_upgradable(zsb->z_os)) - dmu_objset_userobjspace_upgrade(zsb->z_os); + if (!dmu_objset_userobjspace_present(zfsvfs->z_os)) { + if (dmu_objset_userobjspace_upgradable(zfsvfs->z_os)) + dmu_objset_userobjspace_upgrade(zfsvfs->z_os); return (B_FALSE); } usedobj = isgroup ? DMU_GROUPUSED_OBJECT : DMU_USERUSED_OBJECT; - quotaobj = isgroup ? zsb->z_groupobjquota_obj : zsb->z_userobjquota_obj; - if (quotaobj == 0 || zsb->z_replay) + quotaobj = isgroup ? 
zfsvfs->z_groupobjquota_obj : + zfsvfs->z_userobjquota_obj; + if (quotaobj == 0 || zfsvfs->z_replay) return (B_FALSE); (void) sprintf(buf, "%llx", (longlong_t)fuid); - err = zap_lookup(zsb->z_os, quotaobj, buf, 8, 1, "a); + err = zap_lookup(zfsvfs->z_os, quotaobj, buf, 8, 1, "a); if (err != 0) return (B_FALSE); (void) sprintf(buf, DMU_OBJACCT_PREFIX "%llx", (longlong_t)fuid); - err = zap_lookup(zsb->z_os, usedobj, buf, 8, 1, &used); + err = zap_lookup(zfsvfs->z_os, usedobj, buf, 8, 1, &used); if (err != 0) return (B_FALSE); return (used >= quota); } boolean_t -zfs_fuid_overquota(zfs_sb_t *zsb, boolean_t isgroup, uint64_t fuid) +zfs_fuid_overquota(zfsvfs_t *zfsvfs, boolean_t isgroup, uint64_t fuid) { char buf[20]; uint64_t used, quota, usedobj, quotaobj; int err; usedobj = isgroup ? DMU_GROUPUSED_OBJECT : DMU_USERUSED_OBJECT; - quotaobj = isgroup ? zsb->z_groupquota_obj : zsb->z_userquota_obj; + quotaobj = isgroup ? zfsvfs->z_groupquota_obj : zfsvfs->z_userquota_obj; - if (quotaobj == 0 || zsb->z_replay) + if (quotaobj == 0 || zfsvfs->z_replay) return (B_FALSE); (void) sprintf(buf, "%llx", (longlong_t)fuid); - err = zap_lookup(zsb->z_os, quotaobj, buf, 8, 1, "a); + err = zap_lookup(zfsvfs->z_os, quotaobj, buf, 8, 1, "a); if (err != 0) return (B_FALSE); - err = zap_lookup(zsb->z_os, usedobj, buf, 8, 1, &used); + err = zap_lookup(zfsvfs->z_os, usedobj, buf, 8, 1, &used); if (err != 0) return (B_FALSE); return (used >= quota); } -EXPORT_SYMBOL(zfs_fuid_overquota); boolean_t -zfs_owner_overquota(zfs_sb_t *zsb, znode_t *zp, boolean_t isgroup) +zfs_owner_overquota(zfsvfs_t *zfsvfs, znode_t *zp, boolean_t isgroup) { uint64_t fuid; uint64_t quotaobj; struct inode *ip = ZTOI(zp); - quotaobj = isgroup ? zsb->z_groupquota_obj : zsb->z_userquota_obj; + quotaobj = isgroup ? zfsvfs->z_groupquota_obj : zfsvfs->z_userquota_obj; fuid = isgroup ? KGID_TO_SGID(ip->i_gid) : KUID_TO_SUID(ip->i_uid); - if (quotaobj == 0 || zsb->z_replay) + if (quotaobj == 0 || zfsvfs->z_replay) return (B_FALSE); - return (zfs_fuid_overquota(zsb, isgroup, fuid)); -} -EXPORT_SYMBOL(zfs_owner_overquota); - -zfs_mntopts_t * -zfs_mntopts_alloc(void) -{ - return (kmem_zalloc(sizeof (zfs_mntopts_t), KM_SLEEP)); -} - -void -zfs_mntopts_free(zfs_mntopts_t *zmo) -{ - if (zmo->z_osname) - strfree(zmo->z_osname); - - if (zmo->z_mntpoint) - strfree(zmo->z_mntpoint); - - kmem_free(zmo, sizeof (zfs_mntopts_t)); + return (zfs_fuid_overquota(zfsvfs, isgroup, fuid)); } int -zfs_sb_create(const char *osname, zfs_mntopts_t *zmo, zfs_sb_t **zsbp) +zfsvfs_create(const char *osname, zfsvfs_t **zfvp) { objset_t *os; - zfs_sb_t *zsb; + zfsvfs_t *zfsvfs; uint64_t zval; int i, size, error; uint64_t sa_obj; - zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_SLEEP); - - /* - * Optional temporary mount options, free'd in zfs_sb_free(). - */ - zsb->z_mntopts = (zmo ? zmo : zfs_mntopts_alloc()); + zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP); /* * We claim to always be readonly so we can open snapshots; * other ZPL code will prevent us from writing to snapshots. */ - error = dmu_objset_own(osname, DMU_OST_ZFS, B_TRUE, zsb, &os); - if (error) - goto out_zmo; + error = dmu_objset_own(osname, DMU_OST_ZFS, B_TRUE, zfsvfs, &os); + if (error) { + kmem_free(zfsvfs, sizeof (zfsvfs_t)); + return (error); + } /* * Initialize the zfs-specific filesystem structure. * Should probably make this a kmem cache, shuffle fields. 
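The quota helpers above key everything off the FUID printed as hex: zfs_fuid_overquota() formats the id with "%llx" and looks it up in the quota and used ZAP objects, and the object-count variant prefixes the same key with DMU_OBJACCT_PREFIX. Below is a minimal userspace sketch of that key scheme and the used >= quota test; the toy table, toy_zap_lookup(), and the "obj-" prefix are stand-ins for the real ZAP calls, not part of this change.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Stand-in for the kernel's object-accounting key prefix. */
#define OBJACCT_PREFIX  "obj-"

/* Toy "ZAP": a fixed table of key -> 64-bit value pairs. */
struct kv { const char *key; uint64_t val; };

static int
toy_zap_lookup(const struct kv *tbl, size_t n, const char *key, uint64_t *valp)
{
    for (size_t i = 0; i < n; i++) {
        if (strcmp(tbl[i].key, key) == 0) {
            *valp = tbl[i].val;
            return (0);
        }
    }
    return (-1);            /* analogous to ENOENT */
}

/* Mirrors the zfs_fuid_overquota() logic: a missing quota means no limit. */
static int
over_quota(const struct kv *quota_tbl, size_t nq,
    const struct kv *used_tbl, size_t nu, uint64_t fuid)
{
    char buf[32];
    uint64_t quota, used;

    (void) snprintf(buf, sizeof (buf), "%llx", (unsigned long long)fuid);
    if (toy_zap_lookup(quota_tbl, nq, buf, &quota) != 0)
        return (0);         /* no quota set for this id */
    if (toy_zap_lookup(used_tbl, nu, buf, &used) != 0)
        return (0);         /* nothing accounted yet */
    return (used >= quota);
}

int
main(void)
{
    struct kv quotas[] = { { "3e8", 1000 } };   /* uid 1000, 1000 byte cap */
    struct kv used[] = { { "3e8", 1500 } };
    char objkey[40];

    printf("uid 1000 over quota: %d\n",
        over_quota(quotas, 1, used, 1, 1000));

    /* Object-count accounting keys reuse the same id under a prefix. */
    (void) snprintf(objkey, sizeof (objkey), OBJACCT_PREFIX "%llx",
        (unsigned long long)1000);
    printf("object-count key: %s\n", objkey);
    return (0);
}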
*/ - zsb->z_sb = NULL; - zsb->z_parent = zsb; - zsb->z_max_blksz = SPA_OLD_MAXBLOCKSIZE; - zsb->z_show_ctldir = ZFS_SNAPDIR_VISIBLE; - zsb->z_os = os; - - error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zsb->z_version); + zfsvfs->z_vfs = NULL; + zfsvfs->z_sb = NULL; + zfsvfs->z_parent = zfsvfs; + zfsvfs->z_max_blksz = SPA_OLD_MAXBLOCKSIZE; + zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE; + zfsvfs->z_os = os; + + error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zfsvfs->z_version); if (error) { goto out; - } else if (zsb->z_version > ZPL_VERSION) { + } else if (zfsvfs->z_version > ZPL_VERSION) { error = SET_ERROR(ENOTSUP); goto out; } if ((error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &zval)) != 0) goto out; - zsb->z_norm = (int)zval; + zfsvfs->z_norm = (int)zval; if ((error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &zval)) != 0) goto out; - zsb->z_utf8 = (zval != 0); + zfsvfs->z_utf8 = (zval != 0); if ((error = zfs_get_zplprop(os, ZFS_PROP_CASE, &zval)) != 0) goto out; - zsb->z_case = (uint_t)zval; + zfsvfs->z_case = (uint_t)zval; if ((error = zfs_get_zplprop(os, ZFS_PROP_ACLTYPE, &zval)) != 0) goto out; - zsb->z_acl_type = (uint_t)zval; + zfsvfs->z_acl_type = (uint_t)zval; /* * Fold case on file systems that are always or sometimes case * insensitive. */ - if (zsb->z_case == ZFS_CASE_INSENSITIVE || - zsb->z_case == ZFS_CASE_MIXED) - zsb->z_norm |= U8_TEXTPREP_TOUPPER; + if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE || + zfsvfs->z_case == ZFS_CASE_MIXED) + zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER; - zsb->z_use_fuids = USE_FUIDS(zsb->z_version, zsb->z_os); - zsb->z_use_sa = USE_SA(zsb->z_version, zsb->z_os); + zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os); + zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os); - if (zsb->z_use_sa) { + if (zfsvfs->z_use_sa) { /* should either have both of these objects or none */ error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1, &sa_obj); @@ -809,7 +973,7 @@ zfs_sb_create(const char *osname, zfs_mntopts_t *zmo, zfs_sb_t **zsbp) error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &zval); if ((error == 0) && (zval == ZFS_XATTR_SA)) - zsb->z_xattr_sa = B_TRUE; + zfsvfs->z_xattr_sa = B_TRUE; } else { /* * Pre SA versions file systems should never touch @@ -819,99 +983,97 @@ zfs_sb_create(const char *osname, zfs_mntopts_t *zmo, zfs_sb_t **zsbp) } error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END, - &zsb->z_attr_table); + &zfsvfs->z_attr_table); if (error) goto out; - if (zsb->z_version >= ZPL_VERSION_SA) + if (zfsvfs->z_version >= ZPL_VERSION_SA) sa_register_update_callback(os, zfs_sa_upgrade); error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1, - &zsb->z_root); + &zfsvfs->z_root); if (error) goto out; - ASSERT(zsb->z_root != 0); + ASSERT(zfsvfs->z_root != 0); error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1, - &zsb->z_unlinkedobj); + &zfsvfs->z_unlinkedobj); if (error) goto out; error = zap_lookup(os, MASTER_NODE_OBJ, zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA], - 8, 1, &zsb->z_userquota_obj); + 8, 1, &zfsvfs->z_userquota_obj); if (error && error != ENOENT) goto out; error = zap_lookup(os, MASTER_NODE_OBJ, zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA], - 8, 1, &zsb->z_groupquota_obj); + 8, 1, &zfsvfs->z_groupquota_obj); if (error && error != ENOENT) goto out; error = zap_lookup(os, MASTER_NODE_OBJ, zfs_userquota_prop_prefixes[ZFS_PROP_USEROBJQUOTA], - 8, 1, &zsb->z_userobjquota_obj); + 8, 1, &zfsvfs->z_userobjquota_obj); if (error && error != ENOENT) goto out; error = zap_lookup(os, MASTER_NODE_OBJ, 
zfs_userquota_prop_prefixes[ZFS_PROP_GROUPOBJQUOTA], - 8, 1, &zsb->z_groupobjquota_obj); + 8, 1, &zfsvfs->z_groupobjquota_obj); if (error && error != ENOENT) goto out; error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1, - &zsb->z_fuid_obj); + &zfsvfs->z_fuid_obj); if (error && error != ENOENT) goto out; error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1, - &zsb->z_shares_dir); + &zfsvfs->z_shares_dir); if (error && error != ENOENT) goto out; - mutex_init(&zsb->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL); - mutex_init(&zsb->z_lock, NULL, MUTEX_DEFAULT, NULL); - list_create(&zsb->z_all_znodes, sizeof (znode_t), + mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&zfsvfs->z_lock, NULL, MUTEX_DEFAULT, NULL); + list_create(&zfsvfs->z_all_znodes, sizeof (znode_t), offsetof(znode_t, z_link_node)); - rrm_init(&zsb->z_teardown_lock, B_FALSE); - rw_init(&zsb->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL); - rw_init(&zsb->z_fuid_lock, NULL, RW_DEFAULT, NULL); + rrm_init(&zfsvfs->z_teardown_lock, B_FALSE); + rw_init(&zfsvfs->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL); + rw_init(&zfsvfs->z_fuid_lock, NULL, RW_DEFAULT, NULL); size = MIN(1 << (highbit64(zfs_object_mutex_size)-1), ZFS_OBJ_MTX_MAX); - zsb->z_hold_size = size; - zsb->z_hold_trees = vmem_zalloc(sizeof (avl_tree_t) * size, KM_SLEEP); - zsb->z_hold_locks = vmem_zalloc(sizeof (kmutex_t) * size, KM_SLEEP); + zfsvfs->z_hold_size = size; + zfsvfs->z_hold_trees = vmem_zalloc(sizeof (avl_tree_t) * size, + KM_SLEEP); + zfsvfs->z_hold_locks = vmem_zalloc(sizeof (kmutex_t) * size, KM_SLEEP); for (i = 0; i != size; i++) { - avl_create(&zsb->z_hold_trees[i], zfs_znode_hold_compare, + avl_create(&zfsvfs->z_hold_trees[i], zfs_znode_hold_compare, sizeof (znode_hold_t), offsetof(znode_hold_t, zh_node)); - mutex_init(&zsb->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL); + mutex_init(&zfsvfs->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL); } - *zsbp = zsb; + *zfvp = zfsvfs; return (0); out: - dmu_objset_disown(os, zsb); -out_zmo: - *zsbp = NULL; - zfs_mntopts_free(zsb->z_mntopts); - kmem_free(zsb, sizeof (zfs_sb_t)); + dmu_objset_disown(os, zfsvfs); + *zfvp = NULL; + kmem_free(zfsvfs, sizeof (zfsvfs_t)); return (error); } -EXPORT_SYMBOL(zfs_sb_create); int -zfs_sb_setup(zfs_sb_t *zsb, boolean_t mounting) +zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting) { int error; - error = zfs_register_callbacks(zsb); + error = zfs_register_callbacks(zfsvfs->z_vfs); if (error) return (error); - zsb->z_log = zil_open(zsb->z_os, zfs_get_data); + zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data); /* * If we are not mounting (ie: online recv), then we don't @@ -925,11 +1087,11 @@ zfs_sb_setup(zfs_sb_t *zsb, boolean_t mounting) * During replay we remove the read only flag to * allow replays to succeed. */ - readonly = zfs_is_readonly(zsb); + readonly = zfs_is_readonly(zfsvfs); if (readonly != 0) - readonly_changed_cb(zsb, B_FALSE); + readonly_changed_cb(zfsvfs, B_FALSE); else - zfs_unlinked_drain(zsb); + zfs_unlinked_drain(zfsvfs); /* * Parse and replay the intent log. @@ -958,73 +1120,70 @@ zfs_sb_setup(zfs_sb_t *zsb, boolean_t mounting) * allocated and in the unlinked set, and there is an * intent log record saying to allocate it. 
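zfsvfs_create() above sizes the znode hold arrays as MIN(1 << (highbit64(zfs_object_mutex_size) - 1), ZFS_OBJ_MTX_MAX), i.e. the tunable rounded down to a power of two and capped. A userspace sketch of that rounding follows; the highbit64() helper, the cap value, and the mask-based bucket lookup at the end are illustrative assumptions rather than code taken from this diff.

#include <stdio.h>
#include <stdint.h>

/*
 * Position of the highest set bit, 1-based; 0 for an input of 0.
 * Mirrors what the kernel's highbit64() provides.
 */
static int
highbit64(uint64_t x)
{
    int h = 0;

    while (x != 0) {
        h++;
        x >>= 1;
    }
    return (h);
}

#define OBJ_MTX_MAX 1024    /* stand-in for ZFS_OBJ_MTX_MAX */
#define MIN(a, b)   ((a) < (b) ? (a) : (b))

int
main(void)
{
    uint64_t tunable = 300; /* e.g. zfs_object_mutex_size */
    int size = MIN(1 << (highbit64(tunable) - 1), OBJ_MTX_MAX);

    /*
     * 300 rounds down to 256 buckets; a power-of-two count lets an
     * object number be mapped to a bucket with a simple mask.
     */
    printf("buckets = %d, object 12345 -> bucket %d\n",
        size, (int)(12345 & (size - 1)));
    return (0);
}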
*/ - if (spa_writeable(dmu_objset_spa(zsb->z_os))) { + if (spa_writeable(dmu_objset_spa(zfsvfs->z_os))) { if (zil_replay_disable) { - zil_destroy(zsb->z_log, B_FALSE); + zil_destroy(zfsvfs->z_log, B_FALSE); } else { - zsb->z_replay = B_TRUE; - zil_replay(zsb->z_os, zsb, + zfsvfs->z_replay = B_TRUE; + zil_replay(zfsvfs->z_os, zfsvfs, zfs_replay_vector); - zsb->z_replay = B_FALSE; + zfsvfs->z_replay = B_FALSE; } } /* restore readonly bit */ if (readonly != 0) - readonly_changed_cb(zsb, B_TRUE); + readonly_changed_cb(zfsvfs, B_TRUE); } /* - * Set the objset user_ptr to track its zsb. + * Set the objset user_ptr to track its zfsvfs. */ - mutex_enter(&zsb->z_os->os_user_ptr_lock); - dmu_objset_set_user(zsb->z_os, zsb); - mutex_exit(&zsb->z_os->os_user_ptr_lock); + mutex_enter(&zfsvfs->z_os->os_user_ptr_lock); + dmu_objset_set_user(zfsvfs->z_os, zfsvfs); + mutex_exit(&zfsvfs->z_os->os_user_ptr_lock); return (0); } -EXPORT_SYMBOL(zfs_sb_setup); void -zfs_sb_free(zfs_sb_t *zsb) +zfsvfs_free(zfsvfs_t *zfsvfs) { - int i, size = zsb->z_hold_size; + int i, size = zfsvfs->z_hold_size; - zfs_fuid_destroy(zsb); + zfs_fuid_destroy(zfsvfs); - mutex_destroy(&zsb->z_znodes_lock); - mutex_destroy(&zsb->z_lock); - list_destroy(&zsb->z_all_znodes); - rrm_destroy(&zsb->z_teardown_lock); - rw_destroy(&zsb->z_teardown_inactive_lock); - rw_destroy(&zsb->z_fuid_lock); + mutex_destroy(&zfsvfs->z_znodes_lock); + mutex_destroy(&zfsvfs->z_lock); + list_destroy(&zfsvfs->z_all_znodes); + rrm_destroy(&zfsvfs->z_teardown_lock); + rw_destroy(&zfsvfs->z_teardown_inactive_lock); + rw_destroy(&zfsvfs->z_fuid_lock); for (i = 0; i != size; i++) { - avl_destroy(&zsb->z_hold_trees[i]); - mutex_destroy(&zsb->z_hold_locks[i]); + avl_destroy(&zfsvfs->z_hold_trees[i]); + mutex_destroy(&zfsvfs->z_hold_locks[i]); } - vmem_free(zsb->z_hold_trees, sizeof (avl_tree_t) * size); - vmem_free(zsb->z_hold_locks, sizeof (kmutex_t) * size); - zfs_mntopts_free(zsb->z_mntopts); - kmem_free(zsb, sizeof (zfs_sb_t)); + vmem_free(zfsvfs->z_hold_trees, sizeof (avl_tree_t) * size); + vmem_free(zfsvfs->z_hold_locks, sizeof (kmutex_t) * size); + zfsvfs_vfs_free(zfsvfs->z_vfs); + kmem_free(zfsvfs, sizeof (zfsvfs_t)); } -EXPORT_SYMBOL(zfs_sb_free); static void -zfs_set_fuid_feature(zfs_sb_t *zsb) +zfs_set_fuid_feature(zfsvfs_t *zfsvfs) { - zsb->z_use_fuids = USE_FUIDS(zsb->z_version, zsb->z_os); - zsb->z_use_sa = USE_SA(zsb->z_version, zsb->z_os); + zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os); + zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os); } void -zfs_unregister_callbacks(zfs_sb_t *zsb) +zfs_unregister_callbacks(zfsvfs_t *zfsvfs) { - objset_t *os = zsb->z_os; + objset_t *os = zfsvfs->z_os; if (!dmu_objset_is_snapshot(os)) - dsl_prop_unregister_all(dmu_objset_ds(os), zsb); + dsl_prop_unregister_all(dmu_objset_ds(os), zfsvfs); } -EXPORT_SYMBOL(zfs_unregister_callbacks); #ifdef HAVE_MLSLABEL /* @@ -1053,23 +1212,22 @@ zfs_check_global_label(const char *dsname, const char *hexsl) } return (SET_ERROR(EACCES)); } -EXPORT_SYMBOL(zfs_check_global_label); #endif /* HAVE_MLSLABEL */ int zfs_statvfs(struct dentry *dentry, struct kstatfs *statp) { - zfs_sb_t *zsb = dentry->d_sb->s_fs_info; + zfsvfs_t *zfsvfs = dentry->d_sb->s_fs_info; uint64_t refdbytes, availbytes, usedobjs, availobjs; uint64_t fsid; uint32_t bshift; - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); - dmu_objset_space(zsb->z_os, + dmu_objset_space(zfsvfs->z_os, &refdbytes, &availbytes, &usedobjs, &availobjs); - fsid = dmu_objset_fsid_guid(zsb->z_os); + fsid = 
dmu_objset_fsid_guid(zfsvfs->z_os); /* * The underlying storage pool actually uses multiple block * size. Under Solaris frsize (fragment size) is reported as @@ -1079,8 +1237,8 @@ zfs_statvfs(struct dentry *dentry, struct kstatfs *statp) * interchangeably. Thus we are forced to report both of them * as the filesystem's maximum block size. */ - statp->f_frsize = zsb->z_max_blksz; - statp->f_bsize = zsb->z_max_blksz; + statp->f_frsize = zfsvfs->z_max_blksz; + statp->f_bsize = zfsvfs->z_max_blksz; bshift = fls(statp->f_bsize) - 1; /* @@ -1114,27 +1272,25 @@ zfs_statvfs(struct dentry *dentry, struct kstatfs *statp) */ bzero(statp->f_spare, sizeof (statp->f_spare)); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (0); } -EXPORT_SYMBOL(zfs_statvfs); int -zfs_root(zfs_sb_t *zsb, struct inode **ipp) +zfs_root(zfsvfs_t *zfsvfs, struct inode **ipp) { znode_t *rootzp; int error; - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); - error = zfs_zget(zsb, zsb->z_root, &rootzp); + error = zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp); if (error == 0) *ipp = ZTOI(rootzp); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } -EXPORT_SYMBOL(zfs_root); #ifdef HAVE_D_PRUNE_ALIASES /* @@ -1147,7 +1303,7 @@ EXPORT_SYMBOL(zfs_root); * end of the list so we're always scanning the oldest znodes first. */ static int -zfs_sb_prune_aliases(zfs_sb_t *zsb, unsigned long nr_to_scan) +zfs_prune_aliases(zfsvfs_t *zfsvfs, unsigned long nr_to_scan) { znode_t **zp_array, *zp; int max_array = MIN(nr_to_scan, PAGE_SIZE * 8 / sizeof (znode_t *)); @@ -1156,15 +1312,15 @@ zfs_sb_prune_aliases(zfs_sb_t *zsb, unsigned long nr_to_scan) zp_array = kmem_zalloc(max_array * sizeof (znode_t *), KM_SLEEP); - mutex_enter(&zsb->z_znodes_lock); - while ((zp = list_head(&zsb->z_all_znodes)) != NULL) { + mutex_enter(&zfsvfs->z_znodes_lock); + while ((zp = list_head(&zfsvfs->z_all_znodes)) != NULL) { if ((i++ > nr_to_scan) || (j >= max_array)) break; ASSERT(list_link_active(&zp->z_link_node)); - list_remove(&zsb->z_all_znodes, zp); - list_insert_tail(&zsb->z_all_znodes, zp); + list_remove(&zfsvfs->z_all_znodes, zp); + list_insert_tail(&zfsvfs->z_all_znodes, zp); /* Skip active znodes and .zfs entries */ if (MUTEX_HELD(&zp->z_lock) || zp->z_is_ctldir) @@ -1176,7 +1332,7 @@ zfs_sb_prune_aliases(zfs_sb_t *zsb, unsigned long nr_to_scan) zp_array[j] = zp; j++; } - mutex_exit(&zsb->z_znodes_lock); + mutex_exit(&zfsvfs->z_znodes_lock); for (i = 0; i < j; i++) { zp = zp_array[i]; @@ -1202,9 +1358,9 @@ zfs_sb_prune_aliases(zfs_sb_t *zsb, unsigned long nr_to_scan) * blocks but can't because they are all pinned by entries in these caches. 
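zfs_prune_aliases() above scans z_all_znodes from the head and immediately re-inserts each visited znode at the tail, so repeated shrinker calls keep starting with the oldest entries, and it collects up to a page worth of inactive candidates before dropping the list lock. Here is a self-contained sketch of that rotate-and-collect pattern; the list type and the active flag are stand-ins for the kernel's list_t and znode state.

#include <stdio.h>

struct node {
    int         id;
    int         active;     /* stand-in for "still in use" */
    struct node *next;
};

struct list {
    struct node *head;
    struct node *tail;
};

static struct node *
pop_head(struct list *l)
{
    struct node *n = l->head;

    if (n != NULL) {
        l->head = n->next;
        if (l->head == NULL)
            l->tail = NULL;
        n->next = NULL;
    }
    return (n);
}

static void
push_tail(struct list *l, struct node *n)
{
    n->next = NULL;
    if (l->tail != NULL)
        l->tail->next = n;
    else
        l->head = n;
    l->tail = n;
}

/*
 * Rotate-and-collect: examine up to nr_to_scan entries from the head,
 * re-queue each at the tail (so the next scan resumes with the oldest
 * remaining entries), and gather inactive ones into candidates[].
 */
static int
prune_scan(struct list *l, int nr_to_scan, struct node **candidates, int max)
{
    int scanned = 0, found = 0;

    while (scanned < nr_to_scan && found < max && l->head != NULL) {
        struct node *n = pop_head(l);

        push_tail(l, n);
        scanned++;
        if (!n->active)
            candidates[found++] = n;
    }
    return (found);
}

int
main(void)
{
    struct list l = { NULL, NULL };
    struct node nodes[5];
    struct node *cand[5];
    int i, found;

    for (i = 0; i < 5; i++) {
        nodes[i].id = i;
        nodes[i].active = (i % 2);  /* odd ids stay busy */
        push_tail(&l, &nodes[i]);
    }

    found = prune_scan(&l, 5, cand, 5);
    for (i = 0; i < found; i++)
        printf("candidate to reclaim: %d\n", cand[i]->id);
    return (0);
}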
*/ int -zfs_sb_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects) +zfs_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects) { - zfs_sb_t *zsb = sb->s_fs_info; + zfsvfs_t *zfsvfs = sb->s_fs_info; int error = 0; #if defined(HAVE_SHRINK) || defined(HAVE_SPLIT_SHRINKER_CALLBACK) struct shrinker *shrinker = &sb->s_shrink; @@ -1214,7 +1370,7 @@ zfs_sb_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects) }; #endif - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); #if defined(HAVE_SPLIT_SHRINKER_CALLBACK) && \ defined(SHRINK_CONTROL_HAS_NID) && \ @@ -1234,7 +1390,7 @@ zfs_sb_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects) *objects = (*shrinker->shrink)(shrinker, &sc); #elif defined(HAVE_D_PRUNE_ALIASES) #define D_PRUNE_ALIASES_IS_DEFAULT - *objects = zfs_sb_prune_aliases(zsb, nr_to_scan); + *objects = zfs_prune_aliases(zfsvfs, nr_to_scan); #else #error "No available dentry and inode cache pruning mechanism." #endif @@ -1242,41 +1398,40 @@ zfs_sb_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects) #if defined(HAVE_D_PRUNE_ALIASES) && !defined(D_PRUNE_ALIASES_IS_DEFAULT) #undef D_PRUNE_ALIASES_IS_DEFAULT /* - * Fall back to zfs_sb_prune_aliases if the kernel's per-superblock + * Fall back to zfs_prune_aliases if the kernel's per-superblock * shrinker couldn't free anything, possibly due to the inodes being * allocated in a different memcg. */ if (*objects == 0) - *objects = zfs_sb_prune_aliases(zsb, nr_to_scan); + *objects = zfs_prune_aliases(zfsvfs, nr_to_scan); #endif - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); - dprintf_ds(zsb->z_os->os_dsl_dataset, + dprintf_ds(zfsvfs->z_os->os_dsl_dataset, "pruning, nr_to_scan=%lu objects=%d error=%d\n", nr_to_scan, *objects, error); return (error); } -EXPORT_SYMBOL(zfs_sb_prune); /* - * Teardown the zfs_sb_t. + * Teardown the zfsvfs_t. * * Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock' * and 'z_teardown_inactive_lock' held. */ -int -zfs_sb_teardown(zfs_sb_t *zsb, boolean_t unmounting) +static int +zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting) { znode_t *zp; /* * If someone has not already unmounted this file system, * drain the iput_taskq to ensure all active references to the - * zfs_sb_t have been handled only then can it be safely destroyed. + * zfsvfs_t have been handled only then can it be safely destroyed. */ - if (zsb->z_os) { + if (zfsvfs->z_os) { /* * If we're unmounting we have to wait for the list to * drain completely. @@ -1291,15 +1446,15 @@ zfs_sb_teardown(zfs_sb_t *zsb, boolean_t unmounting) * z_all_znodes list and thus increment z_nr_znodes. */ int round = 0; - while (zsb->z_nr_znodes > 0) { + while (zfsvfs->z_nr_znodes > 0) { taskq_wait_outstanding(dsl_pool_iput_taskq( - dmu_objset_pool(zsb->z_os)), 0); + dmu_objset_pool(zfsvfs->z_os)), 0); if (++round > 1 && !unmounting) break; } } - rrm_enter(&zsb->z_teardown_lock, RW_WRITER, FTAG); + rrm_enter(&zfsvfs->z_teardown_lock, RW_WRITER, FTAG); if (!unmounting) { /* @@ -1309,28 +1464,28 @@ zfs_sb_teardown(zfs_sb_t *zsb, boolean_t unmounting) * super block. Note, 'z_parent' is self referential * for non-snapshots. */ - shrink_dcache_sb(zsb->z_parent->z_sb); + shrink_dcache_sb(zfsvfs->z_parent->z_sb); } /* * Close the zil. NB: Can't close the zil while zfs_inactive * threads are blocked as zil_close can call zfs_inactive. 
*/ - if (zsb->z_log) { - zil_close(zsb->z_log); - zsb->z_log = NULL; + if (zfsvfs->z_log) { + zil_close(zfsvfs->z_log); + zfsvfs->z_log = NULL; } - rw_enter(&zsb->z_teardown_inactive_lock, RW_WRITER); + rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_WRITER); /* * If we are not unmounting (ie: online recv) and someone already * unmounted this file system while we were doing the switcheroo, * or a reopen of z_os failed then just bail out now. */ - if (!unmounting && (zsb->z_unmounted || zsb->z_os == NULL)) { - rw_exit(&zsb->z_teardown_inactive_lock); - rrm_exit(&zsb->z_teardown_lock, FTAG); + if (!unmounting && (zfsvfs->z_unmounted || zfsvfs->z_os == NULL)) { + rw_exit(&zfsvfs->z_teardown_inactive_lock); + rrm_exit(&zfsvfs->z_teardown_lock, FTAG); return (SET_ERROR(EIO)); } @@ -1342,13 +1497,13 @@ zfs_sb_teardown(zfs_sb_t *zsb, boolean_t unmounting) * Release all holds on dbufs. */ if (!unmounting) { - mutex_enter(&zsb->z_znodes_lock); - for (zp = list_head(&zsb->z_all_znodes); zp != NULL; - zp = list_next(&zsb->z_all_znodes, zp)) { + mutex_enter(&zfsvfs->z_znodes_lock); + for (zp = list_head(&zfsvfs->z_all_znodes); zp != NULL; + zp = list_next(&zfsvfs->z_all_znodes, zp)) { if (zp->z_sa_hdl) zfs_znode_dmu_fini(zp); } - mutex_exit(&zsb->z_znodes_lock); + mutex_exit(&zfsvfs->z_znodes_lock); } /* @@ -1357,36 +1512,35 @@ zfs_sb_teardown(zfs_sb_t *zsb, boolean_t unmounting) * other VFS ops will fail with EIO. */ if (unmounting) { - zsb->z_unmounted = B_TRUE; - rw_exit(&zsb->z_teardown_inactive_lock); - rrm_exit(&zsb->z_teardown_lock, FTAG); + zfsvfs->z_unmounted = B_TRUE; + rw_exit(&zfsvfs->z_teardown_inactive_lock); + rrm_exit(&zfsvfs->z_teardown_lock, FTAG); } /* * z_os will be NULL if there was an error in attempting to reopen - * zsb, so just return as the properties had already been + * zfsvfs, so just return as the properties had already been * * unregistered and cached data had been evicted before. */ - if (zsb->z_os == NULL) + if (zfsvfs->z_os == NULL) return (0); /* * Unregister properties. 
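The teardown above takes z_teardown_lock as writer, sets z_unmounted, and drops the lock, after which ZFS_ENTER() in every VFS op sees the flag and returns EIO. Below is a small pthread sketch of that enter/teardown handshake; the plain rwlock is only an approximation of the kernel's re-entrant rrm lock, and the names are invented for illustration.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct fsvfs {
    pthread_rwlock_t    teardown_lock;
    int                 unmounted;
};

/* ZFS_ENTER()-style guard: fail fast once the fs has been torn down. */
static int
fs_enter(struct fsvfs *fs)
{
    (void) pthread_rwlock_rdlock(&fs->teardown_lock);
    if (fs->unmounted) {
        (void) pthread_rwlock_unlock(&fs->teardown_lock);
        return (EIO);
    }
    return (0);
}

/* ZFS_EXIT()-style release. */
static void
fs_exit(struct fsvfs *fs)
{
    (void) pthread_rwlock_unlock(&fs->teardown_lock);
}

/* Teardown: exclude all enters, mark unmounted, then let them fail. */
static void
fs_teardown(struct fsvfs *fs)
{
    (void) pthread_rwlock_wrlock(&fs->teardown_lock);
    fs->unmounted = 1;
    (void) pthread_rwlock_unlock(&fs->teardown_lock);
}

int
main(void)
{
    struct fsvfs fs = { PTHREAD_RWLOCK_INITIALIZER, 0 };

    if (fs_enter(&fs) == 0) {
        printf("op ran before teardown\n");
        fs_exit(&fs);
    }
    fs_teardown(&fs);
    printf("op after teardown: %d (EIO=%d)\n", fs_enter(&fs), EIO);
    return (0);
}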
*/ - zfs_unregister_callbacks(zsb); + zfs_unregister_callbacks(zfsvfs); /* * Evict cached data */ - if (dsl_dataset_is_dirty(dmu_objset_ds(zsb->z_os)) && - !zfs_is_readonly(zsb)) - txg_wait_synced(dmu_objset_pool(zsb->z_os), 0); - dmu_objset_evict_dbufs(zsb->z_os); + if (dsl_dataset_is_dirty(dmu_objset_ds(zfsvfs->z_os)) && + !zfs_is_readonly(zfsvfs)) + txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0); + dmu_objset_evict_dbufs(zfsvfs->z_os); return (0); } -EXPORT_SYMBOL(zfs_sb_teardown); #if !defined(HAVE_2ARGS_BDI_SETUP_AND_REGISTER) && \ !defined(HAVE_3ARGS_BDI_SETUP_AND_REGISTER) @@ -1394,33 +1548,41 @@ atomic_long_t zfs_bdi_seq = ATOMIC_LONG_INIT(0); #endif int -zfs_domount(struct super_block *sb, zfs_mntopts_t *zmo, int silent) +zfs_domount(struct super_block *sb, zfs_mnt_t *zm, int silent) { - const char *osname = zmo->z_osname; - zfs_sb_t *zsb; + const char *osname = zm->mnt_osname; struct inode *root_inode; uint64_t recordsize; - int error; + int error = 0; + zfsvfs_t *zfsvfs; - error = zfs_sb_create(osname, zmo, &zsb); + ASSERT(zm); + ASSERT(osname); + + error = zfsvfs_create(osname, &zfsvfs); if (error) return (error); + error = zfsvfs_parse_options(zm->mnt_data, &zfsvfs->z_vfs); + if (error) + goto out; + if ((error = dsl_prop_get_integer(osname, "recordsize", &recordsize, NULL))) goto out; - zsb->z_sb = sb; - sb->s_fs_info = zsb; + zfsvfs->z_vfs->vfs_data = zfsvfs; + zfsvfs->z_sb = sb; + sb->s_fs_info = zfsvfs; sb->s_magic = ZFS_SUPER_MAGIC; sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_time_gran = 1; sb->s_blocksize = recordsize; sb->s_blocksize_bits = ilog2(recordsize); - zsb->z_bdi.ra_pages = 0; - sb->s_bdi = &zsb->z_bdi; + zfsvfs->z_bdi.ra_pages = 0; + sb->s_bdi = &zfsvfs->z_bdi; - error = -zpl_bdi_setup_and_register(&zsb->z_bdi, "zfs"); + error = -zpl_bdi_setup_and_register(&zfsvfs->z_bdi, "zfs"); if (error) goto out; @@ -1433,35 +1595,35 @@ zfs_domount(struct super_block *sb, zfs_mntopts_t *zmo, int silent) #endif /* HAVE_S_D_OP */ /* Set features for file system. */ - zfs_set_fuid_feature(zsb); + zfs_set_fuid_feature(zfsvfs); - if (dmu_objset_is_snapshot(zsb->z_os)) { + if (dmu_objset_is_snapshot(zfsvfs->z_os)) { uint64_t pval; - atime_changed_cb(zsb, B_FALSE); - readonly_changed_cb(zsb, B_TRUE); + atime_changed_cb(zfsvfs, B_FALSE); + readonly_changed_cb(zfsvfs, B_TRUE); if ((error = dsl_prop_get_integer(osname, "xattr", &pval, NULL))) goto out; - xattr_changed_cb(zsb, pval); + xattr_changed_cb(zfsvfs, pval); if ((error = dsl_prop_get_integer(osname, "acltype", &pval, NULL))) goto out; - acltype_changed_cb(zsb, pval); - zsb->z_issnap = B_TRUE; - zsb->z_os->os_sync = ZFS_SYNC_DISABLED; - zsb->z_snap_defer_time = jiffies; - - mutex_enter(&zsb->z_os->os_user_ptr_lock); - dmu_objset_set_user(zsb->z_os, zsb); - mutex_exit(&zsb->z_os->os_user_ptr_lock); + acltype_changed_cb(zfsvfs, pval); + zfsvfs->z_issnap = B_TRUE; + zfsvfs->z_os->os_sync = ZFS_SYNC_DISABLED; + zfsvfs->z_snap_defer_time = jiffies; + + mutex_enter(&zfsvfs->z_os->os_user_ptr_lock); + dmu_objset_set_user(zfsvfs->z_os, zfsvfs); + mutex_exit(&zfsvfs->z_os->os_user_ptr_lock); } else { - if ((error = zfs_sb_setup(zsb, B_TRUE))) + if ((error = zfsvfs_setup(zfsvfs, B_TRUE))) goto out; } /* Allocate a root inode for the filesystem. 
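zfs_domount() above publishes the dataset recordsize as the super block's block size and derives s_blocksize_bits with ilog2(). Because recordsize is a power of two, ilog2() is simply the index of its set bit; a short userspace sketch with a stand-in for the kernel macro:

#include <stdio.h>
#include <stdint.h>

/* Index of the highest set bit: 131072 (128K) -> 17, 4096 -> 12. */
static unsigned
my_ilog2(uint64_t x)
{
    unsigned r = 0;

    while (x >>= 1)
        r++;
    return (r);
}

int
main(void)
{
    uint64_t recordsize = 131072;   /* the default 128K recordsize */

    printf("s_blocksize=%llu s_blocksize_bits=%u\n",
        (unsigned long long)recordsize, my_ilog2(recordsize));
    return (0);
}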
*/ - error = zfs_root(zsb, &root_inode); + error = zfs_root(zfsvfs, &root_inode); if (error) { (void) zfs_umount(sb); goto out; @@ -1475,14 +1637,14 @@ zfs_domount(struct super_block *sb, zfs_mntopts_t *zmo, int silent) goto out; } - if (!zsb->z_issnap) - zfsctl_create(zsb); + if (!zfsvfs->z_issnap) + zfsctl_create(zfsvfs); - zsb->z_arc_prune = arc_add_prune_callback(zpl_prune_sb, sb); + zfsvfs->z_arc_prune = arc_add_prune_callback(zpl_prune_sb, sb); out: if (error) { - dmu_objset_disown(zsb->z_os, zsb); - zfs_sb_free(zsb); + dmu_objset_disown(zfsvfs->z_os, zfsvfs); + zfsvfs_free(zfsvfs); /* * make sure we don't have dangling sb->s_fs_info which * zfs_preumount will use. @@ -1492,7 +1654,6 @@ out: return (error); } -EXPORT_SYMBOL(zfs_domount); /* * Called when an unmount is requested and certain sanity checks have @@ -1504,10 +1665,10 @@ EXPORT_SYMBOL(zfs_domount); void zfs_preumount(struct super_block *sb) { - zfs_sb_t *zsb = sb->s_fs_info; + zfsvfs_t *zfsvfs = sb->s_fs_info; - /* zsb is NULL when zfs_domount fails during mount */ - if (zsb) { + /* zfsvfs is NULL when zfs_domount fails during mount */ + if (zfsvfs) { zfsctl_destroy(sb->s_fs_info); /* * Wait for iput_async before entering evict_inodes in @@ -1525,12 +1686,11 @@ zfs_preumount(struct super_block *sb) * empty. */ taskq_wait_outstanding(dsl_pool_iput_taskq( - dmu_objset_pool(zsb->z_os)), 0); + dmu_objset_pool(zfsvfs->z_os)), 0); taskq_wait_outstanding(dsl_pool_iput_taskq( - dmu_objset_pool(zsb->z_os)), 0); + dmu_objset_pool(zfsvfs->z_os)), 0); } } -EXPORT_SYMBOL(zfs_preumount); /* * Called once all other unmount released tear down has occurred. @@ -1540,17 +1700,17 @@ EXPORT_SYMBOL(zfs_preumount); int zfs_umount(struct super_block *sb) { - zfs_sb_t *zsb = sb->s_fs_info; + zfsvfs_t *zfsvfs = sb->s_fs_info; objset_t *os; - arc_remove_prune_callback(zsb->z_arc_prune); - VERIFY(zfs_sb_teardown(zsb, B_TRUE) == 0); - os = zsb->z_os; + arc_remove_prune_callback(zfsvfs->z_arc_prune); + VERIFY(zfsvfs_teardown(zfsvfs, B_TRUE) == 0); + os = zfsvfs->z_os; bdi_destroy(sb->s_bdi); /* * z_os will be NULL if there was an error in - * attempting to reopen zsb. + * attempting to reopen zfsvfs. 
*/ if (os != NULL) { /* @@ -1563,31 +1723,38 @@ zfs_umount(struct super_block *sb) /* * Finally release the objset */ - dmu_objset_disown(os, zsb); + dmu_objset_disown(os, zfsvfs); } - zfs_sb_free(zsb); + zfsvfs_free(zfsvfs); return (0); } -EXPORT_SYMBOL(zfs_umount); int -zfs_remount(struct super_block *sb, int *flags, zfs_mntopts_t *zmo) +zfs_remount(struct super_block *sb, int *flags, zfs_mnt_t *zm) { - zfs_sb_t *zsb = sb->s_fs_info; + zfsvfs_t *zfsvfs = sb->s_fs_info; + vfs_t *vfsp; int error; - zfs_unregister_callbacks(zsb); - error = zfs_register_callbacks(zsb); + error = zfsvfs_parse_options(zm->mnt_data, &vfsp); + if (error) + return (error); + + zfs_unregister_callbacks(zfsvfs); + zfsvfs_vfs_free(zfsvfs->z_vfs); + + vfsp->vfs_data = zfsvfs; + zfsvfs->z_vfs = vfsp; + (void) zfs_register_callbacks(vfsp); return (error); } -EXPORT_SYMBOL(zfs_remount); int zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp) { - zfs_sb_t *zsb = sb->s_fs_info; + zfsvfs_t *zfsvfs = sb->s_fs_info; znode_t *zp; uint64_t object = 0; uint64_t fid_gen = 0; @@ -1638,11 +1805,11 @@ zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp) return (zfsctl_snapdir_vget(sb, objsetid, fid_gen, ipp)); } - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); /* A zero fid_gen means we are in the .zfs control directories */ if (fid_gen == 0 && (object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) { - *ipp = zsb->z_ctldir; + *ipp = zfsvfs->z_ctldir; ASSERT(*ipp != NULL); if (object == ZFSCTL_INO_SNAPDIR) { VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp, @@ -1650,37 +1817,37 @@ zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp) } else { igrab(*ipp); } - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (0); } gen_mask = -1ULL >> (64 - 8 * i); dprintf("getting %llu [%llu mask %llx]\n", object, fid_gen, gen_mask); - if ((err = zfs_zget(zsb, object, &zp))) { - ZFS_EXIT(zsb); + if ((err = zfs_zget(zfsvfs, object, &zp))) { + ZFS_EXIT(zfsvfs); return (err); } /* Don't export xattr stuff */ if (zp->z_pflags & ZFS_XATTR) { iput(ZTOI(zp)); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(ENOENT)); } - (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zsb), &zp_gen, + (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen, sizeof (uint64_t)); zp_gen = zp_gen & gen_mask; if (zp_gen == 0) zp_gen = 1; - if ((fid_gen == 0) && (zsb->z_root == object)) + if ((fid_gen == 0) && (zfsvfs->z_root == object)) fid_gen = zp_gen; if (zp->z_unlinked || zp_gen != fid_gen) { dprintf("znode gen (%llu) != fid gen (%llu)\n", zp_gen, fid_gen); iput(ZTOI(zp)); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(ENOENT)); } @@ -1688,13 +1855,12 @@ zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp) if (*ipp) zfs_inode_update(ITOZ(*ipp)); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (0); } -EXPORT_SYMBOL(zfs_vget); /* - * Block out VFS ops and close zfs_sb_t + * Block out VFS ops and close zfsvfs_t * * Note, if successful, then we return with the 'z_teardown_lock' and * 'z_teardown_inactive_lock' write held. We leave ownership of the underlying @@ -1702,66 +1868,65 @@ EXPORT_SYMBOL(zfs_vget); * a subsequent rollback or recv operation and the resume thereafter. */ int -zfs_suspend_fs(zfs_sb_t *zsb) +zfs_suspend_fs(zfsvfs_t *zfsvfs) { int error; - if ((error = zfs_sb_teardown(zsb, B_FALSE)) != 0) + if ((error = zfsvfs_teardown(zfsvfs, B_FALSE)) != 0) return (error); return (0); } -EXPORT_SYMBOL(zfs_suspend_fs); /* - * Reopen zfs_sb_t and release VFS ops. + * Reopen zfsvfs_t and release VFS ops. 
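zfs_remount() above parses the new option string into a fresh vfs_t before touching the mounted filesystem, so a parse failure returns early and leaves the current options in effect; only after a successful parse are the old options freed and the new ones installed. A sketch of that parse-then-swap idiom follows; the option struct and toy parser are stand-ins for vfs_t and zfsvfs_parse_options().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

struct opts {
    int readonly;
};

/* Toy parser: accepts "ro" or "rw", rejects anything else. */
static int
parse_opts(const char *data, struct opts **opp)
{
    struct opts *op = calloc(1, sizeof (*op));

    if (op == NULL)
        return (ENOMEM);
    if (strcmp(data, "ro") == 0) {
        op->readonly = 1;
    } else if (strcmp(data, "rw") == 0) {
        op->readonly = 0;
    } else {
        free(op);
        return (EINVAL);
    }
    *opp = op;
    return (0);
}

/* Remount: parse first, and only swap state in if parsing succeeded. */
static int
remount(struct opts **current, const char *data)
{
    struct opts *fresh;
    int error = parse_opts(data, &fresh);

    if (error)
        return (error);     /* old options stay in effect */

    free(*current);
    *current = fresh;
    return (0);
}

int
main(void)
{
    struct opts *cur = NULL;
    int error;

    (void) parse_opts("rw", &cur);
    error = remount(&cur, "bogus");
    printf("bogus remount -> %d, readonly still %d\n", error, cur->readonly);
    error = remount(&cur, "ro");
    printf("ro remount -> %d, readonly now %d\n", error, cur->readonly);
    free(cur);
    return (0);
}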
*/ int -zfs_resume_fs(zfs_sb_t *zsb, dsl_dataset_t *ds) +zfs_resume_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds) { int err, err2; znode_t *zp; uint64_t sa_obj = 0; - ASSERT(RRM_WRITE_HELD(&zsb->z_teardown_lock)); - ASSERT(RW_WRITE_HELD(&zsb->z_teardown_inactive_lock)); + ASSERT(RRM_WRITE_HELD(&zfsvfs->z_teardown_lock)); + ASSERT(RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock)); /* * We already own this, so just update the objset_t, as the one we * had before may have been evicted. */ - VERIFY3P(ds->ds_owner, ==, zsb); + VERIFY3P(ds->ds_owner, ==, zfsvfs); VERIFY(dsl_dataset_long_held(ds)); - VERIFY0(dmu_objset_from_ds(ds, &zsb->z_os)); + VERIFY0(dmu_objset_from_ds(ds, &zfsvfs->z_os)); /* * Make sure version hasn't changed */ - err = zfs_get_zplprop(zsb->z_os, ZFS_PROP_VERSION, - &zsb->z_version); + err = zfs_get_zplprop(zfsvfs->z_os, ZFS_PROP_VERSION, + &zfsvfs->z_version); if (err) goto bail; - err = zap_lookup(zsb->z_os, MASTER_NODE_OBJ, + err = zap_lookup(zfsvfs->z_os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1, &sa_obj); - if (err && zsb->z_version >= ZPL_VERSION_SA) + if (err && zfsvfs->z_version >= ZPL_VERSION_SA) goto bail; - if ((err = sa_setup(zsb->z_os, sa_obj, - zfs_attr_table, ZPL_END, &zsb->z_attr_table)) != 0) + if ((err = sa_setup(zfsvfs->z_os, sa_obj, + zfs_attr_table, ZPL_END, &zfsvfs->z_attr_table)) != 0) goto bail; - if (zsb->z_version >= ZPL_VERSION_SA) - sa_register_update_callback(zsb->z_os, + if (zfsvfs->z_version >= ZPL_VERSION_SA) + sa_register_update_callback(zfsvfs->z_os, zfs_sa_upgrade); - VERIFY(zfs_sb_setup(zsb, B_FALSE) == 0); + VERIFY(zfsvfs_setup(zfsvfs, B_FALSE) == 0); - zfs_set_fuid_feature(zsb); - zsb->z_rollback_time = jiffies; + zfs_set_fuid_feature(zfsvfs); + zfsvfs->z_rollback_time = jiffies; /* * Attempt to re-establish all the active inodes with their @@ -1772,54 +1937,53 @@ zfs_resume_fs(zfs_sb_t *zsb, dsl_dataset_t *ds) * VFS prunes the dentry holding the remaining references * on the stale inode. */ - mutex_enter(&zsb->z_znodes_lock); - for (zp = list_head(&zsb->z_all_znodes); zp; - zp = list_next(&zsb->z_all_znodes, zp)) { + mutex_enter(&zfsvfs->z_znodes_lock); + for (zp = list_head(&zfsvfs->z_all_znodes); zp; + zp = list_next(&zfsvfs->z_all_znodes, zp)) { err2 = zfs_rezget(zp); if (err2) { remove_inode_hash(ZTOI(zp)); zp->z_is_stale = B_TRUE; } } - mutex_exit(&zsb->z_znodes_lock); + mutex_exit(&zfsvfs->z_znodes_lock); bail: /* release the VFS ops */ - rw_exit(&zsb->z_teardown_inactive_lock); - rrm_exit(&zsb->z_teardown_lock, FTAG); + rw_exit(&zfsvfs->z_teardown_inactive_lock); + rrm_exit(&zfsvfs->z_teardown_lock, FTAG); if (err) { /* * Since we couldn't setup the sa framework, try to force * unmount this file system. 
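zfs_resume_fs() above re-establishes every cached znode against the new objset with zfs_rezget(); anything whose backing object disappeared in the rollback or receive is unhashed and marked z_is_stale so the VFS can prune it later, and a failure to rebuild the SA layout falls back to forcing an unmount. Below is a sketch of that refresh-or-mark-stale sweep; the refresh predicate and toy object set stand in for zfs_rezget() and the real objset.

#include <stdio.h>

struct cached_inode {
    int object;     /* backing object number */
    int stale;      /* set when the object vanished */
};

/*
 * Stand-in for zfs_rezget(): succeed only if the object still exists
 * in the (toy) post-rollback object set.
 */
static int
rezget(struct cached_inode *ci, const int *objset, int n)
{
    for (int i = 0; i < n; i++) {
        if (objset[i] == ci->object)
            return (0);
    }
    return (-1);
}

int
main(void)
{
    struct cached_inode cache[] = { { 4, 0 }, { 7, 0 }, { 9, 0 } };
    int objset[] = { 4, 9 };    /* object 7 was rolled back away */

    for (int i = 0; i < 3; i++) {
        if (rezget(&cache[i], objset, 2) != 0)
            cache[i].stale = 1; /* keep it cached, but mark it stale */
        printf("object %d stale=%d\n", cache[i].object, cache[i].stale);
    }
    return (0);
}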
*/ - if (zsb->z_os) - (void) zfs_umount(zsb->z_sb); + if (zfsvfs->z_os) + (void) zfs_umount(zfsvfs->z_sb); } return (err); } -EXPORT_SYMBOL(zfs_resume_fs); int -zfs_set_version(zfs_sb_t *zsb, uint64_t newvers) +zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers) { int error; - objset_t *os = zsb->z_os; + objset_t *os = zfsvfs->z_os; dmu_tx_t *tx; if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION) return (SET_ERROR(EINVAL)); - if (newvers < zsb->z_version) + if (newvers < zfsvfs->z_version) return (SET_ERROR(EINVAL)); if (zfs_spa_version_map(newvers) > - spa_version(dmu_objset_spa(zsb->z_os))) + spa_version(dmu_objset_spa(zfsvfs->z_os))) return (SET_ERROR(ENOTSUP)); tx = dmu_tx_create(os); dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR); - if (newvers >= ZPL_VERSION_SA && !zsb->z_use_sa) { + if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) { dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE, ZFS_SA_ATTRS); dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL); @@ -1838,10 +2002,10 @@ zfs_set_version(zfs_sb_t *zsb, uint64_t newvers) return (error); } - if (newvers >= ZPL_VERSION_SA && !zsb->z_use_sa) { + if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) { uint64_t sa_obj; - ASSERT3U(spa_version(dmu_objset_spa(zsb->z_os)), >=, + ASSERT3U(spa_version(dmu_objset_spa(zfsvfs->z_os)), >=, SPA_VERSION_SA); sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE, DMU_OT_NONE, 0, tx); @@ -1855,17 +2019,16 @@ zfs_set_version(zfs_sb_t *zsb, uint64_t newvers) } spa_history_log_internal_ds(dmu_objset_ds(os), "upgrade", tx, - "from %llu to %llu", zsb->z_version, newvers); + "from %llu to %llu", zfsvfs->z_version, newvers); dmu_tx_commit(tx); - zsb->z_version = newvers; + zfsvfs->z_version = newvers; - zfs_set_fuid_feature(zsb); + zfs_set_fuid_feature(zfsvfs); return (0); } -EXPORT_SYMBOL(zfs_set_version); /* * Read a property stored within the master node. @@ -1911,7 +2074,6 @@ zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value) } return (error); } -EXPORT_SYMBOL(zfs_get_zplprop); /* * Return true if the coresponding vfs's unmounted flag is set. @@ -1921,7 +2083,7 @@ EXPORT_SYMBOL(zfs_get_zplprop); boolean_t zfs_get_vfs_flag_unmounted(objset_t *os) { - zfs_sb_t *zfvp; + zfsvfs_t *zfvp; boolean_t unmounted = B_FALSE; ASSERT(dmu_objset_type(os) == DMU_OST_ZFS); @@ -1956,3 +2118,25 @@ zfs_fini(void) zfs_znode_fini(); zfsctl_fini(); } + +#if defined(_KERNEL) && defined(HAVE_SPL) +EXPORT_SYMBOL(zfs_suspend_fs); +EXPORT_SYMBOL(zfs_resume_fs); +EXPORT_SYMBOL(zfs_userspace_one); +EXPORT_SYMBOL(zfs_userspace_many); +EXPORT_SYMBOL(zfs_set_userquota); +EXPORT_SYMBOL(zfs_owner_overquota); +EXPORT_SYMBOL(zfs_fuid_overquota); +EXPORT_SYMBOL(zfs_fuid_overobjquota); +EXPORT_SYMBOL(zfs_set_version); +EXPORT_SYMBOL(zfsvfs_create); +EXPORT_SYMBOL(zfsvfs_free); +EXPORT_SYMBOL(zfs_is_readonly); +EXPORT_SYMBOL(zfs_domount); +EXPORT_SYMBOL(zfs_preumount); +EXPORT_SYMBOL(zfs_umount); +EXPORT_SYMBOL(zfs_remount); +EXPORT_SYMBOL(zfs_statvfs); +EXPORT_SYMBOL(zfs_vget); +EXPORT_SYMBOL(zfs_prune); +#endif diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c index 502e8f4a2..4afae6c36 100644 --- a/module/zfs/zfs_vnops.c +++ b/module/zfs/zfs_vnops.c @@ -91,8 +91,8 @@ * to freed memory. The example below illustrates the following Big Rules: * * (1) A check must be made in each zfs thread for a mounted file system. - * This is done avoiding races using ZFS_ENTER(zsb). - * A ZFS_EXIT(zsb) is needed before all returns. Any znodes + * This is done avoiding races using ZFS_ENTER(zfsvfs). 
+ * A ZFS_EXIT(zfsvfs) is needed before all returns. Any znodes * must be checked with ZFS_VERIFY_ZP(zp). Both of these macros * can return EIO from the calling function. * @@ -127,7 +127,7 @@ * Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open() * forever, because the previous txg can't quiesce until B's tx commits. * - * If dmu_tx_assign() returns ERESTART and zsb->z_assign is TXG_NOWAIT, + * If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT, * then drop all locks, call dmu_tx_wait(), and try again. On subsequent * calls to dmu_tx_assign(), pass TXG_WAITED rather than TXG_NOWAIT, * to indicate that this operation has already called dmu_tx_wait(). @@ -148,7 +148,7 @@ * * In general, this is how things should be ordered in each vnode op: * - * ZFS_ENTER(zsb); // exit if unmounted + * ZFS_ENTER(zfsvfs); // exit if unmounted * top: * zfs_dirent_lock(&dl, ...) // lock directory entry (may igrab()) * rw_enter(...); // grab any other locks you need @@ -166,7 +166,7 @@ * goto top; * } * dmu_tx_abort(tx); // abort DMU tx - * ZFS_EXIT(zsb); // finished in zfs + * ZFS_EXIT(zfsvfs); // finished in zfs * return (error); // really out of space * } * error = do_real_work(); // do whatever this VOP does @@ -177,7 +177,7 @@ * zfs_dirent_unlock(dl); // unlock directory entry * iput(...); // release held vnodes * zil_commit(zilog, foid); // synchronous when necessary - * ZFS_EXIT(zsb); // finished in zfs + * ZFS_EXIT(zfsvfs); // finished in zfs * return (error); // done, report error */ @@ -198,23 +198,23 @@ int zfs_open(struct inode *ip, int mode, int flag, cred_t *cr) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ITOZSB(ip); + zfsvfs_t *zfsvfs = ITOZSB(ip); - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(zp); /* Honor ZFS_APPENDONLY file attribute */ if ((mode & FMODE_WRITE) && (zp->z_pflags & ZFS_APPENDONLY) && ((flag & O_APPEND) == 0)) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EPERM)); } /* Virus scan eligible files on open */ - if (!zfs_has_ctldir(zp) && zsb->z_vscan && S_ISREG(ip->i_mode) && + if (!zfs_has_ctldir(zp) && zfsvfs->z_vscan && S_ISREG(ip->i_mode) && !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) { if (zfs_vscan(ip, cr, 0) != 0) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EACCES)); } } @@ -223,33 +223,31 @@ zfs_open(struct inode *ip, int mode, int flag, cred_t *cr) if (flag & O_SYNC) atomic_inc_32(&zp->z_sync_cnt); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (0); } -EXPORT_SYMBOL(zfs_open); /* ARGSUSED */ int zfs_close(struct inode *ip, int flag, cred_t *cr) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ITOZSB(ip); + zfsvfs_t *zfsvfs = ITOZSB(ip); - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(zp); /* Decrement the synchronous opens in the znode */ if (flag & O_SYNC) atomic_dec_32(&zp->z_sync_cnt); - if (!zfs_has_ctldir(zp) && zsb->z_vscan && S_ISREG(ip->i_mode) && + if (!zfs_has_ctldir(zp) && zfsvfs->z_vscan && S_ISREG(ip->i_mode) && !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) VERIFY(zfs_vscan(ip, cr, 1) == 0); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (0); } -EXPORT_SYMBOL(zfs_close); #if defined(SEEK_HOLE) && defined(SEEK_DATA) /* @@ -302,18 +300,17 @@ int zfs_holey(struct inode *ip, int cmd, loff_t *off) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ITOZSB(ip); + zfsvfs_t *zfsvfs = ITOZSB(ip); int error; - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(zp); error = zfs_holey_common(ip, cmd, off); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } -EXPORT_SYMBOL(zfs_holey); #endif /* 
SEEK_HOLE && SEEK_DATA */ #if defined(_KERNEL) @@ -443,7 +440,7 @@ int zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ITOZSB(ip); + zfsvfs_t *zfsvfs = ITOZSB(ip); ssize_t n, nbytes; int error = 0; rl_t *rl; @@ -451,11 +448,11 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr) xuio_t *xuio = NULL; #endif /* HAVE_UIO_ZEROCOPY */ - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(zp); if (zp->z_pflags & ZFS_AV_QUARANTINED) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EACCES)); } @@ -463,7 +460,7 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr) * Validate file offset */ if (uio->uio_loffset < (offset_t)0) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EINVAL)); } @@ -471,15 +468,15 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr) * Fasttrack empty reads */ if (uio->uio_resid == 0) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (0); } /* * If we're in FRSYNC mode, sync out this znode before reading it. */ - if (ioflag & FRSYNC || zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) - zil_commit(zsb->z_log, zp->z_id); + if (ioflag & FRSYNC || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) + zil_commit(zfsvfs->z_log, zp->z_id); /* * Lock the range against changes. @@ -553,10 +550,9 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr) out: zfs_range_unlock(rl); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } -EXPORT_SYMBOL(zfs_read); /* * Write the bytes to a file. @@ -587,12 +583,12 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr) ssize_t tx_bytes; uint64_t end_size; dmu_tx_t *tx; - zfs_sb_t *zsb = ZTOZSB(zp); + zfsvfs_t *zfsvfs = ZTOZSB(zp); zilog_t *zilog; offset_t woff; ssize_t n, nbytes; rl_t *rl; - int max_blksz = zsb->z_max_blksz; + int max_blksz = zfsvfs->z_max_blksz; int error = 0; arc_buf_t *abuf; const iovec_t *aiov = NULL; @@ -618,21 +614,22 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr) if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T) limit = MAXOFFSET_T; - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(zp); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL, &zp->z_size, 8); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL, + &zp->z_size, 8); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL, &zp->z_pflags, 8); /* * Callers might not be able to detect properly that we are read-only, * so check it explicitly here. */ - if (zfs_is_readonly(zsb)) { - ZFS_EXIT(zsb); + if (zfs_is_readonly(zfsvfs)) { + ZFS_EXIT(zfsvfs); return (SET_ERROR(EROFS)); } @@ -642,18 +639,18 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr) if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) || ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) && (uio->uio_loffset < zp->z_size))) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EPERM)); } - zilog = zsb->z_log; + zilog = zfsvfs->z_log; /* * Validate file offset */ woff = ioflag & FAPPEND ? 
zp->z_size : uio->uio_loffset; if (woff < 0) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EINVAL)); } @@ -700,7 +697,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr) if (woff >= limit) { zfs_range_unlock(rl); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EFBIG)); } @@ -720,8 +717,8 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr) while (n > 0) { abuf = NULL; woff = uio->uio_loffset; - if (zfs_owner_overquota(zsb, zp, B_FALSE) || - zfs_owner_overquota(zsb, zp, B_TRUE)) { + if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) || + zfs_owner_overquota(zfsvfs, zp, B_TRUE)) { if (abuf != NULL) dmu_return_arcbuf(abuf); error = SET_ERROR(EDQUOT); @@ -768,7 +765,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr) /* * Start a transaction. */ - tx = dmu_tx_create(zsb->z_os); + tx = dmu_tx_create(zfsvfs->z_os); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz)); zfs_sa_upgrade_txholds(tx, zp); @@ -828,7 +825,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr) if (tx_bytes < max_blksz && (!write_eof || aiov->iov_base != abuf->b_data)) { ASSERT(xuio); - dmu_write(zsb->z_os, zp->z_id, woff, + dmu_write(zfsvfs->z_os, zp->z_id, woff, // cppcheck-suppress nullPointer aiov->iov_len, aiov->iov_base, tx); dmu_return_arcbuf(abuf); @@ -841,16 +838,17 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr) ASSERT(tx_bytes <= uio->uio_resid); uioskip(uio, tx_bytes); } - - if (tx_bytes && zp->z_is_mapped && !(ioflag & O_DIRECT)) - update_pages(ip, woff, tx_bytes, zsb->z_os, zp->z_id); + if (tx_bytes && zp->z_is_mapped && !(ioflag & O_DIRECT)) { + update_pages(ip, woff, + tx_bytes, zfsvfs->z_os, zp->z_id); + } /* * If we made no progress, we're done. If we made even * partial progress, update the znode and ZIL accordingly. */ if (tx_bytes == 0) { - (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zsb), + (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs), (void *)&zp->z_size, sizeof (uint64_t), tx); dmu_tx_commit(tx); ASSERT(error != 0); @@ -878,7 +876,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr) uint64_t newmode; zp->z_mode &= ~(S_ISUID | S_ISGID); ip->i_mode = newmode = zp->z_mode; - (void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zsb), + (void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs), (void *)&newmode, sizeof (uint64_t), tx); } mutex_exit(&zp->z_acl_lock); @@ -899,8 +897,8 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr) * the file size to the specified eof. Note, there's no * concurrency during replay. */ - if (zsb->z_replay && zsb->z_replay_eof != 0) - zp->z_size = zsb->z_replay_eof; + if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0) + zp->z_size = zfsvfs->z_replay_eof; error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx); @@ -924,19 +922,18 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr) * If we're in replay mode, or we made no progress, return error. * Otherwise, it's at least a partial write, so it's successful. */ - if (zsb->z_replay || uio->uio_resid == start_resid) { - ZFS_EXIT(zsb); + if (zfsvfs->z_replay || uio->uio_resid == start_resid) { + ZFS_EXIT(zfsvfs); return (error); } if (ioflag & (FSYNC | FDSYNC) || - zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) + zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) zil_commit(zilog, zp->z_id); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (0); } -EXPORT_SYMBOL(zfs_write); /* * Drop a reference on the passed inode asynchronously. 
This ensures @@ -991,8 +988,8 @@ static int zil_fault_io = 0; int zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio) { - zfs_sb_t *zsb = arg; - objset_t *os = zsb->z_os; + zfsvfs_t *zfsvfs = arg; + objset_t *os = zfsvfs->z_os; znode_t *zp; uint64_t object = lr->lr_foid; uint64_t offset = lr->lr_offset; @@ -1008,7 +1005,7 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio) /* * Nothing to do if the file has been removed */ - if (zfs_zget(zsb, object, &zp) != 0) + if (zfs_zget(zfsvfs, object, &zp) != 0) return (SET_ERROR(ENOENT)); if (zp->z_unlinked) { /* @@ -1020,7 +1017,7 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio) } zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP); - zgd->zgd_zilog = zsb->z_log; + zgd->zgd_zilog = zfsvfs->z_log; zgd->zgd_private = zp; /* @@ -1116,10 +1113,10 @@ int zfs_access(struct inode *ip, int mode, int flag, cred_t *cr) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ITOZSB(ip); + zfsvfs_t *zfsvfs = ITOZSB(ip); int error; - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(zp); if (flag & V_ACE_MASK) @@ -1127,10 +1124,9 @@ zfs_access(struct inode *ip, int mode, int flag, cred_t *cr) else error = zfs_zaccess_rwx(zp, mode, flag, cr); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } -EXPORT_SYMBOL(zfs_access); /* * Lookup an entry in a directory, or an extended attribute directory. @@ -1156,7 +1152,7 @@ zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags, cred_t *cr, int *direntflags, pathname_t *realpnp) { znode_t *zdp = ITOZ(dip); - zfs_sb_t *zsb = ITOZSB(dip); + zfsvfs_t *zfsvfs = ITOZSB(dip); int error = 0; /* @@ -1208,7 +1204,7 @@ zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags, } } - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(zdp); *ipp = NULL; @@ -1219,12 +1215,12 @@ zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags, * Maybe someday we will. */ if (zdp->z_pflags & ZFS_XATTR) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EINVAL)); } if ((error = zfs_get_xattrdir(zdp, ipp, cr, flags))) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } @@ -1238,12 +1234,12 @@ zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags, *ipp = NULL; } - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } if (!S_ISDIR(dip->i_mode)) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(ENOTDIR)); } @@ -1252,13 +1248,13 @@ zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags, */ if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr))) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } - if (zsb->z_utf8 && u8_validate(nm, strlen(nm), + if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EILSEQ)); } @@ -1266,10 +1262,9 @@ zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags, if ((error == 0) && (*ipp)) zfs_inode_update(ITOZ(*ipp)); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } -EXPORT_SYMBOL(zfs_lookup); /* * Attempt to create a new entry in a directory. 
If the entry @@ -1300,7 +1295,7 @@ zfs_create(struct inode *dip, char *name, vattr_t *vap, int excl, int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp) { znode_t *zp, *dzp = ITOZ(dip); - zfs_sb_t *zsb = ITOZSB(dip); + zfsvfs_t *zfsvfs = ITOZSB(dip); zilog_t *zilog; objset_t *os; zfs_dirlock_t *dl; @@ -1321,28 +1316,28 @@ zfs_create(struct inode *dip, char *name, vattr_t *vap, int excl, gid = crgetgid(cr); uid = crgetuid(cr); - if (zsb->z_use_fuids == B_FALSE && + if (zfsvfs->z_use_fuids == B_FALSE && (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid))) return (SET_ERROR(EINVAL)); if (name == NULL) return (SET_ERROR(EINVAL)); - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(dzp); - os = zsb->z_os; - zilog = zsb->z_log; + os = zfsvfs->z_os; + zilog = zfsvfs->z_log; - if (zsb->z_utf8 && u8_validate(name, strlen(name), + if (zfsvfs->z_utf8 && u8_validate(name, strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EILSEQ)); } if (vap->va_mask & ATTR_XVATTR) { if ((error = secpolicy_xvattr((xvattr_t *)vap, crgetuid(cr), cr, vap->va_mode)) != 0) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } } @@ -1371,7 +1366,7 @@ top: zfs_acl_ids_free(&acl_ids); if (strcmp(name, "..") == 0) error = SET_ERROR(EISDIR); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } } @@ -1406,7 +1401,7 @@ top: goto out; have_acl = B_TRUE; - if (zfs_acl_ids_overquota(zsb, &acl_ids)) { + if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) { zfs_acl_ids_free(&acl_ids); error = SET_ERROR(EDQUOT); goto out; @@ -1417,12 +1412,12 @@ top: dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes + ZFS_SA_BASE_ATTR_SIZE); - fuid_dirtied = zsb->z_fuid_dirty; + fuid_dirtied = zfsvfs->z_fuid_dirty; if (fuid_dirtied) - zfs_fuid_txhold(zsb, tx); + zfs_fuid_txhold(zfsvfs, tx); dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name); dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE); - if (!zsb->z_use_sa && + if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) { dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, acl_ids.z_aclp->z_acl_bytes); @@ -1438,13 +1433,13 @@ top: } zfs_acl_ids_free(&acl_ids); dmu_tx_abort(tx); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids); if (fuid_dirtied) - zfs_fuid_sync(zsb, tx); + zfs_fuid_sync(zfsvfs, tx); (void) zfs_link_create(dl, zp, tx, ZNEW); txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap); @@ -1516,13 +1511,12 @@ out: *ipp = ZTOI(zp); } - if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) + if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) zil_commit(zilog, 0); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } -EXPORT_SYMBOL(zfs_create); /* ARGSUSED */ int @@ -1530,7 +1524,7 @@ zfs_tmpfile(struct inode *dip, vattr_t *vap, int excl, int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp) { znode_t *zp = NULL, *dzp = ITOZ(dip); - zfs_sb_t *zsb = ITOZSB(dip); + zfsvfs_t *zfsvfs = ITOZSB(dip); objset_t *os; dmu_tx_t *tx; int error; @@ -1549,18 +1543,18 @@ zfs_tmpfile(struct inode *dip, vattr_t *vap, int excl, gid = crgetgid(cr); uid = crgetuid(cr); - if (zsb->z_use_fuids == B_FALSE && + if (zfsvfs->z_use_fuids == B_FALSE && (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid))) return (SET_ERROR(EINVAL)); - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(dzp); - os = zsb->z_os; + os = zfsvfs->z_os; if (vap->va_mask & ATTR_XVATTR) { if ((error = secpolicy_xvattr((xvattr_t *)vap, crgetuid(cr), cr, vap->va_mode)) != 0) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } } @@ 
-1583,7 +1577,7 @@ top: goto out; have_acl = B_TRUE; - if (zfs_acl_ids_overquota(zsb, &acl_ids)) { + if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) { zfs_acl_ids_free(&acl_ids); error = SET_ERROR(EDQUOT); goto out; @@ -1593,12 +1587,12 @@ top: dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes + ZFS_SA_BASE_ATTR_SIZE); - dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL); + dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); - fuid_dirtied = zsb->z_fuid_dirty; + fuid_dirtied = zfsvfs->z_fuid_dirty; if (fuid_dirtied) - zfs_fuid_txhold(zsb, tx); - if (!zsb->z_use_sa && + zfs_fuid_txhold(zfsvfs, tx); + if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) { dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, acl_ids.z_aclp->z_acl_bytes); @@ -1613,13 +1607,13 @@ top: } zfs_acl_ids_free(&acl_ids); dmu_tx_abort(tx); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } zfs_mknode(dzp, vap, tx, cr, IS_TMPFILE, &zp, &acl_ids); if (fuid_dirtied) - zfs_fuid_sync(zsb, tx); + zfs_fuid_sync(zfsvfs, tx); /* Add to unlinked set */ zp->z_unlinked = 1; @@ -1637,7 +1631,7 @@ out: *ipp = ZTOI(zp); } - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } @@ -1665,7 +1659,7 @@ zfs_remove(struct inode *dip, char *name, cred_t *cr, int flags) znode_t *zp, *dzp = ITOZ(dip); znode_t *xzp; struct inode *ip; - zfs_sb_t *zsb = ITOZSB(dip); + zfsvfs_t *zfsvfs = ITOZSB(dip); zilog_t *zilog; uint64_t acl_obj, xattr_obj; uint64_t xattr_obj_unlinked = 0; @@ -1685,9 +1679,9 @@ zfs_remove(struct inode *dip, char *name, cred_t *cr, int flags) if (name == NULL) return (SET_ERROR(EINVAL)); - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(dzp); - zilog = zsb->z_log; + zilog = zfsvfs->z_log; if (flags & FIGNORECASE) { zflg |= ZCILOOK; @@ -1705,7 +1699,7 @@ top: NULL, realnmp))) { if (realnmp) pn_free(realnmp); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } @@ -1741,7 +1735,7 @@ top: * allow for either case. */ obj = zp->z_id; - tx = dmu_tx_create(zsb->z_os); + tx = dmu_tx_create(zfsvfs->z_os); dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); zfs_sa_upgrade_txholds(tx, zp); @@ -1754,10 +1748,10 @@ top: } /* are there any extended attributes? */ - error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb), + error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &xattr_obj, sizeof (xattr_obj)); if (error == 0 && xattr_obj) { - error = zfs_zget(zsb, xattr_obj, &xzp); + error = zfs_zget(zfsvfs, xattr_obj, &xzp); ASSERT0(error); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE); dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE); @@ -1769,7 +1763,7 @@ top: mutex_exit(&zp->z_lock); /* charge as an update -- would be nice not to charge at all */ - dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL); + dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); /* * Mark this transaction as typically resulting in a net free of space @@ -1794,7 +1788,7 @@ top: iput(ip); if (xzp) iput(ZTOI(xzp)); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } @@ -1815,7 +1809,7 @@ top: * zfs_sa_upgrade(). 
*/ mutex_enter(&zp->z_lock); - (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb), + (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &xattr_obj_unlinked, sizeof (xattr_obj_unlinked)); delete_now = may_delete_now && !toobig && atomic_read(&ip->i_count) == 1 && !(zp->z_is_mapped) && @@ -1830,7 +1824,7 @@ top: xzp->z_unlinked = 1; clear_nlink(ZTOI(xzp)); links = 0; - error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zsb), + error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs), &links, sizeof (links), tx); ASSERT3U(error, ==, 0); mutex_exit(&xzp->z_lock); @@ -1838,10 +1832,10 @@ top: if (zp->z_is_sa) error = sa_remove(zp->z_sa_hdl, - SA_ZPL_XATTR(zsb), tx); + SA_ZPL_XATTR(zfsvfs), tx); else error = sa_update(zp->z_sa_hdl, - SA_ZPL_XATTR(zsb), &null_xattr, + SA_ZPL_XATTR(zfsvfs), &null_xattr, sizeof (uint64_t), tx); ASSERT0(error); } @@ -1880,13 +1874,12 @@ out: zfs_iput_async(ZTOI(xzp)); } - if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) + if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) zil_commit(zilog, 0); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } -EXPORT_SYMBOL(zfs_remove); /* * Create a new directory and insert it into dip using the name @@ -1913,7 +1906,7 @@ zfs_mkdir(struct inode *dip, char *dirname, vattr_t *vap, struct inode **ipp, cred_t *cr, int flags, vsecattr_t *vsecp) { znode_t *zp, *dzp = ITOZ(dip); - zfs_sb_t *zsb = ITOZSB(dip); + zfsvfs_t *zfsvfs = ITOZSB(dip); zilog_t *zilog; zfs_dirlock_t *dl; uint64_t txtype; @@ -1934,25 +1927,25 @@ zfs_mkdir(struct inode *dip, char *dirname, vattr_t *vap, struct inode **ipp, */ uid = crgetuid(cr); - if (zsb->z_use_fuids == B_FALSE && + if (zfsvfs->z_use_fuids == B_FALSE && (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid))) return (SET_ERROR(EINVAL)); if (dirname == NULL) return (SET_ERROR(EINVAL)); - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(dzp); - zilog = zsb->z_log; + zilog = zfsvfs->z_log; if (dzp->z_pflags & ZFS_XATTR) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EINVAL)); } - if (zsb->z_utf8 && u8_validate(dirname, + if (zfsvfs->z_utf8 && u8_validate(dirname, strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EILSEQ)); } if (flags & FIGNORECASE) @@ -1961,14 +1954,14 @@ zfs_mkdir(struct inode *dip, char *dirname, vattr_t *vap, struct inode **ipp, if (vap->va_mask & ATTR_XVATTR) { if ((error = secpolicy_xvattr((xvattr_t *)vap, crgetuid(cr), cr, vap->va_mode)) != 0) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } } if ((error = zfs_acl_ids_create(dzp, 0, vap, cr, vsecp, &acl_ids)) != 0) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } /* @@ -1984,34 +1977,34 @@ top: if ((error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf, NULL, NULL))) { zfs_acl_ids_free(&acl_ids); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr))) { zfs_acl_ids_free(&acl_ids); zfs_dirent_unlock(dl); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } - if (zfs_acl_ids_overquota(zsb, &acl_ids)) { + if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) { zfs_acl_ids_free(&acl_ids); zfs_dirent_unlock(dl); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EDQUOT)); } /* * Add a new entry to the directory. 
*/ - tx = dmu_tx_create(zsb->z_os); + tx = dmu_tx_create(zfsvfs->z_os); dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname); dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL); - fuid_dirtied = zsb->z_fuid_dirty; + fuid_dirtied = zfsvfs->z_fuid_dirty; if (fuid_dirtied) - zfs_fuid_txhold(zsb, tx); - if (!zsb->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) { + zfs_fuid_txhold(zfsvfs, tx); + if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) { dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, acl_ids.z_aclp->z_acl_bytes); } @@ -2030,7 +2023,7 @@ top: } zfs_acl_ids_free(&acl_ids); dmu_tx_abort(tx); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } @@ -2040,7 +2033,7 @@ top: zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids); if (fuid_dirtied) - zfs_fuid_sync(zsb, tx); + zfs_fuid_sync(zfsvfs, tx); /* * Now put new name in parent dir. @@ -2061,15 +2054,14 @@ top: zfs_dirent_unlock(dl); - if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) + if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) zil_commit(zilog, 0); zfs_inode_update(dzp); zfs_inode_update(zp); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (0); } -EXPORT_SYMBOL(zfs_mkdir); /* * Remove a directory subdir entry. If the current working @@ -2095,7 +2087,7 @@ zfs_rmdir(struct inode *dip, char *name, struct inode *cwd, cred_t *cr, znode_t *dzp = ITOZ(dip); znode_t *zp; struct inode *ip; - zfs_sb_t *zsb = ITOZSB(dip); + zfsvfs_t *zfsvfs = ITOZSB(dip); zilog_t *zilog; zfs_dirlock_t *dl; dmu_tx_t *tx; @@ -2106,9 +2098,9 @@ zfs_rmdir(struct inode *dip, char *name, struct inode *cwd, cred_t *cr, if (name == NULL) return (SET_ERROR(EINVAL)); - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(dzp); - zilog = zsb->z_log; + zilog = zfsvfs->z_log; if (flags & FIGNORECASE) zflg |= ZCILOOK; @@ -2120,7 +2112,7 @@ top: */ if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL))) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } @@ -2152,10 +2144,10 @@ top: */ rw_enter(&zp->z_parent_lock, RW_WRITER); - tx = dmu_tx_create(zsb->z_os); + tx = dmu_tx_create(zfsvfs->z_os); dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); - dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL); + dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); zfs_sa_upgrade_txholds(tx, zp); zfs_sa_upgrade_txholds(tx, dzp); dmu_tx_mark_netfree(tx); @@ -2173,7 +2165,7 @@ top: } dmu_tx_abort(tx); iput(ip); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } @@ -2197,13 +2189,12 @@ out: zfs_inode_update(zp); iput(ip); - if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) + if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) zil_commit(zilog, 0); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } -EXPORT_SYMBOL(zfs_rmdir); /* * Read as many directory entries as will fit into the provided @@ -2230,7 +2221,7 @@ int zfs_readdir(struct inode *ip, struct dir_context *ctx, cred_t *cr) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ITOZSB(ip); + zfsvfs_t *zfsvfs = ITOZSB(ip); objset_t *os; zap_cursor_t zc; zap_attribute_t zap; @@ -2241,10 +2232,10 @@ zfs_readdir(struct inode *ip, struct dir_context *ctx, cred_t *cr) uint64_t parent; uint64_t offset; /* must be unsigned; checks for < 1 */ - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(zp); - if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zsb), + if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs), &parent, sizeof (parent))) != 0) goto out; @@ -2255,7 +2246,7 @@ zfs_readdir(struct inode *ip, struct dir_context *ctx, cred_t *cr) goto out; error = 0; - os = zsb->z_os; + os = zfsvfs->z_os; 
offset = ctx->pos; prefetch = zp->z_zn_prefetch; @@ -2361,11 +2352,10 @@ update: if (error == ENOENT) error = 0; out: - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } -EXPORT_SYMBOL(zfs_readdir); ulong_t zfs_fsync_sync_cnt = 4; @@ -2373,21 +2363,20 @@ int zfs_fsync(struct inode *ip, int syncflag, cred_t *cr) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ITOZSB(ip); + zfsvfs_t *zfsvfs = ITOZSB(ip); (void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt); - if (zsb->z_os->os_sync != ZFS_SYNC_DISABLED) { - ZFS_ENTER(zsb); + if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) { + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(zp); - zil_commit(zsb->z_log, zp->z_id); - ZFS_EXIT(zsb); + zil_commit(zfsvfs->z_log, zp->z_id); + ZFS_EXIT(zfsvfs); } tsd_set(zfs_fsyncer_key, NULL); return (0); } -EXPORT_SYMBOL(zfs_fsync); /* @@ -2409,7 +2398,7 @@ int zfs_getattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ITOZSB(ip); + zfsvfs_t *zfsvfs = ITOZSB(ip); int error = 0; uint64_t links; uint64_t atime[2], mtime[2], ctime[2]; @@ -2419,17 +2408,17 @@ zfs_getattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr) sa_bulk_attr_t bulk[3]; int count = 0; - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(zp); zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL, &atime, 16); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16); if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } @@ -2442,7 +2431,7 @@ zfs_getattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr) (vap->va_uid != crgetuid(cr))) { if ((error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0, skipaclchk, cr))) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } } @@ -2457,7 +2446,7 @@ zfs_getattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr) vap->va_mode = zp->z_mode; vap->va_fsid = ZTOI(zp)->i_sb->s_dev; vap->va_nodeid = zp->z_id; - if ((zp->z_id == zsb->z_root) && zfs_show_ctldir(zp)) + if ((zp->z_id == zfsvfs->z_root) && zfs_show_ctldir(zp)) links = ZTOI(zp)->i_nlink + 1; else links = ZTOI(zp)->i_nlink; @@ -2470,7 +2459,7 @@ zfs_getattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr) * Add in any requested optional attributes and the create time. * Also set the corresponding bits in the returned attribute bitmap. */ - if ((xoap = xva_getxoptattr(xvap)) != NULL && zsb->z_use_fuids) { + if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) { if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) { xoap->xoa_archive = ((zp->z_pflags & ZFS_ARCHIVE) != 0); @@ -2545,7 +2534,7 @@ zfs_getattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr) if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) { uint64_t times[2]; - (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zsb), + (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs), times, sizeof (times)); ZFS_TIME_DECODE(&xoap->xoa_createtime, times); XVA_SET_RTN(xvap, XAT_CREATETIME); @@ -2585,13 +2574,12 @@ zfs_getattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr) /* * Block size hasn't been set; suggest maximal I/O transfers. 
*/ - vap->va_blksize = zsb->z_max_blksz; + vap->va_blksize = zfsvfs->z_max_blksz; } - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (0); } -EXPORT_SYMBOL(zfs_getattr); /* * Get the basic file attributes and place them in the provided kstat @@ -2610,11 +2598,11 @@ int zfs_getattr_fast(struct inode *ip, struct kstat *sp) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ITOZSB(ip); + zfsvfs_t *zfsvfs = ITOZSB(ip); uint32_t blksize; u_longlong_t nblocks; - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(zp); mutex_enter(&zp->z_lock); @@ -2629,7 +2617,7 @@ zfs_getattr_fast(struct inode *ip, struct kstat *sp) /* * Block size hasn't been set; suggest maximal I/O transfers. */ - sp->blksize = zsb->z_max_blksz; + sp->blksize = zfsvfs->z_max_blksz; } mutex_exit(&zp->z_lock); @@ -2638,17 +2626,16 @@ zfs_getattr_fast(struct inode *ip, struct kstat *sp) * Required to prevent NFS client from detecting different inode * numbers of snapshot root dentry before and after snapshot mount. */ - if (zsb->z_issnap) { + if (zfsvfs->z_issnap) { if (ip->i_sb->s_root->d_inode == ip) sp->ino = ZFSCTL_INO_SNAPDIRS - - dmu_objset_id(zsb->z_os); + dmu_objset_id(zfsvfs->z_os); } - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (0); } -EXPORT_SYMBOL(zfs_getattr_fast); /* * Set the file attributes to the values contained in the @@ -2672,7 +2659,7 @@ int zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ITOZSB(ip); + zfsvfs_t *zfsvfs = ITOZSB(ip); zilog_t *zilog; dmu_tx_t *tx; vattr_t oldva; @@ -2699,31 +2686,31 @@ zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr) if (mask == 0) return (0); - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(zp); - zilog = zsb->z_log; + zilog = zfsvfs->z_log; /* * Make sure that if we have ephemeral uid/gid or xvattr specified * that file system is at proper version level */ - if (zsb->z_use_fuids == B_FALSE && + if (zfsvfs->z_use_fuids == B_FALSE && (((mask & ATTR_UID) && IS_EPHEMERAL(vap->va_uid)) || ((mask & ATTR_GID) && IS_EPHEMERAL(vap->va_gid)) || (mask & ATTR_XVATTR))) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EINVAL)); } if (mask & ATTR_SIZE && S_ISDIR(ip->i_mode)) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EISDIR)); } if (mask & ATTR_SIZE && !S_ISREG(ip->i_mode) && !S_ISFIFO(ip->i_mode)) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EINVAL)); } @@ -2775,7 +2762,7 @@ top: aclp = NULL; /* Can this be moved to before the top label? 
*/ - if (zfs_is_readonly(zsb)) { + if (zfs_is_readonly(zfsvfs)) { err = EROFS; goto out3; } @@ -2832,7 +2819,7 @@ top: take_owner = (mask & ATTR_UID) && (vap->va_uid == crgetuid(cr)); take_group = (mask & ATTR_GID) && - zfs_groupmember(zsb, vap->va_gid, cr); + zfs_groupmember(zfsvfs, vap->va_gid, cr); /* * If both ATTR_UID and ATTR_GID are set then take_owner and @@ -2992,7 +2979,7 @@ top: mask = vap->va_mask; if ((mask & (ATTR_UID | ATTR_GID))) { - err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb), + err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &xattr_obj, sizeof (xattr_obj)); if (err == 0 && xattr_obj) { @@ -3001,10 +2988,10 @@ top: goto out2; } if (mask & ATTR_UID) { - new_kuid = zfs_fuid_create(zsb, + new_kuid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp); if (new_kuid != KUID_TO_SUID(ZTOI(zp)->i_uid) && - zfs_fuid_overquota(zsb, B_FALSE, new_kuid)) { + zfs_fuid_overquota(zfsvfs, B_FALSE, new_kuid)) { if (attrzp) iput(ZTOI(attrzp)); err = EDQUOT; @@ -3013,10 +3000,10 @@ top: } if (mask & ATTR_GID) { - new_kgid = zfs_fuid_create(zsb, (uint64_t)vap->va_gid, - cr, ZFS_GROUP, &fuidp); + new_kgid = zfs_fuid_create(zfsvfs, + (uint64_t)vap->va_gid, cr, ZFS_GROUP, &fuidp); if (new_kgid != KGID_TO_SGID(ZTOI(zp)->i_gid) && - zfs_fuid_overquota(zsb, B_TRUE, new_kgid)) { + zfs_fuid_overquota(zfsvfs, B_TRUE, new_kgid)) { if (attrzp) iput(ZTOI(attrzp)); err = EDQUOT; @@ -3024,7 +3011,7 @@ top: } } } - tx = dmu_tx_create(zsb->z_os); + tx = dmu_tx_create(zfsvfs->z_os); if (mask & ATTR_MODE) { uint64_t pmode = zp->z_mode; @@ -3039,7 +3026,7 @@ top: * Are we upgrading ACL from old V0 format * to V1 format? */ - if (zsb->z_version >= ZPL_VERSION_FUID && + if (zfsvfs->z_version >= ZPL_VERSION_FUID && zfs_znode_acl_version(zp) == ZFS_ACL_VERSION_INITIAL) { dmu_tx_hold_free(tx, acl_obj, 0, @@ -3068,9 +3055,9 @@ top: dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE); } - fuid_dirtied = zsb->z_fuid_dirty; + fuid_dirtied = zfsvfs->z_fuid_dirty; if (fuid_dirtied) - zfs_fuid_txhold(zsb, tx); + zfs_fuid_txhold(zfsvfs, tx); zfs_sa_upgrade_txholds(tx, zp); @@ -3092,7 +3079,7 @@ top: mutex_enter(&zp->z_acl_lock); mutex_enter(&zp->z_lock); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL, &zp->z_pflags, sizeof (zp->z_pflags)); if (attrzp) { @@ -3100,7 +3087,7 @@ top: mutex_enter(&attrzp->z_acl_lock); mutex_enter(&attrzp->z_lock); SA_ADD_BULK_ATTR(xattr_bulk, xattr_count, - SA_ZPL_FLAGS(zsb), NULL, &attrzp->z_pflags, + SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags, sizeof (attrzp->z_pflags)); } @@ -3109,11 +3096,11 @@ top: if (mask & ATTR_UID) { ZTOI(zp)->i_uid = SUID_TO_KUID(new_kuid); new_uid = zfs_uid_read(ZTOI(zp)); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL, &new_uid, sizeof (new_uid)); if (attrzp) { SA_ADD_BULK_ATTR(xattr_bulk, xattr_count, - SA_ZPL_UID(zsb), NULL, &new_uid, + SA_ZPL_UID(zfsvfs), NULL, &new_uid, sizeof (new_uid)); ZTOI(attrzp)->i_uid = SUID_TO_KUID(new_uid); } @@ -3122,17 +3109,17 @@ top: if (mask & ATTR_GID) { ZTOI(zp)->i_gid = SGID_TO_KGID(new_kgid); new_gid = zfs_gid_read(ZTOI(zp)); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL, &new_gid, sizeof (new_gid)); if (attrzp) { SA_ADD_BULK_ATTR(xattr_bulk, xattr_count, - SA_ZPL_GID(zsb), NULL, &new_gid, + SA_ZPL_GID(zfsvfs), NULL, &new_gid, sizeof (new_gid)); ZTOI(attrzp)->i_gid = SGID_TO_KGID(new_kgid); } } if (!(mask & 
ATTR_MODE)) { - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &new_mode, sizeof (new_mode)); new_mode = zp->z_mode; } @@ -3145,7 +3132,7 @@ top: } if (mask & ATTR_MODE) { - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &new_mode, sizeof (new_mode)); zp->z_mode = ZTOI(zp)->i_mode = new_mode; ASSERT3P(aclp, !=, NULL); @@ -3160,7 +3147,7 @@ top: if ((mask & ATTR_ATIME) || zp->z_atime_dirty) { zp->z_atime_dirty = 0; ZFS_TIME_ENCODE(&ip->i_atime, atime); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, sizeof (atime)); } @@ -3169,7 +3156,7 @@ top: ZTOI(zp)->i_mtime = timespec_trunc(vap->va_mtime, ZTOI(zp)->i_sb->s_time_gran); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, mtime, sizeof (mtime)); } @@ -3177,13 +3164,13 @@ top: ZFS_TIME_ENCODE(&vap->va_ctime, ctime); ZTOI(zp)->i_ctime = timespec_trunc(vap->va_ctime, ZTOI(zp)->i_sb->s_time_gran); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, ctime, sizeof (ctime)); } if (attrzp && mask) { SA_ADD_BULK_ATTR(xattr_bulk, xattr_count, - SA_ZPL_CTIME(zsb), NULL, &ctime, + SA_ZPL_CTIME(zfsvfs), NULL, &ctime, sizeof (ctime)); } @@ -3225,7 +3212,7 @@ top: } if (fuid_dirtied) - zfs_fuid_sync(zsb, tx); + zfs_fuid_sync(zfsvfs, tx); if (mask != 0) zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp); @@ -3269,17 +3256,16 @@ out: } out2: - if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) + if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) zil_commit(zilog, 0); out3: kmem_free(xattr_bulk, sizeof (sa_bulk_attr_t) * 7); kmem_free(bulk, sizeof (sa_bulk_attr_t) * 7); kmem_free(tmpxvattr, sizeof (xvattr_t)); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (err); } -EXPORT_SYMBOL(zfs_setattr); typedef struct zfs_zlock { krwlock_t *zl_rwlock; /* lock we acquired */ @@ -3401,7 +3387,7 @@ zfs_rename(struct inode *sdip, char *snm, struct inode *tdip, char *tnm, { znode_t *tdzp, *szp, *tzp; znode_t *sdzp = ITOZ(sdip); - zfs_sb_t *zsb = ITOZSB(sdip); + zfsvfs_t *zfsvfs = ITOZSB(sdip); zilog_t *zilog; zfs_dirlock_t *sdl, *tdl; dmu_tx_t *tx; @@ -3414,9 +3400,9 @@ zfs_rename(struct inode *sdip, char *snm, struct inode *tdip, char *tnm, if (snm == NULL || tnm == NULL) return (SET_ERROR(EINVAL)); - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(sdzp); - zilog = zsb->z_log; + zilog = zfsvfs->z_log; tdzp = ITOZ(tdip); ZFS_VERIFY_ZP(tdzp); @@ -3426,13 +3412,13 @@ zfs_rename(struct inode *sdip, char *snm, struct inode *tdip, char *tnm, * super blocks. */ if (tdip->i_sb != sdip->i_sb || zfsctl_is_node(tdip)) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EXDEV)); } - if (zsb->z_utf8 && u8_validate(tnm, + if (zfsvfs->z_utf8 && u8_validate(tnm, strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EILSEQ)); } @@ -3450,7 +3436,7 @@ top: * See the comment in zfs_link() for why this is considered bad. */ if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EINVAL)); } @@ -3469,10 +3455,10 @@ top: * First compare the two name arguments without * considering any case folding. 
*/ - int nofold = (zsb->z_norm & ~U8_TEXTPREP_TOUPPER); + int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER); cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error); - ASSERT(error == 0 || !zsb->z_utf8); + ASSERT(error == 0 || !zfsvfs->z_utf8); if (cmp == 0) { /* * POSIX: "If the old argument and the new argument @@ -3480,7 +3466,7 @@ top: * the rename() function shall return successfully * and perform no other action." */ - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (0); } /* @@ -3501,10 +3487,10 @@ top: * is an exact match, we will allow this to proceed as * a name-change request. */ - if ((zsb->z_case == ZFS_CASE_INSENSITIVE || - (zsb->z_case == ZFS_CASE_MIXED && + if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE || + (zfsvfs->z_case == ZFS_CASE_MIXED && flags & FIGNORECASE)) && - u8_strcmp(snm, tnm, 0, zsb->z_norm, U8_UNICODE_LATEST, + u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST, &error) == 0) { /* * case preserving rename request, require exact @@ -3552,7 +3538,7 @@ top: if (strcmp(snm, "..") == 0) serr = EINVAL; - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (serr); } if (terr) { @@ -3564,7 +3550,7 @@ top: if (strcmp(tnm, "..") == 0) terr = EINVAL; - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (terr); } @@ -3616,7 +3602,7 @@ top: } } - tx = dmu_tx_create(zsb->z_os); + tx = dmu_tx_create(zfsvfs->z_os); dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE); dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE); dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm); @@ -3631,7 +3617,7 @@ top: } zfs_sa_upgrade_txholds(tx, szp); - dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL); + dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT); if (error) { if (zl != NULL) @@ -3655,7 +3641,7 @@ top: iput(ZTOI(szp)); if (tzp) iput(ZTOI(tzp)); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } @@ -3667,7 +3653,7 @@ top: if (error == 0) { szp->z_pflags |= ZFS_AV_MODIFIED; - error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zsb), + error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs), (void *)&szp->z_pflags, sizeof (uint64_t), tx); ASSERT0(error); @@ -3717,13 +3703,12 @@ out: iput(ZTOI(tzp)); } - if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) + if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) zil_commit(zilog, 0); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } -EXPORT_SYMBOL(zfs_rename); /* * Insert the indicated symbolic reference entry into the directory. 
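The hunks above (zfs_create, zfs_mkdir, zfs_rename) all repeat the dmu_tx_assign() retry protocol spelled out in the file's header comment: assign with TXG_NOWAIT on the first pass and, if the DMU reports ERESTART, drop the locks, call dmu_tx_wait(), abort the tx and go back to top, this time passing TXG_WAITED. The standalone C sketch below illustrates only that control flow; the fake_tx_* helpers, the fail_once flag and the TXG_* constants are stand-ins invented for the example and are not the real DMU API.

#include <errno.h>
#include <stdio.h>

#ifndef ERESTART
#define	ERESTART	EAGAIN		/* stand-in value for the sketch */
#endif

#define	TXG_NOWAIT	0		/* invented stand-ins, not the DMU flags */
#define	TXG_WAITED	1

static int fail_once = 1;		/* simulate one full transaction group */

static int
fake_tx_assign(int how)
{
	if (how == TXG_NOWAIT && fail_once) {
		fail_once = 0;
		return (ERESTART);	/* txg is full, caller must wait */
	}
	return (0);
}

static void fake_tx_wait(void)   { /* block until the next txg opens */ }
static void fake_tx_abort(void)  { /* release the unassigned tx */ }
static void fake_tx_commit(void) { /* commit the assigned tx */ }

static int
do_vnode_op(void)
{
	int waited = 0;
	int error;

top:
	/* ... create the tx and take directory/entry locks here ... */
	error = fake_tx_assign(waited ? TXG_WAITED : TXG_NOWAIT);
	if (error != 0) {
		/* ... drop the locks taken above ... */
		if (error == ERESTART) {
			waited = 1;
			fake_tx_wait();
			fake_tx_abort();
			goto top;	/* retry, now with TXG_WAITED */
		}
		fake_tx_abort();
		return (error);		/* really out of space */
	}
	/* ... do the real work under the assigned transaction ... */
	fake_tx_commit();
	return (0);
}

int
main(void)
{
	printf("vnode op returned %d\n", do_vnode_op());
	return (0);
}

Passing TXG_WAITED on the second pass tells the DMU the caller has already slept in dmu_tx_wait(), so the assignment is not bounced a second time for the same reason.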
@@ -3749,7 +3734,7 @@ zfs_symlink(struct inode *dip, char *name, vattr_t *vap, char *link, znode_t *zp, *dzp = ITOZ(dip); zfs_dirlock_t *dl; dmu_tx_t *tx; - zfs_sb_t *zsb = ITOZSB(dip); + zfsvfs_t *zfsvfs = ITOZSB(dip); zilog_t *zilog; uint64_t len = strlen(link); int error; @@ -3764,26 +3749,26 @@ zfs_symlink(struct inode *dip, char *name, vattr_t *vap, char *link, if (name == NULL) return (SET_ERROR(EINVAL)); - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(dzp); - zilog = zsb->z_log; + zilog = zfsvfs->z_log; - if (zsb->z_utf8 && u8_validate(name, strlen(name), + if (zfsvfs->z_utf8 && u8_validate(name, strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EILSEQ)); } if (flags & FIGNORECASE) zflg |= ZCILOOK; if (len > MAXPATHLEN) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(ENAMETOOLONG)); } if ((error = zfs_acl_ids_create(dzp, 0, vap, cr, NULL, &acl_ids)) != 0) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } top: @@ -3795,36 +3780,36 @@ top: error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL); if (error) { zfs_acl_ids_free(&acl_ids); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) { zfs_acl_ids_free(&acl_ids); zfs_dirent_unlock(dl); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } - if (zfs_acl_ids_overquota(zsb, &acl_ids)) { + if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) { zfs_acl_ids_free(&acl_ids); zfs_dirent_unlock(dl); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EDQUOT)); } - tx = dmu_tx_create(zsb->z_os); - fuid_dirtied = zsb->z_fuid_dirty; + tx = dmu_tx_create(zfsvfs->z_os); + fuid_dirtied = zfsvfs->z_fuid_dirty; dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len)); dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name); dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes + ZFS_SA_BASE_ATTR_SIZE + len); dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE); - if (!zsb->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) { + if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) { dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, acl_ids.z_aclp->z_acl_bytes); } if (fuid_dirtied) - zfs_fuid_txhold(zsb, tx); + zfs_fuid_txhold(zfsvfs, tx); error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT); if (error) { zfs_dirent_unlock(dl); @@ -3836,7 +3821,7 @@ top: } zfs_acl_ids_free(&acl_ids); dmu_tx_abort(tx); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } @@ -3847,18 +3832,18 @@ top: zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids); if (fuid_dirtied) - zfs_fuid_sync(zsb, tx); + zfs_fuid_sync(zfsvfs, tx); mutex_enter(&zp->z_lock); if (zp->z_is_sa) - error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zsb), + error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs), link, len, tx); else zfs_sa_symlink(zp, link, len, tx); mutex_exit(&zp->z_lock); zp->z_size = len; - (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zsb), + (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs), &zp->z_size, sizeof (zp->z_size), tx); /* * Insert the new object into the directory. 
@@ -3880,13 +3865,12 @@ top: *ipp = ZTOI(zp); - if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) + if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) zil_commit(zilog, 0); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } -EXPORT_SYMBOL(zfs_symlink); /* * Return, in the buffer contained in the provided uio structure, @@ -3907,24 +3891,23 @@ int zfs_readlink(struct inode *ip, uio_t *uio, cred_t *cr) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ITOZSB(ip); + zfsvfs_t *zfsvfs = ITOZSB(ip); int error; - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(zp); mutex_enter(&zp->z_lock); if (zp->z_is_sa) error = sa_lookup_uio(zp->z_sa_hdl, - SA_ZPL_SYMLINK(zsb), uio); + SA_ZPL_SYMLINK(zfsvfs), uio); else error = zfs_sa_readlink(zp, uio); mutex_exit(&zp->z_lock); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } -EXPORT_SYMBOL(zfs_readlink); /* * Insert a new entry into directory tdip referencing sip. @@ -3948,7 +3931,7 @@ zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr, { znode_t *dzp = ITOZ(tdip); znode_t *tzp, *szp; - zfs_sb_t *zsb = ITOZSB(tdip); + zfsvfs_t *zfsvfs = ITOZSB(tdip); zilog_t *zilog; zfs_dirlock_t *dl; dmu_tx_t *tx; @@ -3967,16 +3950,16 @@ zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr, if (name == NULL) return (SET_ERROR(EINVAL)); - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(dzp); - zilog = zsb->z_log; + zilog = zfsvfs->z_log; /* * POSIX dictates that we return EPERM here. * Better choices include ENOTSUP or EISDIR. */ if (S_ISDIR(sip->i_mode)) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EPERM)); } @@ -3988,25 +3971,25 @@ zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr, * super blocks. */ if (sip->i_sb != tdip->i_sb || zfsctl_is_node(sip)) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EXDEV)); } /* Prevent links to .zfs/shares files */ - if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zsb), + if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs), &parent, sizeof (uint64_t))) != 0) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } - if (parent == zsb->z_shares_dir) { - ZFS_EXIT(zsb); + if (parent == zfsvfs->z_shares_dir) { + ZFS_EXIT(zfsvfs); return (SET_ERROR(EPERM)); } - if (zsb->z_utf8 && u8_validate(name, + if (zfsvfs->z_utf8 && u8_validate(name, strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EILSEQ)); } if (flags & FIGNORECASE) @@ -4019,18 +4002,19 @@ zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr, * imposed in attribute space. 
*/ if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EINVAL)); } - owner = zfs_fuid_map_id(zsb, KUID_TO_SUID(sip->i_uid), cr, ZFS_OWNER); + owner = zfs_fuid_map_id(zfsvfs, KUID_TO_SUID(sip->i_uid), + cr, ZFS_OWNER); if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EPERM)); } if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } @@ -4040,15 +4024,15 @@ top: */ error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL); if (error) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } - tx = dmu_tx_create(zsb->z_os); + tx = dmu_tx_create(zfsvfs->z_os); dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE); dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name); if (is_tmpfile) - dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL); + dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); zfs_sa_upgrade_txholds(tx, szp); zfs_sa_upgrade_txholds(tx, dzp); @@ -4062,7 +4046,7 @@ top: goto top; } dmu_tx_abort(tx); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } /* unmark z_unlinked so zfs_link_create will not reject */ @@ -4080,8 +4064,8 @@ top: * operation are sync safe. */ if (is_tmpfile) { - VERIFY(zap_remove_int(zsb->z_os, zsb->z_unlinkedobj, - szp->z_id, tx) == 0); + VERIFY(zap_remove_int(zfsvfs->z_os, + zfsvfs->z_unlinkedobj, szp->z_id, tx) == 0); } else { if (flags & FIGNORECASE) txtype |= TX_CI; @@ -4096,18 +4080,17 @@ top: zfs_dirent_unlock(dl); - if (!is_tmpfile && zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) + if (!is_tmpfile && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) zil_commit(zilog, 0); if (is_tmpfile) - txg_wait_synced(dmu_objset_pool(zsb->z_os), txg); + txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), txg); zfs_inode_update(dzp); zfs_inode_update(szp); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } -EXPORT_SYMBOL(zfs_link); static void zfs_putpage_commit_cb(void *arg) @@ -4137,7 +4120,7 @@ int zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ITOZSB(ip); + zfsvfs_t *zfsvfs = ITOZSB(ip); loff_t offset; loff_t pgoff; unsigned int pglen; @@ -4150,7 +4133,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc) int cnt = 0; struct address_space *mapping; - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(zp); ASSERT(PageLocked(pp)); @@ -4163,7 +4146,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc) /* Page is beyond end of file */ if (pgoff >= offset) { unlock_page(pp); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (0); } @@ -4177,8 +4160,8 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc) * is to register a page_mkwrite() handler to count the page * against its quota when it is about to be dirtied. 
*/ - if (zfs_owner_overquota(zsb, zp, B_FALSE) || - zfs_owner_overquota(zsb, zp, B_TRUE)) { + if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) || + zfs_owner_overquota(zfsvfs, zp, B_TRUE)) { err = EDQUOT; } #endif @@ -4217,7 +4200,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc) if (unlikely((mapping != pp->mapping) || !PageDirty(pp))) { unlock_page(pp); zfs_range_unlock(rl); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (0); } @@ -4229,7 +4212,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc) if (wbc->sync_mode != WB_SYNC_NONE) wait_on_page_writeback(pp); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (0); } @@ -4237,7 +4220,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc) if (!clear_page_dirty_for_io(pp)) { unlock_page(pp); zfs_range_unlock(rl); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (0); } @@ -4249,7 +4232,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc) set_page_writeback(pp); unlock_page(pp); - tx = dmu_tx_create(zsb->z_os); + tx = dmu_tx_create(zfsvfs->z_os); dmu_tx_hold_write(tx, zp->z_id, pgoff, pglen); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); zfs_sa_upgrade_txholds(tx, zp); @@ -4264,18 +4247,19 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc) ClearPageError(pp); end_page_writeback(pp); zfs_range_unlock(rl); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (err); } va = kmap(pp); ASSERT3U(pglen, <=, PAGE_SIZE); - dmu_write(zsb->z_os, zp->z_id, pgoff, pglen, va, tx); + dmu_write(zfsvfs->z_os, zp->z_id, pgoff, pglen, va, tx); kunmap(pp); - SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zsb), NULL, &mtime, 16); - SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zsb), NULL, &ctime, 16); - SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(zsb), NULL, &zp->z_pflags, 8); + SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16); + SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16); + SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(zfsvfs), NULL, + &zp->z_pflags, 8); /* Preserve the mtime and ctime provided by the inode */ ZFS_TIME_ENCODE(&ip->i_mtime, mtime); @@ -4285,7 +4269,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc) err = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx); - zfs_log_write(zsb->z_log, tx, TX_WRITE, zp, pgoff, pglen, 0, + zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, pgoff, pglen, 0, zfs_putpage_commit_cb, pp); dmu_tx_commit(tx); @@ -4297,10 +4281,10 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc) * writepages() normally handles the entire commit for * performance reasons. 
*/ - zil_commit(zsb->z_log, zp->z_id); + zil_commit(zfsvfs->z_log, zp->z_id); } - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (err); } @@ -4312,17 +4296,17 @@ int zfs_dirty_inode(struct inode *ip, int flags) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ITOZSB(ip); + zfsvfs_t *zfsvfs = ITOZSB(ip); dmu_tx_t *tx; uint64_t mode, atime[2], mtime[2], ctime[2]; sa_bulk_attr_t bulk[4]; int error = 0; int cnt = 0; - if (zfs_is_readonly(zsb) || dmu_objset_is_snapshot(zsb->z_os)) + if (zfs_is_readonly(zfsvfs) || dmu_objset_is_snapshot(zfsvfs->z_os)) return (0); - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(zp); #ifdef I_DIRTY_TIME @@ -4339,7 +4323,7 @@ zfs_dirty_inode(struct inode *ip, int flags) } #endif - tx = dmu_tx_create(zsb->z_os); + tx = dmu_tx_create(zfsvfs->z_os); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); zfs_sa_upgrade_txholds(tx, zp); @@ -4353,10 +4337,10 @@ zfs_dirty_inode(struct inode *ip, int flags) mutex_enter(&zp->z_lock); zp->z_atime_dirty = 0; - SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(zsb), NULL, &mode, 8); - SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(zsb), NULL, &atime, 16); - SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zsb), NULL, &mtime, 16); - SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zsb), NULL, &ctime, 16); + SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8); + SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16); + SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16); + SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16); /* Preserve the mode, mtime and ctime provided by the inode */ ZFS_TIME_ENCODE(&ip->i_atime, atime); @@ -4371,34 +4355,33 @@ zfs_dirty_inode(struct inode *ip, int flags) dmu_tx_commit(tx); out: - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } -EXPORT_SYMBOL(zfs_dirty_inode); /*ARGSUSED*/ void zfs_inactive(struct inode *ip) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ITOZSB(ip); + zfsvfs_t *zfsvfs = ITOZSB(ip); uint64_t atime[2]; int error; int need_unlock = 0; /* Only read lock if we haven't already write locked, e.g. rollback */ - if (!RW_WRITE_HELD(&zsb->z_teardown_inactive_lock)) { + if (!RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock)) { need_unlock = 1; - rw_enter(&zsb->z_teardown_inactive_lock, RW_READER); + rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER); } if (zp->z_sa_hdl == NULL) { if (need_unlock) - rw_exit(&zsb->z_teardown_inactive_lock); + rw_exit(&zfsvfs->z_teardown_inactive_lock); return; } if (zp->z_atime_dirty && zp->z_unlinked == 0) { - dmu_tx_t *tx = dmu_tx_create(zsb->z_os); + dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); zfs_sa_upgrade_txholds(tx, zp); @@ -4408,7 +4391,7 @@ zfs_inactive(struct inode *ip) } else { ZFS_TIME_ENCODE(&ip->i_atime, atime); mutex_enter(&zp->z_lock); - (void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zsb), + (void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs), (void *)&atime, sizeof (atime), tx); zp->z_atime_dirty = 0; mutex_exit(&zp->z_lock); @@ -4418,9 +4401,8 @@ zfs_inactive(struct inode *ip) zfs_zinactive(zp); if (need_unlock) - rw_exit(&zsb->z_teardown_inactive_lock); + rw_exit(&zfsvfs->z_teardown_inactive_lock); } -EXPORT_SYMBOL(zfs_inactive); /* * Bounds-check the seek operation. @@ -4441,7 +4423,6 @@ zfs_seek(struct inode *ip, offset_t ooff, offset_t *noffp) return (0); return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0); } -EXPORT_SYMBOL(zfs_seek); /* * Fill pages with data from the disk. 
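Nearly every function touched in this file follows the same bracketing contract: ZFS_ENTER() on the way in, which can fail with EIO once the filesystem has been torn down, and a matching ZFS_EXIT() on every return path. A minimal userspace sketch of that pairing follows; demo_vfs_t, demo_enter() and demo_exit() are invented names, and a pthread rwlock stands in for whatever teardown synchronization the real macros use.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

typedef struct demo_vfs {
	pthread_rwlock_t dv_teardown_lock;	/* held for read across each op */
	int		 dv_unmounted;		/* set by the unmount path */
} demo_vfs_t;

/* "ZFS_ENTER": fail with EIO if the filesystem is already torn down. */
static int
demo_enter(demo_vfs_t *dv)
{
	pthread_rwlock_rdlock(&dv->dv_teardown_lock);
	if (dv->dv_unmounted) {
		pthread_rwlock_unlock(&dv->dv_teardown_lock);
		return (EIO);
	}
	return (0);
}

/* "ZFS_EXIT": every successful demo_enter() must be paired with this. */
static void
demo_exit(demo_vfs_t *dv)
{
	pthread_rwlock_unlock(&dv->dv_teardown_lock);
}

/* Shape of each operation above: enter, work, exit on all return paths. */
static int
demo_op(demo_vfs_t *dv, int bad_input)
{
	int error;

	if ((error = demo_enter(dv)) != 0)
		return (error);
	if (bad_input) {
		demo_exit(dv);		/* early return still exits */
		return (EINVAL);
	}
	/* ... real work would happen here ... */
	demo_exit(dv);
	return (0);
}

int
main(void)
{
	demo_vfs_t dv;

	pthread_rwlock_init(&dv.dv_teardown_lock, NULL);
	dv.dv_unmounted = 0;
	printf("ok path: %d, bad input path: %d\n",
	    demo_op(&dv, 0), demo_op(&dv, 1));
	pthread_rwlock_destroy(&dv.dv_teardown_lock);
	return (0);
}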
@@ -4450,7 +4431,7 @@ static int zfs_fillpage(struct inode *ip, struct page *pl[], int nr_pages) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ITOZSB(ip); + zfsvfs_t *zfsvfs = ITOZSB(ip); objset_t *os; struct page *cur_pp; u_offset_t io_off, total; @@ -4459,7 +4440,7 @@ zfs_fillpage(struct inode *ip, struct page *pl[], int nr_pages) unsigned page_idx; int err; - os = zsb->z_os; + os = zfsvfs->z_os; io_len = nr_pages << PAGE_SHIFT; i_size = i_size_read(ip); io_off = page_offset(pl[0]); @@ -4507,21 +4488,20 @@ int zfs_getpage(struct inode *ip, struct page *pl[], int nr_pages) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ITOZSB(ip); + zfsvfs_t *zfsvfs = ITOZSB(ip); int err; if (pl == NULL) return (0); - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(zp); err = zfs_fillpage(ip, pl, nr_pages); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (err); } -EXPORT_SYMBOL(zfs_getpage); /* * Check ZFS specific permissions to memory map a section of a file. @@ -4541,32 +4521,31 @@ zfs_map(struct inode *ip, offset_t off, caddr_t *addrp, size_t len, unsigned long vm_flags) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ITOZSB(ip); + zfsvfs_t *zfsvfs = ITOZSB(ip); - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(zp); if ((vm_flags & VM_WRITE) && (zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EPERM)); } if ((vm_flags & (VM_READ | VM_EXEC)) && (zp->z_pflags & ZFS_AV_QUARANTINED)) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EACCES)); } if (off < 0 || len > MAXOFFSET_T - off) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(ENXIO)); } - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (0); } -EXPORT_SYMBOL(zfs_map); /* * convoff - converts the given data (start, whence) to the @@ -4640,15 +4619,15 @@ zfs_space(struct inode *ip, int cmd, flock64_t *bfp, int flag, offset_t offset, cred_t *cr) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ITOZSB(ip); + zfsvfs_t *zfsvfs = ITOZSB(ip); uint64_t off, len; int error; - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(zp); if (cmd != F_FREESP) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EINVAL)); } @@ -4656,18 +4635,18 @@ zfs_space(struct inode *ip, int cmd, flock64_t *bfp, int flag, * Callers might not be able to detect properly that we are read-only, * so check it explicitly here. */ - if (zfs_is_readonly(zsb)) { - ZFS_EXIT(zsb); + if (zfs_is_readonly(zfsvfs)) { + ZFS_EXIT(zfsvfs); return (SET_ERROR(EROFS)); } if ((error = convoff(ip, bfp, 0, offset))) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } if (bfp->l_len < 0) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EINVAL)); } @@ -4678,7 +4657,7 @@ zfs_space(struct inode *ip, int cmd, flock64_t *bfp, int flag, * operates directly on inodes, so we need to check access rights. 
*/ if ((error = zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr))) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } @@ -4687,29 +4666,28 @@ zfs_space(struct inode *ip, int cmd, flock64_t *bfp, int flag, error = zfs_freesp(zp, off, len, flag, TRUE); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } -EXPORT_SYMBOL(zfs_space); /*ARGSUSED*/ int zfs_fid(struct inode *ip, fid_t *fidp) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ITOZSB(ip); + zfsvfs_t *zfsvfs = ITOZSB(ip); uint32_t gen; uint64_t gen64; uint64_t object = zp->z_id; zfid_short_t *zfid; int size, i, error; - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(zp); - if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zsb), + if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &gen64, sizeof (uint64_t))) != 0) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } @@ -4730,51 +4708,48 @@ zfs_fid(struct inode *ip, fid_t *fidp) for (i = 0; i < sizeof (zfid->zf_gen); i++) zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i)); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (0); } -EXPORT_SYMBOL(zfs_fid); /*ARGSUSED*/ int zfs_getsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ITOZSB(ip); + zfsvfs_t *zfsvfs = ITOZSB(ip); int error; boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE; - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(zp); error = zfs_getacl(zp, vsecp, skipaclchk, cr); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } -EXPORT_SYMBOL(zfs_getsecattr); /*ARGSUSED*/ int zfs_setsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ITOZSB(ip); + zfsvfs_t *zfsvfs = ITOZSB(ip); int error; boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE; - zilog_t *zilog = zsb->z_log; + zilog_t *zilog = zfsvfs->z_log; - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(zp); error = zfs_setacl(zp, vsecp, skipaclchk, cr); - if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) + if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) zil_commit(zilog, 0); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } -EXPORT_SYMBOL(zfs_setsecattr); #ifdef HAVE_UIO_ZEROCOPY /* @@ -4792,8 +4767,8 @@ static int zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ITOZSB(ip); - int max_blksz = zsb->z_max_blksz; + zfsvfs_t *zfsvfs = ITOZSB(ip); + int max_blksz = zfsvfs->z_max_blksz; uio_t *uio = &xuio->xu_uio; ssize_t size = uio->uio_resid; offset_t offset = uio->uio_loffset; @@ -4806,7 +4781,7 @@ zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr) if (xuio->xu_type != UIOTYPE_ZEROCOPY) return (SET_ERROR(EINVAL)); - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(zp); switch (ioflag) { case UIO_WRITE: @@ -4816,7 +4791,7 @@ zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr) */ blksz = max_blksz; if (size < blksz || zp->z_blksz != blksz) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EINVAL)); } /* @@ -4881,7 +4856,7 @@ zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr) blksz = zcr_blksz_max; /* avoid potential complexity of dealing with it */ if (blksz > max_blksz) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EINVAL)); } @@ -4890,18 +4865,18 @@ zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr) size = maxsize; if (size < blksz) { - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (SET_ERROR(EINVAL)); } break; default: - ZFS_EXIT(zsb); + 
ZFS_EXIT(zfsvfs); return (SET_ERROR(EINVAL)); } uio->uio_extflg = UIO_XUIO; XUIO_XUZC_RW(xuio) = ioflag; - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (0); } @@ -4933,6 +4908,36 @@ zfs_retzcbuf(struct inode *ip, xuio_t *xuio, cred_t *cr) #endif /* HAVE_UIO_ZEROCOPY */ #if defined(_KERNEL) && defined(HAVE_SPL) +EXPORT_SYMBOL(zfs_open); +EXPORT_SYMBOL(zfs_close); +EXPORT_SYMBOL(zfs_read); +EXPORT_SYMBOL(zfs_write); +EXPORT_SYMBOL(zfs_access); +EXPORT_SYMBOL(zfs_lookup); +EXPORT_SYMBOL(zfs_create); +EXPORT_SYMBOL(zfs_tmpfile); +EXPORT_SYMBOL(zfs_remove); +EXPORT_SYMBOL(zfs_mkdir); +EXPORT_SYMBOL(zfs_rmdir); +EXPORT_SYMBOL(zfs_readdir); +EXPORT_SYMBOL(zfs_fsync); +EXPORT_SYMBOL(zfs_getattr); +EXPORT_SYMBOL(zfs_getattr_fast); +EXPORT_SYMBOL(zfs_setattr); +EXPORT_SYMBOL(zfs_rename); +EXPORT_SYMBOL(zfs_symlink); +EXPORT_SYMBOL(zfs_readlink); +EXPORT_SYMBOL(zfs_link); +EXPORT_SYMBOL(zfs_inactive); +EXPORT_SYMBOL(zfs_space); +EXPORT_SYMBOL(zfs_fid); +EXPORT_SYMBOL(zfs_getsecattr); +EXPORT_SYMBOL(zfs_setsecattr); +EXPORT_SYMBOL(zfs_getpage); +EXPORT_SYMBOL(zfs_putpage); +EXPORT_SYMBOL(zfs_dirty_inode); +EXPORT_SYMBOL(zfs_map); + /* CSTYLED */ module_param(zfs_delete_blocks, ulong, 0644); MODULE_PARM_DESC(zfs_delete_blocks, "Delete files larger than N blocks async"); diff --git a/module/zfs/zfs_znode.c b/module/zfs/zfs_znode.c index 92241d6a5..1597940e6 100644 --- a/module/zfs/zfs_znode.c +++ b/module/zfs/zfs_znode.c @@ -236,44 +236,44 @@ zfs_znode_hold_compare(const void *a, const void *b) } boolean_t -zfs_znode_held(zfs_sb_t *zsb, uint64_t obj) +zfs_znode_held(zfsvfs_t *zfsvfs, uint64_t obj) { znode_hold_t *zh, search; - int i = ZFS_OBJ_HASH(zsb, obj); + int i = ZFS_OBJ_HASH(zfsvfs, obj); boolean_t held; search.zh_obj = obj; - mutex_enter(&zsb->z_hold_locks[i]); - zh = avl_find(&zsb->z_hold_trees[i], &search, NULL); + mutex_enter(&zfsvfs->z_hold_locks[i]); + zh = avl_find(&zfsvfs->z_hold_trees[i], &search, NULL); held = (zh && MUTEX_HELD(&zh->zh_lock)) ? 
B_TRUE : B_FALSE; - mutex_exit(&zsb->z_hold_locks[i]); + mutex_exit(&zfsvfs->z_hold_locks[i]); return (held); } static znode_hold_t * -zfs_znode_hold_enter(zfs_sb_t *zsb, uint64_t obj) +zfs_znode_hold_enter(zfsvfs_t *zfsvfs, uint64_t obj) { znode_hold_t *zh, *zh_new, search; - int i = ZFS_OBJ_HASH(zsb, obj); + int i = ZFS_OBJ_HASH(zfsvfs, obj); boolean_t found = B_FALSE; zh_new = kmem_cache_alloc(znode_hold_cache, KM_SLEEP); zh_new->zh_obj = obj; search.zh_obj = obj; - mutex_enter(&zsb->z_hold_locks[i]); - zh = avl_find(&zsb->z_hold_trees[i], &search, NULL); + mutex_enter(&zfsvfs->z_hold_locks[i]); + zh = avl_find(&zfsvfs->z_hold_trees[i], &search, NULL); if (likely(zh == NULL)) { zh = zh_new; - avl_add(&zsb->z_hold_trees[i], zh); + avl_add(&zfsvfs->z_hold_trees[i], zh); } else { ASSERT3U(zh->zh_obj, ==, obj); found = B_TRUE; } refcount_add(&zh->zh_refcount, NULL); - mutex_exit(&zsb->z_hold_locks[i]); + mutex_exit(&zfsvfs->z_hold_locks[i]); if (found == B_TRUE) kmem_cache_free(znode_hold_cache, zh_new); @@ -286,28 +286,28 @@ zfs_znode_hold_enter(zfs_sb_t *zsb, uint64_t obj) } static void -zfs_znode_hold_exit(zfs_sb_t *zsb, znode_hold_t *zh) +zfs_znode_hold_exit(zfsvfs_t *zfsvfs, znode_hold_t *zh) { - int i = ZFS_OBJ_HASH(zsb, zh->zh_obj); + int i = ZFS_OBJ_HASH(zfsvfs, zh->zh_obj); boolean_t remove = B_FALSE; - ASSERT(zfs_znode_held(zsb, zh->zh_obj)); + ASSERT(zfs_znode_held(zfsvfs, zh->zh_obj)); ASSERT3S(refcount_count(&zh->zh_refcount), >, 0); mutex_exit(&zh->zh_lock); - mutex_enter(&zsb->z_hold_locks[i]); + mutex_enter(&zfsvfs->z_hold_locks[i]); if (refcount_remove(&zh->zh_refcount, NULL) == 0) { - avl_remove(&zsb->z_hold_trees[i], zh); + avl_remove(&zfsvfs->z_hold_trees[i], zh); remove = B_TRUE; } - mutex_exit(&zsb->z_hold_locks[i]); + mutex_exit(&zfsvfs->z_hold_locks[i]); if (remove == B_TRUE) kmem_cache_free(znode_hold_cache, zh); } int -zfs_create_share_dir(zfs_sb_t *zsb, dmu_tx_t *tx) +zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx) { #ifdef HAVE_SMB_SHARE zfs_acl_ids_t acl_ids; @@ -355,17 +355,17 @@ zfs_create_share_dir(zfs_sb_t *zsb, dmu_tx_t *tx) } static void -zfs_znode_sa_init(zfs_sb_t *zsb, znode_t *zp, +zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp, dmu_buf_t *db, dmu_object_type_t obj_type, sa_handle_t *sa_hdl) { - ASSERT(zfs_znode_held(zsb, zp->z_id)); + ASSERT(zfs_znode_held(zfsvfs, zp->z_id)); mutex_enter(&zp->z_lock); ASSERT(zp->z_sa_hdl == NULL); ASSERT(zp->z_acl_cached == NULL); if (sa_hdl == NULL) { - VERIFY(0 == sa_handle_get_from_db(zsb->z_os, db, zp, + VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, zp, SA_HDL_SHARED, &zp->z_sa_hdl)); } else { zp->z_sa_hdl = sa_hdl; @@ -408,14 +408,14 @@ void zfs_inode_destroy(struct inode *ip) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ZTOZSB(zp); + zfsvfs_t *zfsvfs = ZTOZSB(zp); - mutex_enter(&zsb->z_znodes_lock); + mutex_enter(&zfsvfs->z_znodes_lock); if (list_link_active(&zp->z_link_node)) { - list_remove(&zsb->z_all_znodes, zp); - zsb->z_nr_znodes--; + list_remove(&zfsvfs->z_all_znodes, zp); + zfsvfs->z_nr_znodes--; } - mutex_exit(&zsb->z_znodes_lock); + mutex_exit(&zfsvfs->z_znodes_lock); if (zp->z_acl_cached) { zfs_acl_free(zp->z_acl_cached); @@ -431,7 +431,7 @@ zfs_inode_destroy(struct inode *ip) } static void -zfs_inode_set_ops(zfs_sb_t *zsb, struct inode *ip) +zfs_inode_set_ops(zfsvfs_t *zfsvfs, struct inode *ip) { uint64_t rdev = 0; @@ -457,7 +457,7 @@ zfs_inode_set_ops(zfs_sb_t *zsb, struct inode *ip) */ case S_IFCHR: case S_IFBLK: - (void) sa_lookup(ITOZ(ip)->z_sa_hdl, SA_ZPL_RDEV(zsb), &rdev, + (void) 
sa_lookup(ITOZ(ip)->z_sa_hdl, SA_ZPL_RDEV(zfsvfs), &rdev, sizeof (rdev)); /*FALLTHROUGH*/ case S_IFIFO: @@ -517,13 +517,13 @@ zfs_set_inode_flags(znode_t *zp, struct inode *ip) void zfs_inode_update(znode_t *zp) { - zfs_sb_t *zsb; + zfsvfs_t *zfsvfs; struct inode *ip; uint32_t blksize; u_longlong_t i_blocks; ASSERT(zp != NULL); - zsb = ZTOZSB(zp); + zfsvfs = ZTOZSB(zp); ip = ZTOI(zp); /* Skip .zfs control nodes which do not exist on disk. */ @@ -547,7 +547,7 @@ zfs_inode_update(znode_t *zp) * return the znode */ static znode_t * -zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz, +zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz, dmu_object_type_t obj_type, uint64_t obj, sa_handle_t *hdl) { znode_t *zp; @@ -561,9 +561,9 @@ zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz, sa_bulk_attr_t bulk[11]; int count = 0; - ASSERT(zsb != NULL); + ASSERT(zfsvfs != NULL); - ip = new_inode(zsb->z_sb); + ip = new_inode(zfsvfs->z_sb); if (ip == NULL) return (NULL); @@ -587,21 +587,22 @@ zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz, zp->z_range_lock.zr_blksz = &zp->z_blksz; zp->z_range_lock.zr_max_blksz = &ZTOZSB(zp)->z_max_blksz; - zfs_znode_sa_init(zsb, zp, db, obj_type, hdl); + zfs_znode_sa_init(zfsvfs, zp, db, obj_type, hdl); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL, &mode, 8); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL, &tmp_gen, 8); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL, &zp->z_size, 8); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL, &links, 8); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL, &tmp_gen, 8); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL, + &zp->z_size, 8); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL, &zp->z_pflags, 8); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL, &z_uid, 8); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL, &z_gid, 8); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL, &atime, 16); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL, &z_uid, 8); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL, &z_gid, 8); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16); if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || tmp_gen == 0) { if (hdl == NULL) @@ -628,7 +629,7 @@ zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz, ip->i_ino = obj; zfs_inode_update(zp); - zfs_inode_set_ops(zsb, ip); + zfs_inode_set_ops(zfsvfs, ip); /* * The only way insert_inode_locked() can fail is if the ip->i_ino @@ -640,11 +641,11 @@ zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz, */ VERIFY3S(insert_inode_locked(ip), ==, 0); - mutex_enter(&zsb->z_znodes_lock); - list_insert_tail(&zsb->z_all_znodes, zp); - zsb->z_nr_znodes++; + mutex_enter(&zfsvfs->z_znodes_lock); + list_insert_tail(&zfsvfs->z_all_znodes, zp); + zfsvfs->z_nr_znodes++; membar_producer(); - mutex_exit(&zsb->z_znodes_lock); + 
mutex_exit(&zfsvfs->z_znodes_lock); unlock_new_inode(ip); return (zp); @@ -661,9 +662,9 @@ error: void zfs_mark_inode_dirty(struct inode *ip) { - zfs_sb_t *zsb = ITOZSB(ip); + zfsvfs_t *zfsvfs = ITOZSB(ip); - if (zfs_is_readonly(zsb) || dmu_objset_is_snapshot(zsb->z_os)) + if (zfs_is_readonly(zfsvfs) || dmu_objset_is_snapshot(zfsvfs->z_os)) return; mark_inode_dirty(ip); @@ -697,7 +698,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr, uint64_t mode, size, links, parent, pflags; uint64_t dzp_pflags = 0; uint64_t rdev = 0; - zfs_sb_t *zsb = ZTOZSB(dzp); + zfsvfs_t *zfsvfs = ZTOZSB(dzp); dmu_buf_t *db; timestruc_t now; uint64_t gen, obj; @@ -710,7 +711,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr, zfs_acl_locator_cb_t locate = { 0 }; znode_hold_t *zh; - if (zsb->z_replay) { + if (zfsvfs->z_replay) { obj = vap->va_nodeid; now = vap->va_ctime; /* see zfs_replay_create() */ gen = vap->va_nblocks; /* ditto */ @@ -719,13 +720,13 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr, obj = 0; gethrestime(&now); gen = dmu_tx_get_txg(tx); - dnodesize = dmu_objset_dnodesize(zsb->z_os); + dnodesize = dmu_objset_dnodesize(zfsvfs->z_os); } if (dnodesize == 0) dnodesize = DNODE_MIN_SIZE; - obj_type = zsb->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE; + obj_type = zfsvfs->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE; bonuslen = (obj_type == DMU_OT_SA) ? DN_BONUS_SIZE(dnodesize) : ZFS_OLD_ZNODE_PHYS_SIZE; @@ -740,29 +741,29 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr, * assertions below. */ if (S_ISDIR(vap->va_mode)) { - if (zsb->z_replay) { - VERIFY0(zap_create_claim_norm_dnsize(zsb->z_os, obj, - zsb->z_norm, DMU_OT_DIRECTORY_CONTENTS, + if (zfsvfs->z_replay) { + VERIFY0(zap_create_claim_norm_dnsize(zfsvfs->z_os, obj, + zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS, obj_type, bonuslen, dnodesize, tx)); } else { - obj = zap_create_norm_dnsize(zsb->z_os, - zsb->z_norm, DMU_OT_DIRECTORY_CONTENTS, + obj = zap_create_norm_dnsize(zfsvfs->z_os, + zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS, obj_type, bonuslen, dnodesize, tx); } } else { - if (zsb->z_replay) { - VERIFY0(dmu_object_claim_dnsize(zsb->z_os, obj, + if (zfsvfs->z_replay) { + VERIFY0(dmu_object_claim_dnsize(zfsvfs->z_os, obj, DMU_OT_PLAIN_FILE_CONTENTS, 0, obj_type, bonuslen, dnodesize, tx)); } else { - obj = dmu_object_alloc_dnsize(zsb->z_os, + obj = dmu_object_alloc_dnsize(zfsvfs->z_os, DMU_OT_PLAIN_FILE_CONTENTS, 0, obj_type, bonuslen, dnodesize, tx); } } - zh = zfs_znode_hold_enter(zsb, obj); - VERIFY(0 == sa_buf_hold(zsb->z_os, obj, NULL, &db)); + zh = zfs_znode_hold_enter(zfsvfs, obj); + VERIFY(0 == sa_buf_hold(zfsvfs->z_os, obj, NULL, &db)); /* * If this is the root, fix up the half-initialized parent pointer @@ -781,7 +782,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr, flag |= IS_XATTR; } - if (zsb->z_use_fuids) + if (zfsvfs->z_use_fuids) pflags = ZFS_ARCHIVE | ZFS_AV_MODIFIED; else pflags = 0; @@ -825,7 +826,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr, } /* Now add in all of the "SA" attributes */ - VERIFY(0 == sa_handle_get_from_db(zsb->z_os, db, NULL, SA_HDL_SHARED, + VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED, &sa_hdl)); /* @@ -837,74 +838,74 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr, sa_attrs = kmem_alloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP); if (obj_type == DMU_OT_ZNODE) { - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zsb), + SA_ADD_BULK_ATTR(sa_attrs, cnt, 
SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16); - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zsb), + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16); - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zsb), + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16); - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zsb), + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs), NULL, &crtime, 16); - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zsb), + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs), NULL, &gen, 8); - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zsb), + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8); - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zsb), + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs), NULL, &size, 8); - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zsb), + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8); } else { - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zsb), + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8); - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zsb), + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs), NULL, &size, 8); - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zsb), + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs), NULL, &gen, 8); - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zsb), + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL, &acl_ids->z_fuid, 8); - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zsb), + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL, &acl_ids->z_fgid, 8); - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zsb), + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8); - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zsb), + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs), NULL, &pflags, 8); - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zsb), + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16); - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zsb), + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16); - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zsb), + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16); - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zsb), + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs), NULL, &crtime, 16); } - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zsb), NULL, &links, 8); + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8); if (obj_type == DMU_OT_ZNODE) { - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zsb), NULL, + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zfsvfs), NULL, &empty_xattr, 8); } if (obj_type == DMU_OT_ZNODE || (S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))) { - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zsb), + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zfsvfs), NULL, &rdev, 8); } if (obj_type == DMU_OT_ZNODE) { - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zsb), + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs), NULL, &pflags, 8); - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zsb), NULL, + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL, &acl_ids->z_fuid, 8); - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zsb), NULL, + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL, &acl_ids->z_fgid, 8); - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zsb), NULL, pad, + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zfsvfs), NULL, pad, sizeof (uint64_t) * 4); - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zsb), NULL, + SA_ADD_BULK_ATTR(sa_attrs, 
cnt, SA_ZPL_ZNODE_ACL(zfsvfs), NULL, &acl_phys, sizeof (zfs_acl_phys_t)); } else if (acl_ids->z_aclp->z_version >= ZFS_ACL_VERSION_FUID) { - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zsb), NULL, + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zfsvfs), NULL, &acl_ids->z_aclp->z_acl_count, 8); locate.cb_aclp = acl_ids->z_aclp; - SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zsb), + SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zfsvfs), zfs_acl_data_locator, &locate, acl_ids->z_aclp->z_acl_bytes); mode = zfs_mode_compute(mode, acl_ids->z_aclp, &pflags, @@ -914,7 +915,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr, VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0); if (!(flag & IS_ROOT_NODE)) { - *zpp = zfs_znode_alloc(zsb, db, 0, obj_type, obj, sa_hdl); + *zpp = zfs_znode_alloc(zfsvfs, db, 0, obj_type, obj, sa_hdl); VERIFY(*zpp != NULL); VERIFY(dzp != NULL); } else { @@ -936,7 +937,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr, VERIFY0(zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx)); } kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * ZPL_END); - zfs_znode_hold_exit(zsb, zh); + zfs_znode_hold_exit(zfsvfs, zh); } /* @@ -1043,7 +1044,7 @@ zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx) } int -zfs_zget(zfs_sb_t *zsb, uint64_t obj_num, znode_t **zpp) +zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp) { dmu_object_info_t doi; dmu_buf_t *db; @@ -1055,11 +1056,11 @@ zfs_zget(zfs_sb_t *zsb, uint64_t obj_num, znode_t **zpp) *zpp = NULL; again: - zh = zfs_znode_hold_enter(zsb, obj_num); + zh = zfs_znode_hold_enter(zfsvfs, obj_num); - err = sa_buf_hold(zsb->z_os, obj_num, NULL, &db); + err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db); if (err) { - zfs_znode_hold_exit(zsb, zh); + zfs_znode_hold_exit(zfsvfs, zh); return (err); } @@ -1069,7 +1070,7 @@ again: (doi.doi_bonus_type == DMU_OT_ZNODE && doi.doi_bonus_size < sizeof (znode_phys_t)))) { sa_buf_rele(db, NULL); - zfs_znode_hold_exit(zsb, zh); + zfs_znode_hold_exit(zfsvfs, zh); return (SET_ERROR(EINVAL)); } @@ -1105,7 +1106,7 @@ again: if (igrab(ZTOI(zp)) == NULL) { mutex_exit(&zp->z_lock); sa_buf_rele(db, NULL); - zfs_znode_hold_exit(zsb, zh); + zfs_znode_hold_exit(zfsvfs, zh); /* inode might need this to finish evict */ cond_resched(); goto again; @@ -1114,7 +1115,7 @@ again: err = 0; mutex_exit(&zp->z_lock); sa_buf_rele(db, NULL); - zfs_znode_hold_exit(zsb, zh); + zfs_znode_hold_exit(zfsvfs, zh); return (err); } @@ -1128,21 +1129,21 @@ again: * if zfs_znode_alloc() fails it will drop the hold on the * bonus buffer. 
*/ - zp = zfs_znode_alloc(zsb, db, doi.doi_data_block_size, + zp = zfs_znode_alloc(zfsvfs, db, doi.doi_data_block_size, doi.doi_bonus_type, obj_num, NULL); if (zp == NULL) { err = SET_ERROR(ENOENT); } else { *zpp = zp; } - zfs_znode_hold_exit(zsb, zh); + zfs_znode_hold_exit(zfsvfs, zh); return (err); } int zfs_rezget(znode_t *zp) { - zfs_sb_t *zsb = ZTOZSB(zp); + zfsvfs_t *zfsvfs = ZTOZSB(zp); dmu_object_info_t doi; dmu_buf_t *db; uint64_t obj_num = zp->z_id; @@ -1166,7 +1167,7 @@ zfs_rezget(znode_t *zp) if (zp->z_is_ctldir) return (0); - zh = zfs_znode_hold_enter(zsb, obj_num); + zh = zfs_znode_hold_enter(zfsvfs, obj_num); mutex_enter(&zp->z_acl_lock); if (zp->z_acl_cached) { @@ -1183,9 +1184,9 @@ zfs_rezget(znode_t *zp) rw_exit(&zp->z_xattr_lock); ASSERT(zp->z_sa_hdl == NULL); - err = sa_buf_hold(zsb->z_os, obj_num, NULL, &db); + err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db); if (err) { - zfs_znode_hold_exit(zsb, zh); + zfs_znode_hold_exit(zfsvfs, zh); return (err); } @@ -1195,37 +1196,37 @@ zfs_rezget(znode_t *zp) (doi.doi_bonus_type == DMU_OT_ZNODE && doi.doi_bonus_size < sizeof (znode_phys_t)))) { sa_buf_rele(db, NULL); - zfs_znode_hold_exit(zsb, zh); + zfs_znode_hold_exit(zfsvfs, zh); return (SET_ERROR(EINVAL)); } - zfs_znode_sa_init(zsb, zp, db, doi.doi_bonus_type, NULL); + zfs_znode_sa_init(zfsvfs, zp, db, doi.doi_bonus_type, NULL); /* reload cached values */ - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL, &gen, sizeof (gen)); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL, &zp->z_size, sizeof (zp->z_size)); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL, &links, sizeof (links)); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL, &zp->z_pflags, sizeof (zp->z_pflags)); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL, &z_uid, sizeof (z_uid)); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL, &z_gid, sizeof (z_gid)); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, sizeof (mode)); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16); if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) { zfs_znode_dmu_fini(zp); - zfs_znode_hold_exit(zsb, zh); + zfs_znode_hold_exit(zfsvfs, zh); return (SET_ERROR(EIO)); } @@ -1239,7 +1240,7 @@ zfs_rezget(znode_t *zp) if (gen != ZTOI(zp)->i_generation) { zfs_znode_dmu_fini(zp); - zfs_znode_hold_exit(zsb, zh); + zfs_znode_hold_exit(zfsvfs, zh); return (SET_ERROR(EIO)); } @@ -1251,7 +1252,7 @@ zfs_rezget(znode_t *zp) zp->z_atime_dirty = 0; zfs_inode_update(zp); - zfs_znode_hold_exit(zsb, zh); + zfs_znode_hold_exit(zfsvfs, zh); return (0); } @@ -1259,26 +1260,26 @@ zfs_rezget(znode_t *zp) void zfs_znode_delete(znode_t *zp, dmu_tx_t *tx) { - zfs_sb_t *zsb = ZTOZSB(zp); - objset_t *os = zsb->z_os; + zfsvfs_t *zfsvfs = ZTOZSB(zp); + objset_t *os = zfsvfs->z_os; uint64_t 
obj = zp->z_id; uint64_t acl_obj = zfs_external_acl(zp); znode_hold_t *zh; - zh = zfs_znode_hold_enter(zsb, obj); + zh = zfs_znode_hold_enter(zfsvfs, obj); if (acl_obj) { VERIFY(!zp->z_is_sa); VERIFY(0 == dmu_object_free(os, acl_obj, tx)); } VERIFY(0 == dmu_object_free(os, obj, tx)); zfs_znode_dmu_fini(zp); - zfs_znode_hold_exit(zsb, zh); + zfs_znode_hold_exit(zfsvfs, zh); } void zfs_zinactive(znode_t *zp) { - zfs_sb_t *zsb = ZTOZSB(zp); + zfsvfs_t *zfsvfs = ZTOZSB(zp); uint64_t z_id = zp->z_id; znode_hold_t *zh; @@ -1287,7 +1288,7 @@ zfs_zinactive(znode_t *zp) /* * Don't allow a zfs_zget() while were trying to release this znode. */ - zh = zfs_znode_hold_enter(zsb, z_id); + zh = zfs_znode_hold_enter(zfsvfs, z_id); mutex_enter(&zp->z_lock); @@ -1297,7 +1298,7 @@ zfs_zinactive(znode_t *zp) */ if (zp->z_unlinked) { mutex_exit(&zp->z_lock); - zfs_znode_hold_exit(zsb, zh); + zfs_znode_hold_exit(zfsvfs, zh); zfs_rmnode(zp); return; } @@ -1305,7 +1306,7 @@ zfs_zinactive(znode_t *zp) mutex_exit(&zp->z_lock); zfs_znode_dmu_fini(zp); - zfs_znode_hold_exit(zsb, zh); + zfs_znode_hold_exit(zfsvfs, zh); } static inline int @@ -1407,7 +1408,7 @@ zfs_grow_blocksize(znode_t *zp, uint64_t size, dmu_tx_t *tx) static int zfs_extend(znode_t *zp, uint64_t end) { - zfs_sb_t *zsb = ZTOZSB(zp); + zfsvfs_t *zfsvfs = ZTOZSB(zp); dmu_tx_t *tx; rl_t *rl; uint64_t newblksz; @@ -1425,11 +1426,11 @@ zfs_extend(znode_t *zp, uint64_t end) zfs_range_unlock(rl); return (0); } - tx = dmu_tx_create(zsb->z_os); + tx = dmu_tx_create(zfsvfs->z_os); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); zfs_sa_upgrade_txholds(tx, zp); if (end > zp->z_blksz && - (!ISP2(zp->z_blksz) || zp->z_blksz < zsb->z_max_blksz)) { + (!ISP2(zp->z_blksz) || zp->z_blksz < zfsvfs->z_max_blksz)) { /* * We are growing the file past the current block size. */ @@ -1526,7 +1527,7 @@ zfs_zero_partial_page(znode_t *zp, uint64_t start, uint64_t len) static int zfs_free_range(znode_t *zp, uint64_t off, uint64_t len) { - zfs_sb_t *zsb = ZTOZSB(zp); + zfsvfs_t *zfsvfs = ZTOZSB(zp); rl_t *rl; int error; @@ -1546,7 +1547,7 @@ zfs_free_range(znode_t *zp, uint64_t off, uint64_t len) if (off + len > zp->z_size) len = zp->z_size - off; - error = dmu_free_long_range(zsb->z_os, zp->z_id, off, len); + error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, off, len); /* * Zero partial page cache entries. 
This must be done under a @@ -1605,7 +1606,7 @@ zfs_free_range(znode_t *zp, uint64_t off, uint64_t len) static int zfs_trunc(znode_t *zp, uint64_t end) { - zfs_sb_t *zsb = ZTOZSB(zp); + zfsvfs_t *zfsvfs = ZTOZSB(zp); dmu_tx_t *tx; rl_t *rl; int error; @@ -1625,12 +1626,12 @@ zfs_trunc(znode_t *zp, uint64_t end) return (0); } - error = dmu_free_long_range(zsb->z_os, zp->z_id, end, -1); + error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, end, -1); if (error) { zfs_range_unlock(rl); return (error); } - tx = dmu_tx_create(zsb->z_os); + tx = dmu_tx_create(zfsvfs->z_os); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); zfs_sa_upgrade_txholds(tx, zp); dmu_tx_mark_netfree(tx); @@ -1642,12 +1643,12 @@ zfs_trunc(znode_t *zp, uint64_t end) } zp->z_size = end; - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL, &zp->z_size, sizeof (zp->z_size)); if (end == 0) { zp->z_pflags &= ~ZFS_SPARSE; - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL, &zp->z_pflags, 8); } VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0); @@ -1674,15 +1675,15 @@ int zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log) { dmu_tx_t *tx; - zfs_sb_t *zsb = ZTOZSB(zp); - zilog_t *zilog = zsb->z_log; + zfsvfs_t *zfsvfs = ZTOZSB(zp); + zilog_t *zilog = zfsvfs->z_log; uint64_t mode; uint64_t mtime[2], ctime[2]; sa_bulk_attr_t bulk[3]; int count = 0; int error; - if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zsb), &mode, + if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs), &mode, sizeof (mode))) != 0) return (error); @@ -1703,7 +1704,7 @@ zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log) if (error || !log) goto out; log: - tx = dmu_tx_create(zsb->z_os); + tx = dmu_tx_create(zfsvfs->z_os); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); zfs_sa_upgrade_txholds(tx, zp); error = dmu_tx_assign(tx, TXG_WAIT); @@ -1712,9 +1713,9 @@ log: goto out; } - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, mtime, 16); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, ctime, 16); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, mtime, 16); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, ctime, 16); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL, &zp->z_pflags, 8); zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime); error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx); @@ -1742,7 +1743,7 @@ void zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx) { struct super_block *sb; - zfs_sb_t *zsb; + zfsvfs_t *zfsvfs; uint64_t moid, obj, sa_obj, version; uint64_t sense = ZFS_CASE_SENSITIVE; uint64_t norm = 0; @@ -1824,7 +1825,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx) ASSERT(error == 0); /* - * Create root znode. Create minimal znode/inode/zsb/sb + * Create root znode. Create minimal znode/inode/zfsvfs/sb * to allow zfs_mknode to work. 
*/ vattr.va_mask = ATTR_MODE|ATTR_UID|ATTR_GID; @@ -1838,21 +1839,21 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx) rootzp->z_atime_dirty = 0; rootzp->z_is_sa = USE_SA(version, os); - zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_SLEEP); - zsb->z_os = os; - zsb->z_parent = zsb; - zsb->z_version = version; - zsb->z_use_fuids = USE_FUIDS(version, os); - zsb->z_use_sa = USE_SA(version, os); - zsb->z_norm = norm; + zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP); + zfsvfs->z_os = os; + zfsvfs->z_parent = zfsvfs; + zfsvfs->z_version = version; + zfsvfs->z_use_fuids = USE_FUIDS(version, os); + zfsvfs->z_use_sa = USE_SA(version, os); + zfsvfs->z_norm = norm; sb = kmem_zalloc(sizeof (struct super_block), KM_SLEEP); - sb->s_fs_info = zsb; + sb->s_fs_info = zfsvfs; ZTOI(rootzp)->i_sb = sb; error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END, - &zsb->z_attr_table); + &zfsvfs->z_attr_table); ASSERT(error == 0); @@ -1861,20 +1862,21 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx) * insensitive. */ if (sense == ZFS_CASE_INSENSITIVE || sense == ZFS_CASE_MIXED) - zsb->z_norm |= U8_TEXTPREP_TOUPPER; + zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER; - mutex_init(&zsb->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL); - list_create(&zsb->z_all_znodes, sizeof (znode_t), + mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL); + list_create(&zfsvfs->z_all_znodes, sizeof (znode_t), offsetof(znode_t, z_link_node)); size = MIN(1 << (highbit64(zfs_object_mutex_size)-1), ZFS_OBJ_MTX_MAX); - zsb->z_hold_size = size; - zsb->z_hold_trees = vmem_zalloc(sizeof (avl_tree_t) * size, KM_SLEEP); - zsb->z_hold_locks = vmem_zalloc(sizeof (kmutex_t) * size, KM_SLEEP); + zfsvfs->z_hold_size = size; + zfsvfs->z_hold_trees = vmem_zalloc(sizeof (avl_tree_t) * size, + KM_SLEEP); + zfsvfs->z_hold_locks = vmem_zalloc(sizeof (kmutex_t) * size, KM_SLEEP); for (i = 0; i != size; i++) { - avl_create(&zsb->z_hold_trees[i], zfs_znode_hold_compare, + avl_create(&zfsvfs->z_hold_trees[i], zfs_znode_hold_compare, sizeof (znode_hold_t), offsetof(znode_hold_t, zh_node)); - mutex_init(&zsb->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL); + mutex_init(&zfsvfs->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL); } VERIFY(0 == zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr, @@ -1892,18 +1894,18 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx) /* * Create shares directory */ - error = zfs_create_share_dir(zsb, tx); + error = zfs_create_share_dir(zfsvfs, tx); ASSERT(error == 0); for (i = 0; i != size; i++) { - avl_destroy(&zsb->z_hold_trees[i]); - mutex_destroy(&zsb->z_hold_locks[i]); + avl_destroy(&zfsvfs->z_hold_trees[i]); + mutex_destroy(&zfsvfs->z_hold_locks[i]); } - vmem_free(zsb->z_hold_trees, sizeof (avl_tree_t) * size); - vmem_free(zsb->z_hold_locks, sizeof (kmutex_t) * size); + vmem_free(zfsvfs->z_hold_trees, sizeof (avl_tree_t) * size); + vmem_free(zfsvfs->z_hold_locks, sizeof (kmutex_t) * size); kmem_free(sb, sizeof (struct super_block)); - kmem_free(zsb, sizeof (zfs_sb_t)); + kmem_free(zfsvfs, sizeof (zfsvfs_t)); } #endif /* _KERNEL */ diff --git a/module/zfs/zpl_ctldir.c b/module/zfs/zpl_ctldir.c index cdd6668b1..b6a3b669d 100644 --- a/module/zfs/zpl_ctldir.c +++ b/module/zfs/zpl_ctldir.c @@ -52,10 +52,10 @@ zpl_common_open(struct inode *ip, struct file *filp) static int zpl_root_iterate(struct file *filp, struct dir_context *ctx) { - zfs_sb_t *zsb = ITOZSB(file_inode(filp)); + zfsvfs_t *zfsvfs = ITOZSB(file_inode(filp)); int error = 0; - ZFS_ENTER(zsb); + 
ZFS_ENTER(zfsvfs); if (!dir_emit_dots(filp, ctx)) goto out; @@ -76,7 +76,7 @@ zpl_root_iterate(struct file *filp, struct dir_context *ctx) ctx->pos++; } out: - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } @@ -249,14 +249,14 @@ zpl_snapdir_lookup(struct inode *dip, struct dentry *dentry, static int zpl_snapdir_iterate(struct file *filp, struct dir_context *ctx) { - zfs_sb_t *zsb = ITOZSB(file_inode(filp)); + zfsvfs_t *zfsvfs = ITOZSB(file_inode(filp)); fstrans_cookie_t cookie; char snapname[MAXNAMELEN]; boolean_t case_conflict; uint64_t id, pos; int error = 0; - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); cookie = spl_fstrans_mark(); if (!dir_emit_dots(filp, ctx)) @@ -264,10 +264,10 @@ zpl_snapdir_iterate(struct file *filp, struct dir_context *ctx) pos = ctx->pos; while (error == 0) { - dsl_pool_config_enter(dmu_objset_pool(zsb->z_os), FTAG); - error = -dmu_snapshot_list_next(zsb->z_os, MAXNAMELEN, + dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG); + error = -dmu_snapshot_list_next(zfsvfs->z_os, MAXNAMELEN, snapname, &id, &pos, &case_conflict); - dsl_pool_config_exit(dmu_objset_pool(zsb->z_os), FTAG); + dsl_pool_config_exit(dmu_objset_pool(zfsvfs->z_os), FTAG); if (error) goto out; @@ -279,7 +279,7 @@ zpl_snapdir_iterate(struct file *filp, struct dir_context *ctx) } out: spl_fstrans_unmark(cookie); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); if (error == -ENOENT) return (0); @@ -378,15 +378,15 @@ static int zpl_snapdir_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { - zfs_sb_t *zsb = ITOZSB(dentry->d_inode); + zfsvfs_t *zfsvfs = ITOZSB(dentry->d_inode); int error; - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); error = simple_getattr(mnt, dentry, stat); stat->nlink = stat->size = 2; - stat->ctime = stat->mtime = dmu_objset_snap_cmtime(zsb->z_os); + stat->ctime = stat->mtime = dmu_objset_snap_cmtime(zfsvfs->z_os); stat->atime = CURRENT_TIME; - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } @@ -464,19 +464,19 @@ zpl_shares_iterate(struct file *filp, struct dir_context *ctx) { fstrans_cookie_t cookie; cred_t *cr = CRED(); - zfs_sb_t *zsb = ITOZSB(file_inode(filp)); + zfsvfs_t *zfsvfs = ITOZSB(file_inode(filp)); znode_t *dzp; int error = 0; - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); cookie = spl_fstrans_mark(); - if (zsb->z_shares_dir == 0) { + if (zfsvfs->z_shares_dir == 0) { dir_emit_dots(filp, ctx); goto out; } - error = -zfs_zget(zsb, zsb->z_shares_dir, &dzp); + error = -zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp); if (error) goto out; @@ -487,7 +487,7 @@ zpl_shares_iterate(struct file *filp, struct dir_context *ctx) iput(ZTOI(dzp)); out: spl_fstrans_unmark(cookie); - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); ASSERT3S(error, <=, 0); return (error); @@ -513,27 +513,27 @@ zpl_shares_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *ip = dentry->d_inode; - zfs_sb_t *zsb = ITOZSB(ip); + zfsvfs_t *zfsvfs = ITOZSB(ip); znode_t *dzp; int error; - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); - if (zsb->z_shares_dir == 0) { + if (zfsvfs->z_shares_dir == 0) { error = simple_getattr(mnt, dentry, stat); stat->nlink = stat->size = 2; stat->atime = CURRENT_TIME; - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); return (error); } - error = -zfs_zget(zsb, zsb->z_shares_dir, &dzp); + error = -zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp); if (error == 0) { error = -zfs_getattr_fast(ZTOI(dzp), stat); iput(ZTOI(dzp)); } - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); ASSERT3S(error, <=, 0); return (error); diff --git a/module/zfs/zpl_file.c b/module/zfs/zpl_file.c index 
cdacdba27..4805abe69 100644 --- a/module/zfs/zpl_file.c +++ b/module/zfs/zpl_file.c @@ -605,14 +605,14 @@ static int zpl_writepages(struct address_space *mapping, struct writeback_control *wbc) { znode_t *zp = ITOZ(mapping->host); - zfs_sb_t *zsb = ITOZSB(mapping->host); + zfsvfs_t *zfsvfs = ITOZSB(mapping->host); enum writeback_sync_modes sync_mode; int result; - ZFS_ENTER(zsb); - if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) + ZFS_ENTER(zfsvfs); + if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) wbc->sync_mode = WB_SYNC_ALL; - ZFS_EXIT(zsb); + ZFS_EXIT(zfsvfs); sync_mode = wbc->sync_mode; /* @@ -625,11 +625,11 @@ zpl_writepages(struct address_space *mapping, struct writeback_control *wbc) wbc->sync_mode = WB_SYNC_NONE; result = write_cache_pages(mapping, wbc, zpl_putpage, mapping); if (sync_mode != wbc->sync_mode) { - ZFS_ENTER(zsb); + ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(zp); - if (zsb->z_log != NULL) - zil_commit(zsb->z_log, zp->z_id); - ZFS_EXIT(zsb); + if (zfsvfs->z_log != NULL) + zil_commit(zfsvfs->z_log, zp->z_id); + ZFS_EXIT(zfsvfs); /* * We need to call write_cache_pages() again (we can't just diff --git a/module/zfs/zpl_inode.c b/module/zfs/zpl_inode.c index b39a8bbe1..2e438eaff 100644 --- a/module/zfs/zpl_inode.c +++ b/module/zfs/zpl_inode.c @@ -48,7 +48,7 @@ zpl_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) pathname_t *ppn = NULL; pathname_t pn; int zfs_flags = 0; - zfs_sb_t *zsb = dentry->d_sb->s_fs_info; + zfsvfs_t *zfsvfs = dentry->d_sb->s_fs_info; if (dlen(dentry) >= ZAP_MAXNAMELEN) return (ERR_PTR(-ENAMETOOLONG)); @@ -57,7 +57,7 @@ zpl_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) cookie = spl_fstrans_mark(); /* If we are a case insensitive fs, we need the real name */ - if (zsb->z_case == ZFS_CASE_INSENSITIVE) { + if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE) { zfs_flags = FIGNORECASE; pn_alloc(&pn); ppn = &pn; @@ -259,7 +259,7 @@ zpl_unlink(struct inode *dir, struct dentry *dentry) cred_t *cr = CRED(); int error; fstrans_cookie_t cookie; - zfs_sb_t *zsb = dentry->d_sb->s_fs_info; + zfsvfs_t *zfsvfs = dentry->d_sb->s_fs_info; crhold(cr); cookie = spl_fstrans_mark(); @@ -269,7 +269,7 @@ zpl_unlink(struct inode *dir, struct dentry *dentry) * For a CI FS we must invalidate the dentry to prevent the * creation of negative entries. */ - if (error == 0 && zsb->z_case == ZFS_CASE_INSENSITIVE) + if (error == 0 && zfsvfs->z_case == ZFS_CASE_INSENSITIVE) d_invalidate(dentry); spl_fstrans_unmark(cookie); @@ -319,7 +319,7 @@ zpl_rmdir(struct inode *dir, struct dentry *dentry) cred_t *cr = CRED(); int error; fstrans_cookie_t cookie; - zfs_sb_t *zsb = dentry->d_sb->s_fs_info; + zfsvfs_t *zfsvfs = dentry->d_sb->s_fs_info; crhold(cr); cookie = spl_fstrans_mark(); @@ -329,7 +329,7 @@ zpl_rmdir(struct inode *dir, struct dentry *dentry) * For a CI FS we must invalidate the dentry to prevent the * creation of negative entries. */ - if (error == 0 && zsb->z_case == ZFS_CASE_INSENSITIVE) + if (error == 0 && zfsvfs->z_case == ZFS_CASE_INSENSITIVE) d_invalidate(dentry); spl_fstrans_unmark(cookie); @@ -658,7 +658,7 @@ zpl_revalidate(struct dentry *dentry, unsigned int flags) { #endif /* HAVE_D_REVALIDATE_NAMEIDATA */ /* CSTYLED */ - zfs_sb_t *zsb = dentry->d_sb->s_fs_info; + zfsvfs_t *zfsvfs = dentry->d_sb->s_fs_info; int error; if (flags & LOOKUP_RCU) @@ -668,12 +668,12 @@ zpl_revalidate(struct dentry *dentry, unsigned int flags) * Automounted snapshots rely on periodic dentry revalidation * to defer snapshots from being automatically unmounted. 
*/ - if (zsb->z_issnap) { - if (time_after(jiffies, zsb->z_snap_defer_time + + if (zfsvfs->z_issnap) { + if (time_after(jiffies, zfsvfs->z_snap_defer_time + MAX(zfs_expire_snapshot * HZ / 2, HZ))) { - zsb->z_snap_defer_time = jiffies; - zfsctl_snapshot_unmount_delay(zsb->z_os->os_spa, - dmu_objset_id(zsb->z_os), zfs_expire_snapshot); + zfsvfs->z_snap_defer_time = jiffies; + zfsctl_snapshot_unmount_delay(zfsvfs->z_os->os_spa, + dmu_objset_id(zfsvfs->z_os), zfs_expire_snapshot); } } @@ -684,7 +684,7 @@ zpl_revalidate(struct dentry *dentry, unsigned int flags) */ if (dentry->d_inode == NULL) { spin_lock(&dentry->d_lock); - error = time_before(dentry->d_time, zsb->z_rollback_time); + error = time_before(dentry->d_time, zfsvfs->z_rollback_time); spin_unlock(&dentry->d_lock); if (error) diff --git a/module/zfs/zpl_super.c b/module/zfs/zpl_super.c index 91c36c9e3..b6ef60277 100644 --- a/module/zfs/zpl_super.c +++ b/module/zfs/zpl_super.c @@ -184,211 +184,15 @@ zpl_statfs(struct dentry *dentry, struct kstatfs *statp) return (error); } -enum { - TOKEN_RO, - TOKEN_RW, - TOKEN_SETUID, - TOKEN_NOSETUID, - TOKEN_EXEC, - TOKEN_NOEXEC, - TOKEN_DEVICES, - TOKEN_NODEVICES, - TOKEN_DIRXATTR, - TOKEN_SAXATTR, - TOKEN_XATTR, - TOKEN_NOXATTR, - TOKEN_ATIME, - TOKEN_NOATIME, - TOKEN_RELATIME, - TOKEN_NORELATIME, - TOKEN_NBMAND, - TOKEN_NONBMAND, - TOKEN_MNTPOINT, - TOKEN_LAST, -}; - -static const match_table_t zpl_tokens = { - { TOKEN_RO, MNTOPT_RO }, - { TOKEN_RW, MNTOPT_RW }, - { TOKEN_SETUID, MNTOPT_SETUID }, - { TOKEN_NOSETUID, MNTOPT_NOSETUID }, - { TOKEN_EXEC, MNTOPT_EXEC }, - { TOKEN_NOEXEC, MNTOPT_NOEXEC }, - { TOKEN_DEVICES, MNTOPT_DEVICES }, - { TOKEN_NODEVICES, MNTOPT_NODEVICES }, - { TOKEN_DIRXATTR, MNTOPT_DIRXATTR }, - { TOKEN_SAXATTR, MNTOPT_SAXATTR }, - { TOKEN_XATTR, MNTOPT_XATTR }, - { TOKEN_NOXATTR, MNTOPT_NOXATTR }, - { TOKEN_ATIME, MNTOPT_ATIME }, - { TOKEN_NOATIME, MNTOPT_NOATIME }, - { TOKEN_RELATIME, MNTOPT_RELATIME }, - { TOKEN_NORELATIME, MNTOPT_NORELATIME }, - { TOKEN_NBMAND, MNTOPT_NBMAND }, - { TOKEN_NONBMAND, MNTOPT_NONBMAND }, - { TOKEN_MNTPOINT, MNTOPT_MNTPOINT "=%s" }, - { TOKEN_LAST, NULL }, -}; - -static int -zpl_parse_option(char *option, int token, substring_t *args, zfs_mntopts_t *zmo) -{ - switch (token) { - case TOKEN_RO: - zmo->z_readonly = B_TRUE; - zmo->z_do_readonly = B_TRUE; - break; - case TOKEN_RW: - zmo->z_readonly = B_FALSE; - zmo->z_do_readonly = B_TRUE; - break; - case TOKEN_SETUID: - zmo->z_setuid = B_TRUE; - zmo->z_do_setuid = B_TRUE; - break; - case TOKEN_NOSETUID: - zmo->z_setuid = B_FALSE; - zmo->z_do_setuid = B_TRUE; - break; - case TOKEN_EXEC: - zmo->z_exec = B_TRUE; - zmo->z_do_exec = B_TRUE; - break; - case TOKEN_NOEXEC: - zmo->z_exec = B_FALSE; - zmo->z_do_exec = B_TRUE; - break; - case TOKEN_DEVICES: - zmo->z_devices = B_TRUE; - zmo->z_do_devices = B_TRUE; - break; - case TOKEN_NODEVICES: - zmo->z_devices = B_FALSE; - zmo->z_do_devices = B_TRUE; - break; - case TOKEN_DIRXATTR: - zmo->z_xattr = ZFS_XATTR_DIR; - zmo->z_do_xattr = B_TRUE; - break; - case TOKEN_SAXATTR: - zmo->z_xattr = ZFS_XATTR_SA; - zmo->z_do_xattr = B_TRUE; - break; - case TOKEN_XATTR: - zmo->z_xattr = ZFS_XATTR_DIR; - zmo->z_do_xattr = B_TRUE; - break; - case TOKEN_NOXATTR: - zmo->z_xattr = ZFS_XATTR_OFF; - zmo->z_do_xattr = B_TRUE; - break; - case TOKEN_ATIME: - zmo->z_atime = B_TRUE; - zmo->z_do_atime = B_TRUE; - break; - case TOKEN_NOATIME: - zmo->z_atime = B_FALSE; - zmo->z_do_atime = B_TRUE; - break; - case TOKEN_RELATIME: - zmo->z_relatime = B_TRUE; - zmo->z_do_relatime = 
B_TRUE; - break; - case TOKEN_NORELATIME: - zmo->z_relatime = B_FALSE; - zmo->z_do_relatime = B_TRUE; - break; - case TOKEN_NBMAND: - zmo->z_nbmand = B_TRUE; - zmo->z_do_nbmand = B_TRUE; - break; - case TOKEN_NONBMAND: - zmo->z_nbmand = B_FALSE; - zmo->z_do_nbmand = B_TRUE; - break; - case TOKEN_MNTPOINT: - zmo->z_mntpoint = match_strdup(&args[0]); - if (zmo->z_mntpoint == NULL) - return (-ENOMEM); - - break; - default: - break; - } - - return (0); -} - -/* - * Parse the mntopts string storing the results in provided zmo argument. - * If an error occurs the zmo argument will not be modified. The caller - * needs to set isremount when recycling an existing zfs_mntopts_t. - */ -static int -zpl_parse_options(char *osname, char *mntopts, zfs_mntopts_t *zmo, - boolean_t isremount) -{ - zfs_mntopts_t *tmp_zmo; - int error; - - tmp_zmo = zfs_mntopts_alloc(); - tmp_zmo->z_osname = strdup(osname); - - if (mntopts) { - substring_t args[MAX_OPT_ARGS]; - char *tmp_mntopts, *p, *t; - int token; - - t = tmp_mntopts = strdup(mntopts); - - while ((p = strsep(&t, ",")) != NULL) { - if (!*p) - continue; - - args[0].to = args[0].from = NULL; - token = match_token(p, zpl_tokens, args); - error = zpl_parse_option(p, token, args, tmp_zmo); - if (error) { - zfs_mntopts_free(tmp_zmo); - strfree(tmp_mntopts); - return (error); - } - } - - strfree(tmp_mntopts); - } - - if (isremount == B_TRUE) { - if (zmo->z_osname) - strfree(zmo->z_osname); - - if (zmo->z_mntpoint) - strfree(zmo->z_mntpoint); - } else { - ASSERT3P(zmo->z_osname, ==, NULL); - ASSERT3P(zmo->z_mntpoint, ==, NULL); - } - - memcpy(zmo, tmp_zmo, sizeof (zfs_mntopts_t)); - kmem_free(tmp_zmo, sizeof (zfs_mntopts_t)); - - return (0); -} - static int zpl_remount_fs(struct super_block *sb, int *flags, char *data) { - zfs_sb_t *zsb = sb->s_fs_info; + zfs_mnt_t zm = { .mnt_osname = NULL, .mnt_data = data }; fstrans_cookie_t cookie; int error; - error = zpl_parse_options(zsb->z_mntopts->z_osname, data, - zsb->z_mntopts, B_TRUE); - if (error) - return (error); - cookie = spl_fstrans_mark(); - error = -zfs_remount(sb, flags, zsb->z_mntopts); + error = -zfs_remount(sb, flags, &zm); spl_fstrans_unmark(cookie); ASSERT3S(error, <=, 0); @@ -396,12 +200,13 @@ zpl_remount_fs(struct super_block *sb, int *flags, char *data) } static int -__zpl_show_options(struct seq_file *seq, zfs_sb_t *zsb) +__zpl_show_options(struct seq_file *seq, zfsvfs_t *zfsvfs) { - seq_printf(seq, ",%s", zsb->z_flags & ZSB_XATTR ? "xattr" : "noxattr"); + seq_printf(seq, ",%s", + zfsvfs->z_flags & ZSB_XATTR ? 
"xattr" : "noxattr"); #ifdef CONFIG_FS_POSIX_ACL - switch (zsb->z_acl_type) { + switch (zfsvfs->z_acl_type) { case ZFS_ACLTYPE_POSIXACL: seq_puts(seq, ",posixacl"); break; @@ -431,12 +236,12 @@ zpl_show_options(struct seq_file *seq, struct vfsmount *vfsp) static int zpl_fill_super(struct super_block *sb, void *data, int silent) { - zfs_mntopts_t *zmo = (zfs_mntopts_t *)data; + zfs_mnt_t *zm = (zfs_mnt_t *)data; fstrans_cookie_t cookie; int error; cookie = spl_fstrans_mark(); - error = -zfs_domount(sb, zmo, silent); + error = -zfs_domount(sb, zm, silent); spl_fstrans_unmark(cookie); ASSERT3S(error, <=, 0); @@ -448,32 +253,18 @@ static struct dentry * zpl_mount(struct file_system_type *fs_type, int flags, const char *osname, void *data) { - zfs_mntopts_t *zmo = zfs_mntopts_alloc(); - int error; - - error = zpl_parse_options((char *)osname, (char *)data, zmo, B_FALSE); - if (error) { - zfs_mntopts_free(zmo); - return (ERR_PTR(error)); - } + zfs_mnt_t zm = { .mnt_osname = osname, .mnt_data = data }; - return (mount_nodev(fs_type, flags, zmo, zpl_fill_super)); + return (mount_nodev(fs_type, flags, &zm, zpl_fill_super)); } #else static int zpl_get_sb(struct file_system_type *fs_type, int flags, const char *osname, void *data, struct vfsmount *mnt) { - zfs_mntopts_t *zmo = zfs_mntopts_alloc(); - int error; - - error = zpl_parse_options((char *)osname, (char *)data, zmo, B_FALSE); - if (error) { - zfs_mntopts_free(zmo); - return (error); - } + zfs_mnt_t zm = { .mnt_osname = osname, .mnt_data = data }; - return (get_sb_nodev(fs_type, flags, zmo, zpl_fill_super, mnt)); + return (get_sb_nodev(fs_type, flags, &zm, zpl_fill_super, mnt)); } #endif /* HAVE_MOUNT_NODEV */ @@ -494,7 +285,7 @@ zpl_prune_sb(int64_t nr_to_scan, void *arg) struct super_block *sb = (struct super_block *)arg; int objects = 0; - (void) -zfs_sb_prune(sb, nr_to_scan, &objects); + (void) -zfs_prune(sb, nr_to_scan, &objects); } #ifdef HAVE_NR_CACHED_OBJECTS diff --git a/module/zfs/zpl_xattr.c b/module/zfs/zpl_xattr.c index 7186e477a..5edabedd3 100644 --- a/module/zfs/zpl_xattr.c +++ b/module/zfs/zpl_xattr.c @@ -237,7 +237,7 @@ ssize_t zpl_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size) { znode_t *zp = ITOZ(dentry->d_inode); - zfs_sb_t *zsb = ZTOZSB(zp); + zfsvfs_t *zfsvfs = ZTOZSB(zp); xattr_filldir_t xf = { buffer_size, 0, buffer, dentry }; cred_t *cr = CRED(); fstrans_cookie_t cookie; @@ -245,10 +245,10 @@ zpl_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size) crhold(cr); cookie = spl_fstrans_mark(); - rrm_enter_read(&(zsb)->z_teardown_lock, FTAG); + rrm_enter_read(&(zfsvfs)->z_teardown_lock, FTAG); rw_enter(&zp->z_xattr_lock, RW_READER); - if (zsb->z_use_sa && zp->z_is_sa) { + if (zfsvfs->z_use_sa && zp->z_is_sa) { error = zpl_xattr_list_sa(&xf); if (error) goto out; @@ -262,7 +262,7 @@ zpl_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size) out: rw_exit(&zp->z_xattr_lock); - rrm_exit(&(zsb)->z_teardown_lock, FTAG); + rrm_exit(&(zfsvfs)->z_teardown_lock, FTAG); spl_fstrans_unmark(cookie); crfree(cr); @@ -349,12 +349,12 @@ __zpl_xattr_get(struct inode *ip, const char *name, void *value, size_t size, cred_t *cr) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ZTOZSB(zp); + zfsvfs_t *zfsvfs = ZTOZSB(zp); int error; ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock)); - if (zsb->z_use_sa && zp->z_is_sa) { + if (zfsvfs->z_use_sa && zp->z_is_sa) { error = zpl_xattr_get_sa(ip, name, value, size); if (error != -ENOENT) goto out; @@ -376,14 +376,14 @@ static int __zpl_xattr_where(struct inode *ip, const 
char *name, int *where, cred_t *cr) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ZTOZSB(zp); + zfsvfs_t *zfsvfs = ZTOZSB(zp); int error; ASSERT(where); ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock)); *where = XATTR_NOENT; - if (zsb->z_use_sa && zp->z_is_sa) { + if (zfsvfs->z_use_sa && zp->z_is_sa) { error = zpl_xattr_get_sa(ip, name, NULL, 0); if (error >= 0) *where |= XATTR_IN_SA; @@ -411,18 +411,18 @@ static int zpl_xattr_get(struct inode *ip, const char *name, void *value, size_t size) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ZTOZSB(zp); + zfsvfs_t *zfsvfs = ZTOZSB(zp); cred_t *cr = CRED(); fstrans_cookie_t cookie; int error; crhold(cr); cookie = spl_fstrans_mark(); - rrm_enter_read(&(zsb)->z_teardown_lock, FTAG); + rrm_enter_read(&(zfsvfs)->z_teardown_lock, FTAG); rw_enter(&zp->z_xattr_lock, RW_READER); error = __zpl_xattr_get(ip, name, value, size, cr); rw_exit(&zp->z_xattr_lock); - rrm_exit(&(zsb)->z_teardown_lock, FTAG); + rrm_exit(&(zfsvfs)->z_teardown_lock, FTAG); spl_fstrans_unmark(cookie); crfree(cr); @@ -576,7 +576,7 @@ zpl_xattr_set(struct inode *ip, const char *name, const void *value, size_t size, int flags) { znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ZTOZSB(zp); + zfsvfs_t *zfsvfs = ZTOZSB(zp); cred_t *cr = CRED(); fstrans_cookie_t cookie; int where; @@ -584,7 +584,7 @@ zpl_xattr_set(struct inode *ip, const char *name, const void *value, crhold(cr); cookie = spl_fstrans_mark(); - rrm_enter_read(&(zsb)->z_teardown_lock, FTAG); + rrm_enter_read(&(zfsvfs)->z_teardown_lock, FTAG); rw_enter(&ITOZ(ip)->z_xattr_lock, RW_WRITER); /* @@ -615,8 +615,8 @@ zpl_xattr_set(struct inode *ip, const char *name, const void *value, } /* Preferentially store the xattr as a SA for better performance */ - if (zsb->z_use_sa && zp->z_is_sa && - (zsb->z_xattr_sa || (value == NULL && where & XATTR_IN_SA))) { + if (zfsvfs->z_use_sa && zp->z_is_sa && + (zfsvfs->z_xattr_sa || (value == NULL && where & XATTR_IN_SA))) { error = zpl_xattr_set_sa(ip, name, value, size, flags, cr); if (error == 0) { /* @@ -637,7 +637,7 @@ zpl_xattr_set(struct inode *ip, const char *name, const void *value, zpl_xattr_set_sa(ip, name, NULL, 0, 0, cr); out: rw_exit(&ITOZ(ip)->z_xattr_lock); - rrm_exit(&(zsb)->z_teardown_lock, FTAG); + rrm_exit(&(zfsvfs)->z_teardown_lock, FTAG); spl_fstrans_unmark(cookie); crfree(cr); ASSERT3S(error, <=, 0); |
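
The hunks above only rename zfs_sb_t/zsb to zfsvfs_t/zfsvfs inside these functions; the locking protocol they follow is unchanged. For readers unfamiliar with it, below is a minimal user-space model of the ZFS_ENTER/ZFS_EXIT bracket that opens and closes nearly every operation in this diff: ZFS_ENTER takes the filesystem's teardown lock as a reader and fails with EIO if the filesystem was already unmounted, ZFS_EXIT drops it. Everything here (the myfs_* names, a pthread rwlock standing in for the rrm teardown lock) is a hypothetical sketch for illustration, not code from this commit.

/* Minimal user-space model of the ZFS_ENTER/ZFS_EXIT protocol. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

typedef struct myfs {
        pthread_rwlock_t teardown_lock; /* stands in for z_teardown_lock */
        int unmounted;                  /* stands in for z_unmounted */
} myfs_t;

/* Take the teardown lock as reader; fail with EIO if already unmounted. */
static int
myfs_enter(myfs_t *fs)
{
        pthread_rwlock_rdlock(&fs->teardown_lock);
        if (fs->unmounted) {
                pthread_rwlock_unlock(&fs->teardown_lock);
                return (EIO);
        }
        return (0);
}

static void
myfs_exit(myfs_t *fs)
{
        pthread_rwlock_unlock(&fs->teardown_lock);
}

/* Every "VFS" operation follows the same bracket. */
static int
myfs_getattr(myfs_t *fs)
{
        int error;

        if ((error = myfs_enter(fs)) != 0)
                return (error);
        /* ... do the real work while teardown is held off ... */
        myfs_exit(fs);
        return (0);
}

int
main(void)
{
        myfs_t fs = { .unmounted = 0 };

        pthread_rwlock_init(&fs.teardown_lock, NULL);
        printf("getattr: %d\n", myfs_getattr(&fs));
        fs.unmounted = 1;       /* the real code flips this under the write lock */
        printf("getattr after unmount: %d\n", myfs_getattr(&fs));
        pthread_rwlock_destroy(&fs.teardown_lock);
        return (0);
}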
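
The zfs_znode_hold_enter()/zfs_znode_hold_exit() pair touched in zfs_znode.c serializes all work on a single object number: the object id is hashed into one of z_hold_size buckets, each bucket mutex protects an AVL tree of refcounted hold entries, and each hold entry carries the per-object mutex callers actually block on. The sketch below is a simplified user-space model of that scheme, assuming a singly linked list per bucket instead of an AVL tree and pthread mutexes instead of kmutex_t; all names are hypothetical.

/* User-space model of the hashed, refcounted znode hold locks. */
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

#define HOLD_BUCKETS    64

typedef struct obj_hold {
        uint64_t zh_obj;                /* object number being held */
        int zh_refcount;                /* holders waiting or holding */
        pthread_mutex_t zh_lock;        /* per-object serialization */
        struct obj_hold *zh_next;       /* bucket list linkage */
} obj_hold_t;

typedef struct hold_table {
        pthread_mutex_t ht_bucket_lock[HOLD_BUCKETS];
        obj_hold_t *ht_bucket[HOLD_BUCKETS];
} hold_table_t;

static void
hold_table_init(hold_table_t *ht)
{
        for (int i = 0; i < HOLD_BUCKETS; i++) {
                pthread_mutex_init(&ht->ht_bucket_lock[i], NULL);
                ht->ht_bucket[i] = NULL;
        }
}

static int
hold_hash(uint64_t obj)
{
        return (obj % HOLD_BUCKETS);
}

/*
 * Find or insert a hold for obj, bump its refcount, then take the
 * per-object lock outside the bucket lock.  Like the kernel code, the
 * new entry is preallocated before the bucket lock is taken and freed
 * again if somebody else already registered one.
 */
static obj_hold_t *
hold_enter(hold_table_t *ht, uint64_t obj)
{
        int i = hold_hash(obj);
        obj_hold_t *zh, *zh_new;

        zh_new = calloc(1, sizeof (*zh_new));
        zh_new->zh_obj = obj;
        pthread_mutex_init(&zh_new->zh_lock, NULL);

        pthread_mutex_lock(&ht->ht_bucket_lock[i]);
        for (zh = ht->ht_bucket[i]; zh != NULL; zh = zh->zh_next)
                if (zh->zh_obj == obj)
                        break;
        if (zh == NULL) {
                zh = zh_new;
                zh->zh_next = ht->ht_bucket[i];
                ht->ht_bucket[i] = zh;
                zh_new = NULL;
        }
        zh->zh_refcount++;
        pthread_mutex_unlock(&ht->ht_bucket_lock[i]);

        if (zh_new != NULL) {           /* an entry already existed */
                pthread_mutex_destroy(&zh_new->zh_lock);
                free(zh_new);
        }

        pthread_mutex_lock(&zh->zh_lock);
        return (zh);
}

/* Drop the per-object lock, then remove and free the hold if unused. */
static void
hold_exit(hold_table_t *ht, obj_hold_t *zh)
{
        int i = hold_hash(zh->zh_obj);
        obj_hold_t **zpp;
        int remove = 0;

        pthread_mutex_unlock(&zh->zh_lock);

        pthread_mutex_lock(&ht->ht_bucket_lock[i]);
        if (--zh->zh_refcount == 0) {
                for (zpp = &ht->ht_bucket[i]; *zpp != zh;
                    zpp = &(*zpp)->zh_next)
                        ;
                *zpp = zh->zh_next;
                remove = 1;
        }
        pthread_mutex_unlock(&ht->ht_bucket_lock[i]);

        if (remove) {
                pthread_mutex_destroy(&zh->zh_lock);
                free(zh);
        }
}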
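
The largest behavioral change in zpl_super.c is that mount option parsing no longer happens in the Linux shim at all: the token table, zpl_parse_option() and zpl_parse_options() are deleted, and zpl_mount(), zpl_get_sb() and zpl_remount_fs() now just package the objset name and the raw option string into a zfs_mnt_t and hand it to zfs_domount()/zfs_remount(), which interpret it on the common side. The fragment below is a compilable model of that pass-through; only the two-field struct shape (mnt_osname, mnt_data) comes from the diff, the myfs_* names are hypothetical.

/* Model of deferring option parsing to the common mount path. */
#include <stdio.h>

typedef struct myfs_mnt {
        const char *mnt_osname; /* dataset name, e.g. "pool/fs" */
        const char *mnt_data;   /* raw "opt1,opt2=val" string, unparsed */
} myfs_mnt_t;

/* Common layer: the only place the option string is ever interpreted. */
static int
myfs_domount(const myfs_mnt_t *zm)
{
        printf("mounting %s with options '%s'\n",
            zm->mnt_osname, zm->mnt_data ? zm->mnt_data : "");
        return (0);
}

/* Linux-facing shim: no tokenizing, no allocation, just a wrapper. */
static int
myfs_mount(const char *osname, const char *data)
{
        myfs_mnt_t zm = { .mnt_osname = osname, .mnt_data = data };

        return (myfs_domount(&zm));
}

int
main(void)
{
        return (myfs_mount("pool/fs", "noatime,xattr=sa"));
}

Besides minimizing the diff against upstream, this removes the duplicated option table and the allocation/free error paths that the old zfs_mntopts_t handling needed in zpl_mount() and zpl_remount_fs().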
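
zfs_fid() builds an NFS-style file handle by packing the object number and the generation into byte arrays one byte at a time, so the handle layout does not depend on host endianness; the diff shows the generation loop. The round trip below reproduces that packing scheme in user space, assuming the usual short-fid split of six object bytes and four generation bytes; names and sizes here are illustrative, not lifted from zfs_znode.h.

/* Round trip of the byte-at-a-time fid packing used for file handles. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FID_OBJ_BYTES   6       /* assumed short-fid layout */
#define FID_GEN_BYTES   4

typedef struct short_fid {
        uint8_t zf_object[FID_OBJ_BYTES];
        uint8_t zf_gen[FID_GEN_BYTES];
} short_fid_t;

static void
fid_pack(short_fid_t *zfid, uint64_t object, uint32_t gen)
{
        int i;

        /* Least significant byte first, independent of host endianness. */
        for (i = 0; i < FID_OBJ_BYTES; i++)
                zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
        for (i = 0; i < FID_GEN_BYTES; i++)
                zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
}

static void
fid_unpack(const short_fid_t *zfid, uint64_t *object, uint32_t *gen)
{
        int i;

        *object = 0;
        *gen = 0;
        for (i = 0; i < FID_OBJ_BYTES; i++)
                *object |= (uint64_t)zfid->zf_object[i] << (8 * i);
        for (i = 0; i < FID_GEN_BYTES; i++)
                *gen |= (uint32_t)zfid->zf_gen[i] << (8 * i);
}

int
main(void)
{
        short_fid_t zfid;
        uint64_t obj;
        uint32_t gen;

        fid_pack(&zfid, 0x1234567890ULL, 42);
        fid_unpack(&zfid, &obj, &gen);
        assert(obj == 0x1234567890ULL && gen == 42);
        printf("object %llx gen %u survive the round trip\n",
            (unsigned long long)obj, gen);
        return (0);
}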
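
In zpl_revalidate(), automounted snapshots are kept mounted by refreshing z_snap_defer_time, but only when at least MAX(zfs_expire_snapshot * HZ / 2, HZ) jiffies have passed since the last refresh, so a burst of lookups does not reschedule the unmount on every call. Below is a user-space model of that rate-limited keepalive, using CLOCK_MONOTONIC seconds instead of jiffies; the names are hypothetical and the printf stands in for zfsctl_snapshot_unmount_delay().

/* Rate-limited keepalive, modeled on the snapshot defer logic. */
#include <stdio.h>
#include <time.h>

static int expire_seconds = 300;        /* stands in for zfs_expire_snapshot */
static time_t snap_defer_time;          /* stands in for z_snap_defer_time */

static time_t
now_seconds(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (ts.tv_sec);
}

/*
 * Called on every revalidation; only occasionally pushes the unmount
 * deadline out, so frequent lookups stay cheap.
 */
static void
snapshot_keepalive(void)
{
        time_t interval = expire_seconds / 2;

        if (interval < 1)
                interval = 1;

        if (now_seconds() > snap_defer_time + interval) {
                snap_defer_time = now_seconds();
                printf("rescheduling snapshot unmount %d seconds out\n",
                    expire_seconds);
                /* the real code calls zfsctl_snapshot_unmount_delay() here */
        }
}

int
main(void)
{
        snapshot_keepalive();   /* first call reschedules the unmount */
        snapshot_keepalive();   /* second call within the window is a no-op */
        return (0);
}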
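
Finally, zpl_xattr_set() keeps its existing preference for system-attribute (SA) xattrs: when the dataset and znode support SA xattrs and either xattr=sa is in effect or an SA copy of the name is being removed, the value is written as an SA, and a successful write also drops any stale copy in the other location; otherwise it falls back to the directory-based format. The sketch below is a simplified reading of that decision, with the ZFS flags collapsed into plain booleans and hypothetical set_sa/set_dir/remove_dir helpers standing in for the zpl_xattr_set_{sa,dir} paths.

/* Sketch of the "prefer SA xattrs, fall back to directory" decision. */
#include <stdbool.h>
#include <stddef.h>

#define XATTR_IN_SA     0x1     /* existing value lives in an SA */
#define XATTR_IN_DIR    0x2     /* existing value lives in the xattr dir */

/* Hypothetical storage backends. */
static int set_sa(const char *name, const void *value) { return (0); }
static int set_dir(const char *name, const void *value) { return (0); }
static void remove_dir(const char *name) { }

static int
xattr_set(bool use_sa, bool xattr_sa, int where,
    const char *name, const void *value)
{
        int error;

        /*
         * Prefer the SA form when the dataset allows it, or when we are
         * removing (value == NULL) something that only exists as an SA.
         */
        if (use_sa && (xattr_sa ||
            (value == NULL && (where & XATTR_IN_SA)))) {
                error = set_sa(name, value);
                if (error == 0 && (where & XATTR_IN_DIR))
                        remove_dir(name);       /* drop stale dir copy */
                return (error);
        }

        /* Fall back to the directory-based xattr format. */
        error = set_dir(name, value);
        if (error == 0 && (where & XATTR_IN_SA))
                set_sa(name, NULL);             /* drop stale SA copy */
        return (error);
}

int
main(void)
{
        const char v[] = "value";

        return (xattr_set(true, true, XATTR_IN_DIR, "user.test", v));
}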