-rw-r--r--   include/sys/dsl_pool.h        4
-rw-r--r--   include/sys/zfs_acl.h         6
-rw-r--r--   include/sys/zfs_dir.h         8
-rw-r--r--   include/sys/zfs_fuid.h       18
-rw-r--r--   include/sys/zfs_vfsops.h     46
-rw-r--r--   include/sys/zfs_vnops.h      76
-rw-r--r--   include/sys/zfs_znode.h      76
-rw-r--r--   module/zfs/dmu_objset.c       2
-rw-r--r--   module/zfs/dsl_dataset.c      2
-rw-r--r--   module/zfs/dsl_pool.c         8
-rw-r--r--   module/zfs/spa_config.c       1
-rw-r--r--   module/zfs/vdev_file.c        1
-rw-r--r--   module/zfs/zfs_acl.c        281
-rw-r--r--   module/zfs/zfs_dir.c        285
-rw-r--r--   module/zfs/zfs_fuid.c       171
-rw-r--r--   module/zfs/zfs_ioctl.c      205
-rw-r--r--   module/zfs/zfs_log.c         15
-rw-r--r--   module/zfs/zfs_rlock.c        2
-rw-r--r--   module/zfs/zfs_sa.c          78
-rw-r--r--   module/zfs/zfs_vfsops.c    1094
-rw-r--r--   module/zfs/zfs_vnops.c     2335
-rw-r--r--   module/zfs/zfs_znode.c      726
22 files changed, 2096 insertions(+), 3344 deletions(-)
diff --git a/include/sys/dsl_pool.h b/include/sys/dsl_pool.h
index 7d25bd7c0..99a131eec 100644
--- a/include/sys/dsl_pool.h
+++ b/include/sys/dsl_pool.h
@@ -75,7 +75,7 @@ typedef struct dsl_pool {
struct dsl_dir *dp_free_dir;
struct dsl_dataset *dp_origin_snap;
uint64_t dp_root_dir_obj;
- struct taskq *dp_vnrele_taskq;
+ struct taskq *dp_iput_taskq;
/* No lock needed - sync context only */
blkptr_t dp_meta_rootbp;
@@ -135,7 +135,7 @@ void dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx);
void dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx);
void dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx);
-taskq_t *dsl_pool_vnrele_taskq(dsl_pool_t *dp);
+taskq_t *dsl_pool_iput_taskq(dsl_pool_t *dp);
extern int dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj,
const char *tag, uint64_t *now, dmu_tx_t *tx);
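
The per-pool vnode-release taskq becomes an iput taskq in this port. A minimal sketch of the intended use, assuming a helper named zfs_iput_async() that is not part of this hunk: the final iput() is dispatched to dsl_pool_iput_taskq() so the last inode reference is never dropped from sync/txg context.

static void
zfs_iput_async(struct inode *ip)
{
	objset_t *os = ITOZSB(ip)->z_os;

	/* Defer the final reference drop to the pool's iput taskq. */
	VERIFY(taskq_dispatch(dsl_pool_iput_taskq(dmu_objset_pool(os)),
	    (task_func_t *)iput, ip, TQ_SLEEP) != 0);
}
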
diff --git a/include/sys/zfs_acl.h b/include/sys/zfs_acl.h
index c1a0aeebd..6f7cef2ad 100644
--- a/include/sys/zfs_acl.h
+++ b/include/sys/zfs_acl.h
@@ -200,13 +200,13 @@ typedef struct zfs_acl_ids {
#define ZFS_ACL_PASSTHROUGH_X 5
struct znode;
-struct zfsvfs;
+struct zfs_sb;
#ifdef _KERNEL
int zfs_acl_ids_create(struct znode *, int, vattr_t *,
cred_t *, vsecattr_t *, zfs_acl_ids_t *);
void zfs_acl_ids_free(zfs_acl_ids_t *);
-boolean_t zfs_acl_ids_overquota(struct zfsvfs *, zfs_acl_ids_t *);
+boolean_t zfs_acl_ids_overquota(struct zfs_sb *, zfs_acl_ids_t *);
int zfs_getacl(struct znode *, vsecattr_t *, boolean_t, cred_t *);
int zfs_setacl(struct znode *, vsecattr_t *, boolean_t, cred_t *);
void zfs_acl_rele(void *);
@@ -223,7 +223,7 @@ int zfs_zaccess_delete(struct znode *, struct znode *, cred_t *);
int zfs_zaccess_rename(struct znode *, struct znode *,
struct znode *, struct znode *, cred_t *cr);
void zfs_acl_free(zfs_acl_t *);
-int zfs_vsec_2_aclp(struct zfsvfs *, vtype_t, vsecattr_t *, cred_t *,
+int zfs_vsec_2_aclp(struct zfs_sb *, umode_t, vsecattr_t *, cred_t *,
struct zfs_fuid_info **, zfs_acl_t **);
int zfs_aclset_common(struct znode *, zfs_acl_t *, cred_t *, dmu_tx_t *);
uint64_t zfs_external_acl(struct znode *);
diff --git a/include/sys/zfs_dir.h b/include/sys/zfs_dir.h
index 349f8ef37..8610fbe08 100644
--- a/include/sys/zfs_dir.h
+++ b/include/sys/zfs_dir.h
@@ -54,7 +54,7 @@ extern void zfs_dirent_unlock(zfs_dirlock_t *);
extern int zfs_link_create(zfs_dirlock_t *, znode_t *, dmu_tx_t *, int);
extern int zfs_link_destroy(zfs_dirlock_t *, znode_t *, dmu_tx_t *, int,
boolean_t *);
-extern int zfs_dirlook(znode_t *, char *, vnode_t **, int, int *,
+extern int zfs_dirlook(znode_t *, char *, struct inode **, int, int *,
pathname_t *);
extern void zfs_mknode(znode_t *, vattr_t *, dmu_tx_t *, cred_t *,
uint_t, znode_t **, zfs_acl_ids_t *);
@@ -62,10 +62,10 @@ extern void zfs_rmnode(znode_t *);
extern void zfs_dl_name_switch(zfs_dirlock_t *dl, char *new, char **old);
extern boolean_t zfs_dirempty(znode_t *);
extern void zfs_unlinked_add(znode_t *, dmu_tx_t *);
-extern void zfs_unlinked_drain(zfsvfs_t *zfsvfs);
+extern void zfs_unlinked_drain(zfs_sb_t *);
extern int zfs_sticky_remove_access(znode_t *, znode_t *, cred_t *cr);
-extern int zfs_get_xattrdir(znode_t *, vnode_t **, cred_t *, int);
-extern int zfs_make_xattrdir(znode_t *, vattr_t *, vnode_t **, cred_t *);
+extern int zfs_get_xattrdir(znode_t *, struct inode **, cred_t *, int);
+extern int zfs_make_xattrdir(znode_t *, vattr_t *, struct inode **, cred_t *);
#ifdef __cplusplus
}
diff --git a/include/sys/zfs_fuid.h b/include/sys/zfs_fuid.h
index 91650a22d..deaebcc82 100644
--- a/include/sys/zfs_fuid.h
+++ b/include/sys/zfs_fuid.h
@@ -100,24 +100,24 @@ typedef struct zfs_fuid_info {
#ifdef _KERNEL
struct znode;
-extern uid_t zfs_fuid_map_id(zfsvfs_t *, uint64_t, cred_t *, zfs_fuid_type_t);
+extern uid_t zfs_fuid_map_id(zfs_sb_t *, uint64_t, cred_t *, zfs_fuid_type_t);
extern void zfs_fuid_node_add(zfs_fuid_info_t **, const char *, uint32_t,
uint64_t, uint64_t, zfs_fuid_type_t);
-extern void zfs_fuid_destroy(zfsvfs_t *);
-extern uint64_t zfs_fuid_create_cred(zfsvfs_t *, zfs_fuid_type_t,
+extern void zfs_fuid_destroy(zfs_sb_t *);
+extern uint64_t zfs_fuid_create_cred(zfs_sb_t *, zfs_fuid_type_t,
cred_t *, zfs_fuid_info_t **);
-extern uint64_t zfs_fuid_create(zfsvfs_t *, uint64_t, cred_t *, zfs_fuid_type_t,
+extern uint64_t zfs_fuid_create(zfs_sb_t *, uint64_t, cred_t *, zfs_fuid_type_t,
zfs_fuid_info_t **);
extern void zfs_fuid_map_ids(struct znode *zp, cred_t *cr,
uid_t *uid, uid_t *gid);
extern zfs_fuid_info_t *zfs_fuid_info_alloc(void);
extern void zfs_fuid_info_free(zfs_fuid_info_t *);
-extern boolean_t zfs_groupmember(zfsvfs_t *, uint64_t, cred_t *);
-void zfs_fuid_sync(zfsvfs_t *, dmu_tx_t *);
-extern int zfs_fuid_find_by_domain(zfsvfs_t *, const char *domain,
+extern boolean_t zfs_groupmember(zfs_sb_t *, uint64_t, cred_t *);
+void zfs_fuid_sync(zfs_sb_t *, dmu_tx_t *);
+extern int zfs_fuid_find_by_domain(zfs_sb_t *, const char *domain,
char **retdomain, boolean_t addok);
-extern const char *zfs_fuid_find_by_idx(zfsvfs_t *zfsvfs, uint32_t idx);
-extern void zfs_fuid_txhold(zfsvfs_t *zfsvfs, dmu_tx_t *tx);
+extern const char *zfs_fuid_find_by_idx(zfs_sb_t *zsb, uint32_t idx);
+extern void zfs_fuid_txhold(zfs_sb_t *zsb, dmu_tx_t *tx);
#endif
char *zfs_fuid_idx_domain(avl_tree_t *, uint32_t);
diff --git a/include/sys/zfs_vfsops.h b/include/sys/zfs_vfsops.h
index 34a871587..a2f00acf0 100644
--- a/include/sys/zfs_vfsops.h
+++ b/include/sys/zfs_vfsops.h
@@ -38,13 +38,15 @@
extern "C" {
#endif
-typedef struct zfsvfs zfsvfs_t;
+struct zfs_sb;
struct znode;
-struct zfsvfs {
- vfs_t *z_vfs; /* generic fs struct */
- zfsvfs_t *z_parent; /* parent fs */
+typedef struct zfs_sb {
+ struct vfsmount *z_vfs; /* generic vfs struct */
+ struct super_block *z_sb; /* generic super_block */
+ struct zfs_sb *z_parent; /* parent fs */
objset_t *z_os; /* objset reference */
+ uint64_t z_flags; /* super_block flags */
uint64_t z_root; /* id of root znode */
uint64_t z_unlinkedobj; /* id of unlinked zapobj */
uint64_t z_max_blksz; /* maximum block size for files */
@@ -87,6 +89,8 @@ struct zfsvfs {
#define ZFS_SUPER_MAGIC 0x2fc12fc1
+#define ZSB_XATTR_USER 0x0001 /* Enable user xattrs */
+
/*
* Minimal snapshot helpers, the bulk of the Linux snapshot implementation
@@ -162,30 +166,30 @@ typedef struct zfid_long {
extern uint_t zfs_fsyncer_key;
-extern int zfs_suspend_fs(zfsvfs_t *zfsvfs);
-extern int zfs_resume_fs(zfsvfs_t *zfsvfs, const char *osname);
-extern int zfs_userspace_one(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
+extern int zfs_suspend_fs(zfs_sb_t *zsb);
+extern int zfs_resume_fs(zfs_sb_t *zsb, const char *osname);
+extern int zfs_userspace_one(zfs_sb_t *zsb, zfs_userquota_prop_t type,
const char *domain, uint64_t rid, uint64_t *valuep);
-extern int zfs_userspace_many(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
+extern int zfs_userspace_many(zfs_sb_t *zsb, zfs_userquota_prop_t type,
uint64_t *cookiep, void *vbuf, uint64_t *bufsizep);
-extern int zfs_set_userquota(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
+extern int zfs_set_userquota(zfs_sb_t *zsb, zfs_userquota_prop_t type,
const char *domain, uint64_t rid, uint64_t quota);
-extern boolean_t zfs_owner_overquota(zfsvfs_t *zfsvfs, struct znode *,
+extern boolean_t zfs_owner_overquota(zfs_sb_t *zsb, struct znode *,
boolean_t isgroup);
-extern boolean_t zfs_fuid_overquota(zfsvfs_t *zfsvfs, boolean_t isgroup,
+extern boolean_t zfs_fuid_overquota(zfs_sb_t *zsb, boolean_t isgroup,
uint64_t fuid);
-extern int zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers);
-extern int zfsvfs_create(const char *name, zfsvfs_t **zfvp);
-extern void zfsvfs_free(zfsvfs_t *zfsvfs);
+extern int zfs_set_version(zfs_sb_t *zsb, uint64_t newvers);
+extern int zfs_sb_create(const char *name, zfs_sb_t **zsbp);
+extern void zfs_sb_free(zfs_sb_t *zsb);
extern int zfs_check_global_label(const char *dsname, const char *hexsl);
-extern int zfs_register_callbacks(vfs_t *vfsp);
-extern void zfs_unregister_callbacks(zfsvfs_t *zfsvfs);
-extern int zfs_domount(vfs_t *vfsp, char *osname);
-extern int zfs_umount(vfs_t *vfsp, int fflag, cred_t *cr);
-extern int zfs_root(vfs_t *vfsp, vnode_t **vpp);
-extern int zfs_statvfs(vfs_t *vfsp, struct statvfs64 *statp);
-extern int zfs_vget(vfs_t *vfsp, vnode_t **vpp, fid_t *fidp);
+extern int zfs_register_callbacks(zfs_sb_t *zsb);
+extern void zfs_unregister_callbacks(zfs_sb_t *zsb);
+extern int zfs_domount(struct super_block *sb, void *data, int silent);
+extern int zfs_umount(struct super_block *sb);
+extern int zfs_root(zfs_sb_t *zsb, struct inode **ipp);
+extern int zfs_statvfs(struct dentry *dentry, struct kstatfs *statp);
+extern int zfs_vget(struct vfsmount *vfsp, struct inode **ipp, fid_t *fidp);
#ifdef __cplusplus
}
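
With zfs_domount() and zfs_umount() now taking a struct super_block directly, the Linux glue can be a thin shim. A hypothetical sketch (zpl_fill_super() and zpl_put_super() are assumed names, not part of this diff); note the sign flip, since these ZFS routines return positive errno values while the Linux VFS expects negative ones.

static int
zpl_fill_super(struct super_block *sb, void *data, int silent)
{
	/* zfs_domount() returns a positive errno; the VFS wants -errno. */
	return (-zfs_domount(sb, data, silent));
}

static void
zpl_put_super(struct super_block *sb)
{
	(void) zfs_umount(sb);
}
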
diff --git a/include/sys/zfs_vnops.h b/include/sys/zfs_vnops.h
index 64e2210de..2cacb9c6f 100644
--- a/include/sys/zfs_vnops.h
+++ b/include/sys/zfs_vnops.h
@@ -28,50 +28,48 @@
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/cred.h>
+#include <sys/fcntl.h>
+#include <sys/pathname.h>
#ifdef __cplusplus
extern "C" {
#endif
-extern int zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr,
- caller_context_t *ct);
-extern int zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr,
- caller_context_t *ct);
-extern int zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp,
- struct pathname *pnp, int flags, vnode_t *rdir, cred_t *cr,
- caller_context_t *ct, int *direntflags, pathname_t *realpnp);
-extern int zfs_create(vnode_t *dvp, char *name, vattr_t *vap,
- int excl, int mode, vnode_t **vpp, cred_t *cr, int flag,
- caller_context_t *ct, vsecattr_t *vsecp);
-extern int zfs_remove(vnode_t *dvp, char *name, cred_t *cr,
- caller_context_t *ct, int flags);
-extern int zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap,
- vnode_t **vpp, cred_t *cr, caller_context_t *ct, int flags,
- vsecattr_t *vsecp);
-extern int zfs_rmdir(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr,
- caller_context_t *ct, int flags);
-extern int zfs_fsync(vnode_t *vp, int syncflag, cred_t *cr,
- caller_context_t *ct);
-extern int zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
- caller_context_t *ct);
-extern int zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
- caller_context_t *ct);
-extern int zfs_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm,
- cred_t *cr, caller_context_t *ct, int flags);
-extern int zfs_symlink(vnode_t *dvp, char *name, vattr_t *vap, char *link,
- cred_t *cr, caller_context_t *ct, int flags);
-extern int zfs_readlink(vnode_t *vp, uio_t *uio, cred_t *cr,
- caller_context_t *ct);
-extern int zfs_link(vnode_t *tdvp, vnode_t *svp, char *name, cred_t *cr,
- caller_context_t *ct, int flags);
-extern void zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct);
-extern int zfs_space(vnode_t *vp, int cmd, flock64_t *bfp, int flag,
- offset_t offset, cred_t *cr, caller_context_t *ct);
-extern int zfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct);
-extern int zfs_getsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag,
- cred_t *cr, caller_context_t *ct);
-extern int zfs_setsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag,
- cred_t *cr, caller_context_t *ct);
+extern int zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr);
+extern int zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr);
+extern int zfs_access(struct inode *ip, int mode, int flag, cred_t *cr);
+extern int zfs_lookup(struct inode *dip, char *nm, struct inode **ipp,
+ int flags, cred_t *cr, int *direntflags, pathname_t *realpnp);
+extern int zfs_create(struct inode *dip, char *name, vattr_t *vap, int excl,
+ int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp);
+extern int zfs_remove(struct inode *dip, char *name, cred_t *cr);
+extern int zfs_mkdir(struct inode *dip, char *dirname, vattr_t *vap,
+ struct inode **ipp, cred_t *cr, int flags, vsecattr_t *vsecp);
+extern int zfs_rmdir(struct inode *dip, char *name, struct inode *cwd,
+ cred_t *cr, int flags);
+extern int zfs_readdir(struct inode *ip, void *dirent, filldir_t filldir,
+ loff_t *pos, cred_t *cr);
+extern int zfs_fsync(struct inode *ip, int syncflag, cred_t *cr);
+extern int zfs_getattr(struct inode *ip, struct kstat *stat, int flag,
+ cred_t *cr);
+extern int zfs_setattr(struct inode *ip, struct iattr *attr, int flag,
+ cred_t *cr);
+extern int zfs_rename(struct inode *sdip, char *snm, struct inode *tdip,
+ char *tnm, cred_t *cr, int flags);
+extern int zfs_symlink(struct inode *dip, char *name, vattr_t *vap,
+ char *link, struct inode **ipp, cred_t *cr, int flags);
+extern int zfs_follow_link(struct dentry *dentry, struct nameidata *nd);
+extern int zfs_readlink(struct inode *ip, uio_t *uio, cred_t *cr);
+extern int zfs_link(struct inode *tdip, struct inode *sip,
+ char *name, cred_t *cr);
+extern void zfs_inactive(struct inode *ip);
+extern int zfs_space(struct inode *ip, int cmd, flock64_t *bfp, int flag,
+ offset_t offset, cred_t *cr);
+extern int zfs_fid(struct inode *ip, fid_t *fidp);
+extern int zfs_getsecattr(struct inode *ip, vsecattr_t *vsecp, int flag,
+ cred_t *cr);
+extern int zfs_setsecattr(struct inode *ip, vsecattr_t *vsecp, int flag,
+ cred_t *cr);
#ifdef __cplusplus
}
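
Every operation now takes a struct inode and drops the caller_context_t argument. A hedged sketch of how a Linux file_operations ->fsync callback (older, dentry-based signature) might forward to the new zfs_fsync() prototype; zpl_fsync() is an assumed name used only for illustration.

static int
zpl_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
	cred_t *cr = CRED();
	int error;

	crhold(cr);
	/* zfs_fsync() returns a positive errno; negate it for the VFS. */
	error = -zfs_fsync(dentry->d_inode, datasync, cr);
	crfree(cr);

	return (error);
}
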
diff --git a/include/sys/zfs_znode.h b/include/sys/zfs_znode.h
index 4dbecb4b5..2f25cc7fe 100644
--- a/include/sys/zfs_znode.h
+++ b/include/sys/zfs_znode.h
@@ -69,7 +69,7 @@ extern "C" {
pflags |= attr; \
else \
pflags &= ~attr; \
- VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_FLAGS(zp->z_zfsvfs), \
+ VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_FLAGS(zp->z_sb), \
&pflags, sizeof (pflags), tx)); \
}
@@ -181,8 +181,6 @@ typedef struct zfs_dirlock {
} zfs_dirlock_t;
typedef struct znode {
- struct zfsvfs *z_zfsvfs;
- vnode_t *z_vnode;
uint64_t z_id; /* object ID for this znode */
kmutex_t z_lock; /* znode modification lock */
krwlock_t z_parent_lock; /* parent lock for directories */
@@ -235,48 +233,52 @@ typedef struct znode {
/*
* Convert between znode pointers and inode pointers
*/
-#define ZTOI(ZP) (&((ZP)->z_inode))
-#define ITOZ(IP) (container_of((IP), znode_t, z_inode))
-
-/* XXX - REMOVE ME ONCE THE OTHER BUILD ISSUES ARE RESOLVED */
-#define ZTOV(ZP) ((ZP)->z_vnode)
-#define VTOZ(VP) ((znode_t *)(VP)->v_data)
+#define ZTOI(znode) (&((znode)->z_inode))
+#define ITOZ(inode) (container_of((inode), znode_t, z_inode))
+#define VTOZSB(vfs) ((zfs_sb_t *)((vfs)->mnt_sb->s_fs_info))
+#define ZTOZSB(znode) ((zfs_sb_t *)(ZTOI(znode)->i_sb->s_fs_info))
+#define ITOZSB(inode) ((zfs_sb_t *)((inode)->i_sb->s_fs_info))
+#define S_ISDEV(mode) (S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode))
/*
* ZFS_ENTER() is called on entry to each ZFS inode and vfs operation.
ZFS_EXIT() must be called before exiting the vop.
* ZFS_VERIFY_ZP() verifies the znode is valid.
*/
-#define ZFS_ENTER(zfsvfs) \
+#define ZFS_ENTER(zsb) \
{ \
- rrw_enter(&(zfsvfs)->z_teardown_lock, RW_READER, FTAG); \
- if ((zfsvfs)->z_unmounted) { \
- ZFS_EXIT(zfsvfs); \
+ rrw_enter(&(zsb)->z_teardown_lock, RW_READER, FTAG); \
+ if ((zsb)->z_unmounted) { \
+ ZFS_EXIT(zsb); \
return (EIO); \
} \
}
-#define ZFS_EXIT(zfsvfs) rrw_exit(&(zfsvfs)->z_teardown_lock, FTAG)
+#define ZFS_EXIT(zsb) \
+ { \
+ rrw_exit(&(zsb)->z_teardown_lock, FTAG); \
+ tsd_exit(); \
+ }
#define ZFS_VERIFY_ZP(zp) \
if ((zp)->z_sa_hdl == NULL) { \
- ZFS_EXIT((zp)->z_zfsvfs); \
+ ZFS_EXIT(ZTOZSB(zp)); \
return (EIO); \
- } \
+ }
/*
* Macros for dealing with dmu_buf_hold
*/
#define ZFS_OBJ_HASH(obj_num) ((obj_num) & (ZFS_OBJ_MTX_SZ - 1))
-#define ZFS_OBJ_MUTEX(zfsvfs, obj_num) \
- (&(zfsvfs)->z_hold_mtx[ZFS_OBJ_HASH(obj_num)])
-#define ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num) \
- mutex_enter(ZFS_OBJ_MUTEX((zfsvfs), (obj_num)))
-#define ZFS_OBJ_HOLD_TRYENTER(zfsvfs, obj_num) \
- mutex_tryenter(ZFS_OBJ_MUTEX((zfsvfs), (obj_num)))
-#define ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num) \
- mutex_exit(ZFS_OBJ_MUTEX((zfsvfs), (obj_num)))
+#define ZFS_OBJ_MUTEX(zsb, obj_num) \
+ (&(zsb)->z_hold_mtx[ZFS_OBJ_HASH(obj_num)])
+#define ZFS_OBJ_HOLD_ENTER(zsb, obj_num) \
+ mutex_enter(ZFS_OBJ_MUTEX((zsb), (obj_num)))
+#define ZFS_OBJ_HOLD_TRYENTER(zsb, obj_num) \
+ mutex_tryenter(ZFS_OBJ_MUTEX((zsb), (obj_num)))
+#define ZFS_OBJ_HOLD_EXIT(zsb, obj_num) \
+ mutex_exit(ZFS_OBJ_MUTEX((zsb), (obj_num)))
/*
* Macros to encode/decode ZFS stored time values from/to struct timespec
@@ -296,15 +298,15 @@ typedef struct znode {
/*
* Timestamp defines
*/
-#define ACCESSED (AT_ATIME)
-#define STATE_CHANGED (AT_CTIME)
-#define CONTENT_MODIFIED (AT_MTIME | AT_CTIME)
+#define ACCESSED (ATTR_ATIME)
+#define STATE_CHANGED (ATTR_CTIME)
+#define CONTENT_MODIFIED (ATTR_MTIME | ATTR_CTIME)
-#define ZFS_ACCESSTIME_STAMP(zfsvfs, zp) \
- if ((zfsvfs)->z_atime && !((zfsvfs)->z_vfs->vfs_flag & VFS_RDONLY)) \
+#define ZFS_ACCESSTIME_STAMP(zsb, zp) \
+ if ((zsb)->z_atime && !((zsb)->z_vfs->mnt_flags & MNT_READONLY)) \
zfs_tstamp_update_setup(zp, ACCESSED, NULL, NULL, B_FALSE);
-extern int zfs_init_fs(zfsvfs_t *, znode_t **);
+extern int zfs_init_fs(zfs_sb_t *, znode_t **);
extern void zfs_set_dataprop(objset_t *);
extern void zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *,
dmu_tx_t *tx);
@@ -314,18 +316,19 @@ extern void zfs_grow_blocksize(znode_t *, uint64_t, dmu_tx_t *);
extern int zfs_freesp(znode_t *, uint64_t, uint64_t, int, boolean_t);
extern void zfs_znode_init(void);
extern void zfs_znode_fini(void);
-extern int zfs_zget(zfsvfs_t *, uint64_t, znode_t **);
+extern int zfs_zget(zfs_sb_t *, uint64_t, znode_t **);
extern int zfs_rezget(znode_t *);
extern void zfs_zinactive(znode_t *);
extern void zfs_znode_delete(znode_t *, dmu_tx_t *);
-extern void zfs_znode_free(znode_t *);
extern void zfs_remove_op_tables(void);
extern int zfs_create_op_tables(void);
-extern int zfs_sync(vfs_t *vfsp, short flag, cred_t *cr);
+extern int zfs_sync(zfs_sb_t *, short, cred_t *);
extern dev_t zfs_cmpldev(uint64_t);
extern int zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value);
extern int zfs_get_stats(objset_t *os, nvlist_t *nv);
extern void zfs_znode_dmu_fini(znode_t *);
+extern int zfs_inode_alloc(struct super_block *, struct inode **ip);
+extern void zfs_inode_destroy(struct inode *);
extern void zfs_inode_update(znode_t *);
extern void zfs_log_create(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
@@ -347,12 +350,13 @@ extern void zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype,
extern void zfs_log_truncate(zilog_t *zilog, dmu_tx_t *tx, int txtype,
znode_t *zp, uint64_t off, uint64_t len);
extern void zfs_log_setattr(zilog_t *zilog, dmu_tx_t *tx, int txtype,
- znode_t *zp, vattr_t *vap, uint_t mask_applied, zfs_fuid_info_t *fuidp);
+ znode_t *zp, struct iattr *attr, uint_t mask_applied,
+ zfs_fuid_info_t *fuidp);
extern void zfs_log_acl(zilog_t *zilog, dmu_tx_t *tx, znode_t *zp,
vsecattr_t *vsecp, zfs_fuid_info_t *fuidp);
extern void zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx);
-extern void zfs_upgrade(zfsvfs_t *zfsvfs, dmu_tx_t *tx);
-extern int zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx);
+extern void zfs_upgrade(zfs_sb_t *zsb, dmu_tx_t *tx);
+extern int zfs_create_share_dir(zfs_sb_t *zsb, dmu_tx_t *tx);
#if defined(HAVE_UIO_RW)
extern caddr_t zfs_map_page(page_t *, enum seg_rw);
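
A minimal sketch of how the converted macros compose in an inode-based operation; zfs_example_getflags() is a made-up name used only to show the pattern of ITOZ/ITOZSB plus the ZFS_ENTER/ZFS_VERIFY_ZP/ZFS_EXIT bracket.

static int
zfs_example_getflags(struct inode *ip, uint64_t *flags)
{
	znode_t *zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);

	ZFS_ENTER(zsb);		/* returns EIO if the fs has been torn down */
	ZFS_VERIFY_ZP(zp);	/* returns EIO if the znode lost its SA handle */

	*flags = zp->z_pflags;

	ZFS_EXIT(zsb);		/* drops the teardown lock and calls tsd_exit() */
	return (0);
}
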
diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c
index 7da5c1aa0..cbd177819 100644
--- a/module/zfs/dmu_objset.c
+++ b/module/zfs/dmu_objset.c
@@ -910,10 +910,8 @@ dmu_objset_snapshot_one(const char *name, void *arg)
* permission checks for the starting dataset have already been
* performed in zfs_secpolicy_snapshot()
*/
-#ifdef HAVE_ZPL
if (sn->recursive && (err = zfs_secpolicy_snapshot_perms(name, CRED())))
return (err);
-#endif
err = dmu_objset_hold(name, sn, &os);
if (err != 0)
diff --git a/module/zfs/dsl_dataset.c b/module/zfs/dsl_dataset.c
index dfccede04..c34ac2a76 100644
--- a/module/zfs/dsl_dataset.c
+++ b/module/zfs/dsl_dataset.c
@@ -2364,13 +2364,11 @@ dsl_snapshot_rename_one(const char *name, void *arg)
* For recursive snapshot renames the parent won't be changing
* so we just pass name for both the to/from argument.
*/
-#ifdef HAVE_ZPL
err = zfs_secpolicy_rename_perms(snapname, snapname, CRED());
if (err != 0) {
strfree(snapname);
return (err == ENOENT ? 0 : err);
}
-#endif
/* XXX: Ignore for SPL version until mounting the FS is supported */
#if defined(_KERNEL) && !defined(HAVE_SPL)
diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c
index 6a0e3632c..7185540f1 100644
--- a/module/zfs/dsl_pool.c
+++ b/module/zfs/dsl_pool.c
@@ -92,7 +92,7 @@ dsl_pool_open_impl(spa_t *spa, uint64_t txg)
mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
- dp->dp_vnrele_taskq = taskq_create("zfs_vn_rele_taskq", 1, minclsyspri,
+ dp->dp_iput_taskq = taskq_create("zfs_iput_taskq", 1, minclsyspri,
1, 4, 0);
return (dp);
@@ -214,7 +214,7 @@ dsl_pool_close(dsl_pool_t *dp)
dsl_scan_fini(dp);
rw_destroy(&dp->dp_config_rwlock);
mutex_destroy(&dp->dp_lock);
- taskq_destroy(dp->dp_vnrele_taskq);
+ taskq_destroy(dp->dp_iput_taskq);
if (dp->dp_blkstats)
kmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
kmem_free(dp, sizeof (dsl_pool_t));
@@ -738,9 +738,9 @@ dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
}
taskq_t *
-dsl_pool_vnrele_taskq(dsl_pool_t *dp)
+dsl_pool_iput_taskq(dsl_pool_t *dp)
{
- return (dp->dp_vnrele_taskq);
+ return (dp->dp_iput_taskq);
}
/*
diff --git a/module/zfs/spa_config.c b/module/zfs/spa_config.c
index 79664cbe1..d84d6b0f9 100644
--- a/module/zfs/spa_config.c
+++ b/module/zfs/spa_config.c
@@ -179,7 +179,6 @@ spa_config_write(spa_config_dirent_t *dp, nvlist_t *nvl)
(void) vn_rename(temp, dp->scd_path, UIO_SYSSPACE);
}
(void) VOP_CLOSE(vp, oflags, 1, 0, kcred, NULL);
- VN_RELE(vp);
}
(void) vn_remove(temp, UIO_SYSSPACE, RMFILE);
diff --git a/module/zfs/vdev_file.c b/module/zfs/vdev_file.c
index f31389a6d..bbc85e733 100644
--- a/module/zfs/vdev_file.c
+++ b/module/zfs/vdev_file.c
@@ -130,7 +130,6 @@ vdev_file_close(vdev_t *vd)
(void) VOP_PUTPAGE(vf->vf_vnode, 0, 0, B_INVAL, kcred, NULL);
(void) VOP_CLOSE(vf->vf_vnode, spa_mode(vd->vdev_spa), 1, 0,
kcred, NULL);
- VN_RELE(vf->vf_vnode);
}
vd->vdev_delayed_close = B_FALSE;
diff --git a/module/zfs/zfs_acl.c b/module/zfs/zfs_acl.c
index 45ec20647..0ae749e6b 100644
--- a/module/zfs/zfs_acl.c
+++ b/module/zfs/zfs_acl.c
@@ -345,7 +345,7 @@ zfs_external_acl(znode_t *zp)
* changed.
*/
- if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zp->z_zfsvfs),
+ if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(ZTOZSB(zp)),
&acl_phys, sizeof (acl_phys))) == 0)
return (acl_phys.z_acl_extern_obj);
else {
@@ -368,23 +368,23 @@ static int
zfs_acl_znode_info(znode_t *zp, int *aclsize, int *aclcount,
zfs_acl_phys_t *aclphys)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
uint64_t acl_count;
int size;
int error;
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
if (zp->z_is_sa) {
- if ((error = sa_size(zp->z_sa_hdl, SA_ZPL_DACL_ACES(zfsvfs),
+ if ((error = sa_size(zp->z_sa_hdl, SA_ZPL_DACL_ACES(zsb),
&size)) != 0)
return (error);
*aclsize = size;
- if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_COUNT(zfsvfs),
+ if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_COUNT(zsb),
&acl_count, sizeof (acl_count))) != 0)
return (error);
*aclcount = acl_count;
} else {
- if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
+ if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zsb),
aclphys, sizeof (*aclphys))) != 0)
return (error);
@@ -418,7 +418,7 @@ zfs_znode_acl_version(znode_t *zp)
* changed.
*/
if ((error = sa_lookup(zp->z_sa_hdl,
- SA_ZPL_ZNODE_ACL(zp->z_zfsvfs),
+ SA_ZPL_ZNODE_ACL(ZTOZSB(zp)),
&acl_phys, sizeof (acl_phys))) == 0)
return (acl_phys.z_acl_version);
else {
@@ -444,7 +444,7 @@ zfs_acl_version(int version)
static int
zfs_acl_version_zp(znode_t *zp)
{
- return (zfs_acl_version(zp->z_zfsvfs->z_version));
+ return (zfs_acl_version(ZTOZSB(zp)->z_version));
}
zfs_acl_t *
@@ -531,7 +531,7 @@ zfs_acl_valid_ace_type(uint_t type, uint_t flags)
}
static boolean_t
-zfs_ace_valid(vtype_t obj_type, zfs_acl_t *aclp, uint16_t type, uint16_t iflags)
+zfs_ace_valid(umode_t obj_mode, zfs_acl_t *aclp, uint16_t type, uint16_t iflags)
{
/*
* first check type of entry
@@ -554,7 +554,7 @@ zfs_ace_valid(vtype_t obj_type, zfs_acl_t *aclp, uint16_t type, uint16_t iflags)
* next check inheritance level flags
*/
- if (obj_type == VDIR &&
+ if (S_ISDIR(obj_mode) &&
(iflags & (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE)))
aclp->z_hints |= ZFS_INHERIT_ACE;
@@ -648,7 +648,7 @@ zfs_ace_walk(void *datap, uint64_t cookie, int aclcnt,
* ACE FUIDs will be created later.
*/
int
-zfs_copy_ace_2_fuid(zfsvfs_t *zfsvfs, vtype_t obj_type, zfs_acl_t *aclp,
+zfs_copy_ace_2_fuid(zfs_sb_t *zsb, umode_t obj_mode, zfs_acl_t *aclp,
void *datap, zfs_ace_t *z_acl, uint64_t aclcnt, size_t *size,
zfs_fuid_info_t **fuidp, cred_t *cr)
{
@@ -666,7 +666,7 @@ zfs_copy_ace_2_fuid(zfsvfs_t *zfsvfs, vtype_t obj_type, zfs_acl_t *aclp,
entry_type = aceptr->z_hdr.z_flags & ACE_TYPE_FLAGS;
if (entry_type != ACE_OWNER && entry_type != OWNING_GROUP &&
entry_type != ACE_EVERYONE) {
- aceptr->z_fuid = zfs_fuid_create(zfsvfs, acep->a_who,
+ aceptr->z_fuid = zfs_fuid_create(zsb, acep->a_who,
cr, (entry_type == 0) ?
ZFS_ACE_USER : ZFS_ACE_GROUP, fuidp);
}
@@ -674,7 +674,7 @@ zfs_copy_ace_2_fuid(zfsvfs_t *zfsvfs, vtype_t obj_type, zfs_acl_t *aclp,
/*
* Make sure ACE is valid
*/
- if (zfs_ace_valid(obj_type, aclp, aceptr->z_hdr.z_type,
+ if (zfs_ace_valid(obj_mode, aclp, aceptr->z_hdr.z_type,
aceptr->z_hdr.z_flags) != B_TRUE)
return (EINVAL);
@@ -710,7 +710,7 @@ zfs_copy_ace_2_fuid(zfsvfs_t *zfsvfs, vtype_t obj_type, zfs_acl_t *aclp,
* Copy ZFS ACEs to fixed size ace_t layout
*/
static void
-zfs_copy_fuid_2_ace(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, cred_t *cr,
+zfs_copy_fuid_2_ace(zfs_sb_t *zsb, zfs_acl_t *aclp, cred_t *cr,
void *datap, int filter)
{
uint64_t who;
@@ -753,7 +753,7 @@ zfs_copy_fuid_2_ace(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, cred_t *cr,
if ((entry_type != ACE_OWNER &&
entry_type != OWNING_GROUP &&
entry_type != ACE_EVERYONE)) {
- acep->a_who = zfs_fuid_map_id(zfsvfs, who,
+ acep->a_who = zfs_fuid_map_id(zsb, who,
cr, (entry_type & ACE_IDENTIFIER_GROUP) ?
ZFS_ACE_GROUP : ZFS_ACE_USER);
} else {
@@ -767,7 +767,7 @@ zfs_copy_fuid_2_ace(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, cred_t *cr,
}
static int
-zfs_copy_ace_2_oldace(vtype_t obj_type, zfs_acl_t *aclp, ace_t *acep,
+zfs_copy_ace_2_oldace(umode_t obj_mode, zfs_acl_t *aclp, ace_t *acep,
zfs_oldace_t *z_acl, int aclcnt, size_t *size)
{
int i;
@@ -781,7 +781,7 @@ zfs_copy_ace_2_oldace(vtype_t obj_type, zfs_acl_t *aclp, ace_t *acep,
/*
* Make sure ACE is valid
*/
- if (zfs_ace_valid(obj_type, aclp, aceptr->z_type,
+ if (zfs_ace_valid(obj_mode, aclp, aceptr->z_type,
aceptr->z_flags) != B_TRUE)
return (EINVAL);
}
@@ -825,8 +825,8 @@ zfs_acl_xform(znode_t *zp, zfs_acl_t *aclp, cred_t *cr)
newaclnode = zfs_acl_node_alloc(aclp->z_acl_count *
sizeof (zfs_object_ace_t));
aclp->z_ops = zfs_acl_fuid_ops;
- VERIFY(zfs_copy_ace_2_fuid(zp->z_zfsvfs, ZTOV(zp)->v_type, aclp,
- oldaclp, newaclnode->z_acldata, aclp->z_acl_count,
+ VERIFY(zfs_copy_ace_2_fuid(ZTOZSB(zp), ZTOI(zp)->i_mode,
+ aclp, oldaclp, newaclnode->z_acldata, aclp->z_acl_count,
&newaclnode->z_size, NULL, cr) == 0);
newaclnode->z_ace_count = aclp->z_acl_count;
aclp->z_version = ZFS_ACL_VERSION;
@@ -1100,7 +1100,7 @@ zfs_acl_node_read(znode_t *zp, boolean_t have_lock, zfs_acl_t **aclpp,
if (!zp->z_is_sa) {
if (znode_acl.z_acl_extern_obj) {
- error = dmu_read(zp->z_zfsvfs->z_os,
+ error = dmu_read(ZTOZSB(zp)->z_os,
znode_acl.z_acl_extern_obj, 0, aclnode->z_size,
aclnode->z_acldata, DMU_READ_PREFETCH);
} else {
@@ -1108,7 +1108,7 @@ zfs_acl_node_read(znode_t *zp, boolean_t have_lock, zfs_acl_t **aclpp,
aclnode->z_size);
}
} else {
- error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_ACES(zp->z_zfsvfs),
+ error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_ACES(ZTOZSB(zp)),
aclnode->z_acldata, aclnode->z_size);
}
@@ -1295,7 +1295,7 @@ int
zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
{
int error;
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
dmu_object_type_t otype;
zfs_acl_locator_cb_t locate = { 0 };
uint64_t mode;
@@ -1309,11 +1309,11 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
zp->z_uid, zp->z_gid);
zp->z_mode = mode;
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL,
&mode, sizeof (mode));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL,
&ctime, sizeof (ctime));
if (zp->z_acl_cached) {
@@ -1324,11 +1324,11 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
/*
* Upgrade needed?
*/
- if (!zfsvfs->z_use_fuids) {
+ if (!zsb->z_use_fuids) {
otype = DMU_OT_OLDACL;
} else {
if ((aclp->z_version == ZFS_ACL_VERSION_INITIAL) &&
- (zfsvfs->z_version >= ZPL_VERSION_FUID))
+ (zsb->z_version >= ZPL_VERSION_FUID))
zfs_acl_xform(zp, aclp, cr);
ASSERT(aclp->z_version >= ZFS_ACL_VERSION_FUID);
otype = DMU_OT_ACL;
@@ -1341,9 +1341,9 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
if (zp->z_is_sa) { /* the easy case, just update the ACL attribute */
locate.cb_aclp = aclp;
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_ACES(zfsvfs),
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_ACES(zsb),
zfs_acl_data_locator, &locate, aclp->z_acl_bytes);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_COUNT(zfsvfs),
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_COUNT(zsb),
NULL, &aclp->z_acl_count, sizeof (uint64_t));
} else { /* Painful legacy way */
zfs_acl_node_t *aclnode;
@@ -1351,7 +1351,7 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
zfs_acl_phys_t acl_phys;
uint64_t aoid;
- if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
+ if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zsb),
&acl_phys, sizeof (acl_phys))) != 0)
return (error);
@@ -1365,20 +1365,20 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
*/
if (aoid &&
aclp->z_version != acl_phys.z_acl_version) {
- error = dmu_object_free(zfsvfs->z_os, aoid, tx);
+ error = dmu_object_free(zsb->z_os, aoid, tx);
if (error)
return (error);
aoid = 0;
}
if (aoid == 0) {
- aoid = dmu_object_alloc(zfsvfs->z_os,
+ aoid = dmu_object_alloc(zsb->z_os,
otype, aclp->z_acl_bytes,
otype == DMU_OT_ACL ?
DMU_OT_SYSACL : DMU_OT_NONE,
otype == DMU_OT_ACL ?
DN_MAX_BONUSLEN : 0, tx);
} else {
- (void) dmu_object_set_blocksize(zfsvfs->z_os,
+ (void) dmu_object_set_blocksize(zsb->z_os,
aoid, aclp->z_acl_bytes, 0, tx);
}
acl_phys.z_acl_extern_obj = aoid;
@@ -1386,7 +1386,7 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
aclnode = list_next(&aclp->z_acl, aclnode)) {
if (aclnode->z_ace_count == 0)
continue;
- dmu_write(zfsvfs->z_os, aoid, off,
+ dmu_write(zsb->z_os, aoid, off,
aclnode->z_size, aclnode->z_acldata, tx);
off += aclnode->z_size;
}
@@ -1396,7 +1396,7 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
* Migrating back embedded?
*/
if (acl_phys.z_acl_extern_obj) {
- error = dmu_object_free(zfsvfs->z_os,
+ error = dmu_object_free(zsb->z_os,
acl_phys.z_acl_extern_obj, tx);
if (error)
return (error);
@@ -1425,7 +1425,7 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
}
acl_phys.z_acl_version = aclp->z_version;
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zsb), NULL,
&acl_phys, sizeof (acl_phys));
}
@@ -1444,19 +1444,19 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
}
static void
-zfs_acl_chmod(zfsvfs_t *zfsvfs, uint64_t mode, zfs_acl_t *aclp)
+zfs_acl_chmod(zfs_sb_t *zsb, uint64_t mode, zfs_acl_t *aclp)
{
void *acep = NULL;
uint64_t who;
int new_count, new_bytes;
int ace_size;
- int entry_type;
+ int entry_type;
uint16_t iflags, type;
uint32_t access_mask;
zfs_acl_node_t *newnode;
- size_t abstract_size = aclp->z_ops.ace_abstract_size();
- void *zacep;
- uint32_t owner, group, everyone;
+ size_t abstract_size = aclp->z_ops.ace_abstract_size();
+ void *zacep;
+ uint32_t owner, group, everyone;
uint32_t deny1, deny2, allow0;
new_count = new_bytes = 0;
@@ -1516,7 +1516,7 @@ zfs_acl_chmod(zfsvfs_t *zfsvfs, uint64_t mode, zfs_acl_t *aclp)
* Limit permissions to be no greater than
* group permissions
*/
- if (zfsvfs->z_acl_inherit == ZFS_ACL_RESTRICTED) {
+ if (zsb->z_acl_inherit == ZFS_ACL_RESTRICTED) {
if (!(mode & S_IRGRP))
access_mask &= ~ACE_READ_DATA;
if (!(mode & S_IWGRP))
@@ -1558,7 +1558,7 @@ zfs_acl_chmod_setattr(znode_t *zp, zfs_acl_t **aclp, uint64_t mode)
mutex_enter(&zp->z_lock);
*aclp = zfs_acl_alloc(zfs_acl_version_zp(zp));
(*aclp)->z_hints = zp->z_pflags & V4_ACL_WIDE_FLAGS;
- zfs_acl_chmod(zp->z_zfsvfs, mode, *aclp);
+ zfs_acl_chmod(ZTOZSB(zp), mode, *aclp);
mutex_exit(&zp->z_lock);
mutex_exit(&zp->z_acl_lock);
ASSERT(*aclp);
@@ -1568,11 +1568,11 @@ zfs_acl_chmod_setattr(znode_t *zp, zfs_acl_t **aclp, uint64_t mode)
* strip off write_owner and write_acl
*/
static void
-zfs_restricted_update(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, void *acep)
+zfs_restricted_update(zfs_sb_t *zsb, zfs_acl_t *aclp, void *acep)
{
uint32_t mask = aclp->z_ops.ace_mask_get(acep);
- if ((zfsvfs->z_acl_inherit == ZFS_ACL_RESTRICTED) &&
+ if ((zsb->z_acl_inherit == ZFS_ACL_RESTRICTED) &&
(aclp->z_ops.ace_type_get(acep) == ALLOW)) {
mask &= ~RESTRICTED_CLEAR;
aclp->z_ops.ace_mask_set(acep, mask);
@@ -1583,14 +1583,14 @@ zfs_restricted_update(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, void *acep)
* Should ACE be inherited?
*/
static int
-zfs_ace_can_use(vtype_t vtype, uint16_t acep_flags)
+zfs_ace_can_use(umode_t obj_mode, uint16_t acep_flags)
{
int iflags = (acep_flags & 0xf);
- if ((vtype == VDIR) && (iflags & ACE_DIRECTORY_INHERIT_ACE))
+ if (S_ISDIR(obj_mode) && (iflags & ACE_DIRECTORY_INHERIT_ACE))
return (1);
else if (iflags & ACE_FILE_INHERIT_ACE)
- return (!((vtype == VDIR) &&
+ return (!(S_ISDIR(obj_mode) &&
(iflags & ACE_NO_PROPAGATE_INHERIT_ACE)));
return (0);
}
@@ -1599,7 +1599,7 @@ zfs_ace_can_use(vtype_t vtype, uint16_t acep_flags)
* inherit inheritable ACEs from parent
*/
static zfs_acl_t *
-zfs_acl_inherit(zfsvfs_t *zfsvfs, vtype_t vtype, zfs_acl_t *paclp,
+zfs_acl_inherit(zfs_sb_t *zsb, umode_t obj_mode, zfs_acl_t *paclp,
uint64_t mode, boolean_t *need_chmod)
{
void *pacep;
@@ -1612,21 +1612,21 @@ zfs_acl_inherit(zfsvfs_t *zfsvfs, vtype_t vtype, zfs_acl_t *paclp,
size_t ace_size;
void *data1, *data2;
size_t data1sz, data2sz;
- boolean_t vdir = vtype == VDIR;
- boolean_t vreg = vtype == VREG;
+ boolean_t vdir = S_ISDIR(obj_mode);
+ boolean_t vreg = S_ISREG(obj_mode);
boolean_t passthrough, passthrough_x, noallow;
passthrough_x =
- zfsvfs->z_acl_inherit == ZFS_ACL_PASSTHROUGH_X;
+ zsb->z_acl_inherit == ZFS_ACL_PASSTHROUGH_X;
passthrough = passthrough_x ||
- zfsvfs->z_acl_inherit == ZFS_ACL_PASSTHROUGH;
+ zsb->z_acl_inherit == ZFS_ACL_PASSTHROUGH;
noallow =
- zfsvfs->z_acl_inherit == ZFS_ACL_NOALLOW;
+ zsb->z_acl_inherit == ZFS_ACL_NOALLOW;
*need_chmod = B_TRUE;
pacep = NULL;
aclp = zfs_acl_alloc(paclp->z_version);
- if (zfsvfs->z_acl_inherit == ZFS_ACL_DISCARD || vtype == VLNK)
+ if (zsb->z_acl_inherit == ZFS_ACL_DISCARD || S_ISLNK(obj_mode))
return (aclp);
while ((pacep = zfs_acl_next_ace(paclp, pacep, &who,
&access_mask, &iflags, &type))) {
@@ -1642,7 +1642,7 @@ zfs_acl_inherit(zfsvfs_t *zfsvfs, vtype_t vtype, zfs_acl_t *paclp,
ace_size = aclp->z_ops.ace_size(pacep);
- if (!zfs_ace_can_use(vtype, iflags))
+ if (!zfs_ace_can_use(obj_mode, iflags))
continue;
/*
@@ -1690,7 +1690,7 @@ zfs_acl_inherit(zfsvfs_t *zfsvfs, vtype_t vtype, zfs_acl_t *paclp,
newflags &= ~ALL_INHERIT;
aclp->z_ops.ace_flags_set(acep,
newflags|ACE_INHERITED_ACE);
- zfs_restricted_update(zfsvfs, aclp, acep);
+ zfs_restricted_update(zsb, aclp, acep);
continue;
}
@@ -1723,7 +1723,7 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
vsecattr_t *vsecp, zfs_acl_ids_t *acl_ids)
{
int error;
- zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(dzp);
zfs_acl_t *paclp;
#ifdef HAVE_KSID
gid_t gid;
@@ -1732,11 +1732,11 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
boolean_t inherited = B_FALSE;
bzero(acl_ids, sizeof (zfs_acl_ids_t));
- acl_ids->z_mode = MAKEIMODE(vap->va_type, vap->va_mode);
+ acl_ids->z_mode = vap->va_mode;
if (vsecp)
- if ((error = zfs_vsec_2_aclp(zfsvfs, vap->va_type, vsecp, cr,
- &acl_ids->z_fuidp, &acl_ids->z_aclp)) != 0)
+ if ((error = zfs_vsec_2_aclp(zsb, vap->va_mode, vsecp,
+ cr, &acl_ids->z_fuidp, &acl_ids->z_aclp)) != 0)
return (error);
acl_ids->z_fuid = vap->va_uid;
@@ -1745,21 +1745,19 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
/*
* Determine uid and gid.
*/
- if ((flag & IS_ROOT_NODE) || zfsvfs->z_replay ||
- ((flag & IS_XATTR) && (vap->va_type == VDIR))) {
- acl_ids->z_fuid = zfs_fuid_create(zfsvfs,
- (uint64_t)vap->va_uid, cr,
- ZFS_OWNER, &acl_ids->z_fuidp);
- acl_ids->z_fgid = zfs_fuid_create(zfsvfs,
- (uint64_t)vap->va_gid, cr,
- ZFS_GROUP, &acl_ids->z_fuidp);
+ if ((flag & IS_ROOT_NODE) || zsb->z_replay ||
+ ((flag & IS_XATTR) && (S_ISDIR(vap->va_mode)))) {
+ acl_ids->z_fuid = zfs_fuid_create(zsb, (uint64_t)vap->va_uid,
+ cr, ZFS_OWNER, &acl_ids->z_fuidp);
+ acl_ids->z_fgid = zfs_fuid_create(zsb, (uint64_t)vap->va_gid,
+ cr, ZFS_GROUP, &acl_ids->z_fuidp);
gid = vap->va_gid;
} else {
- acl_ids->z_fuid = zfs_fuid_create_cred(zfsvfs, ZFS_OWNER,
+ acl_ids->z_fuid = zfs_fuid_create_cred(zsb, ZFS_OWNER,
cr, &acl_ids->z_fuidp);
acl_ids->z_fgid = 0;
if (vap->va_mask & AT_GID) {
- acl_ids->z_fgid = zfs_fuid_create(zfsvfs,
+ acl_ids->z_fgid = zfs_fuid_create(zsb,
(uint64_t)vap->va_gid,
cr, ZFS_GROUP, &acl_ids->z_fuidp);
gid = vap->va_gid;
@@ -1774,13 +1772,13 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
uint32_t rid;
acl_ids->z_fgid = dzp->z_gid;
- gid = zfs_fuid_map_id(zfsvfs, acl_ids->z_fgid,
+ gid = zfs_fuid_map_id(zsb, acl_ids->z_fgid,
cr, ZFS_GROUP);
- if (zfsvfs->z_use_fuids &&
+ if (zsb->z_use_fuids &&
IS_EPHEMERAL(acl_ids->z_fgid)) {
domain = zfs_fuid_idx_domain(
- &zfsvfs->z_fuid_idx,
+ &zsb->z_fuid_idx,
FUID_INDEX(acl_ids->z_fgid));
rid = FUID_RID(acl_ids->z_fgid);
zfs_fuid_node_add(&acl_ids->z_fuidp,
@@ -1789,7 +1787,7 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
acl_ids->z_fgid, ZFS_GROUP);
}
} else {
- acl_ids->z_fgid = zfs_fuid_create_cred(zfsvfs,
+ acl_ids->z_fgid = zfs_fuid_create_cred(zsb,
ZFS_GROUP, cr, &acl_ids->z_fuidp);
gid = crgetgid(cr);
}
@@ -1805,7 +1803,7 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
*/
if (!(flag & IS_ROOT_NODE) && (dzp->z_mode & S_ISGID) &&
- (vap->va_type == VDIR)) {
+ (S_ISDIR(vap->va_mode))) {
acl_ids->z_mode |= S_ISGID;
} else {
if ((acl_ids->z_mode & S_ISGID) &&
@@ -1816,13 +1814,13 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
if (acl_ids->z_aclp == NULL) {
mutex_enter(&dzp->z_acl_lock);
mutex_enter(&dzp->z_lock);
- if (!(flag & IS_ROOT_NODE) && (ZTOV(dzp)->v_type == VDIR &&
+ if (!(flag & IS_ROOT_NODE) && (S_ISDIR(ZTOI(dzp)->i_mode) &&
(dzp->z_pflags & ZFS_INHERIT_ACE)) &&
!(dzp->z_pflags & ZFS_XATTR)) {
VERIFY(0 == zfs_acl_node_read(dzp, B_TRUE,
&paclp, B_FALSE));
- acl_ids->z_aclp = zfs_acl_inherit(zfsvfs,
- vap->va_type, paclp, acl_ids->z_mode, &need_chmod);
+ acl_ids->z_aclp = zfs_acl_inherit(zsb,
+ vap->va_mode, paclp, acl_ids->z_mode, &need_chmod);
inherited = B_TRUE;
} else {
acl_ids->z_aclp =
@@ -1832,9 +1830,9 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
mutex_exit(&dzp->z_lock);
mutex_exit(&dzp->z_acl_lock);
if (need_chmod) {
- acl_ids->z_aclp->z_hints |= (vap->va_type == VDIR) ?
+ acl_ids->z_aclp->z_hints |= S_ISDIR(vap->va_mode) ?
ZFS_ACL_AUTO_INHERIT : 0;
- zfs_acl_chmod(zfsvfs, acl_ids->z_mode, acl_ids->z_aclp);
+ zfs_acl_chmod(zsb, acl_ids->z_mode, acl_ids->z_aclp);
}
}
@@ -1864,10 +1862,10 @@ zfs_acl_ids_free(zfs_acl_ids_t *acl_ids)
}
boolean_t
-zfs_acl_ids_overquota(zfsvfs_t *zfsvfs, zfs_acl_ids_t *acl_ids)
+zfs_acl_ids_overquota(zfs_sb_t *zsb, zfs_acl_ids_t *acl_ids)
{
- return (zfs_fuid_overquota(zfsvfs, B_FALSE, acl_ids->z_fuid) ||
- zfs_fuid_overquota(zfsvfs, B_TRUE, acl_ids->z_fgid));
+ return (zfs_fuid_overquota(zsb, B_FALSE, acl_ids->z_fuid) ||
+ zfs_fuid_overquota(zsb, B_TRUE, acl_ids->z_fgid));
}
/*
@@ -1939,7 +1937,7 @@ zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
vsecp->vsa_aclentsz = aclsz;
if (aclp->z_version == ZFS_ACL_VERSION_FUID)
- zfs_copy_fuid_2_ace(zp->z_zfsvfs, aclp, cr,
+ zfs_copy_fuid_2_ace(ZTOZSB(zp), aclp, cr,
vsecp->vsa_aclentp, !(mask & VSA_ACE_ALLTYPES));
else {
zfs_acl_node_t *aclnode;
@@ -1971,7 +1969,7 @@ zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
}
int
-zfs_vsec_2_aclp(zfsvfs_t *zfsvfs, vtype_t obj_type,
+zfs_vsec_2_aclp(zfs_sb_t *zsb, umode_t obj_mode,
vsecattr_t *vsecp, cred_t *cr, zfs_fuid_info_t **fuidp, zfs_acl_t **zaclp)
{
zfs_acl_t *aclp;
@@ -1982,12 +1980,12 @@ zfs_vsec_2_aclp(zfsvfs_t *zfsvfs, vtype_t obj_type,
if (vsecp->vsa_aclcnt > MAX_ACL_ENTRIES || vsecp->vsa_aclcnt <= 0)
return (EINVAL);
- aclp = zfs_acl_alloc(zfs_acl_version(zfsvfs->z_version));
+ aclp = zfs_acl_alloc(zfs_acl_version(zsb->z_version));
aclp->z_hints = 0;
aclnode = zfs_acl_node_alloc(aclcnt * sizeof (zfs_object_ace_t));
if (aclp->z_version == ZFS_ACL_VERSION_INITIAL) {
- if ((error = zfs_copy_ace_2_oldace(obj_type, aclp,
+ if ((error = zfs_copy_ace_2_oldace(obj_mode, aclp,
(ace_t *)vsecp->vsa_aclentp, aclnode->z_acldata,
aclcnt, &aclnode->z_size)) != 0) {
zfs_acl_free(aclp);
@@ -1995,7 +1993,7 @@ zfs_vsec_2_aclp(zfsvfs_t *zfsvfs, vtype_t obj_type,
return (error);
}
} else {
- if ((error = zfs_copy_ace_2_fuid(zfsvfs, obj_type, aclp,
+ if ((error = zfs_copy_ace_2_fuid(zsb, obj_mode, aclp,
vsecp->vsa_aclentp, aclnode->z_acldata, aclcnt,
&aclnode->z_size, fuidp, cr)) != 0) {
zfs_acl_free(aclp);
@@ -2031,8 +2029,8 @@ zfs_vsec_2_aclp(zfsvfs_t *zfsvfs, vtype_t obj_type,
int
zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- zilog_t *zilog = zfsvfs->z_log;
+ zfs_sb_t *zsb = ZTOZSB(zp);
+ zilog_t *zilog = zsb->z_log;
ulong_t mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT);
dmu_tx_t *tx;
int error;
@@ -2050,7 +2048,7 @@ zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
if ((error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr)))
return (error);
- error = zfs_vsec_2_aclp(zfsvfs, ZTOV(zp)->v_type, vsecp, cr, &fuidp,
+ error = zfs_vsec_2_aclp(zsb, ZTOI(zp)->i_mode, vsecp, cr, &fuidp,
&aclp);
if (error)
return (error);
@@ -2067,13 +2065,13 @@ top:
mutex_enter(&zp->z_acl_lock);
mutex_enter(&zp->z_lock);
- tx = dmu_tx_create(zfsvfs->z_os);
+ tx = dmu_tx_create(zsb->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
- fuid_dirtied = zfsvfs->z_fuid_dirty;
+ fuid_dirtied = zsb->z_fuid_dirty;
if (fuid_dirtied)
- zfs_fuid_txhold(zfsvfs, tx);
+ zfs_fuid_txhold(zsb, tx);
/*
* If old version and ACL won't fit in bonus and we aren't
@@ -2081,7 +2079,7 @@ top:
*/
if ((acl_obj = zfs_external_acl(zp)) != 0) {
- if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
+ if (zsb->z_version >= ZPL_VERSION_FUID &&
zfs_znode_acl_version(zp) <= ZFS_ACL_VERSION_INITIAL) {
dmu_tx_hold_free(tx, acl_obj, 0,
DMU_OBJECT_END);
@@ -2116,7 +2114,7 @@ top:
zp->z_acl_cached = aclp;
if (fuid_dirtied)
- zfs_fuid_sync(zfsvfs, tx);
+ zfs_fuid_sync(zsb, tx);
zfs_log_acl(zilog, tx, zp, vsecp, fuidp);
@@ -2139,9 +2137,9 @@ static int
zfs_zaccess_dataset_check(znode_t *zp, uint32_t v4_mode)
{
if ((v4_mode & WRITE_MASK) &&
- (zp->z_zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) &&
- (!IS_DEVVP(ZTOV(zp)) ||
- (IS_DEVVP(ZTOV(zp)) && (v4_mode & WRITE_MASK_ATTRS)))) {
+ (ZTOZSB(zp)->z_vfs->mnt_flags & MNT_READONLY) &&
+ (!S_ISDEV(ZTOI(zp)->i_mode) ||
+ (S_ISDEV(ZTOI(zp)->i_mode) && (v4_mode & WRITE_MASK_ATTRS)))) {
return (EROFS);
}
@@ -2149,9 +2147,9 @@ zfs_zaccess_dataset_check(znode_t *zp, uint32_t v4_mode)
* Only check for READONLY on non-directories.
*/
if ((v4_mode & WRITE_MASK_DATA) &&
- (((ZTOV(zp)->v_type != VDIR) &&
+ ((!S_ISDIR(ZTOI(zp)->i_mode) &&
(zp->z_pflags & (ZFS_READONLY | ZFS_IMMUTABLE))) ||
- (ZTOV(zp)->v_type == VDIR &&
+ (S_ISDIR(ZTOI(zp)->i_mode) &&
(zp->z_pflags & ZFS_IMMUTABLE)))) {
return (EPERM);
}
@@ -2198,11 +2196,11 @@ static int
zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode,
boolean_t anyaccess, cred_t *cr)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
zfs_acl_t *aclp;
int error;
uid_t uid = crgetuid(cr);
- uint64_t who;
+ uint64_t who;
uint16_t type, iflags;
uint16_t entry_type;
uint32_t access_mask;
@@ -2231,7 +2229,8 @@ zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode,
if (!zfs_acl_valid_ace_type(type, iflags))
continue;
- if (ZTOV(zp)->v_type == VDIR && (iflags & ACE_INHERIT_ONLY_ACE))
+ if (S_ISDIR(ZTOI(zp)->i_mode) &&
+ (iflags & ACE_INHERIT_ONLY_ACE))
continue;
/* Skip ACE if it does not affect any AoI */
@@ -2252,7 +2251,7 @@ zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode,
who = gowner;
/*FALLTHROUGH*/
case ACE_IDENTIFIER_GROUP:
- checkit = zfs_groupmember(zfsvfs, who, cr);
+ checkit = zfs_groupmember(zsb, who, cr);
break;
case ACE_EVERYONE:
checkit = B_TRUE;
@@ -2263,7 +2262,7 @@ zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode,
if (entry_type == 0) {
uid_t newid;
- newid = zfs_fuid_map_id(zfsvfs, who, cr,
+ newid = zfs_fuid_map_id(zsb, who, cr,
ZFS_ACE_USER);
if (newid != IDMAP_WK_CREATOR_OWNER_UID &&
uid == newid)
@@ -2325,8 +2324,8 @@ zfs_has_access(znode_t *zp, cred_t *cr)
if (zfs_zaccess_aces_check(zp, &have, B_TRUE, cr) != 0) {
uid_t owner;
- owner = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_uid, cr, ZFS_OWNER);
- return (secpolicy_vnode_any_access(cr, ZTOV(zp), owner) == 0);
+ owner = zfs_fuid_map_id(ZTOZSB(zp), zp->z_uid, cr, ZFS_OWNER);
+ return (secpolicy_vnode_any_access(cr, ZTOI(zp), owner) == 0);
}
return (B_TRUE);
}
@@ -2335,7 +2334,7 @@ static int
zfs_zaccess_common(znode_t *zp, uint32_t v4_mode, uint32_t *working_mode,
boolean_t *check_privs, boolean_t skipaclchk, cred_t *cr)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
int err;
*working_mode = v4_mode;
@@ -2344,7 +2343,7 @@ zfs_zaccess_common(znode_t *zp, uint32_t v4_mode, uint32_t *working_mode,
/*
* Short circuit empty requests
*/
- if (v4_mode == 0 || zfsvfs->z_replay) {
+ if (v4_mode == 0 || zsb->z_replay) {
*working_mode = 0;
return (0);
}
@@ -2391,7 +2390,7 @@ zfs_fastaccesschk_execute(znode_t *zdp, cred_t *cr)
return (EACCES);
is_attr = ((zdp->z_pflags & ZFS_XATTR) &&
- (ZTOV(zdp)->v_type == VDIR));
+ (S_ISDIR(ZTOI(zdp)->i_mode)));
if (is_attr)
goto slow;
@@ -2439,9 +2438,9 @@ zfs_fastaccesschk_execute(znode_t *zdp, cred_t *cr)
slow:
DTRACE_PROBE(zfs__fastpath__execute__access__miss);
- ZFS_ENTER(zdp->z_zfsvfs);
+ ZFS_ENTER(ZTOZSB(zdp));
error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr);
- ZFS_EXIT(zdp->z_zfsvfs);
+ ZFS_EXIT(ZTOZSB(zdp));
return (error);
}
@@ -2456,13 +2455,13 @@ zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr)
uint32_t working_mode;
int error;
int is_attr;
- boolean_t check_privs;
+ boolean_t check_privs;
znode_t *xzp;
- znode_t *check_zp = zp;
+ znode_t *check_zp = zp;
mode_t needed_bits;
uid_t owner;
- is_attr = ((zp->z_pflags & ZFS_XATTR) && (ZTOV(zp)->v_type == VDIR));
+ is_attr = ((zp->z_pflags & ZFS_XATTR) && S_ISDIR(ZTOI(zp)->i_mode));
/*
* If attribute then validate against base file
@@ -2471,11 +2470,11 @@ zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr)
uint64_t parent;
if ((error = sa_lookup(zp->z_sa_hdl,
- SA_ZPL_PARENT(zp->z_zfsvfs), &parent,
+ SA_ZPL_PARENT(ZTOZSB(zp)), &parent,
sizeof (parent))) != 0)
return (error);
- if ((error = zfs_zget(zp->z_zfsvfs,
+ if ((error = zfs_zget(ZTOZSB(zp),
parent, &xzp)) != 0) {
return (error);
}
@@ -2497,11 +2496,11 @@ zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr)
}
}
- owner = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_uid, cr, ZFS_OWNER);
+ owner = zfs_fuid_map_id(ZTOZSB(zp), zp->z_uid, cr, ZFS_OWNER);
/*
- * Map the bits required to the standard vnode flags VREAD|VWRITE|VEXEC
- * in needed_bits. Map the bits mapped by working_mode (currently
- * missing) in missing_bits.
+ * Map the bits required to the standard inode flags
+ * S_IRUSR|S_IWUSR|S_IXUSR in the needed_bits. Map the bits
+ * mapped by working_mode (currently missing) in missing_bits.
* Call secpolicy_vnode_access2() with (needed_bits & ~checkmode),
* needed_bits.
*/
@@ -2514,24 +2513,24 @@ zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr)
if (working_mode & (ACE_READ_DATA|ACE_READ_NAMED_ATTRS|
ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_SYNCHRONIZE))
- needed_bits |= VREAD;
+ needed_bits |= S_IRUSR;
if (working_mode & (ACE_WRITE_DATA|ACE_WRITE_NAMED_ATTRS|
ACE_APPEND_DATA|ACE_WRITE_ATTRIBUTES|ACE_SYNCHRONIZE))
- needed_bits |= VWRITE;
+ needed_bits |= S_IWUSR;
if (working_mode & ACE_EXECUTE)
- needed_bits |= VEXEC;
+ needed_bits |= S_IXUSR;
if ((error = zfs_zaccess_common(check_zp, mode, &working_mode,
&check_privs, skipaclchk, cr)) == 0) {
if (is_attr)
- VN_RELE(ZTOV(xzp));
- return (secpolicy_vnode_access2(cr, ZTOV(zp), owner,
+ iput(ZTOI(xzp));
+ return (secpolicy_vnode_access2(cr, ZTOI(zp), owner,
needed_bits, needed_bits));
}
if (error && !check_privs) {
if (is_attr)
- VN_RELE(ZTOV(xzp));
+ iput(ZTOI(xzp));
return (error);
}
@@ -2556,14 +2555,14 @@ zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr)
if (working_mode & (ACE_READ_DATA|ACE_READ_NAMED_ATTRS|
ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_SYNCHRONIZE))
- checkmode |= VREAD;
+ checkmode |= S_IRUSR;
if (working_mode & (ACE_WRITE_DATA|ACE_WRITE_NAMED_ATTRS|
ACE_APPEND_DATA|ACE_WRITE_ATTRIBUTES|ACE_SYNCHRONIZE))
- checkmode |= VWRITE;
+ checkmode |= S_IWUSR;
if (working_mode & ACE_EXECUTE)
- checkmode |= VEXEC;
+ checkmode |= S_IXUSR;
- error = secpolicy_vnode_access2(cr, ZTOV(check_zp), owner,
+ error = secpolicy_vnode_access2(cr, ZTOI(check_zp), owner,
needed_bits & ~checkmode, needed_bits);
if (error == 0 && (working_mode & ACE_WRITE_OWNER))
@@ -2588,19 +2587,19 @@ zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr)
}
}
} else if (error == 0) {
- error = secpolicy_vnode_access2(cr, ZTOV(zp), owner,
+ error = secpolicy_vnode_access2(cr, ZTOI(zp), owner,
needed_bits, needed_bits);
}
if (is_attr)
- VN_RELE(ZTOV(xzp));
+ iput(ZTOI(xzp));
return (error);
}
/*
- * Translate traditional unix VREAD/VWRITE/VEXEC mode into
+ * Translate traditional unix S_IRUSR/S_IWUSR/S_IXUSR mode into
* native ACL format and call zfs_zaccess()
*/
int
@@ -2627,10 +2626,10 @@ zfs_delete_final_check(znode_t *zp, znode_t *dzp,
int error;
uid_t downer;
- downer = zfs_fuid_map_id(dzp->z_zfsvfs, dzp->z_uid, cr, ZFS_OWNER);
+ downer = zfs_fuid_map_id(ZTOZSB(dzp), dzp->z_uid, cr, ZFS_OWNER);
- error = secpolicy_vnode_access2(cr, ZTOV(dzp),
- downer, available_perms, VWRITE|VEXEC);
+ error = secpolicy_vnode_access2(cr, ZTOI(dzp),
+ downer, available_perms, S_IWUSR|S_IXUSR);
if (error == 0)
error = zfs_sticky_remove_access(dzp, zp, cr);
@@ -2750,8 +2749,8 @@ zfs_zaccess_delete(znode_t *dzp, znode_t *zp, cred_t *cr)
* Fourth row
*/
- available_perms = (dzp_working_mode & ACE_WRITE_DATA) ? 0 : VWRITE;
- available_perms |= (dzp_working_mode & ACE_EXECUTE) ? 0 : VEXEC;
+ available_perms = (dzp_working_mode & ACE_WRITE_DATA) ? 0 : S_IWUSR;
+ available_perms |= (dzp_working_mode & ACE_EXECUTE) ? 0 : S_IXUSR;
return (zfs_delete_final_check(zp, dzp, available_perms, cr));
@@ -2767,7 +2766,7 @@ zfs_zaccess_rename(znode_t *sdzp, znode_t *szp, znode_t *tdzp,
if (szp->z_pflags & ZFS_AV_QUARANTINED)
return (EACCES);
- add_perm = (ZTOV(szp)->v_type == VDIR) ?
+ add_perm = S_ISDIR(ZTOI(szp)->i_mode) ?
ACE_ADD_SUBDIRECTORY : ACE_ADD_FILE;
/*
diff --git a/module/zfs/zfs_dir.c b/module/zfs/zfs_dir.c
index aced2886a..f54ed1912 100644
--- a/module/zfs/zfs_dir.c
+++ b/module/zfs/zfs_dir.c
@@ -22,7 +22,6 @@
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
*/
-#ifdef HAVE_ZPL
#include <sys/types.h>
#include <sys/param.h>
@@ -62,12 +61,12 @@
* of names after deciding which is the appropriate lookup interface.
*/
static int
-zfs_match_find(zfsvfs_t *zfsvfs, znode_t *dzp, char *name, boolean_t exact,
+zfs_match_find(zfs_sb_t *zsb, znode_t *dzp, char *name, boolean_t exact,
boolean_t update, int *deflags, pathname_t *rpnp, uint64_t *zoid)
{
int error;
- if (zfsvfs->z_norm) {
+ if (zsb->z_norm) {
matchtype_t mt = MT_FIRST;
boolean_t conflict = B_FALSE;
size_t bufsz = 0;
@@ -83,17 +82,19 @@ zfs_match_find(zfsvfs_t *zfsvfs, znode_t *dzp, char *name, boolean_t exact,
* In the non-mixed case we only expect there would ever
* be one match, but we need to use the normalizing lookup.
*/
- error = zap_lookup_norm(zfsvfs->z_os, dzp->z_id, name, 8, 1,
+ error = zap_lookup_norm(zsb->z_os, dzp->z_id, name, 8, 1,
zoid, mt, buf, bufsz, &conflict);
if (!error && deflags)
*deflags = conflict ? ED_CASE_CONFLICT : 0;
} else {
- error = zap_lookup(zfsvfs->z_os, dzp->z_id, name, 8, 1, zoid);
+ error = zap_lookup(zsb->z_os, dzp->z_id, name, 8, 1, zoid);
}
*zoid = ZFS_DIRENT_OBJ(*zoid);
+#ifdef HAVE_DNLC
if (error == ENOENT && update)
- dnlc_update(ZTOV(dzp), name, DNLC_NO_VNODE);
+ dnlc_update(ZTOI(dzp), name, DNLC_NO_VNODE);
+#endif /* HAVE_DNLC */
return (error);
}
@@ -137,12 +138,14 @@ int
zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
int flag, int *direntflags, pathname_t *realpnp)
{
- zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(dzp);
zfs_dirlock_t *dl;
boolean_t update;
boolean_t exact;
uint64_t zoid;
+#ifdef HAVE_DNLC
vnode_t *vp = NULL;
+#endif /* HAVE_DNLC */
int error = 0;
int cmpflags;
@@ -160,7 +163,7 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
/*
* Case sensitivity and normalization preferences are set when
* the file system is created. These are stored in the
- * zfsvfs->z_case and zfsvfs->z_norm fields. These choices
+ * zsb->z_case and zsb->z_norm fields. These choices
* affect what vnodes can be cached in the DNLC, how we
* perform zap lookups, and the "width" of our dirlocks.
*
@@ -180,8 +183,8 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
* access.
*/
exact =
- ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE) && (flag & ZCIEXACT)) ||
- ((zfsvfs->z_case == ZFS_CASE_MIXED) && !(flag & ZCILOOK));
+ ((zsb->z_case == ZFS_CASE_INSENSITIVE) && (flag & ZCIEXACT)) ||
+ ((zsb->z_case == ZFS_CASE_MIXED) && !(flag & ZCILOOK));
/*
* Only look in or update the DNLC if we are looking for the
@@ -193,9 +196,9 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
* Maybe can add TO-UPPERed version of name to dnlc in ci-only
* case for performance improvement?
*/
- update = !zfsvfs->z_norm ||
- ((zfsvfs->z_case == ZFS_CASE_MIXED) &&
- !(zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER) && !(flag & ZCILOOK));
+ update = !zsb->z_norm ||
+ ((zsb->z_case == ZFS_CASE_MIXED) &&
+ !(zsb->z_norm & ~U8_TEXTPREP_TOUPPER) && !(flag & ZCILOOK));
/*
* ZRENAMING indicates we are in a situation where we should
@@ -208,7 +211,7 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
if (flag & ZRENAMING)
cmpflags = 0;
else
- cmpflags = zfsvfs->z_norm;
+ cmpflags = zsb->z_norm;
/*
* Wait until there are no locks on this name.
@@ -288,29 +291,34 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
* See if there's an object by this name; if so, put a hold on it.
*/
if (flag & ZXATTR) {
- error = sa_lookup(dzp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &zoid,
+ error = sa_lookup(dzp->z_sa_hdl, SA_ZPL_XATTR(zsb), &zoid,
sizeof (zoid));
if (error == 0)
error = (zoid == 0 ? ENOENT : 0);
} else {
+#ifdef HAVE_DNLC
if (update)
- vp = dnlc_lookup(ZTOV(dzp), name);
+ vp = dnlc_lookup(ZTOI(dzp), name);
if (vp == DNLC_NO_VNODE) {
- VN_RELE(vp);
+ iput(vp);
error = ENOENT;
} else if (vp) {
if (flag & ZNEW) {
zfs_dirent_unlock(dl);
- VN_RELE(vp);
+ iput(vp);
return (EEXIST);
}
*dlpp = dl;
*zpp = VTOZ(vp);
return (0);
} else {
- error = zfs_match_find(zfsvfs, dzp, name, exact,
+ error = zfs_match_find(zsb, dzp, name, exact,
update, direntflags, realpnp, &zoid);
}
+#else
+ error = zfs_match_find(zsb, dzp, name, exact,
+ update, direntflags, realpnp, &zoid);
+#endif /* HAVE_DNLC */
}
if (error) {
if (error != ENOENT || (flag & ZEXISTS)) {
@@ -322,13 +330,15 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
zfs_dirent_unlock(dl);
return (EEXIST);
}
- error = zfs_zget(zfsvfs, zoid, zpp);
+ error = zfs_zget(zsb, zoid, zpp);
if (error) {
zfs_dirent_unlock(dl);
return (error);
}
+#ifdef HAVE_DNLC
if (!(flag & ZXATTR) && update)
- dnlc_update(ZTOV(dzp), name, ZTOV(*zpp));
+ dnlc_update(ZTOI(dzp), name, ZTOI(*zpp));
+#endif /* HAVE_DNLC */
}
*dlpp = dl;
@@ -377,7 +387,7 @@ zfs_dirent_unlock(zfs_dirlock_t *dl)
* special pseudo-directory.
*/
int
-zfs_dirlook(znode_t *dzp, char *name, vnode_t **vpp, int flags,
+zfs_dirlook(znode_t *dzp, char *name, struct inode **ipp, int flags,
int *deflg, pathname_t *rpnp)
{
zfs_dirlock_t *dl;
@@ -386,31 +396,35 @@ zfs_dirlook(znode_t *dzp, char *name, vnode_t **vpp, int flags,
uint64_t parent;
if (name[0] == 0 || (name[0] == '.' && name[1] == 0)) {
- *vpp = ZTOV(dzp);
- VN_HOLD(*vpp);
+ *ipp = ZTOI(dzp);
+ igrab(*ipp);
} else if (name[0] == '.' && name[1] == '.' && name[2] == 0) {
- zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(dzp);
/*
* If we are a snapshot mounted under .zfs, return
	 * the inode for the snapshot directory.
*/
if ((error = sa_lookup(dzp->z_sa_hdl,
- SA_ZPL_PARENT(zfsvfs), &parent, sizeof (parent))) != 0)
+ SA_ZPL_PARENT(zsb), &parent, sizeof (parent))) != 0)
return (error);
- if (parent == dzp->z_id && zfsvfs->z_parent != zfsvfs) {
- error = zfsctl_root_lookup(zfsvfs->z_parent->z_ctldir,
- "snapshot", vpp, NULL, 0, NULL, kcred,
+#ifdef HAVE_SNAPSHOT
+ if (parent == dzp->z_id && zsb->z_parent != zsb) {
+ error = zfsctl_root_lookup(zsb->z_parent->z_ctldir,
+ "snapshot", ipp, NULL, 0, NULL, kcred,
NULL, NULL, NULL);
return (error);
}
+#endif /* HAVE_SNAPSHOT */
rw_enter(&dzp->z_parent_lock, RW_READER);
- error = zfs_zget(zfsvfs, parent, &zp);
+ error = zfs_zget(zsb, parent, &zp);
if (error == 0)
- *vpp = ZTOV(zp);
+ *ipp = ZTOI(zp);
rw_exit(&dzp->z_parent_lock);
+#ifdef HAVE_SNAPSHOT
} else if (zfs_has_ctldir(dzp) && strcmp(name, ZFS_CTLDIR_NAME) == 0) {
- *vpp = zfsctl_root(dzp);
+ *ipp = zfsctl_root(dzp);
+#endif /* HAVE_SNAPSHOT */
} else {
int zf;
@@ -420,7 +434,7 @@ zfs_dirlook(znode_t *dzp, char *name, vnode_t **vpp, int flags,
error = zfs_dirent_lock(&dl, dzp, name, &zp, zf, deflg, rpnp);
if (error == 0) {
- *vpp = ZTOV(zp);
+ *ipp = ZTOI(zp);
zfs_dirent_unlock(dl);
dzp->z_zn_prefetch = B_TRUE; /* enable prefetching */
}
@@ -450,13 +464,13 @@ zfs_dirlook(znode_t *dzp, char *name, vnode_t **vpp, int flags,
void
zfs_unlinked_add(znode_t *zp, dmu_tx_t *tx)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
ASSERT(zp->z_unlinked);
ASSERT(zp->z_links == 0);
VERIFY3U(0, ==,
- zap_add_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx));
+ zap_add_int(zsb->z_os, zsb->z_unlinkedobj, zp->z_id, tx));
}
/*
@@ -464,7 +478,7 @@ zfs_unlinked_add(znode_t *zp, dmu_tx_t *tx)
* (force) umounted the file system.
*/
void
-zfs_unlinked_drain(zfsvfs_t *zfsvfs)
+zfs_unlinked_drain(zfs_sb_t *zsb)
{
zap_cursor_t zc;
zap_attribute_t zap;
@@ -475,7 +489,7 @@ zfs_unlinked_drain(zfsvfs_t *zfsvfs)
/*
	 * Iterate over the contents of the unlinked set.
*/
- for (zap_cursor_init(&zc, zfsvfs->z_os, zfsvfs->z_unlinkedobj);
+ for (zap_cursor_init(&zc, zsb->z_os, zsb->z_unlinkedobj);
zap_cursor_retrieve(&zc, &zap) == 0;
zap_cursor_advance(&zc)) {
@@ -483,8 +497,7 @@ zfs_unlinked_drain(zfsvfs_t *zfsvfs)
* See what kind of object we have in list
*/
- error = dmu_object_info(zfsvfs->z_os,
- zap.za_first_integer, &doi);
+ error = dmu_object_info(zsb->z_os, zap.za_first_integer, &doi);
if (error != 0)
continue;
@@ -494,7 +507,7 @@ zfs_unlinked_drain(zfsvfs_t *zfsvfs)
* We need to re-mark these list entries for deletion,
* so we pull them back into core and set zp->z_unlinked.
*/
- error = zfs_zget(zfsvfs, zap.za_first_integer, &zp);
+ error = zfs_zget(zsb, zap.za_first_integer, &zp);
/*
* We may pick up znodes that are already marked for deletion.
@@ -506,7 +519,7 @@ zfs_unlinked_drain(zfsvfs_t *zfsvfs)
continue;
zp->z_unlinked = B_TRUE;
- VN_RELE(ZTOV(zp));
+ iput(ZTOI(zp));
}
zap_cursor_fini(&zc);
}
@@ -529,35 +542,34 @@ zfs_purgedir(znode_t *dzp)
zap_attribute_t zap;
znode_t *xzp;
dmu_tx_t *tx;
- zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(dzp);
zfs_dirlock_t dl;
int skipped = 0;
int error;
- for (zap_cursor_init(&zc, zfsvfs->z_os, dzp->z_id);
+ for (zap_cursor_init(&zc, zsb->z_os, dzp->z_id);
(error = zap_cursor_retrieve(&zc, &zap)) == 0;
zap_cursor_advance(&zc)) {
- error = zfs_zget(zfsvfs,
+ error = zfs_zget(zsb,
ZFS_DIRENT_OBJ(zap.za_first_integer), &xzp);
if (error) {
skipped += 1;
continue;
}
- ASSERT((ZTOV(xzp)->v_type == VREG) ||
- (ZTOV(xzp)->v_type == VLNK));
+ ASSERT(S_ISREG(ZTOI(xzp)->i_mode)||S_ISLNK(ZTOI(xzp)->i_mode));
- tx = dmu_tx_create(zfsvfs->z_os);
+ tx = dmu_tx_create(zsb->z_os);
dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, dzp->z_id, FALSE, zap.za_name);
dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
- dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
+ dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL);
/* Is this really needed ? */
zfs_sa_upgrade_txholds(tx, xzp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
- VN_RELE(ZTOV(xzp));
+ iput(ZTOI(xzp));
skipped += 1;
continue;
}
@@ -570,7 +582,7 @@ zfs_purgedir(znode_t *dzp)
skipped += 1;
dmu_tx_commit(tx);
- VN_RELE(ZTOV(xzp));
+ iput(ZTOI(xzp));
}
zap_cursor_fini(&zc);
if (error != ENOENT)
@@ -581,8 +593,8 @@ zfs_purgedir(znode_t *dzp)
void
zfs_rmnode(znode_t *zp)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- objset_t *os = zfsvfs->z_os;
+ zfs_sb_t *zsb = ZTOZSB(zp);
+ objset_t *os = zsb->z_os;
znode_t *xzp = NULL;
dmu_tx_t *tx;
uint64_t acl_obj;
@@ -590,19 +602,20 @@ zfs_rmnode(znode_t *zp)
int error;
ASSERT(zp->z_links == 0);
- ASSERT(ZTOV(zp)->v_count == 0);
+ ASSERT(atomic_read(&ZTOI(zp)->i_count) == 0);
/*
* If this is an attribute directory, purge its contents.
*/
- if (ZTOV(zp)->v_type == VDIR && (zp->z_pflags & ZFS_XATTR)) {
+ if (S_ISDIR(ZTOI(zp)->i_mode) && (zp->z_pflags & ZFS_XATTR)) {
if (zfs_purgedir(zp) != 0) {
/*
* Not enough space to delete some xattrs.
* Leave it in the unlinked set.
*/
zfs_znode_dmu_fini(zp);
- zfs_znode_free(zp);
+ zfs_inode_destroy(ZTOI(zp));
+
return;
}
}
@@ -616,7 +629,7 @@ zfs_rmnode(znode_t *zp)
* Not enough space. Leave the file in the unlinked set.
*/
zfs_znode_dmu_fini(zp);
- zfs_znode_free(zp);
+ zfs_inode_destroy(ZTOI(zp));
return;
}
@@ -624,10 +637,10 @@ zfs_rmnode(znode_t *zp)
* If the file has extended attributes, we're going to unlink
* the xattr dir.
*/
- error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
+ error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb),
&xattr_obj, sizeof (xattr_obj));
if (error == 0 && xattr_obj) {
- error = zfs_zget(zfsvfs, xattr_obj, &xzp);
+ error = zfs_zget(zsb, xattr_obj, &xzp);
ASSERT(error == 0);
}
@@ -638,9 +651,9 @@ zfs_rmnode(znode_t *zp)
*/
tx = dmu_tx_create(os);
dmu_tx_hold_free(tx, zp->z_id, 0, DMU_OBJECT_END);
- dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
+ dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL);
if (xzp) {
- dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, TRUE, NULL);
+ dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, TRUE, NULL);
dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
}
if (acl_obj)
@@ -656,7 +669,7 @@ zfs_rmnode(znode_t *zp)
*/
dmu_tx_abort(tx);
zfs_znode_dmu_fini(zp);
- zfs_znode_free(zp);
+ zfs_inode_destroy(ZTOI(zp));
goto out;
}
@@ -665,7 +678,7 @@ zfs_rmnode(znode_t *zp)
mutex_enter(&xzp->z_lock);
xzp->z_unlinked = B_TRUE; /* mark xzp for deletion */
xzp->z_links = 0; /* no more links to it */
- VERIFY(0 == sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
+ VERIFY(0 == sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zsb),
&xzp->z_links, sizeof (xzp->z_links), tx));
mutex_exit(&xzp->z_lock);
zfs_unlinked_add(xzp, tx);
@@ -673,14 +686,14 @@ zfs_rmnode(znode_t *zp)
/* Remove this znode from the unlinked set */
VERIFY3U(0, ==,
- zap_remove_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx));
+ zap_remove_int(zsb->z_os, zsb->z_unlinkedobj, zp->z_id, tx));
zfs_znode_delete(zp, tx);
dmu_tx_commit(tx);
out:
if (xzp)
- VN_RELE(ZTOV(xzp));
+ iput(ZTOI(xzp));
}
static uint64_t
@@ -688,7 +701,7 @@ zfs_dirent(znode_t *zp, uint64_t mode)
{
uint64_t de = zp->z_id;
- if (zp->z_zfsvfs->z_version >= ZPL_VERSION_DIRENT_TYPE)
+ if (ZTOZSB(zp)->z_version >= ZPL_VERSION_DIRENT_TYPE)
de |= IFTODT(mode) << 60;
return (de);
}
@@ -700,10 +713,9 @@ int
zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
{
znode_t *dzp = dl->dl_dzp;
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- vnode_t *vp = ZTOV(zp);
+ zfs_sb_t *zsb = ZTOZSB(zp);
uint64_t value;
- int zp_is_dir = (vp->v_type == VDIR);
+ int zp_is_dir = S_ISDIR(ZTOI(zp)->i_mode);
sa_bulk_attr_t bulk[5];
uint64_t mtime[2], ctime[2];
int count = 0;
@@ -718,17 +730,17 @@ zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
return (ENOENT);
}
zp->z_links++;
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL,
&zp->z_links, sizeof (zp->z_links));
}
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zsb), NULL,
&dzp->z_id, sizeof (dzp->z_id));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
if (!(flag & ZNEW)) {
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL,
ctime, sizeof (ctime));
zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime,
ctime, B_TRUE);
@@ -742,15 +754,15 @@ zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
dzp->z_size++;
dzp->z_links += zp_is_dir;
count = 0;
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL,
&dzp->z_size, sizeof (dzp->z_size));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL,
&dzp->z_links, sizeof (dzp->z_links));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL,
mtime, sizeof (mtime));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL,
ctime, sizeof (ctime));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
&dzp->z_pflags, sizeof (dzp->z_pflags));
zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime, B_TRUE);
error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
@@ -758,11 +770,13 @@ zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
mutex_exit(&dzp->z_lock);
value = zfs_dirent(zp, zp->z_mode);
- error = zap_add(zp->z_zfsvfs->z_os, dzp->z_id, dl->dl_name,
+ error = zap_add(ZTOZSB(zp)->z_os, dzp->z_id, dl->dl_name,
8, 1, &value, tx);
ASSERT(error == 0);
- dnlc_update(ZTOV(dzp), dl->dl_name, vp);
+#ifdef HAVE_DNLC
+	dnlc_update(ZTOI(dzp), dl->dl_name, ZTOI(zp));
+#endif /* HAVE_DNLC */
return (0);
}
@@ -773,18 +787,18 @@ zfs_dropname(zfs_dirlock_t *dl, znode_t *zp, znode_t *dzp, dmu_tx_t *tx,
{
int error;
- if (zp->z_zfsvfs->z_norm) {
- if (((zp->z_zfsvfs->z_case == ZFS_CASE_INSENSITIVE) &&
+ if (ZTOZSB(zp)->z_norm) {
+ if (((ZTOZSB(zp)->z_case == ZFS_CASE_INSENSITIVE) &&
(flag & ZCIEXACT)) ||
- ((zp->z_zfsvfs->z_case == ZFS_CASE_MIXED) &&
+ ((ZTOZSB(zp)->z_case == ZFS_CASE_MIXED) &&
!(flag & ZCILOOK)))
- error = zap_remove_norm(zp->z_zfsvfs->z_os,
+ error = zap_remove_norm(ZTOZSB(zp)->z_os,
dzp->z_id, dl->dl_name, MT_EXACT, tx);
else
- error = zap_remove_norm(zp->z_zfsvfs->z_os,
+ error = zap_remove_norm(ZTOZSB(zp)->z_os,
dzp->z_id, dl->dl_name, MT_FIRST, tx);
} else {
- error = zap_remove(zp->z_zfsvfs->z_os,
+ error = zap_remove(ZTOZSB(zp)->z_os,
dzp->z_id, dl->dl_name, tx);
}
@@ -803,31 +817,23 @@ zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
boolean_t *unlinkedp)
{
znode_t *dzp = dl->dl_dzp;
- zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
- vnode_t *vp = ZTOV(zp);
- int zp_is_dir = (vp->v_type == VDIR);
+ zfs_sb_t *zsb = ZTOZSB(dzp);
+ int zp_is_dir = S_ISDIR(ZTOI(zp)->i_mode);
boolean_t unlinked = B_FALSE;
sa_bulk_attr_t bulk[5];
uint64_t mtime[2], ctime[2];
int count = 0;
int error;
- dnlc_remove(ZTOV(dzp), dl->dl_name);
+#ifdef HAVE_DNLC
+ dnlc_remove(ZTOI(dzp), dl->dl_name);
+#endif /* HAVE_DNLC */
if (!(flag & ZRENAMING)) {
- if (vn_vfswlock(vp)) /* prevent new mounts on zp */
- return (EBUSY);
-
- if (vn_ismntpt(vp)) { /* don't remove mount point */
- vn_vfsunlock(vp);
- return (EBUSY);
- }
-
mutex_enter(&zp->z_lock);
if (zp_is_dir && !zfs_dirempty(zp)) {
mutex_exit(&zp->z_lock);
- vn_vfsunlock(vp);
return (EEXIST);
}
@@ -839,16 +845,13 @@ zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
error = zfs_dropname(dl, zp, dzp, tx, flag);
if (error != 0) {
mutex_exit(&zp->z_lock);
- vn_vfsunlock(vp);
return (error);
}
if (zp->z_links <= zp_is_dir) {
- zfs_panic_recover("zfs: link count on %s is %u, "
- "should be at least %u",
- zp->z_vnode->v_path ? zp->z_vnode->v_path :
- "<unknown>", (int)zp->z_links,
- zp_is_dir + 1);
+ zfs_panic_recover("zfs: link count on %lu is %u, "
+ "should be at least %u", zp->z_id,
+ (int)zp->z_links, zp_is_dir + 1);
zp->z_links = zp_is_dir + 1;
}
if (--zp->z_links == zp_is_dir) {
@@ -856,20 +859,19 @@ zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
zp->z_links = 0;
unlinked = B_TRUE;
} else {
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs),
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb),
NULL, &ctime, sizeof (ctime));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb),
NULL, &zp->z_pflags, sizeof (zp->z_pflags));
zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime,
B_TRUE);
}
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb),
NULL, &zp->z_links, sizeof (zp->z_links));
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
count = 0;
ASSERT(error == 0);
mutex_exit(&zp->z_lock);
- vn_vfsunlock(vp);
} else {
error = zfs_dropname(dl, zp, dzp, tx, flag);
if (error != 0)
@@ -879,15 +881,15 @@ zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
mutex_enter(&dzp->z_lock);
dzp->z_size--; /* one dirent removed */
dzp->z_links -= zp_is_dir; /* ".." link from zp */
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb),
NULL, &dzp->z_links, sizeof (dzp->z_links));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb),
NULL, &dzp->z_size, sizeof (dzp->z_size));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs),
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb),
NULL, ctime, sizeof (ctime));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb),
NULL, mtime, sizeof (mtime));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb),
NULL, &dzp->z_pflags, sizeof (dzp->z_pflags));
zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime, B_TRUE);
error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
@@ -914,38 +916,40 @@ zfs_dirempty(znode_t *dzp)
}
int
-zfs_make_xattrdir(znode_t *zp, vattr_t *vap, vnode_t **xvpp, cred_t *cr)
+zfs_make_xattrdir(znode_t *zp, vattr_t *vap, struct inode **xipp, cred_t *cr)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
znode_t *xzp;
dmu_tx_t *tx;
int error;
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
+#ifdef DEBUG
uint64_t parent;
+#endif
- *xvpp = NULL;
+ *xipp = NULL;
- if (error = zfs_zaccess(zp, ACE_WRITE_NAMED_ATTRS, 0, B_FALSE, cr))
+ if ((error = zfs_zaccess(zp, ACE_WRITE_NAMED_ATTRS, 0, B_FALSE, cr)))
return (error);
if ((error = zfs_acl_ids_create(zp, IS_XATTR, vap, cr, NULL,
&acl_ids)) != 0)
return (error);
- if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
+ if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
zfs_acl_ids_free(&acl_ids);
return (EDQUOT);
}
top:
- tx = dmu_tx_create(zfsvfs->z_os);
+ tx = dmu_tx_create(zsb->z_os);
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
- fuid_dirtied = zfsvfs->z_fuid_dirty;
+ fuid_dirtied = zsb->z_fuid_dirty;
if (fuid_dirtied)
- zfs_fuid_txhold(zfsvfs, tx);
+ zfs_fuid_txhold(zsb, tx);
error = dmu_tx_assign(tx, TXG_NOWAIT);
if (error) {
if (error == ERESTART) {
@@ -960,24 +964,24 @@ top:
zfs_mknode(zp, vap, tx, cr, IS_XATTR, &xzp, &acl_ids);
if (fuid_dirtied)
- zfs_fuid_sync(zfsvfs, tx);
+ zfs_fuid_sync(zsb, tx);
#ifdef DEBUG
- error = sa_lookup(xzp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
+ error = sa_lookup(xzp->z_sa_hdl, SA_ZPL_PARENT(zsb),
&parent, sizeof (parent));
ASSERT(error == 0 && parent == zp->z_id);
#endif
- VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &xzp->z_id,
+ VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_XATTR(zsb), &xzp->z_id,
sizeof (xzp->z_id), tx));
- (void) zfs_log_create(zfsvfs->z_log, tx, TX_MKXATTR, zp,
+ (void) zfs_log_create(zsb->z_log, tx, TX_MKXATTR, zp,
xzp, "", NULL, acl_ids.z_fuidp, vap);
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
- *xvpp = ZTOV(xzp);
+ *xipp = ZTOI(xzp);
return (0);
}
@@ -990,15 +994,15 @@ top:
* cr - credentials of caller
* flags - flags from the VOP_LOOKUP call
*
- * OUT: xzpp - pointer to extended attribute znode
+ * OUT: xipp - pointer to extended attribute znode
*
* RETURN: 0 on success
* error number on failure
*/
int
-zfs_get_xattrdir(znode_t *zp, vnode_t **xvpp, cred_t *cr, int flags)
+zfs_get_xattrdir(znode_t *zp, struct inode **xipp, cred_t *cr, int flags)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
znode_t *xzp;
zfs_dirlock_t *dl;
vattr_t va;
@@ -1009,18 +1013,17 @@ top:
return (error);
if (xzp != NULL) {
- *xvpp = ZTOV(xzp);
+ *xipp = ZTOI(xzp);
zfs_dirent_unlock(dl);
return (0);
}
-
if (!(flags & CREATE_XATTR_DIR)) {
zfs_dirent_unlock(dl);
return (ENOENT);
}
- if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
+ if (zsb->z_vfs->mnt_flags & MNT_READONLY) {
zfs_dirent_unlock(dl);
return (EROFS);
}
@@ -1035,12 +1038,11 @@ top:
* Once in a directory the ability to read/write attributes
* is controlled by the permissions on the attribute file.
*/
- va.va_mask = AT_TYPE | AT_MODE | AT_UID | AT_GID;
- va.va_type = VDIR;
+ va.va_mask = ATTR_MODE | ATTR_UID | ATTR_GID;
va.va_mode = S_IFDIR | S_ISVTX | 0777;
zfs_fuid_map_ids(zp, cr, &va.va_uid, &va.va_gid);
- error = zfs_make_xattrdir(zp, &va, xvpp, cr);
+ error = zfs_make_xattrdir(zp, &va, xipp, cr);
zfs_dirent_unlock(dl);
if (error == ERESTART) {
@@ -1067,25 +1069,24 @@ top:
int
zfs_sticky_remove_access(znode_t *zdp, znode_t *zp, cred_t *cr)
{
- uid_t uid;
+ uid_t uid;
uid_t downer;
uid_t fowner;
- zfsvfs_t *zfsvfs = zdp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zdp);
- if (zdp->z_zfsvfs->z_replay)
+ if (zsb->z_replay)
return (0);
if ((zdp->z_mode & S_ISVTX) == 0)
return (0);
- downer = zfs_fuid_map_id(zfsvfs, zdp->z_uid, cr, ZFS_OWNER);
- fowner = zfs_fuid_map_id(zfsvfs, zp->z_uid, cr, ZFS_OWNER);
+ downer = zfs_fuid_map_id(zsb, zdp->z_uid, cr, ZFS_OWNER);
+ fowner = zfs_fuid_map_id(zsb, zp->z_uid, cr, ZFS_OWNER);
if ((uid = crgetuid(cr)) == downer || uid == fowner ||
- (ZTOV(zp)->v_type == VREG &&
+	    (S_ISREG(ZTOI(zp)->i_mode) &&
zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr) == 0))
return (0);
else
return (secpolicy_vnode_remove(cr));
}
-#endif /* HAVE_ZPL */
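
The zfs_dir.c conversion above is largely mechanical: ZTOV()/VN_HOLD()/VN_RELE() become ZTOI()/igrab()/iput(), per-mount state is reached through ZTOZSB() instead of dzp->z_zfsvfs, and every v_type comparison is rewritten as an S_IS*() test on the inode's i_mode. The S_IS*() macros are the standard POSIX mode-bit tests; the userspace sketch below (not part of the patch; the file name and output are illustrative) exercises the same checks the patch substitutes for the old v_type comparisons.

/* check_mode.c -- illustrative only: the standard S_IS*() mode tests
 * that replace the Solaris v_type comparisons in the patch above. */
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

int
main(int argc, char *argv[])
{
	struct stat st;
	const char *path = (argc > 1) ? argv[1] : ".";

	if (lstat(path, &st) != 0) {
		perror("lstat");
		return (1);
	}

	if (S_ISDIR(st.st_mode))		/* was: v_type == VDIR */
		printf("%s: directory\n", path);
	else if (S_ISREG(st.st_mode))		/* was: v_type == VREG */
		printf("%s: regular file\n", path);
	else if (S_ISLNK(st.st_mode))		/* was: v_type == VLNK */
		printf("%s: symbolic link\n", path);
	else
		printf("%s: other\n", path);

	return (0);
}
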
diff --git a/module/zfs/zfs_fuid.c b/module/zfs/zfs_fuid.c
index a5741185a..f1e071fc7 100644
--- a/module/zfs/zfs_fuid.c
+++ b/module/zfs/zfs_fuid.c
@@ -46,7 +46,7 @@
* two AVL trees are created. One tree is keyed by the index number
* and the other by the domain string. Nodes are never removed from
* trees, but new entries may be added. If a new entry is added then
- * the zfsvfs->z_fuid_dirty flag is set to true and the caller will then
+ * the zsb->z_fuid_dirty flag is set to true and the caller will then
* be responsible for calling zfs_fuid_sync() to sync the changes to disk.
*
*/
@@ -196,34 +196,34 @@ zfs_fuid_idx_domain(avl_tree_t *idx_tree, uint32_t idx)
* Load the fuid table(s) into memory.
*/
static void
-zfs_fuid_init(zfsvfs_t *zfsvfs)
+zfs_fuid_init(zfs_sb_t *zsb)
{
- rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
+ rw_enter(&zsb->z_fuid_lock, RW_WRITER);
- if (zfsvfs->z_fuid_loaded) {
- rw_exit(&zfsvfs->z_fuid_lock);
+ if (zsb->z_fuid_loaded) {
+ rw_exit(&zsb->z_fuid_lock);
return;
}
- zfs_fuid_avl_tree_create(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain);
+ zfs_fuid_avl_tree_create(&zsb->z_fuid_idx, &zsb->z_fuid_domain);
- (void) zap_lookup(zfsvfs->z_os, MASTER_NODE_OBJ,
- ZFS_FUID_TABLES, 8, 1, &zfsvfs->z_fuid_obj);
- if (zfsvfs->z_fuid_obj != 0) {
- zfsvfs->z_fuid_size = zfs_fuid_table_load(zfsvfs->z_os,
- zfsvfs->z_fuid_obj, &zfsvfs->z_fuid_idx,
- &zfsvfs->z_fuid_domain);
+ (void) zap_lookup(zsb->z_os, MASTER_NODE_OBJ,
+ ZFS_FUID_TABLES, 8, 1, &zsb->z_fuid_obj);
+ if (zsb->z_fuid_obj != 0) {
+ zsb->z_fuid_size = zfs_fuid_table_load(zsb->z_os,
+ zsb->z_fuid_obj, &zsb->z_fuid_idx,
+ &zsb->z_fuid_domain);
}
- zfsvfs->z_fuid_loaded = B_TRUE;
- rw_exit(&zfsvfs->z_fuid_lock);
+ zsb->z_fuid_loaded = B_TRUE;
+ rw_exit(&zsb->z_fuid_lock);
}
/*
* sync out AVL trees to persistent storage.
*/
void
-zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
+zfs_fuid_sync(zfs_sb_t *zsb, dmu_tx_t *tx)
{
nvlist_t *nvp;
nvlist_t **fuids;
@@ -234,30 +234,30 @@ zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
int numnodes;
int i;
- if (!zfsvfs->z_fuid_dirty) {
+ if (!zsb->z_fuid_dirty) {
return;
}
- rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
+ rw_enter(&zsb->z_fuid_lock, RW_WRITER);
/*
* First see if table needs to be created?
*/
- if (zfsvfs->z_fuid_obj == 0) {
- zfsvfs->z_fuid_obj = dmu_object_alloc(zfsvfs->z_os,
+ if (zsb->z_fuid_obj == 0) {
+ zsb->z_fuid_obj = dmu_object_alloc(zsb->z_os,
DMU_OT_FUID, 1 << 14, DMU_OT_FUID_SIZE,
sizeof (uint64_t), tx);
- VERIFY(zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
+ VERIFY(zap_add(zsb->z_os, MASTER_NODE_OBJ,
ZFS_FUID_TABLES, sizeof (uint64_t), 1,
- &zfsvfs->z_fuid_obj, tx) == 0);
+ &zsb->z_fuid_obj, tx) == 0);
}
VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
- numnodes = avl_numnodes(&zfsvfs->z_fuid_idx);
+ numnodes = avl_numnodes(&zsb->z_fuid_idx);
fuids = kmem_alloc(numnodes * sizeof (void *), KM_SLEEP);
- for (i = 0, domnode = avl_first(&zfsvfs->z_fuid_domain); domnode; i++,
- domnode = AVL_NEXT(&zfsvfs->z_fuid_domain, domnode)) {
+ for (i = 0, domnode = avl_first(&zsb->z_fuid_domain); domnode; i++,
+ domnode = AVL_NEXT(&zsb->z_fuid_domain, domnode)) {
VERIFY(nvlist_alloc(&fuids[i], NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64(fuids[i], FUID_IDX,
domnode->f_idx) == 0);
@@ -275,30 +275,29 @@ zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
VERIFY(nvlist_pack(nvp, &packed, &nvsize,
NV_ENCODE_XDR, KM_SLEEP) == 0);
nvlist_free(nvp);
- zfsvfs->z_fuid_size = nvsize;
- dmu_write(zfsvfs->z_os, zfsvfs->z_fuid_obj, 0,
- zfsvfs->z_fuid_size, packed, tx);
- kmem_free(packed, zfsvfs->z_fuid_size);
- VERIFY(0 == dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj,
+ zsb->z_fuid_size = nvsize;
+ dmu_write(zsb->z_os, zsb->z_fuid_obj, 0, zsb->z_fuid_size, packed, tx);
+ kmem_free(packed, zsb->z_fuid_size);
+ VERIFY(0 == dmu_bonus_hold(zsb->z_os, zsb->z_fuid_obj,
FTAG, &db));
dmu_buf_will_dirty(db, tx);
- *(uint64_t *)db->db_data = zfsvfs->z_fuid_size;
+ *(uint64_t *)db->db_data = zsb->z_fuid_size;
dmu_buf_rele(db, FTAG);
- zfsvfs->z_fuid_dirty = B_FALSE;
- rw_exit(&zfsvfs->z_fuid_lock);
+ zsb->z_fuid_dirty = B_FALSE;
+ rw_exit(&zsb->z_fuid_lock);
}
/*
* Query domain table for a given domain.
*
* If domain isn't found and addok is set, it is added to AVL trees and
- * the zfsvfs->z_fuid_dirty flag will be set to TRUE. It will then be
+ * the zsb->z_fuid_dirty flag will be set to TRUE. It will then be
* necessary for the caller or another thread to detect the dirty table
* and sync out the changes.
*/
int
-zfs_fuid_find_by_domain(zfsvfs_t *zfsvfs, const char *domain,
+zfs_fuid_find_by_domain(zfs_sb_t *zsb, const char *domain,
char **retdomain, boolean_t addok)
{
fuid_domain_t searchnode, *findnode;
@@ -319,23 +318,23 @@ zfs_fuid_find_by_domain(zfsvfs_t *zfsvfs, const char *domain,
searchnode.f_ksid = ksid_lookupdomain(domain);
if (retdomain)
*retdomain = searchnode.f_ksid->kd_name;
- if (!zfsvfs->z_fuid_loaded)
- zfs_fuid_init(zfsvfs);
+ if (!zsb->z_fuid_loaded)
+ zfs_fuid_init(zsb);
retry:
- rw_enter(&zfsvfs->z_fuid_lock, rw);
- findnode = avl_find(&zfsvfs->z_fuid_domain, &searchnode, &loc);
+ rw_enter(&zsb->z_fuid_lock, rw);
+ findnode = avl_find(&zsb->z_fuid_domain, &searchnode, &loc);
if (findnode) {
- rw_exit(&zfsvfs->z_fuid_lock);
+ rw_exit(&zsb->z_fuid_lock);
ksiddomain_rele(searchnode.f_ksid);
return (findnode->f_idx);
} else if (addok) {
fuid_domain_t *domnode;
uint64_t retidx;
- if (rw == RW_READER && !rw_tryupgrade(&zfsvfs->z_fuid_lock)) {
- rw_exit(&zfsvfs->z_fuid_lock);
+ if (rw == RW_READER && !rw_tryupgrade(&zsb->z_fuid_lock)) {
+ rw_exit(&zsb->z_fuid_lock);
rw = RW_WRITER;
goto retry;
}
@@ -343,15 +342,15 @@ retry:
domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP);
domnode->f_ksid = searchnode.f_ksid;
- retidx = domnode->f_idx = avl_numnodes(&zfsvfs->z_fuid_idx) + 1;
+ retidx = domnode->f_idx = avl_numnodes(&zsb->z_fuid_idx) + 1;
- avl_add(&zfsvfs->z_fuid_domain, domnode);
- avl_add(&zfsvfs->z_fuid_idx, domnode);
- zfsvfs->z_fuid_dirty = B_TRUE;
- rw_exit(&zfsvfs->z_fuid_lock);
+ avl_add(&zsb->z_fuid_domain, domnode);
+ avl_add(&zsb->z_fuid_idx, domnode);
+ zsb->z_fuid_dirty = B_TRUE;
+ rw_exit(&zsb->z_fuid_lock);
return (retidx);
} else {
- rw_exit(&zfsvfs->z_fuid_lock);
+ rw_exit(&zsb->z_fuid_lock);
return (-1);
}
}
@@ -363,23 +362,23 @@ retry:
*
*/
const char *
-zfs_fuid_find_by_idx(zfsvfs_t *zfsvfs, uint32_t idx)
+zfs_fuid_find_by_idx(zfs_sb_t *zsb, uint32_t idx)
{
char *domain;
- if (idx == 0 || !zfsvfs->z_use_fuids)
+ if (idx == 0 || !zsb->z_use_fuids)
return (NULL);
- if (!zfsvfs->z_fuid_loaded)
- zfs_fuid_init(zfsvfs);
+ if (!zsb->z_fuid_loaded)
+ zfs_fuid_init(zsb);
- rw_enter(&zfsvfs->z_fuid_lock, RW_READER);
+ rw_enter(&zsb->z_fuid_lock, RW_READER);
- if (zfsvfs->z_fuid_obj || zfsvfs->z_fuid_dirty)
- domain = zfs_fuid_idx_domain(&zfsvfs->z_fuid_idx, idx);
+ if (zsb->z_fuid_obj || zsb->z_fuid_dirty)
+ domain = zfs_fuid_idx_domain(&zsb->z_fuid_idx, idx);
else
domain = nulldomain;
- rw_exit(&zfsvfs->z_fuid_lock);
+ rw_exit(&zsb->z_fuid_lock);
ASSERT(domain);
return (domain);
@@ -388,12 +387,12 @@ zfs_fuid_find_by_idx(zfsvfs_t *zfsvfs, uint32_t idx)
void
zfs_fuid_map_ids(znode_t *zp, cred_t *cr, uid_t *uidp, uid_t *gidp)
{
- *uidp = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_uid, cr, ZFS_OWNER);
- *gidp = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_gid, cr, ZFS_GROUP);
+ *uidp = zfs_fuid_map_id(ZTOZSB(zp), zp->z_uid, cr, ZFS_OWNER);
+ *gidp = zfs_fuid_map_id(ZTOZSB(zp), zp->z_gid, cr, ZFS_GROUP);
}
uid_t
-zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid,
+zfs_fuid_map_id(zfs_sb_t *zsb, uint64_t fuid,
cred_t *cr, zfs_fuid_type_t type)
{
#ifdef HAVE_KSID
@@ -404,7 +403,7 @@ zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid,
if (index == 0)
return (fuid);
- domain = zfs_fuid_find_by_idx(zfsvfs, index);
+ domain = zfs_fuid_find_by_idx(zsb, index);
ASSERT(domain != NULL);
if (type == ZFS_OWNER || type == ZFS_ACE_USER) {
@@ -499,13 +498,13 @@ zfs_fuid_node_add(zfs_fuid_info_t **fuidpp, const char *domain, uint32_t rid,
* be used if it exists.
*/
uint64_t
-zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type,
+zfs_fuid_create_cred(zfs_sb_t *zsb, zfs_fuid_type_t type,
cred_t *cr, zfs_fuid_info_t **fuidp)
{
uint64_t idx;
ksid_t *ksid;
uint32_t rid;
- char *kdomain;
+ char *kdomain;
const char *domain;
uid_t id;
@@ -513,7 +512,7 @@ zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type,
ksid = crgetsid(cr, (type == ZFS_OWNER) ? KSID_OWNER : KSID_GROUP);
- if (!zfsvfs->z_use_fuids || (ksid == NULL)) {
+ if (!zsb->z_use_fuids || (ksid == NULL)) {
id = (type == ZFS_OWNER) ? crgetuid(cr) : crgetgid(cr);
if (IS_EPHEMERAL(id))
@@ -536,7 +535,7 @@ zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type,
rid = ksid_getrid(ksid);
domain = ksid_getdomain(ksid);
- idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE);
+ idx = zfs_fuid_find_by_domain(zsb, domain, &kdomain, B_TRUE);
zfs_fuid_node_add(fuidp, kdomain, rid, idx, id, type);
@@ -554,10 +553,10 @@ zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type,
*
* During replay operations the domain+rid information is
* found in the zfs_fuid_info_t that the replay code has
- * attached to the zfsvfs of the file system.
+ * attached to the zsb of the file system.
*/
uint64_t
-zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr,
+zfs_fuid_create(zfs_sb_t *zsb, uint64_t id, cred_t *cr,
zfs_fuid_type_t type, zfs_fuid_info_t **fuidpp)
{
#ifdef HAVE_KSID
@@ -578,11 +577,11 @@ zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr,
* chmod.
*/
- if (!zfsvfs->z_use_fuids || !IS_EPHEMERAL(id) || fuid_idx != 0)
+ if (!zsb->z_use_fuids || !IS_EPHEMERAL(id) || fuid_idx != 0)
return (id);
- if (zfsvfs->z_replay) {
- fuidp = zfsvfs->z_fuid_replay;
+ if (zsb->z_replay) {
+ fuidp = zsb->z_fuid_replay;
/*
* If we are passed an ephemeral id, but no
@@ -629,9 +628,9 @@ zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr,
}
}
- idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE);
+ idx = zfs_fuid_find_by_domain(zsb, domain, &kdomain, B_TRUE);
- if (!zfsvfs->z_replay)
+ if (!zsb->z_replay)
zfs_fuid_node_add(fuidpp, kdomain,
rid, idx, id, type);
else if (zfuid != NULL) {
@@ -648,15 +647,15 @@ zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr,
}
void
-zfs_fuid_destroy(zfsvfs_t *zfsvfs)
+zfs_fuid_destroy(zfs_sb_t *zsb)
{
- rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
- if (!zfsvfs->z_fuid_loaded) {
- rw_exit(&zfsvfs->z_fuid_lock);
+ rw_enter(&zsb->z_fuid_lock, RW_WRITER);
+ if (!zsb->z_fuid_loaded) {
+ rw_exit(&zsb->z_fuid_lock);
return;
}
- zfs_fuid_table_destroy(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain);
- rw_exit(&zfsvfs->z_fuid_lock);
+ zfs_fuid_table_destroy(&zsb->z_fuid_idx, &zsb->z_fuid_domain);
+ rw_exit(&zsb->z_fuid_lock);
}
/*
@@ -710,7 +709,7 @@ zfs_fuid_info_free(zfs_fuid_info_t *fuidp)
* Will use a straight FUID compare when possible.
*/
boolean_t
-zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
+zfs_groupmember(zfs_sb_t *zsb, uint64_t id, cred_t *cr)
{
#ifdef HAVE_KSID
ksid_t *ksid = crgetsid(cr, KSID_GROUP);
@@ -718,7 +717,7 @@ zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
uid_t gid;
if (ksid && ksidlist) {
- int i;
+ int i;
ksid_t *ksid_groups;
uint32_t idx = FUID_INDEX(id);
uint32_t rid = FUID_RID(id);
@@ -734,7 +733,7 @@ zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
} else {
const char *domain;
- domain = zfs_fuid_find_by_idx(zfsvfs, idx);
+ domain = zfs_fuid_find_by_idx(zsb, idx);
ASSERT(domain != NULL);
if (strcmp(domain,
@@ -752,7 +751,7 @@ zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
/*
* Not found in ksidlist, check posix groups
*/
- gid = zfs_fuid_map_id(zfsvfs, id, cr, ZFS_GROUP);
+ gid = zfs_fuid_map_id(zsb, id, cr, ZFS_GROUP);
return (groupmember(gid, cr));
#else
return (B_TRUE);
@@ -760,17 +759,17 @@ zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
}
void
-zfs_fuid_txhold(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
+zfs_fuid_txhold(zfs_sb_t *zsb, dmu_tx_t *tx)
{
- if (zfsvfs->z_fuid_obj == 0) {
+ if (zsb->z_fuid_obj == 0) {
dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
- FUID_SIZE_ESTIMATE(zfsvfs));
+ FUID_SIZE_ESTIMATE(zsb));
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL);
} else {
- dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj);
- dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0,
- FUID_SIZE_ESTIMATE(zfsvfs));
+ dmu_tx_hold_bonus(tx, zsb->z_fuid_obj);
+ dmu_tx_hold_write(tx, zsb->z_fuid_obj, 0,
+ FUID_SIZE_ESTIMATE(zsb));
}
}
#endif
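
zfs_fuid_find_by_domain() above keeps the Solaris locking idiom: the search runs under the read lock, and only when a missing domain must be inserted does it attempt rw_tryupgrade(), retrying the whole search as a writer if the upgrade fails. POSIX rwlocks cannot be upgraded in place, so a portable restatement of the same idea drops the read lock, takes the write lock, and repeats the search before inserting; the sketch below is illustrative only (the table, its size, and the function names are not part of the patch).

/* domain_table.c -- drop-and-reacquire analogue of the
 * rw_tryupgrade()/retry idiom used by zfs_fuid_find_by_domain(). */
#include <pthread.h>
#include <string.h>

#define	TABLE_SIZE	64

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static const char *table[TABLE_SIZE];	/* stores the caller's pointers */
static int table_cnt;

static int
table_find_locked(const char *domain)
{
	int i;

	for (i = 0; i < table_cnt; i++)
		if (strcmp(table[i], domain) == 0)
			return (i);
	return (-1);
}

/* Look up a domain, inserting it if missing; returns its index or -1. */
int
domain_find_or_add(const char *domain)
{
	int idx;

	pthread_rwlock_rdlock(&table_lock);
	idx = table_find_locked(domain);
	pthread_rwlock_unlock(&table_lock);
	if (idx >= 0)
		return (idx);

	/*
	 * Not found: repeat the search with the write lock held, since
	 * another thread may have inserted the entry in the meantime.
	 */
	pthread_rwlock_wrlock(&table_lock);
	idx = table_find_locked(domain);
	if (idx < 0 && table_cnt < TABLE_SIZE) {
		idx = table_cnt;
		table[table_cnt++] = domain;
	}
	pthread_rwlock_unlock(&table_lock);
	return (idx);
}
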
diff --git a/module/zfs/zfs_ioctl.c b/module/zfs/zfs_ioctl.c
index 593ed74bb..994d65f7e 100644
--- a/module/zfs/zfs_ioctl.c
+++ b/module/zfs/zfs_ioctl.c
@@ -432,7 +432,7 @@ zfs_set_slabel_policy(const char *name, char *strval, cred_t *cr)
/*
* If the existing dataset label is nondefault, check if the
* dataset is mounted (label cannot be changed while mounted).
- * Get the zfsvfs; if there isn't one, then the dataset isn't
+ * Get the zfs_sb_t; if there isn't one, then the dataset isn't
* mounted (or isn't a dataset, doesn't exist, ...).
*/
if (strcasecmp(ds_hexsl, ZFS_MLSLABEL_DEFAULT) != 0) {
@@ -849,20 +849,6 @@ zfs_secpolicy_create(zfs_cmd_t *zc, cred_t *cr)
return (error);
}
-#ifdef HAVE_ZPL
-static int
-zfs_secpolicy_umount(zfs_cmd_t *zc, cred_t *cr)
-{
- int error;
-
- error = secpolicy_fs_unmount(cr, NULL);
- if (error) {
- error = dsl_deleg_access(zc->zc_name, ZFS_DELEG_PERM_MOUNT, cr);
- }
- return (error);
-}
-#endif /* HAVE_ZPL */
-
/*
* Policy for pool operations - create/destroy pools, add vdevs, etc. Requires
* SYS_CONFIG privilege, which is not available in a local zone.
@@ -1105,9 +1091,8 @@ put_nvlist(zfs_cmd_t *zc, nvlist_t *nvl)
return (error);
}
-#ifdef HAVE_ZPL
static int
-getzfsvfs(const char *dsname, zfsvfs_t **zfvp)
+get_zfs_sb(const char *dsname, zfs_sb_t **zsbp)
{
objset_t *os;
int error;
@@ -1121,9 +1106,9 @@ getzfsvfs(const char *dsname, zfsvfs_t **zfvp)
}
mutex_enter(&os->os_user_ptr_lock);
- *zfvp = dmu_objset_get_user(os);
- if (*zfvp) {
- VFS_HOLD((*zfvp)->z_vfs);
+ *zsbp = dmu_objset_get_user(os);
+ if (*zsbp) {
+ mntget((*zsbp)->z_vfs);
} else {
error = ESRCH;
}
@@ -1131,52 +1116,45 @@ getzfsvfs(const char *dsname, zfsvfs_t **zfvp)
dmu_objset_rele(os, FTAG);
return (error);
}
-#endif
/*
- * Find a zfsvfs_t for a mounted filesystem, or create our own, in which
+ * Find a zfs_sb_t for a mounted filesystem, or create our own, in which
* case its z_vfs will be NULL, and it will be opened as the owner.
*/
static int
-zfsvfs_hold(const char *name, void *tag, zfsvfs_t **zfvp, boolean_t writer)
+zfs_sb_hold(const char *name, void *tag, zfs_sb_t **zsbp, boolean_t writer)
{
-#ifdef HAVE_ZPL
int error = 0;
- if (getzfsvfs(name, zfvp) != 0)
- error = zfsvfs_create(name, zfvp);
+ if (get_zfs_sb(name, zsbp) != 0)
+ error = zfs_sb_create(name, zsbp);
if (error == 0) {
- rrw_enter(&(*zfvp)->z_teardown_lock, (writer) ? RW_WRITER :
+ rrw_enter(&(*zsbp)->z_teardown_lock, (writer) ? RW_WRITER :
RW_READER, tag);
- if ((*zfvp)->z_unmounted) {
+ if ((*zsbp)->z_unmounted) {
/*
* XXX we could probably try again, since the unmounting
* thread should be just about to disassociate the
* objset from the zfsvfs.
*/
- rrw_exit(&(*zfvp)->z_teardown_lock, tag);
+ rrw_exit(&(*zsbp)->z_teardown_lock, tag);
return (EBUSY);
}
}
return (error);
-#else
- return ENOTSUP;
-#endif
}
static void
-zfsvfs_rele(zfsvfs_t *zfsvfs, void *tag)
+zfs_sb_rele(zfs_sb_t *zsb, void *tag)
{
-#ifdef HAVE_ZPL
- rrw_exit(&zfsvfs->z_teardown_lock, tag);
+ rrw_exit(&zsb->z_teardown_lock, tag);
- if (zfsvfs->z_vfs) {
- VFS_RELE(zfsvfs->z_vfs);
+ if (zsb->z_vfs) {
+ mntput(zsb->z_vfs);
} else {
- dmu_objset_disown(zfsvfs->z_os, zfsvfs);
- zfsvfs_free(zfsvfs);
+ dmu_objset_disown(zsb->z_os, zsb);
+ zfs_sb_free(zsb);
}
-#endif
}
static int
@@ -2086,7 +2064,6 @@ top:
static int
zfs_prop_set_userquota(const char *dsname, nvpair_t *pair)
{
-#ifdef HAVE_ZPL
const char *propname = nvpair_name(pair);
uint64_t *valary;
unsigned int vallen;
@@ -2095,7 +2072,7 @@ zfs_prop_set_userquota(const char *dsname, nvpair_t *pair)
zfs_userquota_prop_t type;
uint64_t rid;
uint64_t quota;
- zfsvfs_t *zfsvfs;
+ zfs_sb_t *zsb;
int err;
if (nvpair_type(pair) == DATA_TYPE_NVLIST) {
@@ -2120,16 +2097,13 @@ zfs_prop_set_userquota(const char *dsname, nvpair_t *pair)
rid = valary[1];
quota = valary[2];
- err = zfsvfs_hold(dsname, FTAG, &zfsvfs, B_FALSE);
+ err = zfs_sb_hold(dsname, FTAG, &zsb, B_FALSE);
if (err == 0) {
- err = zfs_set_userquota(zfsvfs, type, domain, rid, quota);
- zfsvfs_rele(zfsvfs, FTAG);
+ err = zfs_set_userquota(zsb, type, domain, rid, quota);
+ zfs_sb_rele(zsb, FTAG);
}
return (err);
-#else
- return ENOTSUP;
-#endif
}
/*
@@ -2185,15 +2159,13 @@ zfs_prop_set_special(const char *dsname, zprop_source_t source,
break;
case ZFS_PROP_VERSION:
{
- zfsvfs_t *zfsvfs;
+ zfs_sb_t *zsb;
- if ((err = zfsvfs_hold(dsname, FTAG, &zfsvfs, B_TRUE)) != 0)
+ if ((err = zfs_sb_hold(dsname, FTAG, &zsb, B_TRUE)) != 0)
break;
-#ifdef HAVE_ZPL
- err = zfs_set_version(zfsvfs, intval);
-#endif
- zfsvfs_rele(zfsvfs, FTAG);
+ err = zfs_set_version(zsb, intval);
+ zfs_sb_rele(zsb, FTAG);
if (err == 0 && intval >= ZPL_VERSION_USERSPACE) {
zfs_cmd_t *zc;
@@ -2748,7 +2720,7 @@ zfs_ioc_get_fsacl(zfs_cmd_t *zc)
return (error);
}
-#ifdef HAVE_ZPL
+#ifdef HAVE_SNAPSHOT
/*
* Search the vfs list for a specified resource. Returns a pointer to it
* or NULL if no suitable entry is found. The caller of this routine
@@ -2764,7 +2736,7 @@ zfs_get_vfs(const char *resource)
vfsp = rootvfs;
do {
if (strcmp(refstr_value(vfsp->vfs_resource), resource) == 0) {
- VFS_HOLD(vfsp);
+ mntget(vfsp);
vfs_found = vfsp;
break;
}
@@ -2773,7 +2745,7 @@ zfs_get_vfs(const char *resource)
vfs_list_unlock();
return (vfs_found);
}
-#endif /* HAVE_ZPL */
+#endif /* HAVE_SNAPSHOT */
/* ARGSUSED */
static void
@@ -3128,7 +3100,7 @@ out:
int
zfs_unmount_snap(const char *name, void *arg)
{
-#ifdef HAVE_ZPL
+#ifdef HAVE_SNAPSHOT
vfs_t *vfsp = NULL;
if (arg) {
@@ -3148,14 +3120,14 @@ zfs_unmount_snap(const char *name, void *arg)
int err;
if ((err = vn_vfswlock(vfsp->vfs_vnodecovered)) != 0) {
- VFS_RELE(vfsp);
+ mntput(vfsp);
return (err);
}
- VFS_RELE(vfsp);
+ mntput(vfsp);
if ((err = dounmount(vfsp, flag, kcred)) != 0)
return (err);
}
-#endif /* HAVE_ZPL */
+#endif /* HAVE_SNAPSHOT */
return (0);
}
@@ -3215,10 +3187,9 @@ zfs_ioc_destroy(zfs_cmd_t *zc)
static int
zfs_ioc_rollback(zfs_cmd_t *zc)
{
-#ifdef HAVE_ZPL
dsl_dataset_t *ds, *clone;
int error;
- zfsvfs_t *zfsvfs;
+ zfs_sb_t *zsb;
char *clone_name;
error = dsl_dataset_hold(zc->zc_name, FTAG, &ds);
@@ -3252,8 +3223,8 @@ zfs_ioc_rollback(zfs_cmd_t *zc)
/*
* Do clone swap.
*/
- if (getzfsvfs(zc->zc_name, &zfsvfs) == 0) {
- error = zfs_suspend_fs(zfsvfs);
+ if (get_zfs_sb(zc->zc_name, &zsb) == 0) {
+ error = zfs_suspend_fs(zsb);
if (error == 0) {
int resume_err;
@@ -3265,10 +3236,10 @@ zfs_ioc_rollback(zfs_cmd_t *zc)
} else {
error = EBUSY;
}
- resume_err = zfs_resume_fs(zfsvfs, zc->zc_name);
+ resume_err = zfs_resume_fs(zsb, zc->zc_name);
error = error ? error : resume_err;
}
- VFS_RELE(zfsvfs->z_vfs);
+ mntput(zsb->z_vfs);
} else {
if (dsl_dataset_tryown(ds, B_FALSE, FTAG)) {
error = dsl_dataset_clone_swap(clone, ds, B_TRUE);
@@ -3289,9 +3260,6 @@ out:
if (ds)
dsl_dataset_rele(ds, FTAG);
return (error);
-#else
- return (ENOTSUP);
-#endif /* HAVE_ZPL */
}
/*
@@ -3741,29 +3709,25 @@ zfs_ioc_recv(zfs_cmd_t *zc)
&zc->zc_action_handle);
if (error == 0) {
-#ifdef HAVE_ZPL
- zfsvfs_t *zfsvfs = NULL;
+ zfs_sb_t *zsb = NULL;
- if (getzfsvfs(tofs, &zfsvfs) == 0) {
+ if (get_zfs_sb(tofs, &zsb) == 0) {
/* online recv */
int end_err;
- error = zfs_suspend_fs(zfsvfs);
+ error = zfs_suspend_fs(zsb);
/*
* If the suspend fails, then the recv_end will
* likely also fail, and clean up after itself.
*/
end_err = dmu_recv_end(&drc);
if (error == 0)
- error = zfs_resume_fs(zfsvfs, tofs);
+ error = zfs_resume_fs(zsb, tofs);
error = error ? error : end_err;
- VFS_RELE(zfsvfs->z_vfs);
+ mntput(zsb->z_vfs);
} else {
error = dmu_recv_end(&drc);
}
-#else
- error = dmu_recv_end(&drc);
-#endif /* HAVE_ZPL */
}
zc->zc_cookie = off - fp->f_offset;
@@ -4087,25 +4051,21 @@ zfs_ioc_promote(zfs_cmd_t *zc)
static int
zfs_ioc_userspace_one(zfs_cmd_t *zc)
{
-#ifdef HAVE_ZPL
- zfsvfs_t *zfsvfs;
+ zfs_sb_t *zsb;
int error;
if (zc->zc_objset_type >= ZFS_NUM_USERQUOTA_PROPS)
return (EINVAL);
- error = zfsvfs_hold(zc->zc_name, FTAG, &zfsvfs, B_FALSE);
+ error = zfs_sb_hold(zc->zc_name, FTAG, &zsb, B_FALSE);
if (error)
return (error);
- error = zfs_userspace_one(zfsvfs,
+ error = zfs_userspace_one(zsb,
zc->zc_objset_type, zc->zc_value, zc->zc_guid, &zc->zc_cookie);
- zfsvfs_rele(zfsvfs, FTAG);
+ zfs_sb_rele(zsb, FTAG);
return (error);
-#else
- return (ENOTSUP);
-#endif /* HAVE_ZPL */
}
/*
@@ -4122,20 +4082,21 @@ zfs_ioc_userspace_one(zfs_cmd_t *zc)
static int
zfs_ioc_userspace_many(zfs_cmd_t *zc)
{
-#ifdef HAVE_ZPL
- zfsvfs_t *zfsvfs;
+ zfs_sb_t *zsb;
int bufsize = zc->zc_nvlist_dst_size;
+ int error;
+ void *buf;
if (bufsize <= 0)
return (ENOMEM);
- int error = zfsvfs_hold(zc->zc_name, FTAG, &zfsvfs, B_FALSE);
+ error = zfs_sb_hold(zc->zc_name, FTAG, &zsb, B_FALSE);
if (error)
return (error);
- void *buf = kmem_alloc(bufsize, KM_SLEEP);
+ buf = kmem_alloc(bufsize, KM_SLEEP);
- error = zfs_userspace_many(zfsvfs, zc->zc_objset_type, &zc->zc_cookie,
+ error = zfs_userspace_many(zsb, zc->zc_objset_type, &zc->zc_cookie,
buf, &zc->zc_nvlist_dst_size);
if (error == 0) {
@@ -4144,12 +4105,9 @@ zfs_ioc_userspace_many(zfs_cmd_t *zc)
zc->zc_nvlist_dst_size);
}
kmem_free(buf, bufsize);
- zfsvfs_rele(zfsvfs, FTAG);
+ zfs_sb_rele(zsb, FTAG);
return (error);
-#else
- return (ENOTSUP);
-#endif /* HAVE_ZPL */
}
/*
@@ -4162,25 +4120,24 @@ zfs_ioc_userspace_many(zfs_cmd_t *zc)
static int
zfs_ioc_userspace_upgrade(zfs_cmd_t *zc)
{
-#ifdef HAVE_ZPL
objset_t *os;
int error = 0;
- zfsvfs_t *zfsvfs;
+ zfs_sb_t *zsb;
- if (getzfsvfs(zc->zc_name, &zfsvfs) == 0) {
- if (!dmu_objset_userused_enabled(zfsvfs->z_os)) {
+ if (get_zfs_sb(zc->zc_name, &zsb) == 0) {
+ if (!dmu_objset_userused_enabled(zsb->z_os)) {
/*
* If userused is not enabled, it may be because the
* objset needs to be closed & reopened (to grow the
* objset_phys_t). Suspend/resume the fs will do that.
*/
- error = zfs_suspend_fs(zfsvfs);
+ error = zfs_suspend_fs(zsb);
if (error == 0)
- error = zfs_resume_fs(zfsvfs, zc->zc_name);
+ error = zfs_resume_fs(zsb, zc->zc_name);
}
if (error == 0)
- error = dmu_objset_userspace_upgrade(zfsvfs->z_os);
- VFS_RELE(zfsvfs->z_vfs);
+ error = dmu_objset_userspace_upgrade(zsb->z_os);
+ mntput(zsb->z_vfs);
} else {
/* XXX kind of reading contents without owning */
error = dmu_objset_hold(zc->zc_name, FTAG, &os);
@@ -4192,9 +4149,6 @@ zfs_ioc_userspace_upgrade(zfs_cmd_t *zc)
}
return (error);
-#else
- return (ENOTSUP);
-#endif /* HAVE_ZPL */
}
/*
@@ -4456,10 +4410,10 @@ zfs_smb_acl_purge(znode_t *dzp)
{
zap_cursor_t zc;
zap_attribute_t zap;
- zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(dzp);
int error;
- for (zap_cursor_init(&zc, zfsvfs->z_os, dzp->z_id);
+ for (zap_cursor_init(&zc, zsb->z_os, dzp->z_id);
(error = zap_cursor_retrieve(&zc, &zap)) == 0;
zap_cursor_advance(&zc)) {
if ((error = VOP_REMOVE(ZTOV(dzp), zap.za_name, kcred,
@@ -4479,7 +4433,7 @@ zfs_ioc_smb_acl(zfs_cmd_t *zc)
znode_t *dzp;
vnode_t *resourcevp = NULL;
znode_t *sharedir;
- zfsvfs_t *zfsvfs;
+ zfs_sb_t *zsb;
nvlist_t *nvlist;
char *src, *target;
vattr_t vattr;
@@ -4500,17 +4454,17 @@ zfs_ioc_smb_acl(zfs_cmd_t *zc)
}
dzp = VTOZ(vp);
- zfsvfs = dzp->z_zfsvfs;
- ZFS_ENTER(zfsvfs);
+ zsb = ZTOZSB(dzp);
+ ZFS_ENTER(zsb);
/*
* Create share dir if its missing.
*/
- mutex_enter(&zfsvfs->z_lock);
- if (zfsvfs->z_shares_dir == 0) {
+ mutex_enter(&zsb->z_lock);
+ if (zsb->z_shares_dir == 0) {
dmu_tx_t *tx;
- tx = dmu_tx_create(zfsvfs->z_os);
+ tx = dmu_tx_create(zsb->z_os);
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, TRUE,
ZFS_SHARES_DIR);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
@@ -4518,29 +4472,28 @@ zfs_ioc_smb_acl(zfs_cmd_t *zc)
if (error) {
dmu_tx_abort(tx);
} else {
- error = zfs_create_share_dir(zfsvfs, tx);
+ error = zfs_create_share_dir(zsb, tx);
dmu_tx_commit(tx);
}
if (error) {
- mutex_exit(&zfsvfs->z_lock);
+ mutex_exit(&zsb->z_lock);
VN_RELE(vp);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
}
- mutex_exit(&zfsvfs->z_lock);
+ mutex_exit(&zsb->z_lock);
- ASSERT(zfsvfs->z_shares_dir);
- if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &sharedir)) != 0) {
+ ASSERT(zsb->z_shares_dir);
+ if ((error = zfs_zget(zsb, zsb->z_shares_dir, &sharedir)) != 0) {
VN_RELE(vp);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
switch (zc->zc_cookie) {
case ZFS_SMB_ACL_ADD:
vattr.va_mask = AT_MODE|AT_UID|AT_GID|AT_TYPE;
- vattr.va_type = VREG;
vattr.va_mode = S_IFREG|0777;
vattr.va_uid = 0;
vattr.va_gid = 0;
@@ -4565,7 +4518,7 @@ zfs_ioc_smb_acl(zfs_cmd_t *zc)
if ((error = get_nvlist(zc->zc_nvlist_src,
zc->zc_nvlist_src_size, zc->zc_iflags, &nvlist)) != 0) {
VN_RELE(vp);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
if (nvlist_lookup_string(nvlist, ZFS_SMB_ACL_SRC, &src) ||
@@ -4573,7 +4526,7 @@ zfs_ioc_smb_acl(zfs_cmd_t *zc)
&target)) {
VN_RELE(vp);
VN_RELE(ZTOV(sharedir));
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
nvlist_free(nvlist);
return (error);
}
@@ -4594,7 +4547,7 @@ zfs_ioc_smb_acl(zfs_cmd_t *zc)
VN_RELE(vp);
VN_RELE(ZTOV(sharedir));
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
#else
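
The ioctl paths above funnel through zfs_sb_hold() and zfs_sb_rele(): when the dataset is mounted, its state is borrowed and the mount reference is taken with mntget()/mntput() (replacing VFS_HOLD()/VFS_RELE()); otherwise a private instance is created and owned for the duration of the call. In both cases z_teardown_lock is held across the operation and the release path undoes exactly what the hold path did. A reduced userspace sketch of that pairing follows; the struct, fields, and function names are illustrative, not the ZFS API.

/* fs_hold.c -- shape of the zfs_sb_hold()/zfs_sb_rele() pairing. */
#include <pthread.h>
#include <stdlib.h>

struct fs_state {
	pthread_rwlock_t teardown_lock;
	int owned;		/* nonzero: created here, freed on release */
};

/*
 * Borrow the mounted instance when one exists (assumed to have
 * owned == 0), otherwise construct a private one the caller owns.
 */
struct fs_state *
fs_hold(struct fs_state *mounted_fs)
{
	struct fs_state *fs = mounted_fs;

	if (fs == NULL) {
		fs = calloc(1, sizeof (*fs));
		if (fs == NULL)
			return (NULL);
		pthread_rwlock_init(&fs->teardown_lock, NULL);
		fs->owned = 1;
	}
	pthread_rwlock_rdlock(&fs->teardown_lock);
	return (fs);
}

/* Release must undo exactly what fs_hold() did, in reverse order. */
void
fs_rele(struct fs_state *fs)
{
	pthread_rwlock_unlock(&fs->teardown_lock);
	if (fs->owned) {
		pthread_rwlock_destroy(&fs->teardown_lock);
		free(fs);
	}
}
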
diff --git a/module/zfs/zfs_log.c b/module/zfs/zfs_log.c
index 59a6451c7..945b734ce 100644
--- a/module/zfs/zfs_log.c
+++ b/module/zfs/zfs_log.c
@@ -22,7 +22,6 @@
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
*/
-#ifdef HAVE_ZPL
#include <sys/types.h>
#include <sys/param.h>
@@ -411,9 +410,9 @@ zfs_log_symlink(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
lr->lr_uid = zp->z_uid;
lr->lr_gid = zp->z_gid;
lr->lr_mode = zp->z_mode;
- (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zp->z_zfsvfs), &lr->lr_gen,
+ (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(ZTOZSB(zp)), &lr->lr_gen,
sizeof (uint64_t));
- (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zp->z_zfsvfs),
+ (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(zp)),
lr->lr_crtime, sizeof (uint64_t) * 2);
bcopy(name, (char *)(lr + 1), namesize);
bcopy(link, (char *)(lr + 1) + namesize, linksize);
@@ -496,7 +495,7 @@ zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype,
itx = zil_itx_create(txtype, sizeof (*lr) +
(write_state == WR_COPIED ? len : 0));
lr = (lr_write_t *)&itx->itx_lr;
- if (write_state == WR_COPIED && dmu_read(zp->z_zfsvfs->z_os,
+ if (write_state == WR_COPIED && dmu_read(ZTOZSB(zp)->z_os,
zp->z_id, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
zil_itx_destroy(itx);
itx = zil_itx_create(txtype, sizeof (*lr));
@@ -513,7 +512,7 @@ zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype,
lr->lr_blkoff = 0;
BP_ZERO(&lr->lr_blkptr);
- itx->itx_private = zp->z_zfsvfs;
+ itx->itx_private = ZTOZSB(zp);
if (!(ioflag & (FSYNC | FDSYNC)) && (zp->z_sync_cnt == 0) &&
(fsync_cnt == 0))
@@ -629,7 +628,7 @@ zfs_log_acl(zilog_t *zilog, dmu_tx_t *tx, znode_t *zp,
if (zil_replaying(zilog, tx) || zp->z_unlinked)
return;
- txtype = (zp->z_zfsvfs->z_version < ZPL_VERSION_FUID) ?
+ txtype = (ZTOZSB(zp)->z_version < ZPL_VERSION_FUID) ?
TX_ACL_V0 : TX_ACL;
if (txtype == TX_ACL)
@@ -667,14 +666,14 @@ zfs_log_acl(zilog_t *zilog, dmu_tx_t *tx, znode_t *zp,
start = (caddr_t)start + ZIL_ACE_LENGTH(aclbytes);
+#ifdef HAVE_XVATTR
if (fuidp) {
start = zfs_log_fuid_ids(fuidp, start);
(void) zfs_log_fuid_domains(fuidp, start);
}
+#endif /* HAVE_XVATTR */
}
itx->itx_sync = (zp->z_sync_cnt != 0);
zil_itx_assign(zilog, itx, tx);
}
-
-#endif /* HAVE_ZPL */
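
The zfs_log_write() hunk above shows the WR_COPIED fallback: the write payload is embedded directly in the log record when it can still be read at log time, and if that read fails the record is recreated without the inline copy and marked WR_NEED_COPY so the data is fetched later. A reduced sketch of that embed-or-reference decision (the types and the read callback are illustrative, not the ZIL API):

/* wr_copied.c -- embed the payload when readable now, otherwise fall
 * back to a record that only references it. */
#include <stdlib.h>

enum write_state { WR_COPIED, WR_NEED_COPY };

struct log_record {
	enum write_state	lr_state;
	size_t			lr_len;
	char			lr_data[];	/* inline payload if copied */
};

/* read_fn returns 0 on success, nonzero if the data is unavailable. */
struct log_record *
log_write(size_t len, int (*read_fn)(void *buf, size_t len))
{
	struct log_record *lr;

	lr = malloc(sizeof (*lr) + len);
	if (lr == NULL)
		return (NULL);
	lr->lr_state = WR_COPIED;
	lr->lr_len = len;

	if (read_fn(lr->lr_data, len) != 0) {
		/* Data not readable now: log a reference-only record. */
		free(lr);
		lr = malloc(sizeof (*lr));
		if (lr == NULL)
			return (NULL);
		lr->lr_state = WR_NEED_COPY;
		lr->lr_len = len;
	}
	return (lr);
}
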
diff --git a/module/zfs/zfs_rlock.c b/module/zfs/zfs_rlock.c
index 4e3c176a3..6709ce80b 100644
--- a/module/zfs/zfs_rlock.c
+++ b/module/zfs/zfs_rlock.c
@@ -134,7 +134,7 @@ zfs_range_lock_writer(znode_t *zp, rl_t *new)
*/
end_size = MAX(zp->z_size, new->r_off + len);
if (end_size > zp->z_blksz && (!ISP2(zp->z_blksz) ||
- zp->z_blksz < zp->z_zfsvfs->z_max_blksz)) {
+ zp->z_blksz < ZTOZSB(zp)->z_max_blksz)) {
new->r_off = 0;
new->r_len = UINT64_MAX;
}
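
The range-lock change above only swaps zp->z_zfsvfs for ZTOZSB(zp); the logic is unchanged: a writer extending the file past the current block size takes the whole-file lock whenever that block size might still grow, i.e. when it is not yet a power of two or is still below the filesystem maximum. ZFS's ISP2() macro is the usual power-of-two test; a standalone sketch of that check (the local macro definition is illustrative):

/* isp2.c -- the power-of-two test behind the ISP2() check above. */
#include <assert.h>
#include <stdint.h>

#define	LOCAL_ISP2(x)	(((x) & ((x) - 1)) == 0)

int
main(void)
{
	/*
	 * A block size that is not yet a power of two, or still below
	 * the maximum, may have to grow, so the writer widens its
	 * range lock to cover the whole file.
	 */
	uint64_t blksz = 17 * 1024;		/* not a power of two */
	uint64_t max_blksz = 128 * 1024;

	assert(!LOCAL_ISP2(blksz));
	assert(LOCAL_ISP2(max_blksz));
	assert(blksz < max_blksz);

	return (0);
}
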
diff --git a/module/zfs/zfs_sa.c b/module/zfs/zfs_sa.c
index 68bce0a6e..ed696490f 100644
--- a/module/zfs/zfs_sa.c
+++ b/module/zfs/zfs_sa.c
@@ -81,7 +81,7 @@ zfs_sa_readlink(znode_t *zp, uio_t *uio)
MIN((size_t)bufsz, uio->uio_resid), UIO_READ, uio);
} else {
dmu_buf_t *dbp;
- if ((error = dmu_buf_hold(zp->z_zfsvfs->z_os, zp->z_id,
+ if ((error = dmu_buf_hold(ZTOZSB(zp)->z_os, zp->z_id,
0, FTAG, &dbp, DMU_READ_NO_PREFETCH)) == 0) {
error = uiomove(dbp->db_data,
MIN((size_t)bufsz, uio->uio_resid), UIO_READ, uio);
@@ -107,7 +107,7 @@ zfs_sa_symlink(znode_t *zp, char *link, int len, dmu_tx_t *tx)
dmu_buf_t *dbp;
zfs_grow_blocksize(zp, len, tx);
- VERIFY(0 == dmu_buf_hold(zp->z_zfsvfs->z_os,
+ VERIFY(0 == dmu_buf_hold(ZTOZSB(zp)->z_os,
zp->z_id, 0, FTAG, &dbp, DMU_READ_NO_PREFETCH));
dmu_buf_will_dirty(dbp, tx);
@@ -122,13 +122,13 @@ zfs_sa_symlink(znode_t *zp, char *link, int len, dmu_tx_t *tx)
void
zfs_sa_get_scanstamp(znode_t *zp, xvattr_t *xvap)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
xoptattr_t *xoap;
ASSERT(MUTEX_HELD(&zp->z_lock));
VERIFY((xoap = xva_getxoptattr(xvap)) != NULL);
if (zp->z_is_sa) {
- if (sa_lookup(zp->z_sa_hdl, SA_ZPL_SCANSTAMP(zfsvfs),
+ if (sa_lookup(zp->z_sa_hdl, SA_ZPL_SCANSTAMP(zsb),
&xoap->xoa_av_scanstamp,
sizeof (xoap->xoa_av_scanstamp)) != 0)
return;
@@ -156,13 +156,13 @@ zfs_sa_get_scanstamp(znode_t *zp, xvattr_t *xvap)
void
zfs_sa_set_scanstamp(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
xoptattr_t *xoap;
ASSERT(MUTEX_HELD(&zp->z_lock));
VERIFY((xoap = xva_getxoptattr(xvap)) != NULL);
if (zp->z_is_sa)
- VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SCANSTAMP(zfsvfs),
+ VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SCANSTAMP(zsb),
&xoap->xoa_av_scanstamp,
sizeof (xoap->xoa_av_scanstamp), tx));
else {
@@ -179,7 +179,7 @@ zfs_sa_set_scanstamp(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
xoap->xoa_av_scanstamp, sizeof (xoap->xoa_av_scanstamp));
zp->z_pflags |= ZFS_BONUS_SCANSTAMP;
- VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
+ VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_FLAGS(zsb),
&zp->z_pflags, sizeof (uint64_t), tx));
}
}
@@ -198,7 +198,7 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
{
dmu_buf_t *db = sa_get_db(hdl);
znode_t *zp = sa_get_userdata(hdl);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
int count = 0;
sa_bulk_attr_t *bulk, *sa_attrs;
zfs_acl_locator_cb_t locate = { 0 };
@@ -216,7 +216,7 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
* and ready the ACL would require special "locked"
* interfaces that would be messy
*/
- if (zp->z_acl_cached == NULL || ZTOV(zp)->v_type == VLNK)
+ if (zp->z_acl_cached == NULL || S_ISLNK(ZTOI(zp)->i_mode))
return;
/*
@@ -237,16 +237,16 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
/* First do a bulk query of the attributes that aren't cached */
bulk = kmem_alloc(sizeof(sa_bulk_attr_t) * 20, KM_SLEEP);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &crtime, 16);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_XATTR(zfsvfs), NULL, &xattr, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL, &rdev, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL, &uid, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL, &gid, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zsb), NULL, &crtime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL, &mode, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zsb), NULL, &parent, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_XATTR(zsb), NULL, &xattr, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zsb), NULL, &rdev, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL, &uid, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL, &gid, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zsb), NULL,
&znode_acl, 88);
if (sa_bulk_lookup_locked(hdl, bulk, count) != 0) {
@@ -260,42 +260,42 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
*/
count = 0;
sa_attrs = kmem_zalloc(sizeof(sa_bulk_attr_t) * 20, KM_SLEEP);
- SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
- SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SIZE(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MODE(zsb), NULL, &mode, 8);
+ SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SIZE(zsb), NULL,
&zp->z_size, 8);
- SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_GEN(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_GEN(zsb),
NULL, &zp->z_gen, 8);
- SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_UID(zfsvfs), NULL, &uid, 8);
- SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_GID(zfsvfs), NULL, &gid, 8);
- SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_PARENT(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_UID(zsb), NULL, &uid, 8);
+ SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_GID(zsb), NULL, &gid, 8);
+ SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_PARENT(zsb),
NULL, &parent, 8);
- SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_FLAGS(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_FLAGS(zsb), NULL,
&zp->z_pflags, 8);
- SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_ATIME(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_ATIME(zsb), NULL,
zp->z_atime, 16);
- SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MTIME(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MTIME(zsb), NULL,
&mtime, 16);
- SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_CTIME(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_CTIME(zsb), NULL,
&ctime, 16);
- SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_CRTIME(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_CRTIME(zsb), NULL,
&crtime, 16);
- SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_LINKS(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_LINKS(zsb), NULL,
&zp->z_links, 8);
- if (zp->z_vnode->v_type == VBLK || zp->z_vnode->v_type == VCHR)
- SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_RDEV(zfsvfs), NULL,
+ if (S_ISBLK(ZTOI(zp)->i_mode) || S_ISCHR(ZTOI(zp)->i_mode))
+ SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_RDEV(zsb), NULL,
&rdev, 8);
- SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_DACL_COUNT(zsb), NULL,
&zp->z_acl_cached->z_acl_count, 8);
if (zp->z_acl_cached->z_version < ZFS_ACL_VERSION_FUID)
zfs_acl_xform(zp, zp->z_acl_cached, CRED());
locate.cb_aclp = zp->z_acl_cached;
- SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_DACL_ACES(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_DACL_ACES(zsb),
zfs_acl_data_locator, &locate, zp->z_acl_cached->z_acl_bytes);
if (xattr)
- SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_XATTR(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_XATTR(zsb),
NULL, &xattr, 8);
#ifdef HAVE_SCANSTAMP
@@ -304,7 +304,7 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
if (zp->z_pflags & ZFS_BONUS_SCANSTAMP) {
bcopy((caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
scanstamp, AV_SCANSTAMP_SZ);
- SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SCANSTAMP(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SCANSTAMP(zsb),
NULL, scanstamp, AV_SCANSTAMP_SZ);
zp->z_pflags &= ~ZFS_BONUS_SCANSTAMP;
}
@@ -314,7 +314,7 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
VERIFY(sa_replace_all_by_template_locked(hdl, sa_attrs,
count, tx) == 0);
if (znode_acl.z_acl_extern_obj)
- VERIFY(0 == dmu_object_free(zfsvfs->z_os,
+ VERIFY(0 == dmu_object_free(zsb->z_os,
znode_acl.z_acl_extern_obj, tx));
zp->z_is_sa = B_TRUE;
@@ -328,7 +328,7 @@ done:
void
zfs_sa_upgrade_txholds(dmu_tx_t *tx, znode_t *zp)
{
- if (!zp->z_zfsvfs->z_use_sa || zp->z_is_sa)
+ if (!ZTOZSB(zp)->z_use_sa || zp->z_is_sa)
return;
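
zfs_sa_upgrade() above relies on the SA bulk-attribute pattern: SA_ADD_BULK_ATTR() appends an (attribute, buffer, length) entry to an array while advancing a counter, and the whole array is then handed to sa_bulk_lookup_locked() or sa_replace_all_by_template_locked() in one call. The sketch below shows the shape of that builder pattern; the struct, macro, and attribute ids are illustrative, not the real SA API.

/* bulk_attr.c -- append entries with a counter, process them in one pass. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

typedef struct bulk_attr {
	int	ba_id;
	void	*ba_buf;
	size_t	ba_len;
} bulk_attr_t;

#define	ADD_BULK_ATTR(tab, cnt, id, buf, len)	do {	\
	(tab)[cnt].ba_id = (id);			\
	(tab)[cnt].ba_buf = (buf);			\
	(tab)[cnt].ba_len = (len);			\
	(cnt)++;					\
} while (0)

int
main(void)
{
	bulk_attr_t tab[4];
	int count = 0;
	uint64_t mode = 0755, size = 4096;

	ADD_BULK_ATTR(tab, count, 1, &mode, sizeof (mode));
	ADD_BULK_ATTR(tab, count, 2, &size, sizeof (size));

	/*
	 * One pass over everything queued, mirroring sa_bulk_lookup()
	 * and sa_replace_all_by_template_locked() in the patch above.
	 */
	for (int i = 0; i < count; i++)
		printf("attr %d: %zu bytes\n", tab[i].ba_id, tab[i].ba_len);

	return (0);
}
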
diff --git a/module/zfs/zfs_vfsops.c b/module/zfs/zfs_vfsops.c
index 7c980b118..1763d171a 100644
--- a/module/zfs/zfs_vfsops.c
+++ b/module/zfs/zfs_vfsops.c
@@ -38,6 +38,7 @@
#include <sys/cmn_err.h>
#include "fs/fs_subr.h"
#include <sys/zfs_znode.h>
+#include <sys/zfs_vnops.h>
#include <sys/zfs_dir.h>
#include <sys/zil.h>
#include <sys/fs/zfs.h>
@@ -62,13 +63,13 @@
#include <sys/dmu_objset.h>
#include <sys/spa_boot.h>
#include <sys/sa.h>
+#include <sys/zpl.h>
#include "zfs_comutil.h"
-#ifdef HAVE_ZPL
/*ARGSUSED*/
int
-zfs_sync(vfs_t *vfsp, short flag, cred_t *cr)
+zfs_sync(zfs_sb_t *zsb, short flag, cred_t *cr)
{
/*
* Data integrity is job one. We don't want a compromised kernel
@@ -77,15 +78,14 @@ zfs_sync(vfs_t *vfsp, short flag, cred_t *cr)
if (unlikely(oops_in_progress))
return (0);
- if (vfsp != NULL) {
+ if (zsb != NULL) {
/*
* Sync a specific filesystem.
*/
- zfsvfs_t *zfsvfs = vfsp->vfs_data;
dsl_pool_t *dp;
- ZFS_ENTER(zfsvfs);
- dp = dmu_objset_pool(zfsvfs->z_os);
+ ZFS_ENTER(zsb);
+ dp = dmu_objset_pool(zsb->z_os);
#ifdef HAVE_SHUTDOWN
/*
@@ -96,15 +96,15 @@ zfs_sync(vfs_t *vfsp, short flag, cred_t *cr)
* notifiers: {un}register_reboot_notifier().
*/
if (sys_shutdown && spa_suspended(dp->dp_spa)) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (0);
}
#endif /* HAVE_SHUTDOWN */
- if (zfsvfs->z_log != NULL)
- zil_commit(zfsvfs->z_log, 0);
+ if (zsb->z_log != NULL)
+ zil_commit(zsb->z_log, 0);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
} else {
/*
* Sync all ZFS filesystems. This is what happens when you
@@ -121,113 +121,106 @@ EXPORT_SYMBOL(zfs_sync);
static void
atime_changed_cb(void *arg, uint64_t newval)
{
- zfsvfs_t *zfsvfs = arg;
+ zfs_sb_t *zsb = arg;
+ struct super_block *sb = zsb->z_sb;
+ struct vfsmount *vfs = zsb->z_vfs;
if (newval == TRUE) {
- zfsvfs->z_atime = TRUE;
- vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOATIME);
- vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_ATIME, NULL, 0);
+ vfs->mnt_flags &= ~MNT_NOATIME;
+ sb->s_flags &= ~MS_NOATIME;
+ zsb->z_atime = TRUE;
} else {
- zfsvfs->z_atime = FALSE;
- vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_ATIME);
- vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOATIME, NULL, 0);
+ vfs->mnt_flags |= MNT_NOATIME;
+ sb->s_flags |= MS_NOATIME;
+ zsb->z_atime = FALSE;
}
}
static void
xattr_changed_cb(void *arg, uint64_t newval)
{
- zfsvfs_t *zfsvfs = arg;
+ zfs_sb_t *zsb = arg;
if (newval == TRUE) {
- /* XXX locking on vfs_flag? */
- zfsvfs->z_vfs->vfs_flag |= VFS_XATTR;
- vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOXATTR);
- vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_XATTR, NULL, 0);
+ zsb->z_flags |= ZSB_XATTR_USER;
} else {
- /* XXX locking on vfs_flag? */
- zfsvfs->z_vfs->vfs_flag &= ~VFS_XATTR;
- vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_XATTR);
- vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOXATTR, NULL, 0);
+ zsb->z_flags &= ~ZSB_XATTR_USER;
}
}
static void
blksz_changed_cb(void *arg, uint64_t newval)
{
- zfsvfs_t *zfsvfs = arg;
+ zfs_sb_t *zsb = arg;
if (newval < SPA_MINBLOCKSIZE ||
newval > SPA_MAXBLOCKSIZE || !ISP2(newval))
newval = SPA_MAXBLOCKSIZE;
- zfsvfs->z_max_blksz = newval;
- zfsvfs->z_vfs->vfs_bsize = newval;
+ zsb->z_max_blksz = newval;
}
static void
readonly_changed_cb(void *arg, uint64_t newval)
{
- zfsvfs_t *zfsvfs = arg;
+ zfs_sb_t *zsb = arg;
+ struct super_block *sb = zsb->z_sb;
+ struct vfsmount *vfs = zsb->z_vfs;
if (newval) {
- /* XXX locking on vfs_flag? */
- zfsvfs->z_vfs->vfs_flag |= VFS_RDONLY;
- vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_RW);
- vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_RO, NULL, 0);
+ vfs->mnt_flags |= MNT_READONLY;
+ sb->s_flags |= MS_RDONLY;
} else {
- /* XXX locking on vfs_flag? */
- zfsvfs->z_vfs->vfs_flag &= ~VFS_RDONLY;
- vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_RO);
- vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_RW, NULL, 0);
+ vfs->mnt_flags &= ~MNT_READONLY;
+ sb->s_flags &= ~MS_RDONLY;
}
}
static void
devices_changed_cb(void *arg, uint64_t newval)
{
- zfsvfs_t *zfsvfs = arg;
+ zfs_sb_t *zsb = arg;
+ struct super_block *sb = zsb->z_sb;
+ struct vfsmount *vfs = zsb->z_vfs;
if (newval == FALSE) {
- zfsvfs->z_vfs->vfs_flag |= VFS_NODEVICES;
- vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_DEVICES);
- vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NODEVICES, NULL, 0);
+ vfs->mnt_flags |= MNT_NODEV;
+ sb->s_flags |= MS_NODEV;
} else {
- zfsvfs->z_vfs->vfs_flag &= ~VFS_NODEVICES;
- vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NODEVICES);
- vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_DEVICES, NULL, 0);
+ vfs->mnt_flags &= ~MNT_NODEV;
+ sb->s_flags &= ~MS_NODEV;
}
}
static void
setuid_changed_cb(void *arg, uint64_t newval)
{
- zfsvfs_t *zfsvfs = arg;
+ zfs_sb_t *zsb = arg;
+ struct super_block *sb = zsb->z_sb;
+ struct vfsmount *vfs = zsb->z_vfs;
if (newval == FALSE) {
- zfsvfs->z_vfs->vfs_flag |= VFS_NOSETUID;
- vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_SETUID);
- vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOSETUID, NULL, 0);
+ vfs->mnt_flags |= MNT_NOSUID;
+ sb->s_flags |= MS_NOSUID;
} else {
- zfsvfs->z_vfs->vfs_flag &= ~VFS_NOSETUID;
- vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOSETUID);
- vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_SETUID, NULL, 0);
+ vfs->mnt_flags &= ~MNT_NOSUID;
+ sb->s_flags &= ~MS_NOSUID;
}
}
static void
exec_changed_cb(void *arg, uint64_t newval)
{
- zfsvfs_t *zfsvfs = arg;
+ zfs_sb_t *zsb = arg;
+ struct super_block *sb = zsb->z_sb;
+ struct vfsmount *vfs = zsb->z_vfs;
if (newval == FALSE) {
- zfsvfs->z_vfs->vfs_flag |= VFS_NOEXEC;
- vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_EXEC);
- vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOEXEC, NULL, 0);
+ vfs->mnt_flags |= MNT_NOEXEC;
+ sb->s_flags |= MS_NOEXEC;
} else {
- zfsvfs->z_vfs->vfs_flag &= ~VFS_NOEXEC;
- vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOEXEC);
- vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_EXEC, NULL, 0);
+ vfs->mnt_flags &= ~MNT_NOEXEC;
+ sb->s_flags &= ~MS_NOEXEC;
}
}
@@ -242,138 +235,89 @@ exec_changed_cb(void *arg, uint64_t newval)
static void
nbmand_changed_cb(void *arg, uint64_t newval)
{
- zfsvfs_t *zfsvfs = arg;
- if (newval == FALSE) {
- vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NBMAND);
- vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NONBMAND, NULL, 0);
+ zfs_sb_t *zsb = arg;
+ struct super_block *sb = zsb->z_sb;
+
+ if (newval == TRUE) {
+ sb->s_flags |= MS_MANDLOCK;
} else {
- vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NONBMAND);
- vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NBMAND, NULL, 0);
+ sb->s_flags &= ~MS_MANDLOCK;
}
}
static void
snapdir_changed_cb(void *arg, uint64_t newval)
{
- zfsvfs_t *zfsvfs = arg;
-
- zfsvfs->z_show_ctldir = newval;
+ ((zfs_sb_t *)arg)->z_show_ctldir = newval;
}
static void
vscan_changed_cb(void *arg, uint64_t newval)
{
- zfsvfs_t *zfsvfs = arg;
-
- zfsvfs->z_vscan = newval;
+ ((zfs_sb_t *)arg)->z_vscan = newval;
}
static void
acl_inherit_changed_cb(void *arg, uint64_t newval)
{
- zfsvfs_t *zfsvfs = arg;
-
- zfsvfs->z_acl_inherit = newval;
+ ((zfs_sb_t *)arg)->z_acl_inherit = newval;
}
int
-zfs_register_callbacks(vfs_t *vfsp)
+zfs_register_callbacks(zfs_sb_t *zsb)
{
+ struct vfsmount *vfsp = zsb->z_vfs;
struct dsl_dataset *ds = NULL;
- objset_t *os = NULL;
- zfsvfs_t *zfsvfs = NULL;
+ objset_t *os = zsb->z_os;
uint64_t nbmand;
- int readonly, do_readonly = B_FALSE;
- int setuid, do_setuid = B_FALSE;
- int exec, do_exec = B_FALSE;
- int devices, do_devices = B_FALSE;
- int xattr, do_xattr = B_FALSE;
- int atime, do_atime = B_FALSE;
+ boolean_t readonly = B_FALSE;
+ boolean_t setuid = B_TRUE;
+ boolean_t exec = B_TRUE;
+ boolean_t devices = B_TRUE;
+ boolean_t xattr = B_TRUE;
+ boolean_t atime = B_TRUE;
+ char osname[MAXNAMELEN];
int error = 0;
- ASSERT(vfsp);
- zfsvfs = vfsp->vfs_data;
- ASSERT(zfsvfs);
- os = zfsvfs->z_os;
-
/*
- * The act of registering our callbacks will destroy any mount
- * options we may have. In order to enable temporary overrides
- * of mount options, we stash away the current values and
- * restore them after we register the callbacks.
+ * While Linux allows multiple vfs mounts per super block, we have
+ * artificially limited it to one in zfs_fill_super. Thus it is
+ * safe for us to modify the vfs mount flags through the callbacks.
*/
- if (vfs_optionisset(vfsp, MNTOPT_RO, NULL) ||
- !spa_writeable(dmu_objset_spa(os))) {
+ if ((vfsp->mnt_flags & MNT_READONLY) ||
+ !spa_writeable(dmu_objset_spa(os)))
readonly = B_TRUE;
- do_readonly = B_TRUE;
- } else if (vfs_optionisset(vfsp, MNTOPT_RW, NULL)) {
- readonly = B_FALSE;
- do_readonly = B_TRUE;
- }
- if (vfs_optionisset(vfsp, MNTOPT_NOSUID, NULL)) {
+
+ if (vfsp->mnt_flags & MNT_NOSUID) {
devices = B_FALSE;
setuid = B_FALSE;
- do_devices = B_TRUE;
- do_setuid = B_TRUE;
} else {
- if (vfs_optionisset(vfsp, MNTOPT_NODEVICES, NULL)) {
+ if (vfsp->mnt_flags & MNT_NODEV)
devices = B_FALSE;
- do_devices = B_TRUE;
- } else if (vfs_optionisset(vfsp, MNTOPT_DEVICES, NULL)) {
- devices = B_TRUE;
- do_devices = B_TRUE;
- }
-
- if (vfs_optionisset(vfsp, MNTOPT_NOSETUID, NULL)) {
- setuid = B_FALSE;
- do_setuid = B_TRUE;
- } else if (vfs_optionisset(vfsp, MNTOPT_SETUID, NULL)) {
- setuid = B_TRUE;
- do_setuid = B_TRUE;
- }
}
- if (vfs_optionisset(vfsp, MNTOPT_NOEXEC, NULL)) {
+
+ if (vfsp->mnt_flags & MNT_NOEXEC)
exec = B_FALSE;
- do_exec = B_TRUE;
- } else if (vfs_optionisset(vfsp, MNTOPT_EXEC, NULL)) {
- exec = B_TRUE;
- do_exec = B_TRUE;
- }
- if (vfs_optionisset(vfsp, MNTOPT_NOXATTR, NULL)) {
- xattr = B_FALSE;
- do_xattr = B_TRUE;
- } else if (vfs_optionisset(vfsp, MNTOPT_XATTR, NULL)) {
- xattr = B_TRUE;
- do_xattr = B_TRUE;
- }
- if (vfs_optionisset(vfsp, MNTOPT_NOATIME, NULL)) {
+
+ if (vfsp->mnt_flags & MNT_NOATIME)
atime = B_FALSE;
- do_atime = B_TRUE;
- } else if (vfs_optionisset(vfsp, MNTOPT_ATIME, NULL)) {
- atime = B_TRUE;
- do_atime = B_TRUE;
- }
/*
- * nbmand is a special property. It can only be changed at
- * mount time.
+ * nbmand is a special property which may only be changed at
+ * mount time. Unfortunately, Linux does not have a VFS mount
+ * flag for this; instead it is a super block flag. So setting
+ * this option at mount time will have to wait until we can
+ * parse the mount option string. For now we rely on the nbmand
+ * value stored with the object set. Additional mount options
+ * still to be handled:
*
- * This is weird, but it is documented to only be changeable
- * at mount time.
+ * case: sensitive|insensitive|mixed
+ * zerocopy: on|off
*/
- if (vfs_optionisset(vfsp, MNTOPT_NONBMAND, NULL)) {
- nbmand = B_FALSE;
- } else if (vfs_optionisset(vfsp, MNTOPT_NBMAND, NULL)) {
- nbmand = B_TRUE;
- } else {
- char osname[MAXNAMELEN];
- dmu_objset_name(os, osname);
- if ((error = dsl_prop_get_integer(osname, "nbmand", &nbmand,
- NULL))) {
- return (error);
- }
- }
+ dmu_objset_name(os, osname);
+ if ((error = dsl_prop_get_integer(osname, "nbmand", &nbmand, NULL)))
+ return (error);
/*
* Register property callbacks.
@@ -383,45 +327,39 @@ zfs_register_callbacks(vfs_t *vfsp)
* overboard...
*/
ds = dmu_objset_ds(os);
- error = dsl_prop_register(ds, "atime", atime_changed_cb, zfsvfs);
+ error = dsl_prop_register(ds,
+ "atime", atime_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
- "xattr", xattr_changed_cb, zfsvfs);
+ "xattr", xattr_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
- "recordsize", blksz_changed_cb, zfsvfs);
+ "recordsize", blksz_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
- "readonly", readonly_changed_cb, zfsvfs);
+ "readonly", readonly_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
- "devices", devices_changed_cb, zfsvfs);
+ "devices", devices_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
- "setuid", setuid_changed_cb, zfsvfs);
+ "setuid", setuid_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
- "exec", exec_changed_cb, zfsvfs);
+ "exec", exec_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
- "snapdir", snapdir_changed_cb, zfsvfs);
+ "snapdir", snapdir_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
- "aclinherit", acl_inherit_changed_cb, zfsvfs);
+ "aclinherit", acl_inherit_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
- "vscan", vscan_changed_cb, zfsvfs);
+ "vscan", vscan_changed_cb, zsb);
if (error)
goto unregister;
/*
- * Invoke our callbacks to restore temporary mount options.
+ * Invoke our callbacks to set required flags.
*/
- if (do_readonly)
- readonly_changed_cb(zfsvfs, readonly);
- if (do_setuid)
- setuid_changed_cb(zfsvfs, setuid);
- if (do_exec)
- exec_changed_cb(zfsvfs, exec);
- if (do_devices)
- devices_changed_cb(zfsvfs, devices);
- if (do_xattr)
- xattr_changed_cb(zfsvfs, xattr);
- if (do_atime)
- atime_changed_cb(zfsvfs, atime);
-
- nbmand_changed_cb(zfsvfs, nbmand);
+ readonly_changed_cb(zsb, readonly);
+ setuid_changed_cb(zsb, setuid);
+ exec_changed_cb(zsb, exec);
+ devices_changed_cb(zsb, devices);
+ xattr_changed_cb(zsb, xattr);
+ atime_changed_cb(zsb, atime);
+ nbmand_changed_cb(zsb, nbmand);
return (0);
@@ -431,22 +369,21 @@ unregister:
* registered, but this is OK; it will simply return ENOMSG,
* which we will ignore.
*/
- (void) dsl_prop_unregister(ds, "atime", atime_changed_cb, zfsvfs);
- (void) dsl_prop_unregister(ds, "xattr", xattr_changed_cb, zfsvfs);
- (void) dsl_prop_unregister(ds, "recordsize", blksz_changed_cb, zfsvfs);
- (void) dsl_prop_unregister(ds, "readonly", readonly_changed_cb, zfsvfs);
- (void) dsl_prop_unregister(ds, "devices", devices_changed_cb, zfsvfs);
- (void) dsl_prop_unregister(ds, "setuid", setuid_changed_cb, zfsvfs);
- (void) dsl_prop_unregister(ds, "exec", exec_changed_cb, zfsvfs);
- (void) dsl_prop_unregister(ds, "snapdir", snapdir_changed_cb, zfsvfs);
+ (void) dsl_prop_unregister(ds, "atime", atime_changed_cb, zsb);
+ (void) dsl_prop_unregister(ds, "xattr", xattr_changed_cb, zsb);
+ (void) dsl_prop_unregister(ds, "recordsize", blksz_changed_cb, zsb);
+ (void) dsl_prop_unregister(ds, "readonly", readonly_changed_cb, zsb);
+ (void) dsl_prop_unregister(ds, "devices", devices_changed_cb, zsb);
+ (void) dsl_prop_unregister(ds, "setuid", setuid_changed_cb, zsb);
+ (void) dsl_prop_unregister(ds, "exec", exec_changed_cb, zsb);
+ (void) dsl_prop_unregister(ds, "snapdir", snapdir_changed_cb, zsb);
(void) dsl_prop_unregister(ds, "aclinherit", acl_inherit_changed_cb,
- zfsvfs);
- (void) dsl_prop_unregister(ds, "vscan", vscan_changed_cb, zfsvfs);
- return (error);
+ zsb);
+ (void) dsl_prop_unregister(ds, "vscan", vscan_changed_cb, zsb);
+ return (error);
}
EXPORT_SYMBOL(zfs_register_callbacks);
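
The callbacks above all share one shape: a void function taking the zfs_sb_t and the new property value. As a purely hypothetical sketch (the "someprop" property and z_someprop field do not exist), extending the list would mean adding a matching callback, one more dsl_prop_register() link in the error chain, and a corresponding dsl_prop_unregister() in both the unregister path above and zfs_unregister_callbacks():

	static void
	someprop_changed_cb(void *arg, uint64_t newval)
	{
		/* hypothetical field, shown only to illustrate the pattern */
		((zfs_sb_t *)arg)->z_someprop = newval;
	}

	/* in zfs_register_callbacks(), appended to the chain */
	error = error ? error : dsl_prop_register(ds,
	    "someprop", someprop_changed_cb, zsb);
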
-#endif /* HAVE_ZPL */
static int
zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
@@ -497,9 +434,8 @@ zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
return (error);
}
-#ifdef HAVE_ZPL
static void
-fuidstr_to_sid(zfsvfs_t *zfsvfs, const char *fuidstr,
+fuidstr_to_sid(zfs_sb_t *zsb, const char *fuidstr,
char *domainbuf, int buflen, uid_t *ridp)
{
uint64_t fuid;
@@ -507,7 +443,7 @@ fuidstr_to_sid(zfsvfs_t *zfsvfs, const char *fuidstr,
fuid = strtonum(fuidstr, NULL);
- domain = zfs_fuid_find_by_idx(zfsvfs, FUID_INDEX(fuid));
+ domain = zfs_fuid_find_by_idx(zsb, FUID_INDEX(fuid));
if (domain)
(void) strlcpy(domainbuf, domain, buflen);
else
@@ -516,7 +452,7 @@ fuidstr_to_sid(zfsvfs_t *zfsvfs, const char *fuidstr,
}
static uint64_t
-zfs_userquota_prop_to_obj(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type)
+zfs_userquota_prop_to_obj(zfs_sb_t *zsb, zfs_userquota_prop_t type)
{
switch (type) {
case ZFS_PROP_USERUSED:
@@ -524,9 +460,9 @@ zfs_userquota_prop_to_obj(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type)
case ZFS_PROP_GROUPUSED:
return (DMU_GROUPUSED_OBJECT);
case ZFS_PROP_USERQUOTA:
- return (zfsvfs->z_userquota_obj);
+ return (zsb->z_userquota_obj);
case ZFS_PROP_GROUPQUOTA:
- return (zfsvfs->z_groupquota_obj);
+ return (zsb->z_groupquota_obj);
default:
return (ENOTSUP);
}
@@ -534,7 +470,7 @@ zfs_userquota_prop_to_obj(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type)
}
int
-zfs_userspace_many(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
+zfs_userspace_many(zfs_sb_t *zsb, zfs_userquota_prop_t type,
uint64_t *cookiep, void *vbuf, uint64_t *bufsizep)
{
int error;
@@ -543,23 +479,23 @@ zfs_userspace_many(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
zfs_useracct_t *buf = vbuf;
uint64_t obj;
- if (!dmu_objset_userspace_present(zfsvfs->z_os))
+ if (!dmu_objset_userspace_present(zsb->z_os))
return (ENOTSUP);
- obj = zfs_userquota_prop_to_obj(zfsvfs, type);
+ obj = zfs_userquota_prop_to_obj(zsb, type);
if (obj == 0) {
*bufsizep = 0;
return (0);
}
- for (zap_cursor_init_serialized(&zc, zfsvfs->z_os, obj, *cookiep);
+ for (zap_cursor_init_serialized(&zc, zsb->z_os, obj, *cookiep);
(error = zap_cursor_retrieve(&zc, &za)) == 0;
zap_cursor_advance(&zc)) {
if ((uintptr_t)buf - (uintptr_t)vbuf + sizeof (zfs_useracct_t) >
*bufsizep)
break;
- fuidstr_to_sid(zfsvfs, za.za_name,
+ fuidstr_to_sid(zsb, za.za_name,
buf->zu_domain, sizeof (buf->zu_domain), &buf->zu_rid);
buf->zu_space = za.za_first_integer;
@@ -580,14 +516,14 @@ EXPORT_SYMBOL(zfs_userspace_many);
* buf must be big enough (eg, 32 bytes)
*/
static int
-id_to_fuidstr(zfsvfs_t *zfsvfs, const char *domain, uid_t rid,
+id_to_fuidstr(zfs_sb_t *zsb, const char *domain, uid_t rid,
char *buf, boolean_t addok)
{
uint64_t fuid;
int domainid = 0;
if (domain && domain[0]) {
- domainid = zfs_fuid_find_by_domain(zfsvfs, domain, NULL, addok);
+ domainid = zfs_fuid_find_by_domain(zsb, domain, NULL, addok);
if (domainid == -1)
return (ENOENT);
}
@@ -597,7 +533,7 @@ id_to_fuidstr(zfsvfs_t *zfsvfs, const char *domain, uid_t rid,
}
int
-zfs_userspace_one(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
+zfs_userspace_one(zfs_sb_t *zsb, zfs_userquota_prop_t type,
const char *domain, uint64_t rid, uint64_t *valp)
{
char buf[32];
@@ -606,18 +542,18 @@ zfs_userspace_one(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
*valp = 0;
- if (!dmu_objset_userspace_present(zfsvfs->z_os))
+ if (!dmu_objset_userspace_present(zsb->z_os))
return (ENOTSUP);
- obj = zfs_userquota_prop_to_obj(zfsvfs, type);
+ obj = zfs_userquota_prop_to_obj(zsb, type);
if (obj == 0)
return (0);
- err = id_to_fuidstr(zfsvfs, domain, rid, buf, B_FALSE);
+ err = id_to_fuidstr(zsb, domain, rid, buf, B_FALSE);
if (err)
return (err);
- err = zap_lookup(zfsvfs->z_os, obj, buf, 8, 1, valp);
+ err = zap_lookup(zsb->z_os, obj, buf, 8, 1, valp);
if (err == ENOENT)
err = 0;
return (err);
@@ -625,7 +561,7 @@ zfs_userspace_one(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
EXPORT_SYMBOL(zfs_userspace_one);
int
-zfs_set_userquota(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
+zfs_set_userquota(zfs_sb_t *zsb, zfs_userquota_prop_t type,
const char *domain, uint64_t rid, uint64_t quota)
{
char buf[32];
@@ -637,74 +573,74 @@ zfs_set_userquota(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
if (type != ZFS_PROP_USERQUOTA && type != ZFS_PROP_GROUPQUOTA)
return (EINVAL);
- if (zfsvfs->z_version < ZPL_VERSION_USERSPACE)
+ if (zsb->z_version < ZPL_VERSION_USERSPACE)
return (ENOTSUP);
- objp = (type == ZFS_PROP_USERQUOTA) ? &zfsvfs->z_userquota_obj :
- &zfsvfs->z_groupquota_obj;
+ objp = (type == ZFS_PROP_USERQUOTA) ? &zsb->z_userquota_obj :
+ &zsb->z_groupquota_obj;
- err = id_to_fuidstr(zfsvfs, domain, rid, buf, B_TRUE);
+ err = id_to_fuidstr(zsb, domain, rid, buf, B_TRUE);
if (err)
return (err);
- fuid_dirtied = zfsvfs->z_fuid_dirty;
+ fuid_dirtied = zsb->z_fuid_dirty;
- tx = dmu_tx_create(zfsvfs->z_os);
+ tx = dmu_tx_create(zsb->z_os);
dmu_tx_hold_zap(tx, *objp ? *objp : DMU_NEW_OBJECT, B_TRUE, NULL);
if (*objp == 0) {
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
zfs_userquota_prop_prefixes[type]);
}
if (fuid_dirtied)
- zfs_fuid_txhold(zfsvfs, tx);
+ zfs_fuid_txhold(zsb, tx);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err) {
dmu_tx_abort(tx);
return (err);
}
- mutex_enter(&zfsvfs->z_lock);
+ mutex_enter(&zsb->z_lock);
if (*objp == 0) {
- *objp = zap_create(zfsvfs->z_os, DMU_OT_USERGROUP_QUOTA,
+ *objp = zap_create(zsb->z_os, DMU_OT_USERGROUP_QUOTA,
DMU_OT_NONE, 0, tx);
- VERIFY(0 == zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
+ VERIFY(0 == zap_add(zsb->z_os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[type], 8, 1, objp, tx));
}
- mutex_exit(&zfsvfs->z_lock);
+ mutex_exit(&zsb->z_lock);
if (quota == 0) {
- err = zap_remove(zfsvfs->z_os, *objp, buf, tx);
+ err = zap_remove(zsb->z_os, *objp, buf, tx);
if (err == ENOENT)
err = 0;
} else {
- err = zap_update(zfsvfs->z_os, *objp, buf, 8, 1, &quota, tx);
+ err = zap_update(zsb->z_os, *objp, buf, 8, 1, &quota, tx);
}
ASSERT(err == 0);
if (fuid_dirtied)
- zfs_fuid_sync(zfsvfs, tx);
+ zfs_fuid_sync(zsb, tx);
dmu_tx_commit(tx);
return (err);
}
EXPORT_SYMBOL(zfs_set_userquota);
boolean_t
-zfs_fuid_overquota(zfsvfs_t *zfsvfs, boolean_t isgroup, uint64_t fuid)
+zfs_fuid_overquota(zfs_sb_t *zsb, boolean_t isgroup, uint64_t fuid)
{
char buf[32];
uint64_t used, quota, usedobj, quotaobj;
int err;
usedobj = isgroup ? DMU_GROUPUSED_OBJECT : DMU_USERUSED_OBJECT;
- quotaobj = isgroup ? zfsvfs->z_groupquota_obj : zfsvfs->z_userquota_obj;
+ quotaobj = isgroup ? zsb->z_groupquota_obj : zsb->z_userquota_obj;
- if (quotaobj == 0 || zfsvfs->z_replay)
+ if (quotaobj == 0 || zsb->z_replay)
return (B_FALSE);
(void) sprintf(buf, "%llx", (longlong_t)fuid);
- err = zap_lookup(zfsvfs->z_os, quotaobj, buf, 8, 1, &quota);
+ err = zap_lookup(zsb->z_os, quotaobj, buf, 8, 1, &quota);
if (err != 0)
return (B_FALSE);
- err = zap_lookup(zfsvfs->z_os, usedobj, buf, 8, 1, &used);
+ err = zap_lookup(zsb->z_os, usedobj, buf, 8, 1, &used);
if (err != 0)
return (B_FALSE);
return (used >= quota);
@@ -712,40 +648,40 @@ zfs_fuid_overquota(zfsvfs_t *zfsvfs, boolean_t isgroup, uint64_t fuid)
EXPORT_SYMBOL(zfs_fuid_overquota);
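
For example, a plain POSIX uid 1000 maps to FUID 1000 with domain index 0, so both lookups above use the key "3e8"; when the used object charges that key more bytes than the quota object allows, the function returns B_TRUE and callers such as zfs_acl_ids_overquota() ultimately fail the operation with EDQUOT.
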
boolean_t
-zfs_owner_overquota(zfsvfs_t *zfsvfs, znode_t *zp, boolean_t isgroup)
+zfs_owner_overquota(zfs_sb_t *zsb, znode_t *zp, boolean_t isgroup)
{
uint64_t fuid;
uint64_t quotaobj;
- quotaobj = isgroup ? zfsvfs->z_groupquota_obj : zfsvfs->z_userquota_obj;
+ quotaobj = isgroup ? zsb->z_groupquota_obj : zsb->z_userquota_obj;
fuid = isgroup ? zp->z_gid : zp->z_uid;
- if (quotaobj == 0 || zfsvfs->z_replay)
+ if (quotaobj == 0 || zsb->z_replay)
return (B_FALSE);
- return (zfs_fuid_overquota(zfsvfs, isgroup, fuid));
+ return (zfs_fuid_overquota(zsb, isgroup, fuid));
}
EXPORT_SYMBOL(zfs_owner_overquota);
int
-zfsvfs_create(const char *osname, zfsvfs_t **zfvp)
+zfs_sb_create(const char *osname, zfs_sb_t **zsbp)
{
objset_t *os;
- zfsvfs_t *zfsvfs;
+ zfs_sb_t *zsb;
uint64_t zval;
int i, error;
uint64_t sa_obj;
- zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
+ zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_SLEEP);
/*
* We claim to always be readonly so we can open snapshots;
* other ZPL code will prevent us from writing to snapshots.
*/
- error = dmu_objset_own(osname, DMU_OST_ZFS, B_TRUE, zfsvfs, &os);
+ error = dmu_objset_own(osname, DMU_OST_ZFS, B_TRUE, zsb, &os);
if (error) {
- kmem_free(zfsvfs, sizeof (zfsvfs_t));
+ kmem_free(zsb, sizeof (zfs_sb_t));
return (error);
}
@@ -754,48 +690,48 @@ zfsvfs_create(const char *osname, zfsvfs_t **zfvp)
* Should probably make this a kmem cache, shuffle fields,
* and just bzero up to z_hold_mtx[].
*/
- zfsvfs->z_vfs = NULL;
- zfsvfs->z_parent = zfsvfs;
- zfsvfs->z_max_blksz = SPA_MAXBLOCKSIZE;
- zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
- zfsvfs->z_os = os;
+ zsb->z_vfs = NULL;
+ zsb->z_parent = zsb;
+ zsb->z_max_blksz = SPA_MAXBLOCKSIZE;
+ zsb->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
+ zsb->z_os = os;
- error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zfsvfs->z_version);
+ error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zsb->z_version);
if (error) {
goto out;
- } else if (zfsvfs->z_version >
+ } else if (zsb->z_version >
zfs_zpl_version_map(spa_version(dmu_objset_spa(os)))) {
(void) printk("Can't mount a version %lld file system "
"on a version %lld pool\n. Pool must be upgraded to mount "
- "this file system.", (u_longlong_t)zfsvfs->z_version,
+ "this file system.", (u_longlong_t)zsb->z_version,
(u_longlong_t)spa_version(dmu_objset_spa(os)));
error = ENOTSUP;
goto out;
}
if ((error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &zval)) != 0)
goto out;
- zfsvfs->z_norm = (int)zval;
+ zsb->z_norm = (int)zval;
if ((error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &zval)) != 0)
goto out;
- zfsvfs->z_utf8 = (zval != 0);
+ zsb->z_utf8 = (zval != 0);
if ((error = zfs_get_zplprop(os, ZFS_PROP_CASE, &zval)) != 0)
goto out;
- zfsvfs->z_case = (uint_t)zval;
+ zsb->z_case = (uint_t)zval;
/*
* Fold case on file systems that are always or sometimes case
* insensitive.
*/
- if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
- zfsvfs->z_case == ZFS_CASE_MIXED)
- zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;
+ if (zsb->z_case == ZFS_CASE_INSENSITIVE ||
+ zsb->z_case == ZFS_CASE_MIXED)
+ zsb->z_norm |= U8_TEXTPREP_TOUPPER;
- zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
- zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
+ zsb->z_use_fuids = USE_FUIDS(zsb->z_version, zsb->z_os);
+ zsb->z_use_sa = USE_SA(zsb->z_version, zsb->z_os);
- if (zfsvfs->z_use_sa) {
+ if (zsb->z_use_sa) {
/* should either have both of these objects or none */
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
&sa_obj);
@@ -810,83 +746,83 @@ zfsvfs_create(const char *osname, zfsvfs_t **zfvp)
}
error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
- &zfsvfs->z_attr_table);
+ &zsb->z_attr_table);
if (error)
goto out;
- if (zfsvfs->z_version >= ZPL_VERSION_SA)
+ if (zsb->z_version >= ZPL_VERSION_SA)
sa_register_update_callback(os, zfs_sa_upgrade);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
- &zfsvfs->z_root);
+ &zsb->z_root);
if (error)
goto out;
- ASSERT(zfsvfs->z_root != 0);
+ ASSERT(zsb->z_root != 0);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
- &zfsvfs->z_unlinkedobj);
+ &zsb->z_unlinkedobj);
if (error)
goto out;
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
- 8, 1, &zfsvfs->z_userquota_obj);
+ 8, 1, &zsb->z_userquota_obj);
if (error && error != ENOENT)
goto out;
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
- 8, 1, &zfsvfs->z_groupquota_obj);
+ 8, 1, &zsb->z_groupquota_obj);
if (error && error != ENOENT)
goto out;
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
- &zfsvfs->z_fuid_obj);
+ &zsb->z_fuid_obj);
if (error && error != ENOENT)
goto out;
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
- &zfsvfs->z_shares_dir);
+ &zsb->z_shares_dir);
if (error && error != ENOENT)
goto out;
- mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
- mutex_init(&zfsvfs->z_lock, NULL, MUTEX_DEFAULT, NULL);
- list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
+ mutex_init(&zsb->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
+ mutex_init(&zsb->z_lock, NULL, MUTEX_DEFAULT, NULL);
+ list_create(&zsb->z_all_znodes, sizeof (znode_t),
offsetof(znode_t, z_link_node));
- rrw_init(&zfsvfs->z_teardown_lock);
- rw_init(&zfsvfs->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
- rw_init(&zfsvfs->z_fuid_lock, NULL, RW_DEFAULT, NULL);
+ rrw_init(&zsb->z_teardown_lock);
+ rw_init(&zsb->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
+ rw_init(&zsb->z_fuid_lock, NULL, RW_DEFAULT, NULL);
for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
- mutex_init(&zfsvfs->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);
+ mutex_init(&zsb->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);
- *zfvp = zfsvfs;
+ *zsbp = zsb;
return (0);
out:
- dmu_objset_disown(os, zfsvfs);
- *zfvp = NULL;
- kmem_free(zfsvfs, sizeof (zfsvfs_t));
+ dmu_objset_disown(os, zsb);
+ *zsbp = NULL;
+ kmem_free(zsb, sizeof (zfs_sb_t));
return (error);
}
static int
-zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
+zfs_sb_setup(zfs_sb_t *zsb, boolean_t mounting)
{
int error;
- error = zfs_register_callbacks(zfsvfs->z_vfs);
+ error = zfs_register_callbacks(zsb);
if (error)
return (error);
/*
- * Set the objset user_ptr to track its zfsvfs.
+ * Set the objset user_ptr to track its zsb.
*/
- mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
- dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
- mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
+ mutex_enter(&zsb->z_os->os_user_ptr_lock);
+ dmu_objset_set_user(zsb->z_os, zsb);
+ mutex_exit(&zsb->z_os->os_user_ptr_lock);
- zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data);
+ zsb->z_log = zil_open(zsb->z_os, zfs_get_data);
/*
* If we are not mounting (ie: online recv), then we don't
@@ -900,11 +836,11 @@ zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
* During replay we remove the read only flag to
* allow replays to succeed.
*/
- readonly = zfsvfs->z_vfs->vfs_flag & VFS_RDONLY;
+ readonly = zsb->z_vfs->mnt_flags & MNT_READONLY;
if (readonly != 0)
- zfsvfs->z_vfs->vfs_flag &= ~VFS_RDONLY;
+ zsb->z_vfs->mnt_flags &= ~MNT_READONLY;
else
- zfs_unlinked_drain(zfsvfs);
+ zfs_unlinked_drain(zsb);
/*
* Parse and replay the intent log.
@@ -933,157 +869,51 @@ zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
* allocated and in the unlinked set, and there is an
* intent log record saying to allocate it.
*/
- if (spa_writeable(dmu_objset_spa(zfsvfs->z_os))) {
+ if (spa_writeable(dmu_objset_spa(zsb->z_os))) {
if (zil_replay_disable) {
- zil_destroy(zfsvfs->z_log, B_FALSE);
+ zil_destroy(zsb->z_log, B_FALSE);
} else {
- zfsvfs->z_replay = B_TRUE;
- zil_replay(zfsvfs->z_os, zfsvfs,
+ zsb->z_replay = B_TRUE;
+ zil_replay(zsb->z_os, zsb,
zfs_replay_vector);
- zfsvfs->z_replay = B_FALSE;
+ zsb->z_replay = B_FALSE;
}
}
- zfsvfs->z_vfs->vfs_flag |= readonly; /* restore readonly bit */
+ zsb->z_vfs->mnt_flags |= readonly; /* restore readonly bit */
}
return (0);
}
void
-zfsvfs_free(zfsvfs_t *zfsvfs)
+zfs_sb_free(zfs_sb_t *zsb)
{
int i;
- extern krwlock_t zfsvfs_lock; /* in zfs_znode.c */
- /*
- * This is a barrier to prevent the filesystem from going away in
- * zfs_znode_move() until we can safely ensure that the filesystem is
- * not unmounted. We consider the filesystem valid before the barrier
- * and invalid after the barrier.
- */
- rw_enter(&zfsvfs_lock, RW_READER);
- rw_exit(&zfsvfs_lock);
-
- zfs_fuid_destroy(zfsvfs);
+ zfs_fuid_destroy(zsb);
- mutex_destroy(&zfsvfs->z_znodes_lock);
- mutex_destroy(&zfsvfs->z_lock);
- list_destroy(&zfsvfs->z_all_znodes);
- rrw_destroy(&zfsvfs->z_teardown_lock);
- rw_destroy(&zfsvfs->z_teardown_inactive_lock);
- rw_destroy(&zfsvfs->z_fuid_lock);
+ mutex_destroy(&zsb->z_znodes_lock);
+ mutex_destroy(&zsb->z_lock);
+ list_destroy(&zsb->z_all_znodes);
+ rrw_destroy(&zsb->z_teardown_lock);
+ rw_destroy(&zsb->z_teardown_inactive_lock);
+ rw_destroy(&zsb->z_fuid_lock);
for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
- mutex_destroy(&zfsvfs->z_hold_mtx[i]);
- kmem_free(zfsvfs, sizeof (zfsvfs_t));
+ mutex_destroy(&zsb->z_hold_mtx[i]);
+ kmem_free(zsb, sizeof (zfs_sb_t));
}
-#ifdef HAVE_FUID_FEATURES
static void
-zfs_set_fuid_feature(zfsvfs_t *zfsvfs)
+zfs_set_fuid_feature(zfs_sb_t *zsb)
{
- zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
- if (zfsvfs->z_use_fuids && zfsvfs->z_vfs) {
- vfs_set_feature(zfsvfs->z_vfs, VFSFT_XVATTR);
- vfs_set_feature(zfsvfs->z_vfs, VFSFT_SYSATTR_VIEWS);
- vfs_set_feature(zfsvfs->z_vfs, VFSFT_ACEMASKONACCESS);
- vfs_set_feature(zfsvfs->z_vfs, VFSFT_ACLONCREATE);
- vfs_set_feature(zfsvfs->z_vfs, VFSFT_ACCESS_FILTER);
- vfs_set_feature(zfsvfs->z_vfs, VFSFT_REPARSE);
- }
- zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
+ zsb->z_use_fuids = USE_FUIDS(zsb->z_version, zsb->z_os);
+ zsb->z_use_sa = USE_SA(zsb->z_version, zsb->z_os);
}
-#endif /* HAVE_FUID_FEATURES */
-
-int
-zfs_domount(vfs_t *vfsp, char *osname)
-{
- uint64_t recordsize, fsid_guid;
- int error = 0;
- zfsvfs_t *zfsvfs;
-
- ASSERT(vfsp);
- ASSERT(osname);
-
- error = zfsvfs_create(osname, &zfsvfs);
- if (error)
- return (error);
- zfsvfs->z_vfs = vfsp;
-
- /* Initialize the generic filesystem structure. */
- vfsp->vfs_bcount = 0;
- vfsp->vfs_data = NULL;
-
- if ((error = dsl_prop_get_integer(osname, "recordsize",
- &recordsize, NULL)))
- goto out;
-
- vfsp->vfs_bsize = recordsize;
- vfsp->vfs_flag |= VFS_NOTRUNC;
- vfsp->vfs_data = zfsvfs;
-
- /*
- * The fsid is 64 bits, composed of an 8-bit fs type, which
- * separates our fsid from any other filesystem types, and a
- * 56-bit objset unique ID. The objset unique ID is unique to
- * all objsets open on this system, provided by unique_create().
- * The 8-bit fs type must be put in the low bits of fsid[1]
- * because that's where other Solaris filesystems put it.
- */
- fsid_guid = dmu_objset_fsid_guid(zfsvfs->z_os);
- ASSERT((fsid_guid & ~((1ULL<<56)-1)) == 0);
- vfsp->vfs_fsid.val[0] = fsid_guid;
- vfsp->vfs_fsid.val[1] = ((fsid_guid>>32) << 8);
-
-#ifdef HAVE_FUID_FEATURES
- /*
- * Set features for file system.
- */
- zfs_set_fuid_feature(zfsvfs);
- if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
- vfs_set_feature(vfsp, VFSFT_DIRENTFLAGS);
- vfs_set_feature(vfsp, VFSFT_CASEINSENSITIVE);
- vfs_set_feature(vfsp, VFSFT_NOCASESENSITIVE);
- } else if (zfsvfs->z_case == ZFS_CASE_MIXED) {
- vfs_set_feature(vfsp, VFSFT_DIRENTFLAGS);
- vfs_set_feature(vfsp, VFSFT_CASEINSENSITIVE);
- }
- vfs_set_feature(vfsp, VFSFT_ZEROCOPY_SUPPORTED);
-#endif /* HAVE_FUID_FEATURES */
-
- if (dmu_objset_is_snapshot(zfsvfs->z_os)) {
- uint64_t pval;
-
- atime_changed_cb(zfsvfs, B_FALSE);
- readonly_changed_cb(zfsvfs, B_TRUE);
- if ((error = dsl_prop_get_integer(osname,"xattr",&pval,NULL)))
- goto out;
- xattr_changed_cb(zfsvfs, pval);
- zfsvfs->z_issnap = B_TRUE;
- zfsvfs->z_os->os_sync = ZFS_SYNC_DISABLED;
-
- mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
- dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
- mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
- } else {
- error = zfsvfs_setup(zfsvfs, B_TRUE);
- }
-
- if (!zfsvfs->z_issnap)
- zfsctl_create(zfsvfs);
-out:
- if (error) {
- dmu_objset_disown(zfsvfs->z_os, zfsvfs);
- zfsvfs_free(zfsvfs);
- }
-
- return (error);
-}
-EXPORT_SYMBOL(zfs_domount);
void
-zfs_unregister_callbacks(zfsvfs_t *zfsvfs)
+zfs_unregister_callbacks(zfs_sb_t *zsb)
{
- objset_t *os = zfsvfs->z_os;
+ objset_t *os = zsb->z_os;
struct dsl_dataset *ds;
/*
@@ -1092,34 +922,34 @@ zfs_unregister_callbacks(zfsvfs_t *zfsvfs)
if (!dmu_objset_is_snapshot(os)) {
ds = dmu_objset_ds(os);
VERIFY(dsl_prop_unregister(ds, "atime", atime_changed_cb,
- zfsvfs) == 0);
+ zsb) == 0);
VERIFY(dsl_prop_unregister(ds, "xattr", xattr_changed_cb,
- zfsvfs) == 0);
+ zsb) == 0);
VERIFY(dsl_prop_unregister(ds, "recordsize", blksz_changed_cb,
- zfsvfs) == 0);
+ zsb) == 0);
VERIFY(dsl_prop_unregister(ds, "readonly", readonly_changed_cb,
- zfsvfs) == 0);
+ zsb) == 0);
VERIFY(dsl_prop_unregister(ds, "devices", devices_changed_cb,
- zfsvfs) == 0);
+ zsb) == 0);
VERIFY(dsl_prop_unregister(ds, "setuid", setuid_changed_cb,
- zfsvfs) == 0);
+ zsb) == 0);
VERIFY(dsl_prop_unregister(ds, "exec", exec_changed_cb,
- zfsvfs) == 0);
+ zsb) == 0);
VERIFY(dsl_prop_unregister(ds, "snapdir", snapdir_changed_cb,
- zfsvfs) == 0);
+ zsb) == 0);
VERIFY(dsl_prop_unregister(ds, "aclinherit",
- acl_inherit_changed_cb, zfsvfs) == 0);
+ acl_inherit_changed_cb, zsb) == 0);
VERIFY(dsl_prop_unregister(ds, "vscan",
- vscan_changed_cb, zfsvfs) == 0);
+ vscan_changed_cb, zsb) == 0);
}
}
EXPORT_SYMBOL(zfs_unregister_callbacks);
@@ -1155,15 +985,15 @@ zfs_check_global_label(const char *dsname, const char *hexsl)
#endif /* HAVE_MLSLABEL */
int
-zfs_statvfs(vfs_t *vfsp, struct statvfs64 *statp)
+zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
{
- zfsvfs_t *zfsvfs = vfsp->vfs_data;
- dev32_t d32;
+ zfs_sb_t *zsb = dentry->d_sb->s_fs_info;
uint64_t refdbytes, availbytes, usedobjs, availobjs;
+ uint32_t bshift;
- ZFS_ENTER(zfsvfs);
+ ZFS_ENTER(zsb);
- dmu_objset_space(zfsvfs->z_os,
+ dmu_objset_space(zsb->z_os,
&refdbytes, &availbytes, &usedobjs, &availobjs);
/*
@@ -1172,16 +1002,17 @@ zfs_statvfs(vfs_t *vfsp, struct statvfs64 *statp)
* and we report our blocksize as the filesystem's maximum blocksize.
*/
statp->f_frsize = 1UL << SPA_MINBLOCKSHIFT;
- statp->f_bsize = zfsvfs->z_max_blksz;
+ statp->f_bsize = zsb->z_max_blksz;
+ bshift = fls(statp->f_bsize) - 1;
/*
- * The following report "total" blocks of various kinds in the
- * file system, but reported in terms of f_frsize - the
- * "fragment" size.
+ * The following report "total" blocks of various kinds in
+ * the file system, but reported in terms of f_bsize - the
+ * "preferred" size.
*/
- statp->f_blocks = (refdbytes + availbytes) >> SPA_MINBLOCKSHIFT;
- statp->f_bfree = availbytes >> SPA_MINBLOCKSHIFT;
+ statp->f_blocks = (refdbytes + availbytes) >> bshift;
+ statp->f_bfree = availbytes >> bshift;
statp->f_bavail = statp->f_bfree; /* no root reservation */
/*
@@ -1193,63 +1024,54 @@ zfs_statvfs(vfs_t *vfsp, struct statvfs64 *statp)
* and the number of blocks (each object will take at least a block).
*/
statp->f_ffree = MIN(availobjs, statp->f_bfree);
- statp->f_favail = statp->f_ffree; /* no "root reservation" */
statp->f_files = statp->f_ffree + usedobjs;
-
- (void) cmpldev(&d32, vfsp->vfs_dev);
- statp->f_fsid = d32;
+ statp->f_fsid.val[0] = 0; /* XXX: Map up some unique ID */
+ statp->f_fsid.val[1] = 0;
+ statp->f_type = ZFS_SUPER_MAGIC;
+ statp->f_namelen = ZFS_MAXNAMELEN;
/*
- * We're a zfs filesystem.
- */
- (void) strcpy(statp->f_basetype, MNTTYPE_ZFS);
-
- statp->f_flag = vf_to_stf(vfsp->vfs_flag);
-
- statp->f_namemax = ZFS_MAXNAMELEN;
-
- /*
- * We have all of 32 characters to stuff a string here.
+ * We have all of 40 characters to stuff a string here.
* Is there anything useful we could/should provide?
*/
- bzero(statp->f_fstr, sizeof (statp->f_fstr));
+ bzero(statp->f_spare, sizeof (statp->f_spare));
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (0);
}
EXPORT_SYMBOL(zfs_statvfs);
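
To make the bshift arithmetic above concrete: with the default 128K z_max_blksz, f_bsize is 131072, fls(131072) is 18, so bshift is 17; a dataset with, say, 1 GiB of referenced plus available space is then reported as (1 << 30) >> 17 = 8192 blocks of 128 KiB, while f_frsize remains the 512-byte (SPA_MINBLOCKSHIFT) allocation granularity.
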
int
-zfs_root(vfs_t *vfsp, vnode_t **vpp)
+zfs_root(zfs_sb_t *zsb, struct inode **ipp)
{
- zfsvfs_t *zfsvfs = vfsp->vfs_data;
znode_t *rootzp;
int error;
- ZFS_ENTER(zfsvfs);
+ ZFS_ENTER(zsb);
- error = zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp);
+ error = zfs_zget(zsb, zsb->z_root, &rootzp);
if (error == 0)
- *vpp = ZTOV(rootzp);
+ *ipp = ZTOI(rootzp);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
EXPORT_SYMBOL(zfs_root);
/*
- * Teardown the zfsvfs::z_os.
+ * Teardown the zfs_sb_t::z_os.
*
 * Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
* and 'z_teardown_inactive_lock' held.
*/
-static int
-zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
+int
+zfsvfs_teardown(zfs_sb_t *zsb, boolean_t unmounting)
{
znode_t *zp;
- rrw_enter(&zfsvfs->z_teardown_lock, RW_WRITER, FTAG);
+ rrw_enter(&zsb->z_teardown_lock, RW_WRITER, FTAG);
+#ifdef HAVE_DNLC
if (!unmounting) {
/*
* We purge the parent filesystem's vfsp as the parent
@@ -1257,28 +1079,29 @@ zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
* v_vfsp set to the parent's filesystem's vfsp. Note,
* 'z_parent' is self referential for non-snapshots.
*/
- (void) dnlc_purge_vfsp(zfsvfs->z_parent->z_vfs, 0);
+ (void) dnlc_purge_vfsp(zsb->z_parent->z_vfs, 0);
}
+#endif /* HAVE_DNLC */
/*
* Close the zil. NB: Can't close the zil while zfs_inactive
* threads are blocked as zil_close can call zfs_inactive.
*/
- if (zfsvfs->z_log) {
- zil_close(zfsvfs->z_log);
- zfsvfs->z_log = NULL;
+ if (zsb->z_log) {
+ zil_close(zsb->z_log);
+ zsb->z_log = NULL;
}
- rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_WRITER);
+ rw_enter(&zsb->z_teardown_inactive_lock, RW_WRITER);
/*
* If we are not unmounting (ie: online recv) and someone already
* unmounted this file system while we were doing the switcheroo,
* or a reopen of z_os failed then just bail out now.
*/
- if (!unmounting && (zfsvfs->z_unmounted || zfsvfs->z_os == NULL)) {
- rw_exit(&zfsvfs->z_teardown_inactive_lock);
- rrw_exit(&zfsvfs->z_teardown_lock, FTAG);
+ if (!unmounting && (zsb->z_unmounted || zsb->z_os == NULL)) {
+ rw_exit(&zsb->z_teardown_inactive_lock);
+ rrw_exit(&zsb->z_teardown_lock, FTAG);
return (EIO);
}
@@ -1289,14 +1112,14 @@ zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
*
* Release all holds on dbufs.
*/
- mutex_enter(&zfsvfs->z_znodes_lock);
- for (zp = list_head(&zfsvfs->z_all_znodes); zp != NULL;
- zp = list_next(&zfsvfs->z_all_znodes, zp))
+ mutex_enter(&zsb->z_znodes_lock);
+ for (zp = list_head(&zsb->z_all_znodes); zp != NULL;
+ zp = list_next(&zsb->z_all_znodes, zp))
if (zp->z_sa_hdl) {
- ASSERT(ZTOV(zp)->v_count > 0);
+ ASSERT(atomic_read(&ZTOI(zp)->i_count) > 0);
zfs_znode_dmu_fini(zp);
}
- mutex_exit(&zfsvfs->z_znodes_lock);
+ mutex_exit(&zsb->z_znodes_lock);
/*
* If we are unmounting, set the unmounted flag and let new vops
@@ -1304,96 +1127,142 @@ zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
* other vops will fail with EIO.
*/
if (unmounting) {
- zfsvfs->z_unmounted = B_TRUE;
- rrw_exit(&zfsvfs->z_teardown_lock, FTAG);
- rw_exit(&zfsvfs->z_teardown_inactive_lock);
+ zsb->z_unmounted = B_TRUE;
+ rrw_exit(&zsb->z_teardown_lock, FTAG);
+ rw_exit(&zsb->z_teardown_inactive_lock);
}
/*
* z_os will be NULL if there was an error in attempting to reopen
- * zfsvfs, so just return as the properties had already been
+ * zsb, so just return as the properties had already been
+ *
* unregistered and cached data had been evicted before.
*/
- if (zfsvfs->z_os == NULL)
+ if (zsb->z_os == NULL)
return (0);
/*
* Unregister properties.
*/
- zfs_unregister_callbacks(zfsvfs);
+ zfs_unregister_callbacks(zsb);
/*
* Evict cached data
*/
- if (dmu_objset_is_dirty_anywhere(zfsvfs->z_os))
- if (!(zfsvfs->z_vfs->vfs_flag & VFS_RDONLY))
- txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
- (void) dmu_objset_evict_dbufs(zfsvfs->z_os);
+ if (dmu_objset_is_dirty_anywhere(zsb->z_os))
+ if (!(zsb->z_vfs->mnt_flags & MNT_READONLY))
+ txg_wait_synced(dmu_objset_pool(zsb->z_os), 0);
+ (void) dmu_objset_evict_dbufs(zsb->z_os);
return (0);
}
-/*ARGSUSED*/
int
-zfs_umount(vfs_t *vfsp, int fflag, cred_t *cr)
+zfs_domount(struct super_block *sb, void *data, int silent)
{
- zfsvfs_t *zfsvfs = vfsp->vfs_data;
- objset_t *os;
- int ret;
-
- ret = secpolicy_fs_unmount(cr, vfsp);
- if (ret) {
- if (dsl_deleg_access((char *)refstr_value(vfsp->vfs_resource),
- ZFS_DELEG_PERM_MOUNT, cr))
- return (ret);
- }
+ zpl_mount_data_t *zmd = data;
+ const char *osname = zmd->z_osname;
+ zfs_sb_t *zsb;
+ struct inode *root_inode;
+ uint64_t recordsize;
+ int error;
/*
- * We purge the parent filesystem's vfsp as the parent filesystem
- * and all of its snapshots have their vnode's v_vfsp set to the
- * parent's filesystem's vfsp. Note, 'z_parent' is self
- * referential for non-snapshots.
+ * Linux allows multiple vfs mounts per super block. However, the
+ * zfs_sb_t only contains a pointer for a single vfs mount. In the
+ * long term this back reference could be extended to a list of
+ * vfs mounts if a hook were added to the kernel to notify us when
+ * a vfsmount is destroyed. Until then we must limit the number
+ * of mounts per super block to one.
*/
- (void) dnlc_purge_vfsp(zfsvfs->z_parent->z_vfs, 0);
+ if (atomic_read(&sb->s_active) > 1)
+ return (EBUSY);
- /*
- * Unmount any snapshots mounted under .zfs before unmounting the
- * dataset itself.
- */
- if (zfsvfs->z_ctldir != NULL &&
- (ret = zfsctl_umount_snapshots(vfsp, fflag, cr)) != 0) {
- return (ret);
+ error = zfs_sb_create(osname, &zsb);
+ if (error)
+ return (error);
+
+ if ((error = dsl_prop_get_integer(osname, "recordsize",
+ &recordsize, NULL)))
+ goto out;
+
+ zsb->z_sb = sb;
+ zsb->z_vfs = zmd->z_vfs;
+ sb->s_fs_info = zsb;
+ sb->s_magic = ZFS_SUPER_MAGIC;
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sb->s_time_gran = 1;
+ sb->s_blocksize = recordsize;
+ sb->s_blocksize_bits = ilog2(recordsize);
+
+ /* Set callback operations for the file system. */
+ sb->s_op = &zpl_super_operations;
+ sb->s_xattr = zpl_xattr_handlers;
+#ifdef HAVE_EXPORTS
+ sb->s_export_op = &zpl_export_operations;
+#endif /* HAVE_EXPORTS */
+
+ /* Set features for file system. */
+ zfs_set_fuid_feature(zsb);
+
+ if (dmu_objset_is_snapshot(zsb->z_os)) {
+ uint64_t pval;
+
+ atime_changed_cb(zsb, B_FALSE);
+ readonly_changed_cb(zsb, B_TRUE);
+ if ((error = dsl_prop_get_integer(osname,"xattr",&pval,NULL)))
+ goto out;
+ xattr_changed_cb(zsb, pval);
+ zsb->z_issnap = B_TRUE;
+ zsb->z_os->os_sync = ZFS_SYNC_DISABLED;
+
+ mutex_enter(&zsb->z_os->os_user_ptr_lock);
+ dmu_objset_set_user(zsb->z_os, zsb);
+ mutex_exit(&zsb->z_os->os_user_ptr_lock);
+ } else {
+ error = zfs_sb_setup(zsb, B_TRUE);
+#ifdef HAVE_SNAPSHOT
+ (void) zfs_snap_create(zsb);
+#endif /* HAVE_SNAPSHOT */
}
- if (!(fflag & MS_FORCE)) {
- /*
- * Check the number of active vnodes in the file system.
- * Our count is maintained in the vfs structure, but the
- * number is off by 1 to indicate a hold on the vfs
- * structure itself.
- *
- * The '.zfs' directory maintains a reference of its
- * own, and any active references underneath are
- * reflected in the vnode count.
- */
- if (zfsvfs->z_ctldir == NULL) {
- if (vfsp->vfs_count > 1)
- return (EBUSY);
- } else {
- if (vfsp->vfs_count > 2 ||
- zfsvfs->z_ctldir->v_count > 1)
- return (EBUSY);
- }
+ /* Allocate a root inode for the filesystem. */
+ error = zfs_root(zsb, &root_inode);
+ if (error) {
+ (void) zfs_umount(sb);
+ goto out;
}
- vfsp->vfs_flag |= VFS_UNMOUNTED;
+ /* Allocate a root dentry for the filesystem */
+ sb->s_root = d_alloc_root(root_inode);
+ if (sb->s_root == NULL) {
+ (void) zfs_umount(sb);
+ error = ENOMEM;
+ goto out;
+ }
+out:
+ if (error) {
+ dmu_objset_disown(zsb->z_os, zsb);
+ zfs_sb_free(zsb);
+ }
- VERIFY(zfsvfs_teardown(zfsvfs, B_TRUE) == 0);
- os = zfsvfs->z_os;
+ return (error);
+}
+EXPORT_SYMBOL(zfs_domount);
+
+/*ARGSUSED*/
+int
+zfs_umount(struct super_block *sb)
+{
+ zfs_sb_t *zsb = sb->s_fs_info;
+ objset_t *os;
+
+ VERIFY(zfsvfs_teardown(zsb, B_TRUE) == 0);
+ os = zsb->z_os;
/*
* z_os will be NULL if there was an error in
- * attempting to reopen zfsvfs.
+ * attempting to reopen zsb.
*/
if (os != NULL) {
/*
@@ -1406,33 +1275,28 @@ zfs_umount(vfs_t *vfsp, int fflag, cred_t *cr)
/*
* Finally release the objset
*/
- dmu_objset_disown(os, zfsvfs);
+ dmu_objset_disown(os, zsb);
}
- /*
- * We can now safely destroy the '.zfs' directory node.
- */
- if (zfsvfs->z_ctldir != NULL)
- zfsctl_destroy(zfsvfs);
-
+ zfs_sb_free(zsb);
return (0);
}
EXPORT_SYMBOL(zfs_umount);
int
-zfs_vget(vfs_t *vfsp, vnode_t **vpp, fid_t *fidp)
+zfs_vget(struct vfsmount *vfsp, struct inode **ipp, fid_t *fidp)
{
- zfsvfs_t *zfsvfs = vfsp->vfs_data;
+ zfs_sb_t *zsb = VTOZSB(vfsp);
znode_t *zp;
uint64_t object = 0;
uint64_t fid_gen = 0;
uint64_t gen_mask;
uint64_t zp_gen;
- int i, err;
+ int i, err;
- *vpp = NULL;
+ *ipp = NULL;
- ZFS_ENTER(zfsvfs);
+ ZFS_ENTER(zsb);
if (fidp->fid_len == LONG_FID_LEN) {
zfid_long_t *zlfid = (zfid_long_t *)fidp;
@@ -1445,12 +1309,14 @@ zfs_vget(vfs_t *vfsp, vnode_t **vpp, fid_t *fidp)
for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
setgen |= ((uint64_t)zlfid->zf_setgen[i]) << (8 * i);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
- err = zfsctl_lookup_objset(vfsp, objsetid, &zfsvfs);
+#ifdef HAVE_SNAPSHOT
+ err = zfsctl_lookup_objset(vfsp, objsetid, &zsb);
if (err)
return (EINVAL);
- ZFS_ENTER(zfsvfs);
+#endif /* HAVE_SNAPSHOT */
+ ZFS_ENTER(zsb);
}
if (fidp->fid_len == SHORT_FID_LEN || fidp->fid_len == LONG_FID_LEN) {
@@ -1462,103 +1328,104 @@ zfs_vget(vfs_t *vfsp, vnode_t **vpp, fid_t *fidp)
for (i = 0; i < sizeof (zfid->zf_gen); i++)
fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
} else {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (EINVAL);
}
+#ifdef HAVE_SNAPSHOT
/* A zero fid_gen means we are in the .zfs control directories */
if (fid_gen == 0 &&
(object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) {
- *vpp = zfsvfs->z_ctldir;
- ASSERT(*vpp != NULL);
+ *ipp = zsb->z_ctldir;
+ ASSERT(*ipp != NULL);
if (object == ZFSCTL_INO_SNAPDIR) {
- VERIFY(zfsctl_root_lookup(*vpp, "snapshot", vpp, NULL,
+ VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp, NULL,
0, NULL, NULL, NULL, NULL, NULL) == 0);
} else {
- VN_HOLD(*vpp);
+ igrab(*ipp);
}
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (0);
}
+#endif /* HAVE_SNAPSHOT */
gen_mask = -1ULL >> (64 - 8 * i);
dprintf("getting %llu [%u mask %llx]\n", object, fid_gen, gen_mask);
- if ((err = zfs_zget(zfsvfs, object, &zp))) {
- ZFS_EXIT(zfsvfs);
+ if ((err = zfs_zget(zsb, object, &zp))) {
+ ZFS_EXIT(zsb);
return (err);
}
- (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
+ (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zsb), &zp_gen,
sizeof (uint64_t));
zp_gen = zp_gen & gen_mask;
if (zp_gen == 0)
zp_gen = 1;
if (zp->z_unlinked || zp_gen != fid_gen) {
dprintf("znode gen (%u) != fid gen (%u)\n", zp_gen, fid_gen);
- VN_RELE(ZTOV(zp));
- ZFS_EXIT(zfsvfs);
+ iput(ZTOI(zp));
+ ZFS_EXIT(zsb);
return (EINVAL);
}
- *vpp = ZTOV(zp);
- if (*vpp)
- zfs_inode_update(VTOZ(*vpp));
+ *ipp = ZTOI(zp);
+ if (*ipp)
+ zfs_inode_update(ITOZ(*ipp));
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (0);
}
EXPORT_SYMBOL(zfs_vget);
/*
- * Block out VOPs and close zfsvfs_t::z_os
+ * Block out VOPs and close zfs_sb_t::z_os
*
* Note, if successful, then we return with the 'z_teardown_lock' and
* 'z_teardown_inactive_lock' write held.
*/
int
-zfs_suspend_fs(zfsvfs_t *zfsvfs)
+zfs_suspend_fs(zfs_sb_t *zsb)
{
int error;
- if ((error = zfsvfs_teardown(zfsvfs, B_FALSE)) != 0)
+ if ((error = zfsvfs_teardown(zsb, B_FALSE)) != 0)
return (error);
- dmu_objset_disown(zfsvfs->z_os, zfsvfs);
+ dmu_objset_disown(zsb->z_os, zsb);
return (0);
}
EXPORT_SYMBOL(zfs_suspend_fs);
/*
- * Reopen zfsvfs_t::z_os and release VOPs.
+ * Reopen zfs_sb_t::z_os and release VOPs.
*/
int
-zfs_resume_fs(zfsvfs_t *zfsvfs, const char *osname)
+zfs_resume_fs(zfs_sb_t *zsb, const char *osname)
{
int err, err2;
- ASSERT(RRW_WRITE_HELD(&zfsvfs->z_teardown_lock));
- ASSERT(RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock));
+ ASSERT(RRW_WRITE_HELD(&zsb->z_teardown_lock));
+ ASSERT(RW_WRITE_HELD(&zsb->z_teardown_inactive_lock));
- err = dmu_objset_own(osname, DMU_OST_ZFS, B_FALSE, zfsvfs,
- &zfsvfs->z_os);
+ err = dmu_objset_own(osname, DMU_OST_ZFS, B_FALSE, zsb, &zsb->z_os);
if (err) {
- zfsvfs->z_os = NULL;
+ zsb->z_os = NULL;
} else {
znode_t *zp;
uint64_t sa_obj = 0;
- err2 = zap_lookup(zfsvfs->z_os, MASTER_NODE_OBJ,
+ err2 = zap_lookup(zsb->z_os, MASTER_NODE_OBJ,
ZFS_SA_ATTRS, 8, 1, &sa_obj);
- if ((err || err2) && zfsvfs->z_version >= ZPL_VERSION_SA)
+ if ((err || err2) && zsb->z_version >= ZPL_VERSION_SA)
goto bail;
- if ((err = sa_setup(zfsvfs->z_os, sa_obj,
- zfs_attr_table, ZPL_END, &zfsvfs->z_attr_table)) != 0)
+ if ((err = sa_setup(zsb->z_os, sa_obj,
+ zfs_attr_table, ZPL_END, &zsb->z_attr_table)) != 0)
goto bail;
- VERIFY(zfsvfs_setup(zfsvfs, B_FALSE) == 0);
+ VERIFY(zfs_sb_setup(zsb, B_FALSE) == 0);
/*
* Attempt to re-establish all the active znodes with
@@ -1566,78 +1433,51 @@ zfs_resume_fs(zfsvfs_t *zfsvfs, const char *osname)
* any potential callers discover that via ZFS_ENTER_VERIFY_VP
* when they try to use their znode.
*/
- mutex_enter(&zfsvfs->z_znodes_lock);
- for (zp = list_head(&zfsvfs->z_all_znodes); zp;
- zp = list_next(&zfsvfs->z_all_znodes, zp)) {
+ mutex_enter(&zsb->z_znodes_lock);
+ for (zp = list_head(&zsb->z_all_znodes); zp;
+ zp = list_next(&zsb->z_all_znodes, zp)) {
(void) zfs_rezget(zp);
}
- mutex_exit(&zfsvfs->z_znodes_lock);
+ mutex_exit(&zsb->z_znodes_lock);
}
bail:
/* release the VOPs */
- rw_exit(&zfsvfs->z_teardown_inactive_lock);
- rrw_exit(&zfsvfs->z_teardown_lock, FTAG);
+ rw_exit(&zsb->z_teardown_inactive_lock);
+ rrw_exit(&zsb->z_teardown_lock, FTAG);
if (err) {
/*
- * Since we couldn't reopen zfsvfs::z_os, force
+ * Since we couldn't reopen zfs_sb_t::z_os, force
* unmount this file system.
*/
- if (vn_vfswlock(zfsvfs->z_vfs->vfs_vnodecovered) == 0)
- (void) dounmount(zfsvfs->z_vfs, MS_FORCE, CRED());
+ (void) zfs_umount(zsb->z_sb);
}
return (err);
}
EXPORT_SYMBOL(zfs_resume_fs);
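
These two helpers are intended to be used as a pair around operations that must swap the objset out from under a mounted filesystem, an online 'zfs recv' being the canonical case. A rough sketch of the pairing, with error handling trimmed and the zsb/osname locals assumed to be set up by the caller:

	error = zfs_suspend_fs(zsb);	/* closes z_os, returns with teardown locks write held */
	if (error == 0) {
		/* ... swap in the received dataset contents ... */
		error = zfs_resume_fs(zsb, osname);	/* reopen z_os, rezget live znodes */
	}
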
-static void
-zfs_freevfs(vfs_t *vfsp)
-{
- zfsvfs_t *zfsvfs = vfsp->vfs_data;
-
- zfsvfs_free(zfsvfs);
-}
-#endif /* HAVE_ZPL */
-
-void
-zfs_init(void)
-{
- zfsctl_init();
- zfs_znode_init();
-
- dmu_objset_register_type(DMU_OST_ZFS, zfs_space_delta_cb);
-}
-
-void
-zfs_fini(void)
-{
- zfsctl_fini();
- zfs_znode_fini();
-}
-
-#ifdef HAVE_ZPL
int
-zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
+zfs_set_version(zfs_sb_t *zsb, uint64_t newvers)
{
int error;
- objset_t *os = zfsvfs->z_os;
+ objset_t *os = zsb->z_os;
dmu_tx_t *tx;
if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
return (EINVAL);
- if (newvers < zfsvfs->z_version)
+ if (newvers < zsb->z_version)
return (EINVAL);
if (zfs_spa_version_map(newvers) >
- spa_version(dmu_objset_spa(zfsvfs->z_os)))
+ spa_version(dmu_objset_spa(zsb->z_os)))
return (ENOTSUP);
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
- if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
+ if (newvers >= ZPL_VERSION_SA && !zsb->z_use_sa) {
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
ZFS_SA_ATTRS);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
@@ -1656,10 +1496,10 @@ zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
return (error);
}
- if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
+ if (newvers >= ZPL_VERSION_SA && !zsb->z_use_sa) {
uint64_t sa_obj;
- ASSERT3U(spa_version(dmu_objset_spa(zfsvfs->z_os)), >=,
+ ASSERT3U(spa_version(dmu_objset_spa(zsb->z_os)), >=,
SPA_VERSION_SA);
sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
DMU_OT_NONE, 0, tx);
@@ -1674,21 +1514,18 @@ zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
spa_history_log_internal(LOG_DS_UPGRADE,
dmu_objset_spa(os), tx, "oldver=%llu newver=%llu dataset = %llu",
- zfsvfs->z_version, newvers, dmu_objset_id(os));
+ zsb->z_version, newvers, dmu_objset_id(os));
dmu_tx_commit(tx);
- zfsvfs->z_version = newvers;
+ zsb->z_version = newvers;
-#ifdef HAVE_FUID_FEATURES
- if (zfsvfs->z_version >= ZPL_VERSION_FUID)
- zfs_set_fuid_feature(zfsvfs);
-#endif /* HAVE_FUID_FEATURES */
+ if (zsb->z_version >= ZPL_VERSION_FUID)
+ zfs_set_fuid_feature(zsb);
return (0);
}
EXPORT_SYMBOL(zfs_set_version);
-#endif /* HAVE_ZPL */
/*
* Read a property stored within the master node.
@@ -1731,3 +1568,18 @@ zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
}
return (error);
}
+
+void
+zfs_init(void)
+{
+ zfs_znode_init();
+ dmu_objset_register_type(DMU_OST_ZFS, zfs_space_delta_cb);
+ register_filesystem(&zpl_fs_type);
+}
+
+void
+zfs_fini(void)
+{
+ unregister_filesystem(&zpl_fs_type);
+ zfs_znode_fini();
+}
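
register_filesystem() hands the kernel the "zfs" file system type; zpl_fs_type itself is defined in the zpl layer and is outside this diff, but as a sketch (callback names assumed, not taken from this change) it amounts to something like the following on a 2.6-era kernel:

	static struct file_system_type zpl_fs_type = {
		.owner   = THIS_MODULE,
		.name    = "zfs",
		.get_sb  = zpl_get_sb,		/* assumed name; ends up calling zfs_domount() */
		.kill_sb = zpl_kill_sb,		/* assumed name; ends up calling zfs_umount() */
	};
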
diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
index 764f53040..a8019ba5c 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -25,7 +25,6 @@
/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
-#ifdef HAVE_ZPL
#include <sys/types.h>
#include <sys/param.h>
@@ -35,7 +34,6 @@
#include <sys/resource.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
-#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
@@ -43,13 +41,7 @@
#include <sys/uio.h>
#include <sys/vmsystm.h>
#include <sys/atomic.h>
-#include <sys/vm.h>
-#include <vm/seg_vn.h>
#include <vm/pvn.h>
-#include <vm/as.h>
-#include <vm/kpm.h>
-#include <vm/seg_kpm.h>
-#include <sys/mman.h>
#include <sys/pathname.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
@@ -93,12 +85,12 @@
* to freed memory. The example below illustrates the following Big Rules:
*
* (1) A check must be made in each zfs thread for a mounted file system.
- * This is done avoiding races using ZFS_ENTER(zfsvfs).
- * A ZFS_EXIT(zfsvfs) is needed before all returns. Any znodes
+ * This is done avoiding races using ZFS_ENTER(zsb).
+ * A ZFS_EXIT(zsb) is needed before all returns. Any znodes
* must be checked with ZFS_VERIFY_ZP(zp). Both of these macros
* can return EIO from the calling function.
*
- * (2) VN_RELE() should always be the last thing except for zil_commit()
+ * (2) iput() should always be the last thing except for zil_commit()
* (if necessary) and ZFS_EXIT(). This is for 3 reasons:
* First, if it's the last reference, the vnode/znode
* can be freed, so the zp may point to freed memory. Second, the last
@@ -106,7 +98,7 @@
* pushing cached pages (which acquires range locks) and syncing out
* cached atime changes. Third, zfs_zinactive() may require a new tx,
* which could deadlock the system if you were already holding one.
- * If you must call VN_RELE() within a tx then use VN_RELE_ASYNC().
+ * If you must call iput() within a tx then use iput_ASYNC().
*
* (3) All range locks must be grabbed before calling dmu_tx_assign(),
* as they can span dmu_tx_assign() calls.
@@ -122,7 +114,7 @@
* Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
* forever, because the previous txg can't quiesce until B's tx commits.
*
- * If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
+ * If dmu_tx_assign() returns ERESTART and zsb->z_assign is TXG_NOWAIT,
* then drop all locks, call dmu_tx_wait(), and try again.
*
* (5) If the operation succeeded, generate the intent log entry for it
@@ -139,9 +131,9 @@
*
* In general, this is how things should be ordered in each vnode op:
*
- * ZFS_ENTER(zfsvfs); // exit if unmounted
+ * ZFS_ENTER(zsb); // exit if unmounted
* top:
- * zfs_dirent_lock(&dl, ...) // lock directory entry (may VN_HOLD())
+ * zfs_dirent_lock(&dl, ...) // lock directory entry (may igrab())
* rw_enter(...); // grab any other locks you need
* tx = dmu_tx_create(...); // get DMU tx
* dmu_tx_hold_*(); // hold each object you might modify
@@ -149,14 +141,14 @@
* if (error) {
* rw_exit(...); // drop locks
* zfs_dirent_unlock(dl); // unlock directory entry
- * VN_RELE(...); // release held vnodes
+ * iput(...); // release held vnodes
* if (error == ERESTART) {
* dmu_tx_wait(tx);
* dmu_tx_abort(tx);
* goto top;
* }
* dmu_tx_abort(tx); // abort DMU tx
- * ZFS_EXIT(zfsvfs); // finished in zfs
+ * ZFS_EXIT(zsb); // finished in zfs
* return (error); // really out of space
* }
* error = do_real_work(); // do whatever this VOP does
@@ -165,163 +157,13 @@
* dmu_tx_commit(tx); // commit DMU tx -- error or not
* rw_exit(...); // drop locks
* zfs_dirent_unlock(dl); // unlock directory entry
- * VN_RELE(...); // release held vnodes
+ * iput(...); // release held vnodes
* zil_commit(zilog, foid); // synchronous when necessary
- * ZFS_EXIT(zfsvfs); // finished in zfs
+ * ZFS_EXIT(zsb); // finished in zfs
* return (error); // done, report error
*/
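For readers following the conversion, here is a minimal sketch of the ordering documented above under the renamed interfaces. The operation name zfs_example_op is hypothetical and the body is illustrative only; the helper calls (ZFS_ENTER, zfs_dirent_lock, dmu_tx_*, iput, ZFS_EXIT) are the ones already used elsewhere in this file.

/* Hypothetical illustration only -- not part of this patch. */
static int
zfs_example_op(struct inode *dip, char *name, cred_t *cr)
{
	znode_t		*dzp = ITOZ(dip);
	znode_t		*zp;
	zfs_sb_t	*zsb = ITOZSB(dip);
	zfs_dirlock_t	*dl;
	dmu_tx_t	*tx;
	int		error;

	ZFS_ENTER(zsb);				/* exit if unmounted */
	ZFS_VERIFY_ZP(dzp);
top:
	error = zfs_dirent_lock(&dl, dzp, name, &zp, ZEXISTS, NULL, NULL);
	if (error) {
		ZFS_EXIT(zsb);
		return (error);
	}

	tx = dmu_tx_create(zsb->z_os);		/* get DMU tx */
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);		/* unlock directory entry */
		iput(ZTOI(zp));			/* release held inodes */
		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		dmu_tx_abort(tx);		/* really out of space */
		ZFS_EXIT(zsb);
		return (error);
	}

	/* ... do the real work and zfs_log_*() it here ... */

	dmu_tx_commit(tx);			/* commit DMU tx -- error or not */
	zfs_dirent_unlock(dl);
	iput(ZTOI(zp));				/* last thing before zil_commit()/ZFS_EXIT() */
	ZFS_EXIT(zsb);
	return (0);
}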
-/* ARGSUSED */
-static int
-zfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
-{
- znode_t *zp = VTOZ(*vpp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
-
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
-
- if ((flag & FWRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
- ((flag & FAPPEND) == 0)) {
- ZFS_EXIT(zfsvfs);
- return (EPERM);
- }
-
- if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
- ZTOV(zp)->v_type == VREG &&
- !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
- if (fs_vscan(*vpp, cr, 0) != 0) {
- ZFS_EXIT(zfsvfs);
- return (EACCES);
- }
- }
-
- /* Keep a count of the synchronous opens in the znode */
- if (flag & (FSYNC | FDSYNC))
- atomic_inc_32(&zp->z_sync_cnt);
-
- ZFS_EXIT(zfsvfs);
- return (0);
-}
-
-/* ARGSUSED */
-static int
-zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
- caller_context_t *ct)
-{
- znode_t *zp = VTOZ(vp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
-
- /*
- * Clean up any locks held by this process on the vp.
- */
- cleanlocks(vp, ddi_get_pid(), 0);
- cleanshares(vp, ddi_get_pid());
-
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
-
- /* Decrement the synchronous opens in the znode */
- if ((flag & (FSYNC | FDSYNC)) && (count == 1))
- atomic_dec_32(&zp->z_sync_cnt);
-
- if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
- ZTOV(zp)->v_type == VREG &&
- !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
- VERIFY(fs_vscan(vp, cr, 1) == 0);
-
- ZFS_EXIT(zfsvfs);
- return (0);
-}
-
-/*
- * Lseek support for finding holes (cmd == _FIO_SEEK_HOLE) and
- * data (cmd == _FIO_SEEK_DATA). "off" is an in/out parameter.
- */
-static int
-zfs_holey(vnode_t *vp, int cmd, offset_t *off)
-{
- znode_t *zp = VTOZ(vp);
- uint64_t noff = (uint64_t)*off; /* new offset */
- uint64_t file_sz;
- int error;
- boolean_t hole;
-
- file_sz = zp->z_size;
- if (noff >= file_sz) {
- return (ENXIO);
- }
-
- if (cmd == _FIO_SEEK_HOLE)
- hole = B_TRUE;
- else
- hole = B_FALSE;
-
- error = dmu_offset_next(zp->z_zfsvfs->z_os, zp->z_id, hole, &noff);
-
- /* end of file? */
- if ((error == ESRCH) || (noff > file_sz)) {
- /*
- * Handle the virtual hole at the end of file.
- */
- if (hole) {
- *off = file_sz;
- return (0);
- }
- return (ENXIO);
- }
-
- if (noff < *off)
- return (error);
- *off = noff;
- return (error);
-}
-
-/* ARGSUSED */
-static int
-zfs_ioctl(vnode_t *vp, int com, intptr_t data, int flag, cred_t *cred,
- int *rvalp, caller_context_t *ct)
-{
- offset_t off;
- int error;
- zfsvfs_t *zfsvfs;
- znode_t *zp;
-
- switch (com) {
- case _FIOFFS:
- return (zfs_sync(vp->v_vfsp, 0, cred));
-
- /*
- * The following two ioctls are used by bfu. Faking out,
- * necessary to avoid bfu errors.
- */
- case _FIOGDIO:
- case _FIOSDIO:
- return (0);
-
- case _FIO_SEEK_DATA:
- case _FIO_SEEK_HOLE:
- if (ddi_copyin((void *)data, &off, sizeof (off), flag))
- return (EFAULT);
-
- zp = VTOZ(vp);
- zfsvfs = zp->z_zfsvfs;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
-
- /* offset parameter is in/out */
- error = zfs_holey(vp, com, &off);
- ZFS_EXIT(zfsvfs);
- if (error)
- return (error);
- if (ddi_copyout(&off, (void *)data, sizeof (off), flag))
- return (EFAULT);
- return (0);
- }
- return (ENOTTY);
-}
-
-#if defined(_KERNEL) && defined(HAVE_UIO_RW)
+#if defined(_KERNEL) && defined(HAVE_MMAP)
/*
* Utility functions to map and unmap a single physical page. These
* are used to manage the mappable copies of ZFS file data, and therefore
@@ -346,7 +188,6 @@ zfs_unmap_page(page_t *pp, caddr_t addr)
ppmapout(addr);
}
}
-#endif /* _KERNEL && HAVE_UIO_RW */
/*
* When a file is memory mapped, we must keep the IO data synchronized
@@ -356,7 +197,8 @@ zfs_unmap_page(page_t *pp, caddr_t addr)
* the page and the dmu buffer.
*/
static void
-update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid)
+update_pages(struct inode *ip, int64_t start, int len, objset_t *os,
+ uint64_t oid)
{
int64_t off;
@@ -365,7 +207,7 @@ update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid)
page_t *pp;
uint64_t nbytes = MIN(PAGESIZE - off, len);
- if (pp = page_lookup(vp, start, SE_SHARED)) {
+ if (pp = page_lookup(ip, start, SE_SHARED)) {
caddr_t va;
va = zfs_map_page(pp, S_WRITE);
@@ -390,10 +232,10 @@ update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid)
* the file is memory mapped.
*/
static int
-mappedread(vnode_t *vp, int nbytes, uio_t *uio)
+mappedread(struct inode *ip, int nbytes, uio_t *uio)
{
- znode_t *zp = VTOZ(vp);
- objset_t *os = zp->z_zfsvfs->z_os;
+ znode_t *zp = ITOZ(ip);
+ objset_t *os = ITOZSB(ip)->z_os;
int64_t start, off;
int len = nbytes;
int error = 0;
@@ -404,7 +246,7 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio)
page_t *pp;
uint64_t bytes = MIN(PAGESIZE - off, len);
- if (pp = page_lookup(vp, start, SE_SHARED)) {
+ if (pp = page_lookup(ip, start, SE_SHARED)) {
caddr_t va;
va = zfs_map_page(pp, S_READ);
@@ -421,18 +263,18 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio)
}
return (error);
}
+#endif /* _KERNEL && HAVE_MMAP */
offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */
/*
* Read bytes from specified file into supplied buffer.
*
- * IN: vp - vnode of file to be read from.
+ * IN: ip - inode of file to be read from.
* uio - structure supplying read location, range info,
* and return buffer.
* ioflag - SYNC flags; used to provide FRSYNC semantics.
* cr - credentials of caller.
- * ct - caller context
*
* OUT: uio - updated offset and range, buffer filled.
*
@@ -440,26 +282,28 @@ offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */
* error code if failure
*
* Side Effects:
- * vp - atime updated if byte count > 0
+ * inode - atime updated if byte count > 0
*/
/* ARGSUSED */
int
-zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
+zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
{
- znode_t *zp = VTOZ(vp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ znode_t *zp = ITOZ(ip);
+ zfs_sb_t *zsb = ITOZSB(ip);
objset_t *os;
ssize_t n, nbytes;
int error = 0;
rl_t *rl;
+#ifdef HAVE_UIO_ZEROCOPY
xuio_t *xuio = NULL;
+#endif /* HAVE_UIO_ZEROCOPY */
- ZFS_ENTER(zfsvfs);
+ ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(zp);
- os = zfsvfs->z_os;
+ os = zsb->z_os;
if (zp->z_pflags & ZFS_AV_QUARANTINED) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (EACCES);
}
@@ -467,7 +311,7 @@ zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
* Validate file offset
*/
if (uio->uio_loffset < (offset_t)0) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (EINVAL);
}
@@ -475,26 +319,28 @@ zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
* Fasttrack empty reads
*/
if (uio->uio_resid == 0) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (0);
}
+#ifdef HAVE_MANDLOCKS
/*
* Check for mandatory locks
*/
if (MANDMODE(zp->z_mode)) {
- if (error = chklock(vp, FREAD,
+ if (error = chklock(ip, FREAD,
uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct)) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
}
+#endif /* HAVE_MANDLOCKS */
/*
* If we're in FRSYNC mode, sync out this znode before reading it.
*/
- if (ioflag & FRSYNC || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zfsvfs->z_log, zp->z_id);
+ if (ioflag & FRSYNC || zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ zil_commit(zsb->z_log, zp->z_id);
/*
* Lock the range against changes.
@@ -513,6 +359,7 @@ zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
ASSERT(uio->uio_loffset < zp->z_size);
n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);
+#ifdef HAVE_UIO_ZEROCOPY
if ((uio->uio_extflg == UIO_XUIO) &&
(((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
int nblk;
@@ -529,7 +376,7 @@ zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
}
(void) dmu_xuio_init(xuio, nblk);
- if (vn_has_cached_data(vp)) {
+ if (vn_has_cached_data(ip)) {
/*
* For simplicity, we always allocate a full buffer
* even if we only expect to read a portion of a block.
@@ -541,15 +388,21 @@ zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
}
}
}
+#endif /* HAVE_UIO_ZEROCOPY */
while (n > 0) {
nbytes = MIN(n, zfs_read_chunk_size -
P2PHASE(uio->uio_loffset, zfs_read_chunk_size));
- if (vn_has_cached_data(vp))
- error = mappedread(vp, nbytes, uio);
+/* XXX: Drop this, ARC update handled by zpl layer */
+#ifdef HAVE_MMAP
+ if (vn_has_cached_data(ip))
+ error = mappedread(ip, nbytes, uio);
else
error = dmu_read_uio(os, zp->z_id, uio, nbytes);
+#else
+ error = dmu_read_uio(os, zp->z_id, uio, nbytes);
+#endif /* HAVE_MMAP */
if (error) {
/* convert checksum errors into IO errors */
if (error == ECKSUM)
@@ -562,9 +415,9 @@ zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
out:
zfs_range_unlock(rl);
- ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
+ ZFS_ACCESSTIME_STAMP(zsb, zp);
zfs_inode_update(zp);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
EXPORT_SYMBOL(zfs_read);
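For reference, a standalone sketch of the chunking arithmetic in the read loop above, using hypothetical values; P2PHASE and MIN are re-declared here only so the example compiles outside the kernel. Each pass is clipped so it ends on a zfs_read_chunk_size boundary, after which whole chunks are issued.

#include <stdio.h>
#include <stdint.h>

#define CHUNK		(1024 * 1024)		/* mirrors zfs_read_chunk_size */
#define P2PHASE(x, a)	((x) & ((a) - 1))	/* offset within the current chunk */
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	uint64_t loffset = 1536 * 1024;		/* 1.5 MiB into the file */
	uint64_t n = 2 * 1024 * 1024;		/* 2 MiB left to read */

	while (n > 0) {
		/* clipped to 512 KiB on the first pass, whole chunks after */
		uint64_t nbytes = MIN(n, CHUNK - P2PHASE(loffset, CHUNK));

		printf("read %llu bytes at offset %llu\n",
		    (unsigned long long)nbytes, (unsigned long long)loffset);
		loffset += nbytes;
		n -= nbytes;
	}
	return (0);
}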
@@ -572,12 +425,11 @@ EXPORT_SYMBOL(zfs_read);
/*
* Write the bytes to a file.
*
- * IN: vp - vnode of file to be written to.
+ * IN: ip - inode of file to be written to.
* uio - structure supplying write location, range info,
* and data buffer.
* ioflag - FAPPEND flag set if in append mode.
* cr - credentials of caller.
- * ct - caller context (NFS/CIFS fem monitor only)
*
* OUT: uio - updated offset and range.
*
@@ -585,36 +437,36 @@ EXPORT_SYMBOL(zfs_read);
* error code if failure
*
* Timestamps:
- * vp - ctime|mtime updated if byte count > 0
+ * ip - ctime|mtime updated if byte count > 0
*/
/* ARGSUSED */
int
-zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
+zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
{
- znode_t *zp = VTOZ(vp);
- rlim64_t limit = uio->uio_llimit;
+ znode_t *zp = ITOZ(ip);
+ rlim64_t limit = uio->uio_limit;
ssize_t start_resid = uio->uio_resid;
ssize_t tx_bytes;
uint64_t end_size;
dmu_tx_t *tx;
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
zilog_t *zilog;
offset_t woff;
ssize_t n, nbytes;
rl_t *rl;
- int max_blksz = zfsvfs->z_max_blksz;
- int error;
+ int max_blksz = zsb->z_max_blksz;
+ int error = 0;
arc_buf_t *abuf;
- iovec_t *aiov;
+ iovec_t *aiov = NULL;
xuio_t *xuio = NULL;
int i_iov = 0;
- int iovcnt = uio->uio_iovcnt;
iovec_t *iovp = uio->uio_iov;
int write_eof;
int count = 0;
sa_bulk_attr_t bulk[4];
uint64_t mtime[2], ctime[2];
+ ASSERTV(int iovcnt = uio->uio_iovcnt);
/*
* Fasttrack empty write
@@ -626,14 +478,13 @@ zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
limit = MAXOFFSET_T;
- ZFS_ENTER(zfsvfs);
+ ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(zp);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
- &zp->z_size, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL, &zp->z_size, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
&zp->z_pflags, 8);
/*
@@ -642,31 +493,34 @@ zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
(uio->uio_loffset < zp->z_size))) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (EPERM);
}
- zilog = zfsvfs->z_log;
+ zilog = zsb->z_log;
/*
* Validate file offset
*/
woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
if (woff < 0) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (EINVAL);
}
+#ifdef HAVE_MANDLOCKS
/*
* Check for mandatory locks before calling zfs_range_lock()
* in order to prevent a deadlock with locks set via fcntl().
*/
if (MANDMODE((mode_t)zp->z_mode) &&
- (error = chklock(vp, FWRITE, woff, n, uio->uio_fmode, ct)) != 0) {
- ZFS_EXIT(zfsvfs);
+ (error = chklock(ip, FWRITE, woff, n, uio->uio_fmode, ct)) != 0) {
+ ZFS_EXIT(zsb);
return (error);
}
+#endif /* HAVE_MANDLOCKS */
+#ifdef HAVE_UIO_ZEROCOPY
/*
* Pre-fault the pages to ensure slow (eg NFS) pages
* don't hold up txg.
@@ -677,6 +531,7 @@ zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
xuio = (xuio_t *)uio;
else
uio_prefaultpages(MIN(n, max_blksz), uio);
+#endif /* HAVE_UIO_ZEROCOPY */
/*
* If in append mode, set the io offset pointer to eof.
@@ -708,7 +563,7 @@ zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
if (woff >= limit) {
zfs_range_unlock(rl);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (EFBIG);
}
@@ -729,8 +584,8 @@ zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
abuf = NULL;
woff = uio->uio_loffset;
again:
- if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
- zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
+ if (zfs_owner_overquota(zsb, zp, B_FALSE) ||
+ zfs_owner_overquota(zsb, zp, B_TRUE)) {
if (abuf != NULL)
dmu_return_arcbuf(abuf);
error = EDQUOT;
@@ -742,8 +597,6 @@ again:
aiov = &iovp[i_iov];
abuf = dmu_xuio_arcbuf(xuio, i_iov);
dmu_xuio_clear(xuio, i_iov);
- DTRACE_PROBE3(zfs_cp_write, int, i_iov,
- iovec_t *, aiov, arc_buf_t *, abuf);
ASSERT((aiov->iov_base == abuf->b_data) ||
((char *)aiov->iov_base - (char *)abuf->b_data +
aiov->iov_len == arc_buf_size(abuf)));
@@ -776,7 +629,7 @@ again:
/*
* Start a transaction.
*/
- tx = dmu_tx_create(zfsvfs->z_os);
+ tx = dmu_tx_create(zsb->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
zfs_sa_upgrade_txholds(tx, zp);
@@ -835,7 +688,7 @@ again:
if (tx_bytes < max_blksz && (!write_eof ||
aiov->iov_base != abuf->b_data)) {
ASSERT(xuio);
- dmu_write(zfsvfs->z_os, zp->z_id, woff,
+ dmu_write(zsb->z_os, zp->z_id, woff,
aiov->iov_len, aiov->iov_base, tx);
dmu_return_arcbuf(abuf);
xuio_stat_wbuf_copied();
@@ -847,17 +700,20 @@ again:
ASSERT(tx_bytes <= uio->uio_resid);
uioskip(uio, tx_bytes);
}
- if (tx_bytes && vn_has_cached_data(vp)) {
- update_pages(vp, woff,
- tx_bytes, zfsvfs->z_os, zp->z_id);
+/* XXX: Drop this, ARC update handled by zpl layer */
+#ifdef HAVE_MMAP
+ if (tx_bytes && vn_has_cached_data(ip)) {
+ update_pages(ip, woff,
+ tx_bytes, zsb->z_os, zp->z_id);
}
+#endif /* HAVE_MMAP */
/*
* If we made no progress, we're done. If we made even
* partial progress, update the znode and ZIL accordingly.
*/
if (tx_bytes == 0) {
- (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
+ (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zsb),
(void *)&zp->z_size, sizeof (uint64_t), tx);
dmu_tx_commit(tx);
ASSERT(error != 0);
@@ -884,7 +740,7 @@ again:
uint64_t newmode;
zp->z_mode &= ~(S_ISUID | S_ISGID);
newmode = zp->z_mode;
- (void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
+ (void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zsb),
(void *)&newmode, sizeof (uint64_t), tx);
}
mutex_exit(&zp->z_acl_lock);
@@ -906,8 +762,8 @@ again:
* the file size to the specified eof. Note, there's no
* concurrency during replay.
*/
- if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
- zp->z_size = zfsvfs->z_replay_eof;
+ if (zsb->z_replay && zsb->z_replay_eof != 0)
+ zp->z_size = zsb->z_replay_eof;
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
@@ -929,26 +785,36 @@ again:
* If we're in replay mode, or we made no progress, return error.
* Otherwise, it's at least a partial write, so it's successful.
*/
- if (zfsvfs->z_replay || uio->uio_resid == start_resid) {
- ZFS_EXIT(zfsvfs);
+ if (zsb->z_replay || uio->uio_resid == start_resid) {
+ ZFS_EXIT(zsb);
return (error);
}
if (ioflag & (FSYNC | FDSYNC) ||
- zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, zp->z_id);
zfs_inode_update(zp);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (0);
}
EXPORT_SYMBOL(zfs_write);
+static void
+iput_async(struct inode *ip, taskq_t *taskq)
+{
+ ASSERT(atomic_read(&ip->i_count) > 0);
+ if (atomic_read(&ip->i_count) == 1)
+ taskq_dispatch(taskq, (task_func_t *)iput, ip, TQ_SLEEP);
+ else
+ iput(ip);
+}
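A brief sketch of how the helper above is meant to be used; the caller below is hypothetical (the real callers are zfs_get_done() and zfs_get_data() just after it). When the caller may hold the last inode reference while the txg is stopped from syncing, the put must not run eviction inline, so it is handed to the pool's iput taskq.

/* Hypothetical caller -- not part of this patch. */
static void
example_release(znode_t *zp, objset_t *os)
{
	/*
	 * iput_async() dispatches to the taskq only when i_count == 1,
	 * i.e. when this put could trigger zfs_zinactive() and require
	 * a new tx; otherwise it is an ordinary iput().
	 */
	iput_async(ZTOI(zp), dsl_pool_iput_taskq(dmu_objset_pool(os)));
}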
+
void
zfs_get_done(zgd_t *zgd, int error)
{
znode_t *zp = zgd->zgd_private;
- objset_t *os = zp->z_zfsvfs->z_os;
+ objset_t *os = ZTOZSB(zp)->z_os;
if (zgd->zgd_db)
dmu_buf_rele(zgd->zgd_db, zgd);
@@ -959,7 +825,7 @@ zfs_get_done(zgd_t *zgd, int error)
* Release the vnode asynchronously as we currently have the
* txg stopped from syncing.
*/
- VN_RELE_ASYNC(ZTOV(zp), dsl_pool_vnrele_taskq(dmu_objset_pool(os)));
+ iput_async(ZTOI(zp), dsl_pool_iput_taskq(dmu_objset_pool(os)));
if (error == 0 && zgd->zgd_bp)
zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
@@ -977,8 +843,8 @@ static int zil_fault_io = 0;
int
zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
- zfsvfs_t *zfsvfs = arg;
- objset_t *os = zfsvfs->z_os;
+ zfs_sb_t *zsb = arg;
+ objset_t *os = zsb->z_os;
znode_t *zp;
uint64_t object = lr->lr_foid;
uint64_t offset = lr->lr_offset;
@@ -994,20 +860,19 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
/*
* Nothing to do if the file has been removed
*/
- if (zfs_zget(zfsvfs, object, &zp) != 0)
+ if (zfs_zget(zsb, object, &zp) != 0)
return (ENOENT);
if (zp->z_unlinked) {
/*
* Release the vnode asynchronously as we currently have the
* txg stopped from syncing.
*/
- VN_RELE_ASYNC(ZTOV(zp),
- dsl_pool_vnrele_taskq(dmu_objset_pool(os)));
+ iput_async(ZTOI(zp), dsl_pool_iput_taskq(dmu_objset_pool(os)));
return (ENOENT);
}
zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
- zgd->zgd_zilog = zfsvfs->z_log;
+ zgd->zgd_zilog = zsb->z_log;
zgd->zgd_private = zp;
/*
@@ -1092,15 +957,14 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
}
/*ARGSUSED*/
-static int
-zfs_access(vnode_t *vp, int mode, int flag, cred_t *cr,
- caller_context_t *ct)
+int
+zfs_access(struct inode *ip, int mode, int flag, cred_t *cr)
{
- znode_t *zp = VTOZ(vp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ znode_t *zp = ITOZ(ip);
+ zfs_sb_t *zsb = ITOZSB(ip);
int error;
- ZFS_ENTER(zfsvfs);
+ ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(zp);
if (flag & V_ACE_MASK)
@@ -1108,46 +972,23 @@ zfs_access(vnode_t *vp, int mode, int flag, cred_t *cr,
else
error = zfs_zaccess_rwx(zp, mode, flag, cr);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
-
-/*
- * If vnode is for a device return a specfs vnode instead.
- */
-static int
-specvp_check(vnode_t **vpp, cred_t *cr)
-{
- int error = 0;
-
- if (IS_DEVVP(*vpp)) {
- struct vnode *svp;
-
- svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr);
- VN_RELE(*vpp);
- if (svp == NULL)
- error = ENOSYS;
- *vpp = svp;
- }
- return (error);
-}
-
+EXPORT_SYMBOL(zfs_access);
/*
* Lookup an entry in a directory, or an extended attribute directory.
- * If it exists, return a held vnode reference for it.
+ * If it exists, return a held inode reference for it.
*
- * IN: dvp - vnode of directory to search.
+ * IN: dip - inode of directory to search.
* nm - name of entry to lookup.
- * pnp - full pathname to lookup [UNUSED].
* flags - LOOKUP_XATTR set if looking for an attribute.
- * rdir - root directory vnode [UNUSED].
* cr - credentials of caller.
- * ct - caller context
* direntflags - directory lookup flags
* realpnp - returned pathname.
*
- * OUT: vpp - vnode of located entry, NULL if not found.
+ * OUT: ipp - inode of located entry, NULL if not found.
*
* RETURN: 0 if success
* error code if failure
@@ -1157,18 +998,17 @@ specvp_check(vnode_t **vpp, cred_t *cr)
*/
/* ARGSUSED */
int
-zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
- int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
- int *direntflags, pathname_t *realpnp)
+zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags,
+ cred_t *cr, int *direntflags, pathname_t *realpnp)
{
- znode_t *zdp = VTOZ(dvp);
- zfsvfs_t *zfsvfs = zdp->z_zfsvfs;
- int error = 0;
+ znode_t *zdp = ITOZ(dip);
+ zfs_sb_t *zsb = ITOZSB(dip);
+ int error = 0;
/* fast path */
if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {
- if (dvp->v_type != VDIR) {
+ if (!S_ISDIR(dip->i_mode)) {
return (ENOTDIR);
} else if (zdp->z_sa_hdl == NULL) {
return (EIO);
@@ -1177,44 +1017,44 @@ zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
error = zfs_fastaccesschk_execute(zdp, cr);
if (!error) {
- *vpp = dvp;
- VN_HOLD(*vpp);
+ *ipp = dip;
+ igrab(*ipp);
return (0);
}
return (error);
+#ifdef HAVE_DNLC
} else {
vnode_t *tvp = dnlc_lookup(dvp, nm);
if (tvp) {
error = zfs_fastaccesschk_execute(zdp, cr);
if (error) {
- VN_RELE(tvp);
+ iput(tvp);
return (error);
}
if (tvp == DNLC_NO_VNODE) {
- VN_RELE(tvp);
+ iput(tvp);
return (ENOENT);
} else {
*vpp = tvp;
return (specvp_check(vpp, cr));
}
}
+#endif /* HAVE_DNLC */
}
}
- DTRACE_PROBE2(zfs__fastpath__lookup__miss, vnode_t *, dvp, char *, nm);
-
- ZFS_ENTER(zfsvfs);
+ ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(zdp);
- *vpp = NULL;
+ *ipp = NULL;
if (flags & LOOKUP_XATTR) {
/*
* If the xattr property is off, refuse the lookup request.
*/
- if (!(zfsvfs->z_vfs->vfs_flag & VFS_XATTR)) {
- ZFS_EXIT(zfsvfs);
+ if (!(zsb->z_flags & ZSB_XATTR_USER)) {
+ ZFS_EXIT(zsb);
return (EINVAL);
}
@@ -1223,12 +1063,12 @@ zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
* Maybe someday we will.
*/
if (zdp->z_pflags & ZFS_XATTR) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (EINVAL);
}
- if ((error = zfs_get_xattrdir(VTOZ(dvp), vpp, cr, flags))) {
- ZFS_EXIT(zfsvfs);
+ if ((error = zfs_get_xattrdir(zdp, ipp, cr, flags))) {
+ ZFS_EXIT(zsb);
return (error);
}
@@ -1236,18 +1076,18 @@ zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
* Do we have permission to get into attribute directory?
*/
- if ((error = zfs_zaccess(VTOZ(*vpp), ACE_EXECUTE, 0,
+ if ((error = zfs_zaccess(ITOZ(*ipp), ACE_EXECUTE, 0,
B_FALSE, cr))) {
- VN_RELE(*vpp);
- *vpp = NULL;
+ iput(*ipp);
+ *ipp = NULL;
}
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
- if (dvp->v_type != VDIR) {
- ZFS_EXIT(zfsvfs);
+ if (!S_ISDIR(dip->i_mode)) {
+ ZFS_EXIT(zsb);
return (ENOTDIR);
}
@@ -1256,25 +1096,21 @@ zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
*/
if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr))) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
- if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
+ if (zsb->z_utf8 && u8_validate(nm, strlen(nm),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (EILSEQ);
}
- error = zfs_dirlook(zdp, nm, vpp, flags, direntflags, realpnp);
- if (error == 0) {
- if (*vpp)
- zfs_inode_update(VTOZ(*vpp));
+ error = zfs_dirlook(zdp, nm, ipp, flags, direntflags, realpnp);
+ if ((error == 0) && (*ipp))
+ zfs_inode_update(ITOZ(*ipp));
- error = specvp_check(vpp, cr);
- }
-
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
EXPORT_SYMBOL(zfs_lookup);
@@ -1282,42 +1118,39 @@ EXPORT_SYMBOL(zfs_lookup);
/*
* Attempt to create a new entry in a directory. If the entry
* already exists, truncate the file if permissible, else return
- * an error. Return the vp of the created or trunc'd file.
+ * an error. Return the ip of the created or trunc'd file.
*
- * IN: dvp - vnode of directory to put new file entry in.
+ * IN: dip - inode of directory to put new file entry in.
* name - name of new file entry.
* vap - attributes of new file.
* excl - flag indicating exclusive or non-exclusive mode.
* mode - mode to open file with.
* cr - credentials of caller.
* flag - large file flag [UNUSED].
- * ct - caller context
- * vsecp - ACL to be set
+ * vsecp - ACL to be set
*
- * OUT: vpp - vnode of created or trunc'd entry.
+ * OUT: ipp - inode of created or trunc'd entry.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
- * dvp - ctime|mtime updated if new entry created
- * vp - ctime|mtime always, atime if new
+ * dip - ctime|mtime updated if new entry created
+ * ip - ctime|mtime always, atime if new
*/
/* ARGSUSED */
int
-zfs_create(vnode_t *dvp, char *name, vattr_t *vap, int excl,
- int mode, vnode_t **vpp, cred_t *cr, int flag, caller_context_t *ct,
- vsecattr_t *vsecp)
+zfs_create(struct inode *dip, char *name, vattr_t *vap, int excl,
+ int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
{
- znode_t *zp, *dzp = VTOZ(dvp);
- zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
+ znode_t *zp, *dzp = ITOZ(dip);
+ zfs_sb_t *zsb = ITOZSB(dip);
zilog_t *zilog;
objset_t *os;
zfs_dirlock_t *dl;
dmu_tx_t *tx;
int error;
- ksid_t *ksid;
uid_t uid;
gid_t gid;
zfs_acl_ids_t acl_ids;
@@ -1330,51 +1163,45 @@ zfs_create(vnode_t *dvp, char *name, vattr_t *vap, int excl,
*/
gid = crgetgid(cr);
- ksid = crgetsid(cr, KSID_OWNER);
- if (ksid)
- uid = ksid_getid(ksid);
- else
- uid = crgetuid(cr);
+ uid = crgetuid(cr);
- if (zfsvfs->z_use_fuids == B_FALSE &&
- (vsecp || (vap->va_mask & AT_XVATTR) ||
- IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
+ if (zsb->z_use_fuids == B_FALSE &&
+ (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
return (EINVAL);
- ZFS_ENTER(zfsvfs);
+ ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(dzp);
- os = zfsvfs->z_os;
- zilog = zfsvfs->z_log;
+ os = zsb->z_os;
+ zilog = zsb->z_log;
- if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
+ if (zsb->z_utf8 && u8_validate(name, strlen(name),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (EILSEQ);
}
+#ifdef HAVE_XVATTR
if (vap->va_mask & AT_XVATTR) {
if ((error = secpolicy_xvattr((xvattr_t *)vap,
- crgetuid(cr), cr, vap->va_type)) != 0) {
- ZFS_EXIT(zfsvfs);
+ crgetuid(cr), cr, vap->va_mode)) != 0) {
+ ZFS_EXIT(zsb);
return (error);
}
}
-top:
- *vpp = NULL;
-
- if ((vap->va_mode & VSVTX) && secpolicy_vnode_stky_modify(cr))
- vap->va_mode &= ~VSVTX;
+#endif /* HAVE_XVATTR */
+top:
+ *ipp = NULL;
if (*name == '\0') {
/*
* Null component name refers to the directory itself.
*/
- VN_HOLD(dvp);
+ igrab(dip);
zp = dzp;
dl = NULL;
error = 0;
} else {
- /* possible VN_HOLD(zp) */
+ /* possible igrab(zp) */
int zflg = 0;
if (flag & FIGNORECASE)
@@ -1387,7 +1214,7 @@ top:
zfs_acl_ids_free(&acl_ids);
if (strcmp(name, "..") == 0)
error = EISDIR;
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
}
@@ -1410,8 +1237,7 @@ top:
* extended attribute directories.
*/
- if ((dzp->z_pflags & ZFS_XATTR) &&
- (vap->va_type != VREG)) {
+ if ((dzp->z_pflags & ZFS_XATTR) && !S_ISREG(vap->va_mode)) {
if (have_acl)
zfs_acl_ids_free(&acl_ids);
error = EINVAL;
@@ -1423,7 +1249,7 @@ top:
goto out;
have_acl = B_TRUE;
- if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
+ if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
zfs_acl_ids_free(&acl_ids);
error = EDQUOT;
goto out;
@@ -1434,12 +1260,12 @@ top:
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE);
- fuid_dirtied = zfsvfs->z_fuid_dirty;
+ fuid_dirtied = zsb->z_fuid_dirty;
if (fuid_dirtied)
- zfs_fuid_txhold(zfsvfs, tx);
+ zfs_fuid_txhold(zsb, tx);
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
- if (!zfsvfs->z_use_sa &&
+ if (!zsb->z_use_sa &&
acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, acl_ids.z_aclp->z_acl_bytes);
@@ -1454,13 +1280,13 @@ top:
}
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
if (fuid_dirtied)
- zfs_fuid_sync(zfsvfs, tx);
+ zfs_fuid_sync(zsb, tx);
(void) zfs_link_create(dl, zp, tx, ZNEW);
txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
@@ -1483,14 +1309,14 @@ top:
/*
* Can't truncate an existing file if in exclusive mode.
*/
- if (excl == EXCL) {
+ if (excl) {
error = EEXIST;
goto out;
}
/*
* Can't open a directory for writing.
*/
- if ((ZTOV(zp)->v_type == VDIR) && (mode & S_IWRITE)) {
+ if (S_ISDIR(ZTOI(zp)->i_mode)) {
error = EISDIR;
goto out;
}
@@ -1508,15 +1334,12 @@ top:
/*
* Truncate regular files if requested.
*/
- if ((ZTOV(zp)->v_type == VREG) &&
- (vap->va_mask & AT_SIZE) && (vap->va_size == 0)) {
+ if (S_ISREG(ZTOI(zp)->i_mode) &&
+ (vap->va_mask & ATTR_SIZE) && (vap->va_size == 0)) {
/* we can't hold any locks when calling zfs_freesp() */
zfs_dirent_unlock(dl);
dl = NULL;
error = zfs_freesp(zp, 0, 0, mode, TRUE);
- if (error == 0) {
- vnevent_create(ZTOV(zp), ct);
- }
}
}
out:
@@ -1526,18 +1349,17 @@ out:
if (error) {
if (zp)
- VN_RELE(ZTOV(zp));
+ iput(ZTOI(zp));
} else {
zfs_inode_update(dzp);
zfs_inode_update(zp);
- *vpp = ZTOV(zp);
- error = specvp_check(vpp, cr);
+ *ipp = ZTOI(zp);
}
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
EXPORT_SYMBOL(zfs_create);
@@ -1545,54 +1367,54 @@ EXPORT_SYMBOL(zfs_create);
/*
* Remove an entry from a directory.
*
- * IN: dvp - vnode of directory to remove entry from.
+ * IN: dip - inode of directory to remove entry from.
* name - name of entry to remove.
* cr - credentials of caller.
- * ct - caller context
- * flags - case flags
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
- * dvp - ctime|mtime
- * vp - ctime (if nlink > 0)
+ * dip - ctime|mtime
+ * ip - ctime (if nlink > 0)
*/
uint64_t null_xattr = 0;
/*ARGSUSED*/
int
-zfs_remove(vnode_t *dvp, char *name, cred_t *cr, caller_context_t *ct,
- int flags)
+zfs_remove(struct inode *dip, char *name, cred_t *cr)
{
- znode_t *zp, *dzp = VTOZ(dvp);
+ znode_t *zp, *dzp = ITOZ(dip);
znode_t *xzp;
- vnode_t *vp;
- zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
+ struct inode *ip;
+ zfs_sb_t *zsb = ITOZSB(dip);
zilog_t *zilog;
- uint64_t acl_obj, xattr_obj;
- uint64_t xattr_obj_unlinked = 0;
+ uint64_t xattr_obj;
+ uint64_t xattr_obj_unlinked = 0;
uint64_t obj = 0;
zfs_dirlock_t *dl;
dmu_tx_t *tx;
- boolean_t may_delete_now, delete_now = FALSE;
- boolean_t unlinked, toobig = FALSE;
+ boolean_t unlinked;
uint64_t txtype;
pathname_t *realnmp = NULL;
+#ifdef HAVE_PN_UTILS
pathname_t realnm;
+#endif /* HAVE_PN_UTILS */
int error;
int zflg = ZEXISTS;
- ZFS_ENTER(zfsvfs);
+ ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(dzp);
- zilog = zfsvfs->z_log;
+ zilog = zsb->z_log;
+#ifdef HAVE_PN_UTILS
if (flags & FIGNORECASE) {
zflg |= ZCILOOK;
pn_alloc(&realnm);
realnmp = &realnm;
}
+#endif /* HAVE_PN_UTILS */
top:
xattr_obj = 0;
@@ -1602,13 +1424,15 @@ top:
*/
if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
NULL, realnmp))) {
+#ifdef HAVE_PN_UTILS
if (realnmp)
pn_free(realnmp);
- ZFS_EXIT(zfsvfs);
+#endif /* HAVE_PN_UTILS */
+ ZFS_EXIT(zsb);
return (error);
}
- vp = ZTOV(zp);
+ ip = ZTOI(zp);
if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
goto out;
@@ -1617,75 +1441,60 @@ top:
/*
* Need to use rmdir for removing directories.
*/
- if (vp->v_type == VDIR) {
+ if (S_ISDIR(ip->i_mode)) {
error = EPERM;
goto out;
}
- vnevent_remove(vp, dvp, name, ct);
-
+#ifdef HAVE_DNLC
if (realnmp)
dnlc_remove(dvp, realnmp->pn_buf);
else
dnlc_remove(dvp, name);
-
- mutex_enter(&vp->v_lock);
- may_delete_now = ((vp->v_count == 1) && (!vn_has_cached_data(vp)));
- mutex_exit(&vp->v_lock);
+#endif /* HAVE_DNLC */
/*
- * We may delete the znode now, or we may put it in the unlinked set;
- * it depends on whether we're the last link, and on whether there are
- * other holds on the vnode. So we dmu_tx_hold() the right things to
- * allow for either case.
+ * We never delete the znode and always place it in the unlinked
+ * set. The dentry cache will always hold the last reference and
+ * is responsible for safely freeing the znode.
*/
obj = zp->z_id;
- tx = dmu_tx_create(zfsvfs->z_os);
+ tx = dmu_tx_create(zsb->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
zfs_sa_upgrade_txholds(tx, dzp);
- if (may_delete_now) {
- toobig =
- zp->z_size > zp->z_blksz * DMU_MAX_DELETEBLKCNT;
- /* if the file is too big, only hold_free a token amount */
- dmu_tx_hold_free(tx, zp->z_id, 0,
- (toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END));
- }
/* are there any extended attributes? */
- error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
+ error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb),
&xattr_obj, sizeof (xattr_obj));
if (error == 0 && xattr_obj) {
- error = zfs_zget(zfsvfs, xattr_obj, &xzp);
+ error = zfs_zget(zsb, xattr_obj, &xzp);
ASSERT3U(error, ==, 0);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
}
- mutex_enter(&zp->z_lock);
- if ((acl_obj = zfs_external_acl(zp)) != 0 && may_delete_now)
- dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
- mutex_exit(&zp->z_lock);
-
/* charge as an update -- would be nice not to charge at all */
- dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
+ dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL);
error = dmu_tx_assign(tx, TXG_NOWAIT);
if (error) {
zfs_dirent_unlock(dl);
- VN_RELE(vp);
+ iput(ip);
if (xzp)
- VN_RELE(ZTOV(xzp));
+ iput(ZTOI(xzp));
if (error == ERESTART) {
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
+#ifdef HAVE_PN_UTILS
if (realnmp)
pn_free(realnmp);
+#endif /* HAVE_PN_UTILS */
dmu_tx_abort(tx);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
@@ -1700,64 +1509,31 @@ top:
}
if (unlinked) {
-
/*
* Hold z_lock so that we can make sure that the ACL obj
* hasn't changed. Could have been deleted due to
* zfs_sa_upgrade().
*/
mutex_enter(&zp->z_lock);
- mutex_enter(&vp->v_lock);
- (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
+ (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb),
&xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
- delete_now = may_delete_now && !toobig &&
- vp->v_count == 1 && !vn_has_cached_data(vp) &&
- xattr_obj == xattr_obj_unlinked && zfs_external_acl(zp) ==
- acl_obj;
- mutex_exit(&vp->v_lock);
- }
-
- if (delete_now) {
- if (xattr_obj_unlinked) {
- ASSERT3U(xzp->z_links, ==, 2);
- mutex_enter(&xzp->z_lock);
- xzp->z_unlinked = 1;
- xzp->z_links = 0;
- error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
- &xzp->z_links, sizeof (xzp->z_links), tx);
- ASSERT3U(error, ==, 0);
- mutex_exit(&xzp->z_lock);
- zfs_unlinked_add(xzp, tx);
-
- if (zp->z_is_sa)
- error = sa_remove(zp->z_sa_hdl,
- SA_ZPL_XATTR(zfsvfs), tx);
- else
- error = sa_update(zp->z_sa_hdl,
- SA_ZPL_XATTR(zfsvfs), &null_xattr,
- sizeof (uint64_t), tx);
- ASSERT3U(error, ==, 0);
- }
- mutex_enter(&vp->v_lock);
- vp->v_count--;
- ASSERT3U(vp->v_count, ==, 0);
- mutex_exit(&vp->v_lock);
- mutex_exit(&zp->z_lock);
- zfs_znode_delete(zp, tx);
- } else if (unlinked) {
mutex_exit(&zp->z_lock);
zfs_unlinked_add(zp, tx);
}
txtype = TX_REMOVE;
+#ifdef HAVE_PN_UTILS
if (flags & FIGNORECASE)
txtype |= TX_CI;
+#endif /* HAVE_PN_UTILS */
zfs_log_remove(zilog, tx, txtype, dzp, name, obj);
dmu_tx_commit(tx);
out:
+#ifdef HAVE_PN_UTILS
if (realnmp)
pn_free(realnmp);
+#endif /* HAVE_PN_UTILS */
zfs_dirent_unlock(dl);
zfs_inode_update(dzp);
@@ -1765,103 +1541,97 @@ out:
if (xzp)
zfs_inode_update(xzp);
- if (!delete_now)
- VN_RELE(vp);
+ iput(ip);
if (xzp)
- VN_RELE(ZTOV(xzp));
+ iput(ZTOI(xzp));
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
EXPORT_SYMBOL(zfs_remove);
/*
- * Create a new directory and insert it into dvp using the name
+ * Create a new directory and insert it into dip using the name
* provided. Return a pointer to the inserted directory.
*
- * IN: dvp - vnode of directory to add subdir to.
+ * IN: dip - inode of directory to add subdir to.
* dirname - name of new directory.
* vap - attributes of new directory.
* cr - credentials of caller.
- * ct - caller context
* vsecp - ACL to be set
*
- * OUT: vpp - vnode of created directory.
+ * OUT: ipp - inode of created directory.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
- * dvp - ctime|mtime updated
- * vp - ctime|mtime|atime updated
+ * dip - ctime|mtime updated
+ * ipp - ctime|mtime|atime updated
*/
/*ARGSUSED*/
int
-zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, cred_t *cr,
- caller_context_t *ct, int flags, vsecattr_t *vsecp)
+zfs_mkdir(struct inode *dip, char *dirname, vattr_t *vap, struct inode **ipp,
+ cred_t *cr, int flags, vsecattr_t *vsecp)
{
- znode_t *zp, *dzp = VTOZ(dvp);
- zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
+ znode_t *zp, *dzp = ITOZ(dip);
+ zfs_sb_t *zsb = ITOZSB(dip);
zilog_t *zilog;
zfs_dirlock_t *dl;
uint64_t txtype;
dmu_tx_t *tx;
int error;
int zf = ZNEW;
- ksid_t *ksid;
uid_t uid;
gid_t gid = crgetgid(cr);
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
- ASSERT(vap->va_type == VDIR);
+ ASSERT(S_ISDIR(vap->va_mode));
/*
* If we have an ephemeral id, ACL, or XVATTR then
* make sure file system is at proper version
*/
- ksid = crgetsid(cr, KSID_OWNER);
- if (ksid)
- uid = ksid_getid(ksid);
- else
- uid = crgetuid(cr);
- if (zfsvfs->z_use_fuids == B_FALSE &&
- (vsecp || (vap->va_mask & AT_XVATTR) ||
- IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
+ uid = crgetuid(cr);
+ if (zsb->z_use_fuids == B_FALSE &&
+ (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
return (EINVAL);
- ZFS_ENTER(zfsvfs);
+ ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(dzp);
- zilog = zfsvfs->z_log;
+ zilog = zsb->z_log;
if (dzp->z_pflags & ZFS_XATTR) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (EINVAL);
}
- if (zfsvfs->z_utf8 && u8_validate(dirname,
+ if (zsb->z_utf8 && u8_validate(dirname,
strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (EILSEQ);
}
if (flags & FIGNORECASE)
zf |= ZCILOOK;
+#ifdef HAVE_XVATTR
if (vap->va_mask & AT_XVATTR) {
if ((error = secpolicy_xvattr((xvattr_t *)vap,
- crgetuid(cr), cr, vap->va_type)) != 0) {
- ZFS_EXIT(zfsvfs);
+ crgetuid(cr), cr, vap->va_mode)) != 0) {
+ ZFS_EXIT(zsb);
return (error);
}
}
+#endif /* HAVE_XVATTR */
if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
vsecp, &acl_ids)) != 0) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
/*
@@ -1872,39 +1642,39 @@ zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, cred_t *cr,
* to fail.
*/
top:
- *vpp = NULL;
+ *ipp = NULL;
if ((error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
NULL, NULL))) {
zfs_acl_ids_free(&acl_ids);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr))) {
zfs_acl_ids_free(&acl_ids);
zfs_dirent_unlock(dl);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
- if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
+ if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
zfs_acl_ids_free(&acl_ids);
zfs_dirent_unlock(dl);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (EDQUOT);
}
/*
* Add a new entry to the directory.
*/
- tx = dmu_tx_create(zfsvfs->z_os);
+ tx = dmu_tx_create(zsb->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
- fuid_dirtied = zfsvfs->z_fuid_dirty;
+ fuid_dirtied = zsb->z_fuid_dirty;
if (fuid_dirtied)
- zfs_fuid_txhold(zfsvfs, tx);
- if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
+ zfs_fuid_txhold(zsb, tx);
+ if (!zsb->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
acl_ids.z_aclp->z_acl_bytes);
}
@@ -1922,7 +1692,7 @@ top:
}
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
@@ -1932,14 +1702,14 @@ top:
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
if (fuid_dirtied)
- zfs_fuid_sync(zfsvfs, tx);
+ zfs_fuid_sync(zsb, tx);
/*
* Now put new name in parent dir.
*/
(void) zfs_link_create(dl, zp, tx, ZNEW);
- *vpp = ZTOV(zp);
+ *ipp = ZTOI(zp);
txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
if (flags & FIGNORECASE)
@@ -1953,12 +1723,12 @@ top:
zfs_dirent_unlock(dl);
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
zfs_inode_update(dzp);
zfs_inode_update(zp);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (0);
}
EXPORT_SYMBOL(zfs_mkdir);
@@ -1968,37 +1738,36 @@ EXPORT_SYMBOL(zfs_mkdir);
* directory is the same as the subdir to be removed, the
* remove will fail.
*
- * IN: dvp - vnode of directory to remove from.
+ * IN: dip - inode of directory to remove from.
* name - name of directory to be removed.
- * cwd - vnode of current working directory.
+ * cwd - inode of current working directory.
* cr - credentials of caller.
- * ct - caller context
* flags - case flags
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
- * dvp - ctime|mtime updated
+ * dip - ctime|mtime updated
*/
/*ARGSUSED*/
int
-zfs_rmdir(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr,
- caller_context_t *ct, int flags)
+zfs_rmdir(struct inode *dip, char *name, struct inode *cwd, cred_t *cr,
+ int flags)
{
- znode_t *dzp = VTOZ(dvp);
+ znode_t *dzp = ITOZ(dip);
znode_t *zp;
- vnode_t *vp;
- zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
+ struct inode *ip;
+ zfs_sb_t *zsb = ITOZSB(dip);
zilog_t *zilog;
zfs_dirlock_t *dl;
dmu_tx_t *tx;
int error;
int zflg = ZEXISTS;
- ZFS_ENTER(zfsvfs);
+ ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(dzp);
- zilog = zfsvfs->z_log;
+ zilog = zsb->z_log;
if (flags & FIGNORECASE)
zflg |= ZCILOOK;
@@ -2010,28 +1779,26 @@ top:
*/
if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
NULL, NULL))) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
- vp = ZTOV(zp);
+ ip = ZTOI(zp);
if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
goto out;
}
- if (vp->v_type != VDIR) {
+ if (!S_ISDIR(ip->i_mode)) {
error = ENOTDIR;
goto out;
}
- if (vp == cwd) {
+ if (ip == cwd) {
error = EINVAL;
goto out;
}
- vnevent_rmdir(vp, dvp, name, ct);
-
/*
* Grab a lock on the directory to make sure that noone is
* trying to add (or lookup) entries while we are removing it.
@@ -2044,10 +1811,10 @@ top:
*/
rw_enter(&zp->z_parent_lock, RW_WRITER);
- tx = dmu_tx_create(zfsvfs->z_os);
+ tx = dmu_tx_create(zsb->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
- dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
+ dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL);
zfs_sa_upgrade_txholds(tx, zp);
zfs_sa_upgrade_txholds(tx, dzp);
error = dmu_tx_assign(tx, TXG_NOWAIT);
@@ -2055,14 +1822,14 @@ top:
rw_exit(&zp->z_parent_lock);
rw_exit(&zp->z_name_lock);
zfs_dirent_unlock(dl);
- VN_RELE(vp);
+ iput(ip);
if (error == ERESTART) {
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
dmu_tx_abort(tx);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
@@ -2082,38 +1849,32 @@ top:
out:
zfs_dirent_unlock(dl);
- VN_RELE(vp);
+ iput(ip);
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
zfs_inode_update(dzp);
zfs_inode_update(zp);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
EXPORT_SYMBOL(zfs_rmdir);
/*
* Read as many directory entries as will fit into the provided
- * buffer from the given directory cursor position (specified in
- * the uio structure.
+ * dirent buffer from the given directory cursor position.
*
- * IN: vp - vnode of directory to read.
- * uio - structure supplying read location, range info,
- * and return buffer.
- * cr - credentials of caller.
- * ct - caller context
- * flags - case flags
+ * IN: ip - inode of directory to read.
+ * dirent - buffer for directory entries.
*
- * OUT: uio - updated offset and range, buffer filled.
- * eofp - set to true if end-of-file detected.
+ * OUT: dirent - filled buffer of directory entries.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
- * vp - atime updated
+ * ip - atime updated
*
* Note that the low 4 bits of the cookie returned by zap is always zero.
* This allows us to use the low range for "special" directory entries:
@@ -2121,70 +1882,42 @@ EXPORT_SYMBOL(zfs_rmdir);
* we use the offset 2 for the '.zfs' directory.
*/
/* ARGSUSED */
-static int
-zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp,
- caller_context_t *ct, int flags)
+int
+zfs_readdir(struct inode *ip, void *dirent, filldir_t filldir,
+ loff_t *pos, cred_t *cr)
{
- znode_t *zp = VTOZ(vp);
- iovec_t *iovp;
- edirent_t *eodp;
- dirent64_t *odp;
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ znode_t *zp = ITOZ(ip);
+ zfs_sb_t *zsb = ITOZSB(ip);
objset_t *os;
- caddr_t outbuf;
- size_t bufsize;
zap_cursor_t zc;
zap_attribute_t zap;
- uint_t bytes_wanted;
- uint64_t offset; /* must be unsigned; checks for < 1 */
- uint64_t parent;
- int local_eof;
int outcount;
int error;
uint8_t prefetch;
- boolean_t check_sysattrs;
+ int done = 0;
+ uint64_t parent;
- ZFS_ENTER(zfsvfs);
+ ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(zp);
- if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
- &parent, sizeof (parent))) != 0) {
- ZFS_EXIT(zfsvfs);
- return (error);
- }
-
- /*
- * If we are not given an eof variable,
- * use a local one.
- */
- if (eofp == NULL)
- eofp = &local_eof;
-
- /*
- * Check for valid iov_len.
- */
- if (uio->uio_iov->iov_len <= 0) {
- ZFS_EXIT(zfsvfs);
- return (EINVAL);
- }
+ if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zsb),
+ &parent, sizeof (parent))) != 0)
+ goto out;
/*
* Quit if directory has been removed (posix)
*/
- if ((*eofp = zp->z_unlinked) != 0) {
- ZFS_EXIT(zfsvfs);
- return (0);
- }
-
error = 0;
- os = zfsvfs->z_os;
- offset = uio->uio_loffset;
+ if (zp->z_unlinked)
+ goto out;
+
+ os = zsb->z_os;
prefetch = zp->z_zn_prefetch;
/*
* Initialize the iterator cursor.
*/
- if (offset <= 3) {
+ if (*pos <= 3) {
/*
* Start iteration from the beginning of the directory.
*/
@@ -2193,55 +1926,28 @@ zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp,
/*
* The offset is a serialized cursor.
*/
- zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
+ zap_cursor_init_serialized(&zc, os, zp->z_id, *pos);
}
/*
- * Get space to change directory entries into fs independent format.
- */
- iovp = uio->uio_iov;
- bytes_wanted = iovp->iov_len;
- if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) {
- bufsize = bytes_wanted;
- outbuf = kmem_alloc(bufsize, KM_SLEEP);
- odp = (struct dirent64 *)outbuf;
- } else {
- bufsize = bytes_wanted;
- odp = (struct dirent64 *)iovp->iov_base;
- }
- eodp = (struct edirent *)odp;
-
- /*
- * If this VFS supports the system attribute view interface; and
- * we're looking at an extended attribute directory; and we care
- * about normalization conflicts on this vfs; then we must check
- * for normalization conflicts with the sysattr name space.
- */
- check_sysattrs = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
- (vp->v_flag & V_XATTRDIR) && zfsvfs->z_norm &&
- (flags & V_RDDIR_ENTFLAGS);
-
- /*
* Transform to file-system independent format
*/
outcount = 0;
- while (outcount < bytes_wanted) {
- ino64_t objnum;
- ushort_t reclen;
- off64_t *next = NULL;
+ while (!done) {
+ uint64_t objnum;
/*
* Special case `.', `..', and `.zfs'.
*/
- if (offset == 0) {
+ if (*pos == 0) {
(void) strcpy(zap.za_name, ".");
zap.za_normalization_conflict = 0;
objnum = zp->z_id;
- } else if (offset == 1) {
+ } else if (*pos == 1) {
(void) strcpy(zap.za_name, "..");
zap.za_normalization_conflict = 0;
objnum = parent;
- } else if (offset == 2 && zfs_show_ctldir(zp)) {
+ } else if (*pos == 2 && zfs_show_ctldir(zp)) {
(void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
zap.za_normalization_conflict = 0;
objnum = ZFSCTL_INO_ROOT;
@@ -2249,8 +1955,8 @@ zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp,
/*
* Grab next entry.
*/
- if (error = zap_cursor_retrieve(&zc, &zap)) {
- if ((*eofp = (error == ENOENT)) != 0)
+ if ((error = zap_cursor_retrieve(&zc, &zap))) {
+ if (error == ENOENT)
break;
else
goto update;
@@ -2261,155 +1967,63 @@ zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp,
cmn_err(CE_WARN, "zap_readdir: bad directory "
"entry, obj = %lld, offset = %lld\n",
(u_longlong_t)zp->z_id,
- (u_longlong_t)offset);
+ (u_longlong_t)*pos);
error = ENXIO;
goto update;
}
objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
- /*
- * MacOS X can extract the object type here such as:
- * uint8_t type = ZFS_DIRENT_TYPE(zap.za_first_integer);
- */
-
- if (check_sysattrs && !zap.za_normalization_conflict) {
- zap.za_normalization_conflict =
- xattr_sysattr_casechk(zap.za_name);
- }
- }
-
- if (flags & V_RDDIR_ACCFILTER) {
- /*
- * If we have no access at all, don't include
- * this entry in the returned information
- */
- znode_t *ezp;
- if (zfs_zget(zp->z_zfsvfs, objnum, &ezp) != 0)
- goto skip_entry;
- if (!zfs_has_access(ezp, cr)) {
- VN_RELE(ZTOV(ezp));
- goto skip_entry;
- }
- VN_RELE(ZTOV(ezp));
}
-
- if (flags & V_RDDIR_ENTFLAGS)
- reclen = EDIRENT_RECLEN(strlen(zap.za_name));
- else
- reclen = DIRENT64_RECLEN(strlen(zap.za_name));
-
- /*
- * Will this entry fit in the buffer?
- */
- if (outcount + reclen > bufsize) {
- /*
- * Did we manage to fit anything in the buffer?
- */
- if (!outcount) {
- error = EINVAL;
- goto update;
- }
+ done = filldir(dirent, zap.za_name, strlen(zap.za_name),
+ zap_cursor_serialize(&zc), objnum, 0);
+ if (done) {
break;
}
- if (flags & V_RDDIR_ENTFLAGS) {
- /*
- * Add extended flag entry:
- */
- eodp->ed_ino = objnum;
- eodp->ed_reclen = reclen;
- /* NOTE: ed_off is the offset for the *next* entry */
- next = &(eodp->ed_off);
- eodp->ed_eflags = zap.za_normalization_conflict ?
- ED_CASE_CONFLICT : 0;
- (void) strncpy(eodp->ed_name, zap.za_name,
- EDIRENT_NAMELEN(reclen));
- eodp = (edirent_t *)((intptr_t)eodp + reclen);
- } else {
- /*
- * Add normal entry:
- */
- odp->d_ino = objnum;
- odp->d_reclen = reclen;
- /* NOTE: d_off is the offset for the *next* entry */
- next = &(odp->d_off);
- (void) strncpy(odp->d_name, zap.za_name,
- DIRENT64_NAMELEN(reclen));
- odp = (dirent64_t *)((intptr_t)odp + reclen);
- }
- outcount += reclen;
-
- ASSERT(outcount <= bufsize);
/* Prefetch znode */
- if (prefetch)
+ if (prefetch) {
dmu_prefetch(os, objnum, 0, 0);
+ }
- skip_entry:
- /*
- * Move to the next entry, fill in the previous offset.
- */
- if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
+ if (*pos >= 2) {
zap_cursor_advance(&zc);
- offset = zap_cursor_serialize(&zc);
+ *pos = zap_cursor_serialize(&zc);
} else {
- offset += 1;
+ (*pos)++;
}
- if (next)
- *next = offset;
}
zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
- if (uio->uio_segflg == UIO_SYSSPACE && uio->uio_iovcnt == 1) {
- iovp->iov_base += outcount;
- iovp->iov_len -= outcount;
- uio->uio_resid -= outcount;
- } else if (error = uiomove(outbuf, (long)outcount, UIO_READ, uio)) {
- /*
- * Reset the pointer.
- */
- offset = uio->uio_loffset;
- }
-
update:
zap_cursor_fini(&zc);
- if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1)
- kmem_free(outbuf, bufsize);
-
if (error == ENOENT)
error = 0;
- ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
+ ZFS_ACCESSTIME_STAMP(zsb, zp);
+ zfs_inode_update(zp);
+
+out:
+ ZFS_EXIT(zsb);
- uio->uio_loffset = offset;
- ZFS_EXIT(zfsvfs);
return (error);
}
+EXPORT_SYMBOL(zfs_readdir);
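As a reading aid for the cursor scheme noted in the comment above zfs_readdir(), here is a small sketch (hypothetical helper, not part of this change) of how a position value maps onto entries: 0, 1, and 2 are reserved for the synthetic entries, while anything else is a serialized ZAP cursor, which can never collide with them because the low 4 bits of a ZAP cookie are always zero.

/* Hypothetical helper -- not part of this patch. */
static const char *
example_pos_kind(loff_t pos, boolean_t show_ctldir)
{
	if (pos == 0)
		return (".");			/* objnum = zp->z_id */
	if (pos == 1)
		return ("..");			/* objnum = parent from SA_ZPL_PARENT */
	if (pos == 2 && show_ctldir)
		return (ZFS_CTLDIR_NAME);	/* ".zfs", objnum = ZFSCTL_INO_ROOT */

	/* anything else is a serialized ZAP cursor */
	return ("zap cursor");
}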
ulong_t zfs_fsync_sync_cnt = 4;
int
-zfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
+zfs_fsync(struct inode *ip, int syncflag, cred_t *cr)
{
- znode_t *zp = VTOZ(vp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
-
- /*
- * Regardless of whether this is required for standards conformance,
- * this is the logical behavior when fsync() is called on a file with
- * dirty pages. We use B_ASYNC since the ZIL transactions are already
- * going to be pushed out as part of the zil_commit().
- */
- if (vn_has_cached_data(vp) && !(syncflag & FNODSYNC) &&
- (vp->v_type == VREG) && !(IS_SWAPVP(vp)))
- (void) VOP_PUTPAGE(vp, (offset_t)0, (size_t)0, B_ASYNC, cr, ct);
+ znode_t *zp = ITOZ(ip);
+ zfs_sb_t *zsb = ITOZSB(ip);
(void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);
- if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
- ZFS_ENTER(zfsvfs);
+ if (zsb->z_os->os_sync != ZFS_SYNC_DISABLED) {
+ ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(zp);
- zil_commit(zfsvfs->z_log, zp->z_id);
- ZFS_EXIT(zfsvfs);
+ zil_commit(zsb->z_log, zp->z_id);
+ ZFS_EXIT(zsb);
}
return (0);
}
@@ -2420,43 +2034,37 @@ EXPORT_SYMBOL(zfs_fsync);
* Get the requested file attributes and place them in the provided
* vattr structure.
*
- * IN: vp - vnode of file.
- * vap - va_mask identifies requested attributes.
- * If AT_XVATTR set, then optional attrs are requested
+ * IN: ip - inode of file.
+ * stat - kstat structure to fill in.
* flags - ATTR_NOACLCHECK (CIFS server context)
* cr - credentials of caller.
- * ct - caller context
- *
- * OUT: vap - attribute values.
*
- * RETURN: 0 (always succeeds)
+ * OUT: stat - filled in kstat values.
*/
/* ARGSUSED */
int
-zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
- caller_context_t *ct)
+zfs_getattr(struct inode *ip, struct kstat *stat, int flags, cred_t *cr)
{
- znode_t *zp = VTOZ(vp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ znode_t *zp = ITOZ(ip);
+ zfs_sb_t *zsb = ITOZSB(ip);
int error = 0;
uint64_t links;
uint64_t mtime[2], ctime[2];
- xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
- xoptattr_t *xoap = NULL;
+ uint32_t blksz;
boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
sa_bulk_attr_t bulk[2];
int count = 0;
- ZFS_ENTER(zfsvfs);
+ ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(zp);
- zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);
+ zfs_fuid_map_ids(zp, cr, &stat->uid, &stat->gid);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);
if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
@@ -2466,10 +2074,10 @@ zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
* always be allowed to read basic attributes of file.
*/
if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
- (vap->va_uid != crgetuid(cr))) {
+ (stat->uid != crgetuid(cr))) {
if ((error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
skipaclchk, cr))) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
}
@@ -2480,142 +2088,36 @@ zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
*/
mutex_enter(&zp->z_lock);
- vap->va_type = vp->v_type;
- vap->va_mode = zp->z_mode & MODEMASK;
- vap->va_fsid = zp->z_zfsvfs->z_vfs->vfs_dev;
- vap->va_nodeid = zp->z_id;
- if ((vp->v_flag & VROOT) && zfs_show_ctldir(zp))
+ stat->ino = ip->i_ino;
+ stat->mode = zp->z_mode;
+ stat->uid = zp->z_uid;
+ stat->gid = zp->z_gid;
+ if ((zp->z_id == zsb->z_root) && zfs_show_ctldir(zp))
links = zp->z_links + 1;
else
links = zp->z_links;
- vap->va_nlink = MIN(links, UINT32_MAX); /* nlink_t limit! */
- vap->va_size = zp->z_size;
- vap->va_rdev = vp->v_rdev;
- vap->va_seq = zp->z_seq;
+ stat->nlink = MIN(links, ZFS_LINK_MAX);
+ stat->size = i_size_read(ip);
+ stat->rdev = ip->i_rdev;
+ stat->dev = ip->i_rdev;
- /*
- * Add in any requested optional attributes and the create time.
- * Also set the corresponding bits in the returned attribute bitmap.
- */
- if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) {
- if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
- xoap->xoa_archive =
- ((zp->z_pflags & ZFS_ARCHIVE) != 0);
- XVA_SET_RTN(xvap, XAT_ARCHIVE);
- }
-
- if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
- xoap->xoa_readonly =
- ((zp->z_pflags & ZFS_READONLY) != 0);
- XVA_SET_RTN(xvap, XAT_READONLY);
- }
-
- if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
- xoap->xoa_system =
- ((zp->z_pflags & ZFS_SYSTEM) != 0);
- XVA_SET_RTN(xvap, XAT_SYSTEM);
- }
-
- if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
- xoap->xoa_hidden =
- ((zp->z_pflags & ZFS_HIDDEN) != 0);
- XVA_SET_RTN(xvap, XAT_HIDDEN);
- }
-
- if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
- xoap->xoa_nounlink =
- ((zp->z_pflags & ZFS_NOUNLINK) != 0);
- XVA_SET_RTN(xvap, XAT_NOUNLINK);
- }
-
- if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
- xoap->xoa_immutable =
- ((zp->z_pflags & ZFS_IMMUTABLE) != 0);
- XVA_SET_RTN(xvap, XAT_IMMUTABLE);
- }
-
- if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
- xoap->xoa_appendonly =
- ((zp->z_pflags & ZFS_APPENDONLY) != 0);
- XVA_SET_RTN(xvap, XAT_APPENDONLY);
- }
-
- if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
- xoap->xoa_nodump =
- ((zp->z_pflags & ZFS_NODUMP) != 0);
- XVA_SET_RTN(xvap, XAT_NODUMP);
- }
-
- if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
- xoap->xoa_opaque =
- ((zp->z_pflags & ZFS_OPAQUE) != 0);
- XVA_SET_RTN(xvap, XAT_OPAQUE);
- }
-
- if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
- xoap->xoa_av_quarantined =
- ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
- XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
- }
-
- if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
- xoap->xoa_av_modified =
- ((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
- XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
- }
-
- if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
- vp->v_type == VREG) {
- zfs_sa_get_scanstamp(zp, xvap);
- }
-
- if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
- uint64_t times[2];
-
- (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
- times, sizeof (times));
- ZFS_TIME_DECODE(&xoap->xoa_createtime, times);
- XVA_SET_RTN(xvap, XAT_CREATETIME);
- }
-
- if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
- xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
- XVA_SET_RTN(xvap, XAT_REPARSE);
- }
- if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
- xoap->xoa_generation = zp->z_gen;
- XVA_SET_RTN(xvap, XAT_GEN);
- }
-
- if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
- xoap->xoa_offline =
- ((zp->z_pflags & ZFS_OFFLINE) != 0);
- XVA_SET_RTN(xvap, XAT_OFFLINE);
- }
-
- if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
- xoap->xoa_sparse =
- ((zp->z_pflags & ZFS_SPARSE) != 0);
- XVA_SET_RTN(xvap, XAT_SPARSE);
- }
- }
-
- ZFS_TIME_DECODE(&vap->va_atime, zp->z_atime);
- ZFS_TIME_DECODE(&vap->va_mtime, mtime);
- ZFS_TIME_DECODE(&vap->va_ctime, ctime);
+ ZFS_TIME_DECODE(&stat->atime, zp->z_atime);
+ ZFS_TIME_DECODE(&stat->mtime, mtime);
+ ZFS_TIME_DECODE(&stat->ctime, ctime);
mutex_exit(&zp->z_lock);
- sa_object_size(zp->z_sa_hdl, &vap->va_blksize, &vap->va_nblocks);
+ sa_object_size(zp->z_sa_hdl, &blksz, &stat->blocks);
+ stat->blksize = (1 << ip->i_blkbits);
if (zp->z_blksz == 0) {
/*
* Block size hasn't been set; suggest maximal I/O transfers.
*/
- vap->va_blksize = zfsvfs->z_max_blksz;
+ stat->blksize = zsb->z_max_blksz;
}
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (0);
}
EXPORT_SYMBOL(zfs_getattr);
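For reference, a minimal sketch (not part of this patch) of how a Linux-side wrapper might call the reworked zfs_getattr(); the wrapper name zpl_getattr and the exact getattr hook signature are assumptions for illustration, but the sign flip reflects that the zfs_* entry points return positive errno values while the VFS expects negative ones:

    /* hypothetical caller; kernel getattr hook signatures vary by version */
    static int
    zpl_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
    {
            cred_t *cr = CRED();
            int error;

            crhold(cr);
            /* zfs_getattr() fills *stat and returns a positive errno on failure */
            error = -zfs_getattr(dentry->d_inode, stat, 0, cr);
            crfree(cr);

            return (error);
    }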
@@ -2624,32 +2126,29 @@ EXPORT_SYMBOL(zfs_getattr);
* Set the file attributes to the values contained in the
* vattr structure.
*
- * IN: vp - vnode of file to be modified.
+ * IN: ip - inode of file to be modified.
* vap - new attribute values.
* If AT_XVATTR set, then optional attrs are being set
* flags - ATTR_UTIME set if non-default time values provided.
* - ATTR_NOACLCHECK (CIFS context only).
* cr - credentials of caller.
- * ct - caller context
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
- * vp - ctime updated, mtime updated if size changed.
+ * ip - ctime updated, mtime updated if size changed.
*/
/* ARGSUSED */
int
-zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
- caller_context_t *ct)
+zfs_setattr(struct inode *ip, struct iattr *attr, int flags, cred_t *cr)
{
- znode_t *zp = VTOZ(vp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ znode_t *zp = ITOZ(ip);
+ zfs_sb_t *zsb = ITOZSB(ip);
zilog_t *zilog;
dmu_tx_t *tx;
vattr_t oldva;
- xvattr_t tmpxvattr;
- uint_t mask = vap->va_mask;
+ uint_t mask = attr->ia_valid;
uint_t saved_mask;
int trim_mask = 0;
uint64_t new_mode;
@@ -2660,10 +2159,8 @@ zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
int need_policy = FALSE;
int err, err2;
zfs_fuid_info_t *fuidp = NULL;
- xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
- xoptattr_t *xoap;
- zfs_acl_t *aclp;
boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
+ zfs_acl_t *aclp = NULL;
boolean_t fuid_dirtied = B_FALSE;
sa_bulk_attr_t bulk[7], xattr_bulk[7];
int count = 0, xattr_count = 0;
@@ -2671,81 +2168,44 @@ zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
if (mask == 0)
return (0);
- if (mask & AT_NOSET)
- return (EINVAL);
-
- ZFS_ENTER(zfsvfs);
+ ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(zp);
- zilog = zfsvfs->z_log;
+ zilog = zsb->z_log;
/*
* Make sure that if we have ephemeral uid/gid or xvattr specified
* that file system is at proper version level
*/
-
- if (zfsvfs->z_use_fuids == B_FALSE &&
- (((mask & AT_UID) && IS_EPHEMERAL(vap->va_uid)) ||
- ((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid)) ||
- (mask & AT_XVATTR))) {
- ZFS_EXIT(zfsvfs);
+ if (zsb->z_use_fuids == B_FALSE &&
+ (((mask & ATTR_UID) && IS_EPHEMERAL(attr->ia_uid)) ||
+ ((mask & ATTR_GID) && IS_EPHEMERAL(attr->ia_gid)))) {
+ ZFS_EXIT(zsb);
return (EINVAL);
}
- if (mask & AT_SIZE && vp->v_type == VDIR) {
- ZFS_EXIT(zfsvfs);
+ if (mask & ATTR_SIZE && S_ISDIR(ip->i_mode)) {
+ ZFS_EXIT(zsb);
return (EISDIR);
}
- if (mask & AT_SIZE && vp->v_type != VREG && vp->v_type != VFIFO) {
- ZFS_EXIT(zfsvfs);
+ if (mask & ATTR_SIZE && !S_ISREG(ip->i_mode) && !S_ISFIFO(ip->i_mode)) {
+ ZFS_EXIT(zsb);
return (EINVAL);
}
- /*
- * If this is an xvattr_t, then get a pointer to the structure of
- * optional attributes. If this is NULL, then we have a vattr_t.
- */
- xoap = xva_getxoptattr(xvap);
-
- xva_init(&tmpxvattr);
-
- /*
- * Immutable files can only alter immutable bit and atime
- */
- if ((zp->z_pflags & ZFS_IMMUTABLE) &&
- ((mask & (AT_SIZE|AT_UID|AT_GID|AT_MTIME|AT_MODE)) ||
- ((mask & AT_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
- ZFS_EXIT(zfsvfs);
- return (EPERM);
- }
-
- if ((mask & AT_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
- ZFS_EXIT(zfsvfs);
+ if ((mask & ATTR_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
+ ZFS_EXIT(zsb);
return (EPERM);
}
- /*
- * Verify timestamps doesn't overflow 32 bits.
- * ZFS can handle large timestamps, but 32bit syscalls can't
- * handle times greater than 2039. This check should be removed
- * once large timestamps are fully supported.
- */
- if (mask & (AT_ATIME | AT_MTIME)) {
- if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
- ((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {
- ZFS_EXIT(zfsvfs);
- return (EOVERFLOW);
- }
- }
-
top:
attrzp = NULL;
aclp = NULL;
/* Can this be moved to before the top label? */
- if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
- ZFS_EXIT(zfsvfs);
+ if (zsb->z_vfs->mnt_flags & MNT_READONLY) {
+ ZFS_EXIT(zsb);
return (EROFS);
}
@@ -2753,10 +2213,10 @@ top:
* First validate permissions
*/
- if (mask & AT_SIZE) {
+ if (mask & ATTR_SIZE) {
err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);
if (err) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (err);
}
/*
@@ -2766,27 +2226,22 @@ top:
* should be addressed in openat().
*/
/* XXX - would it be OK to generate a log record here? */
- err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
+ err = zfs_freesp(zp, attr->ia_size, 0, 0, FALSE);
if (err) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (err);
}
- }
- if (mask & (AT_ATIME|AT_MTIME) ||
- ((mask & AT_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
- XVA_ISSET_REQ(xvap, XAT_READONLY) ||
- XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
- XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
- XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
- XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
- XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
- need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
- skipaclchk, cr);
+ /* Careful: vmtruncate() returns a negative Linux errno, flip the sign */
+ err = -vmtruncate(ip, attr->ia_size);
+ if (err) {
+ ZFS_EXIT(zsb);
+ return (err);
+ }
}
- if (mask & (AT_UID|AT_GID)) {
- int idmask = (mask & (AT_UID|AT_GID));
+ if (mask & (ATTR_UID|ATTR_GID)) {
+ int idmask = (mask & (ATTR_UID|ATTR_GID));
int take_owner;
int take_group;
@@ -2795,16 +2250,17 @@ top:
* we may clear S_ISUID/S_ISGID bits.
*/
- if (!(mask & AT_MODE))
- vap->va_mode = zp->z_mode;
+ if (!(mask & ATTR_MODE))
+ attr->ia_mode = zp->z_mode;
/*
* Take ownership or chgrp to group we are a member of
*/
- take_owner = (mask & AT_UID) && (vap->va_uid == crgetuid(cr));
- take_group = (mask & AT_GID) &&
- zfs_groupmember(zfsvfs, vap->va_gid, cr);
+ take_owner = (mask & ATTR_UID) &&
+ (attr->ia_uid == crgetuid(cr));
+ take_group = (mask & ATTR_GID) &&
+ zfs_groupmember(zsb, attr->ia_gid, cr);
/*
* If both AT_UID and AT_GID are set then take_owner and
@@ -2815,16 +2271,17 @@ top:
*
*/
- if (((idmask == (AT_UID|AT_GID)) && take_owner && take_group) ||
- ((idmask == AT_UID) && take_owner) ||
- ((idmask == AT_GID) && take_group)) {
+ if (((idmask == (ATTR_UID|ATTR_GID)) &&
+ take_owner && take_group) ||
+ ((idmask == ATTR_UID) && take_owner) ||
+ ((idmask == ATTR_GID) && take_group)) {
if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
skipaclchk, cr) == 0) {
/*
* Remove setuid/setgid for non-privileged users
*/
- secpolicy_setid_clear(vap, cr);
- trim_mask = (mask & (AT_UID|AT_GID));
+ secpolicy_setid_clear(attr, cr);
+ trim_mask = (mask & (ATTR_UID|ATTR_GID));
} else {
need_policy = TRUE;
}
@@ -2836,100 +2293,18 @@ top:
mutex_enter(&zp->z_lock);
oldva.va_mode = zp->z_mode;
zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
- if (mask & AT_XVATTR) {
- /*
- * Update xvattr mask to include only those attributes
- * that are actually changing.
- *
- * the bits will be restored prior to actually setting
- * the attributes so the caller thinks they were set.
- */
- if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
- if (xoap->xoa_appendonly !=
- ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
- need_policy = TRUE;
- } else {
- XVA_CLR_REQ(xvap, XAT_APPENDONLY);
- XVA_SET_REQ(&tmpxvattr, XAT_APPENDONLY);
- }
- }
-
- if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
- if (xoap->xoa_nounlink !=
- ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
- need_policy = TRUE;
- } else {
- XVA_CLR_REQ(xvap, XAT_NOUNLINK);
- XVA_SET_REQ(&tmpxvattr, XAT_NOUNLINK);
- }
- }
-
- if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
- if (xoap->xoa_immutable !=
- ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
- need_policy = TRUE;
- } else {
- XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
- XVA_SET_REQ(&tmpxvattr, XAT_IMMUTABLE);
- }
- }
-
- if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
- if (xoap->xoa_nodump !=
- ((zp->z_pflags & ZFS_NODUMP) != 0)) {
- need_policy = TRUE;
- } else {
- XVA_CLR_REQ(xvap, XAT_NODUMP);
- XVA_SET_REQ(&tmpxvattr, XAT_NODUMP);
- }
- }
-
- if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
- if (xoap->xoa_av_modified !=
- ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
- need_policy = TRUE;
- } else {
- XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
- XVA_SET_REQ(&tmpxvattr, XAT_AV_MODIFIED);
- }
- }
-
- if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
- if ((vp->v_type != VREG &&
- xoap->xoa_av_quarantined) ||
- xoap->xoa_av_quarantined !=
- ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
- need_policy = TRUE;
- } else {
- XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
- XVA_SET_REQ(&tmpxvattr, XAT_AV_QUARANTINED);
- }
- }
-
- if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
- mutex_exit(&zp->z_lock);
- ZFS_EXIT(zfsvfs);
- return (EPERM);
- }
-
- if (need_policy == FALSE &&
- (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
- XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
- need_policy = TRUE;
- }
- }
mutex_exit(&zp->z_lock);
- if (mask & AT_MODE) {
+ if (mask & ATTR_MODE) {
if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
- err = secpolicy_setid_setsticky_clear(vp, vap,
+ err = secpolicy_setid_setsticky_clear(ip, attr,
&oldva, cr);
if (err) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (err);
}
- trim_mask |= AT_MODE;
+ trim_mask |= ATTR_MODE;
} else {
need_policy = TRUE;
}
@@ -2945,65 +2320,65 @@ top:
*/
if (trim_mask) {
- saved_mask = vap->va_mask;
- vap->va_mask &= ~trim_mask;
+ saved_mask = attr->ia_valid;
+ attr->ia_valid &= ~trim_mask;
}
- err = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
+ err = secpolicy_vnode_setattr(cr, ip, attr, &oldva, flags,
(int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
if (err) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (err);
}
if (trim_mask)
- vap->va_mask |= saved_mask;
+ attr->ia_valid |= saved_mask;
}
/*
* secpolicy_vnode_setattr, or take ownership may have
* changed va_mask
*/
- mask = vap->va_mask;
+ mask = attr->ia_valid;
- if ((mask & (AT_UID | AT_GID))) {
- err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
+ if ((mask & (ATTR_UID | ATTR_GID))) {
+ err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb),
&xattr_obj, sizeof (xattr_obj));
if (err == 0 && xattr_obj) {
- err = zfs_zget(zp->z_zfsvfs, xattr_obj, &attrzp);
+ err = zfs_zget(ZTOZSB(zp), xattr_obj, &attrzp);
if (err)
goto out2;
}
- if (mask & AT_UID) {
- new_uid = zfs_fuid_create(zfsvfs,
- (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
+ if (mask & ATTR_UID) {
+ new_uid = zfs_fuid_create(zsb,
+ (uint64_t)attr->ia_uid, cr, ZFS_OWNER, &fuidp);
if (new_uid != zp->z_uid &&
- zfs_fuid_overquota(zfsvfs, B_FALSE, new_uid)) {
+ zfs_fuid_overquota(zsb, B_FALSE, new_uid)) {
if (attrzp)
- VN_RELE(ZTOV(attrzp));
+ iput(ZTOI(attrzp));
err = EDQUOT;
goto out2;
}
}
- if (mask & AT_GID) {
- new_gid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid,
+ if (mask & ATTR_GID) {
+ new_gid = zfs_fuid_create(zsb, (uint64_t)attr->ia_gid,
cr, ZFS_GROUP, &fuidp);
if (new_gid != zp->z_gid &&
- zfs_fuid_overquota(zfsvfs, B_TRUE, new_gid)) {
+ zfs_fuid_overquota(zsb, B_TRUE, new_gid)) {
if (attrzp)
- VN_RELE(ZTOV(attrzp));
+ iput(ZTOI(attrzp));
err = EDQUOT;
goto out2;
}
}
}
- tx = dmu_tx_create(zfsvfs->z_os);
+ tx = dmu_tx_create(zsb->z_os);
- if (mask & AT_MODE) {
+ if (mask & ATTR_MODE) {
uint64_t pmode = zp->z_mode;
uint64_t acl_obj;
- new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
+ new_mode = (pmode & S_IFMT) | (attr->ia_mode & ~S_IFMT);
zfs_acl_chmod_setattr(zp, &aclp, new_mode);
@@ -3013,7 +2388,7 @@ top:
* Are we upgrading ACL from old V0 format
* to V1 format?
*/
- if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
+ if (zsb->z_version >= ZPL_VERSION_FUID &&
zfs_znode_acl_version(zp) ==
ZFS_ACL_VERSION_INITIAL) {
dmu_tx_hold_free(tx, acl_obj, 0,
@@ -3031,20 +2406,16 @@ top:
mutex_exit(&zp->z_lock);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
} else {
- if ((mask & AT_XVATTR) &&
- XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
- dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
- else
- dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
+ dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
}
if (attrzp) {
dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
}
- fuid_dirtied = zfsvfs->z_fuid_dirty;
+ fuid_dirtied = zsb->z_fuid_dirty;
if (fuid_dirtied)
- zfs_fuid_txhold(zfsvfs, tx);
+ zfs_fuid_txhold(zsb, tx);
zfs_sa_upgrade_txholds(tx, zp);
@@ -3065,49 +2436,49 @@ top:
*/
- if (mask & (AT_UID|AT_GID|AT_MODE))
+ if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
mutex_enter(&zp->z_acl_lock);
mutex_enter(&zp->z_lock);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
if (attrzp) {
- if (mask & (AT_UID|AT_GID|AT_MODE))
+ if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
mutex_enter(&attrzp->z_acl_lock);
mutex_enter(&attrzp->z_lock);
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
- SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
+ SA_ZPL_FLAGS(zsb), NULL, &attrzp->z_pflags,
sizeof (attrzp->z_pflags));
}
- if (mask & (AT_UID|AT_GID)) {
+ if (mask & (ATTR_UID|ATTR_GID)) {
- if (mask & AT_UID) {
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
+ if (mask & ATTR_UID) {
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL,
&new_uid, sizeof (new_uid));
zp->z_uid = new_uid;
if (attrzp) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
- SA_ZPL_UID(zfsvfs), NULL, &new_uid,
+ SA_ZPL_UID(zsb), NULL, &new_uid,
sizeof (new_uid));
attrzp->z_uid = new_uid;
}
}
- if (mask & AT_GID) {
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
+ if (mask & ATTR_GID) {
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb),
NULL, &new_gid, sizeof (new_gid));
zp->z_gid = new_gid;
if (attrzp) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
- SA_ZPL_GID(zfsvfs), NULL, &new_gid,
+ SA_ZPL_GID(zsb), NULL, &new_gid,
sizeof (new_gid));
attrzp->z_gid = new_gid;
}
}
- if (!(mask & AT_MODE)) {
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
+ if (!(mask & ATTR_MODE)) {
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb),
NULL, &new_mode, sizeof (new_mode));
new_mode = zp->z_mode;
}
@@ -3119,8 +2490,8 @@ top:
}
}
- if (mask & AT_MODE) {
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
+ if (mask & ATTR_MODE) {
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL,
&new_mode, sizeof (new_mode));
zp->z_mode = new_mode;
ASSERT3U((uintptr_t)aclp, !=, NULL);
@@ -3133,34 +2504,34 @@ top:
}
- if (mask & AT_ATIME) {
- ZFS_TIME_ENCODE(&vap->va_atime, zp->z_atime);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
+ if (mask & ATTR_ATIME) {
+ ZFS_TIME_ENCODE(&attr->ia_atime, zp->z_atime);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
&zp->z_atime, sizeof (zp->z_atime));
}
- if (mask & AT_MTIME) {
- ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
+ if (mask & ATTR_MTIME) {
+ ZFS_TIME_ENCODE(&attr->ia_mtime, mtime);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL,
mtime, sizeof (mtime));
}
/* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */
- if (mask & AT_SIZE && !(mask & AT_MTIME)) {
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
+ if (mask & ATTR_SIZE && !(mask & ATTR_MTIME)) {
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb),
NULL, mtime, sizeof (mtime));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL,
&ctime, sizeof (ctime));
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
B_TRUE);
} else if (mask != 0) {
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL,
&ctime, sizeof (ctime));
zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime,
B_TRUE);
if (attrzp) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
- SA_ZPL_CTIME(zfsvfs), NULL,
+ SA_ZPL_CTIME(zsb), NULL,
&ctime, sizeof (ctime));
zfs_tstamp_update_setup(attrzp, STATE_CHANGED,
mtime, ctime, B_TRUE);
@@ -3171,50 +2542,18 @@ top:
* update from toggling bit
*/
- if (xoap && (mask & AT_XVATTR)) {
-
- /*
- * restore trimmed off masks
- * so that return masks can be set for caller.
- */
-
- if (XVA_ISSET_REQ(&tmpxvattr, XAT_APPENDONLY)) {
- XVA_SET_REQ(xvap, XAT_APPENDONLY);
- }
- if (XVA_ISSET_REQ(&tmpxvattr, XAT_NOUNLINK)) {
- XVA_SET_REQ(xvap, XAT_NOUNLINK);
- }
- if (XVA_ISSET_REQ(&tmpxvattr, XAT_IMMUTABLE)) {
- XVA_SET_REQ(xvap, XAT_IMMUTABLE);
- }
- if (XVA_ISSET_REQ(&tmpxvattr, XAT_NODUMP)) {
- XVA_SET_REQ(xvap, XAT_NODUMP);
- }
- if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_MODIFIED)) {
- XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
- }
- if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_QUARANTINED)) {
- XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
- }
-
- if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
- ASSERT(vp->v_type == VREG);
-
- zfs_xvattr_set(zp, xvap, tx);
- }
-
if (fuid_dirtied)
- zfs_fuid_sync(zfsvfs, tx);
+ zfs_fuid_sync(zsb, tx);
if (mask != 0)
- zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
+ zfs_log_setattr(zilog, tx, TX_SETATTR, zp, attr, mask, fuidp);
mutex_exit(&zp->z_lock);
- if (mask & (AT_UID|AT_GID|AT_MODE))
+ if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
mutex_exit(&zp->z_acl_lock);
if (attrzp) {
- if (mask & (AT_UID|AT_GID|AT_MODE))
+ if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
mutex_exit(&attrzp->z_acl_lock);
mutex_exit(&attrzp->z_lock);
}
@@ -3226,7 +2565,7 @@ out:
}
if (attrzp)
- VN_RELE(ZTOV(attrzp));
+ iput(ZTOI(attrzp));
if (aclp)
zfs_acl_free(aclp);
@@ -3246,10 +2585,10 @@ out:
}
out2:
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (err);
}
EXPORT_SYMBOL(zfs_setattr);
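A similar sketch for the caller side of the iattr-based zfs_setattr(); the wrapper name and the inode_change_ok() pre-check are assumptions (not part of this patch), shown only to make clear that attr->ia_valid carries the ATTR_* mask the function now consumes:

    static int
    zpl_setattr(struct dentry *dentry, struct iattr *ia)
    {
            cred_t *cr = CRED();
            int error;

            /* standard VFS permission/size checks on the requested change */
            error = inode_change_ok(dentry->d_inode, ia);
            if (error)
                    return (error);

            crhold(cr);
            error = -zfs_setattr(dentry->d_inode, ia, 0, cr);
            crfree(cr);

            return (error);
    }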
@@ -3270,7 +2609,7 @@ zfs_rename_unlock(zfs_zlock_t **zlpp)
while ((zl = *zlpp) != NULL) {
if (zl->zl_znode != NULL)
- VN_RELE(ZTOV(zl->zl_znode));
+ iput(ZTOI(zl->zl_znode));
rw_exit(zl->zl_rwlock);
*zlpp = zl->zl_next;
kmem_free(zl, sizeof (*zl));
@@ -3288,7 +2627,7 @@ zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
{
zfs_zlock_t *zl;
znode_t *zp = tdzp;
- uint64_t rootid = zp->z_zfsvfs->z_root;
+ uint64_t rootid = ZTOZSB(zp)->z_root;
uint64_t oidp = zp->z_id;
krwlock_t *rwlp = &szp->z_parent_lock;
krw_t rw = RW_WRITER;
@@ -3336,12 +2675,12 @@ zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
return (0);
if (rw == RW_READER) { /* i.e. not the first pass */
- int error = zfs_zget(zp->z_zfsvfs, oidp, &zp);
+ int error = zfs_zget(ZTOZSB(zp), oidp, &zp);
if (error)
return (error);
zl->zl_znode = zp;
}
- (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zp->z_zfsvfs),
+ (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(ZTOZSB(zp)),
&oidp, sizeof (oidp));
rwlp = &zp->z_parent_lock;
rw = RW_READER;
@@ -3355,30 +2694,28 @@ zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
* Move an entry from the provided source directory to the target
* directory. Change the entry name as indicated.
*
- * IN: sdvp - Source directory containing the "old entry".
+ * IN: sdip - Source directory containing the "old entry".
* snm - Old entry name.
- * tdvp - Target directory to contain the "new entry".
+ * tdip - Target directory to contain the "new entry".
* tnm - New entry name.
* cr - credentials of caller.
- * ct - caller context
* flags - case flags
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
- * sdvp,tdvp - ctime|mtime updated
+ * sdip,tdip - ctime|mtime updated
*/
/*ARGSUSED*/
int
-zfs_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr,
- caller_context_t *ct, int flags)
+zfs_rename(struct inode *sdip, char *snm, struct inode *tdip, char *tnm,
+ cred_t *cr, int flags)
{
znode_t *tdzp, *szp, *tzp;
- znode_t *sdzp = VTOZ(sdvp);
- zfsvfs_t *zfsvfs = sdzp->z_zfsvfs;
+ znode_t *sdzp = ITOZ(sdip);
+ zfs_sb_t *zsb = ITOZSB(sdip);
zilog_t *zilog;
- vnode_t *realvp;
zfs_dirlock_t *sdl, *tdl;
dmu_tx_t *tx;
zfs_zlock_t *zl;
@@ -3386,26 +2723,20 @@ zfs_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr,
int error = 0;
int zflg = 0;
- ZFS_ENTER(zfsvfs);
+ ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(sdzp);
- zilog = zfsvfs->z_log;
+ zilog = zsb->z_log;
- /*
- * Make sure we have the real vp for the target directory.
- */
- if (VOP_REALVP(tdvp, &realvp, ct) == 0)
- tdvp = realvp;
-
- if (tdvp->v_vfsp != sdvp->v_vfsp || zfsctl_is_node(tdvp)) {
- ZFS_EXIT(zfsvfs);
+ if (tdip->i_sb != sdip->i_sb) {
+ ZFS_EXIT(zsb);
return (EXDEV);
}
- tdzp = VTOZ(tdvp);
+ tdzp = ITOZ(tdip);
ZFS_VERIFY_ZP(tdzp);
- if (zfsvfs->z_utf8 && u8_validate(tnm,
+ if (zsb->z_utf8 && u8_validate(tnm,
strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (EILSEQ);
}
@@ -3423,7 +2754,7 @@ top:
* See the comment in zfs_link() for why this is considered bad.
*/
if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (EINVAL);
}
@@ -3442,10 +2773,10 @@ top:
* First compare the two name arguments without
* considering any case folding.
*/
- int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER);
+ int nofold = (zsb->z_norm & ~U8_TEXTPREP_TOUPPER);
cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
- ASSERT(error == 0 || !zfsvfs->z_utf8);
+ ASSERT(error == 0 || !zsb->z_utf8);
if (cmp == 0) {
/*
* POSIX: "If the old argument and the new argument
@@ -3453,7 +2784,7 @@ top:
* the rename() function shall return successfully
* and perform no other action."
*/
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (0);
}
/*
@@ -3474,10 +2805,10 @@ top:
* is an exact match, we will allow this to proceed as
* a name-change request.
*/
- if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
- (zfsvfs->z_case == ZFS_CASE_MIXED &&
+ if ((zsb->z_case == ZFS_CASE_INSENSITIVE ||
+ (zsb->z_case == ZFS_CASE_MIXED &&
flags & FIGNORECASE)) &&
- u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST,
+ u8_strcmp(snm, tnm, 0, zsb->z_norm, U8_UNICODE_LATEST,
&error) == 0) {
/*
* case preserving rename request, require exact
@@ -3517,7 +2848,7 @@ top:
if (!terr) {
zfs_dirent_unlock(tdl);
if (tzp)
- VN_RELE(ZTOV(tzp));
+ iput(ZTOI(tzp));
}
if (sdzp == tdzp)
@@ -3525,19 +2856,19 @@ top:
if (strcmp(snm, "..") == 0)
serr = EINVAL;
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (serr);
}
if (terr) {
zfs_dirent_unlock(sdl);
- VN_RELE(ZTOV(szp));
+ iput(ZTOI(szp));
if (sdzp == tdzp)
rw_exit(&sdzp->z_name_lock);
if (strcmp(tnm, "..") == 0)
terr = EINVAL;
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (terr);
}
@@ -3551,7 +2882,7 @@ top:
if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)))
goto out;
- if (ZTOV(szp)->v_type == VDIR) {
+ if (S_ISDIR(ZTOI(szp)->i_mode)) {
/*
* Check to make sure rename is valid.
* Can't do a move like this: /usr/a/b to /usr/a/b/c/d
@@ -3567,13 +2898,13 @@ top:
/*
* Source and target must be the same type.
*/
- if (ZTOV(szp)->v_type == VDIR) {
- if (ZTOV(tzp)->v_type != VDIR) {
+ if (S_ISDIR(ZTOI(szp)->i_mode)) {
+ if (!S_ISDIR(ZTOI(tzp)->i_mode)) {
error = ENOTDIR;
goto out;
}
} else {
- if (ZTOV(tzp)->v_type == VDIR) {
+ if (S_ISDIR(ZTOI(tzp)->i_mode)) {
error = EISDIR;
goto out;
}
@@ -3589,19 +2920,7 @@ top:
}
}
- vnevent_rename_src(ZTOV(szp), sdvp, snm, ct);
- if (tzp)
- vnevent_rename_dest(ZTOV(tzp), tdvp, tnm, ct);
-
- /*
- * notify the target directory if it is not the same
- * as source directory.
- */
- if (tdvp != sdvp) {
- vnevent_rename_dest_dir(tdvp, ct);
- }
-
- tx = dmu_tx_create(zfsvfs->z_os);
+ tx = dmu_tx_create(zsb->z_os);
dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
@@ -3616,7 +2935,7 @@ top:
}
zfs_sa_upgrade_txholds(tx, szp);
- dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
+ dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL);
error = dmu_tx_assign(tx, TXG_NOWAIT);
if (error) {
if (zl != NULL)
@@ -3627,16 +2946,16 @@ top:
if (sdzp == tdzp)
rw_exit(&sdzp->z_name_lock);
- VN_RELE(ZTOV(szp));
+ iput(ZTOI(szp));
if (tzp)
- VN_RELE(ZTOV(tzp));
+ iput(ZTOI(tzp));
if (error == ERESTART) {
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
dmu_tx_abort(tx);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
@@ -3648,7 +2967,7 @@ top:
if (error == 0) {
szp->z_pflags |= ZFS_AV_MODIFIED;
- error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
+ error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zsb),
(void *)&szp->z_pflags, sizeof (uint64_t), tx);
ASSERT3U(error, ==, 0);
@@ -3657,12 +2976,6 @@ top:
zfs_log_rename(zilog, tx, TX_RENAME |
(flags & FIGNORECASE ? TX_CI : 0), sdzp,
sdl->dl_name, tdzp, tdl->dl_name, szp);
-
- /*
- * Update path information for the target vnode
- */
- vn_renamepath(tdvp, ZTOV(szp), tnm,
- strlen(tnm));
} else {
/*
* At this point, we have successfully created
@@ -3698,16 +3011,16 @@ out:
zfs_inode_update(tdzp);
zfs_inode_update(szp);
- VN_RELE(ZTOV(szp));
+ iput(ZTOI(szp));
if (tzp) {
zfs_inode_update(tzp);
- VN_RELE(ZTOV(tzp));
+ iput(ZTOI(tzp));
}
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
EXPORT_SYMBOL(zfs_rename);
@@ -3715,29 +3028,29 @@ EXPORT_SYMBOL(zfs_rename);
/*
* Insert the indicated symbolic reference entry into the directory.
*
- * IN: dvp - Directory to contain new symbolic link.
+ * IN: dip - Directory to contain new symbolic link.
* link - Name for new symlink entry.
* vap - Attributes of new entry.
* target - Target path of new symlink.
+ * ipp - Inode for new symbolic link (set on success).
* cr - credentials of caller.
- * ct - caller context
* flags - case flags
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
- * dvp - ctime|mtime updated
+ * dip - ctime|mtime updated
*/
/*ARGSUSED*/
int
-zfs_symlink(vnode_t *dvp, char *name, vattr_t *vap, char *link, cred_t *cr,
- caller_context_t *ct, int flags)
+zfs_symlink(struct inode *dip, char *name, vattr_t *vap, char *link,
+ struct inode **ipp, cred_t *cr, int flags)
{
- znode_t *zp, *dzp = VTOZ(dvp);
+ znode_t *zp, *dzp = ITOZ(dip);
zfs_dirlock_t *dl;
dmu_tx_t *tx;
- zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
+ zfs_sb_t *zsb = ITOZSB(dip);
zilog_t *zilog;
uint64_t len = strlen(link);
int error;
@@ -3746,67 +3059,69 @@ zfs_symlink(vnode_t *dvp, char *name, vattr_t *vap, char *link, cred_t *cr,
boolean_t fuid_dirtied;
uint64_t txtype = TX_SYMLINK;
- ASSERT(vap->va_type == VLNK);
+ ASSERT(S_ISLNK(vap->va_mode));
- ZFS_ENTER(zfsvfs);
+ ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(dzp);
- zilog = zfsvfs->z_log;
+ zilog = zsb->z_log;
- if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
+ if (zsb->z_utf8 && u8_validate(name, strlen(name),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (EILSEQ);
}
if (flags & FIGNORECASE)
zflg |= ZCILOOK;
if (len > MAXPATHLEN) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (ENAMETOOLONG);
}
if ((error = zfs_acl_ids_create(dzp, 0,
vap, cr, NULL, &acl_ids)) != 0) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
top:
+ *ipp = NULL;
+
/*
* Attempt to lock directory; fail if entry already exists.
*/
error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
if (error) {
zfs_acl_ids_free(&acl_ids);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
zfs_acl_ids_free(&acl_ids);
zfs_dirent_unlock(dl);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
- if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
+ if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
zfs_acl_ids_free(&acl_ids);
zfs_dirent_unlock(dl);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (EDQUOT);
}
- tx = dmu_tx_create(zfsvfs->z_os);
- fuid_dirtied = zfsvfs->z_fuid_dirty;
+ tx = dmu_tx_create(zsb->z_os);
+ fuid_dirtied = zsb->z_fuid_dirty;
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE + len);
dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
- if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
+ if (!zsb->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
acl_ids.z_aclp->z_acl_bytes);
}
if (fuid_dirtied)
- zfs_fuid_txhold(zfsvfs, tx);
+ zfs_fuid_txhold(zsb, tx);
error = dmu_tx_assign(tx, TXG_NOWAIT);
if (error) {
zfs_dirent_unlock(dl);
@@ -3817,7 +3132,7 @@ top:
}
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
@@ -3828,18 +3143,18 @@ top:
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
if (fuid_dirtied)
- zfs_fuid_sync(zfsvfs, tx);
+ zfs_fuid_sync(zsb, tx);
mutex_enter(&zp->z_lock);
if (zp->z_is_sa)
- error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
+ error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zsb),
link, len, tx);
else
zfs_sa_symlink(zp, link, len, tx);
mutex_exit(&zp->z_lock);
zp->z_size = len;
- (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
+ (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zsb),
&zp->z_size, sizeof (zp->z_size), tx);
/*
* Insert the new object into the directory.
@@ -3859,138 +3174,148 @@ top:
zfs_dirent_unlock(dl);
- VN_RELE(ZTOV(zp));
+ *ipp = ZTOI(zp);
+ iput(ZTOI(zp));
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
EXPORT_SYMBOL(zfs_symlink);
/*
* Return, in the buffer contained in the provided uio structure,
- * the symbolic path referred to by vp.
+ * the symbolic path referred to by ip.
*
- * IN: vp - vnode of symbolic link.
- * uoip - structure to contain the link path.
- * cr - credentials of caller.
- * ct - caller context
- *
- * OUT: uio - structure to contain the link path.
+ * IN: dentry - dentry of symbolic link.
+ * nd - nameidata for the symlink.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
- * vp - atime updated
+ * ip - atime updated
*/
/* ARGSUSED */
int
-zfs_readlink(vnode_t *vp, uio_t *uio, cred_t *cr, caller_context_t *ct)
+zfs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- znode_t *zp = VTOZ(vp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ struct inode *ip = dentry->d_inode;
+ znode_t *zp = ITOZ(ip);
+ zfs_sb_t *zsb = ITOZSB(ip);
+ struct iovec iov;
+ uio_t uio;
int error;
- ZFS_ENTER(zfsvfs);
+ ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(zp);
+ iov.iov_len = MAXPATHLEN + 1;
+ iov.iov_base = kmem_zalloc(iov.iov_len, KM_SLEEP);
+
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_resid = iov.iov_len;
+ uio.uio_segflg = UIO_SYSSPACE;
+
mutex_enter(&zp->z_lock);
if (zp->z_is_sa)
- error = sa_lookup_uio(zp->z_sa_hdl,
- SA_ZPL_SYMLINK(zfsvfs), uio);
+ error = sa_lookup_uio(zp->z_sa_hdl, SA_ZPL_SYMLINK(zsb), &uio);
else
- error = zfs_sa_readlink(zp, uio);
+ error = zfs_sa_readlink(zp, &uio);
mutex_exit(&zp->z_lock);
- ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
-
+ ZFS_ACCESSTIME_STAMP(zsb, zp);
zfs_inode_update(zp);
- ZFS_EXIT(zfsvfs);
+
+ if (error) {
+ kmem_free(iov.iov_base, iov.iov_len);
+ nd_set_link(nd, ERR_PTR(-error)); /* ERR_PTR() takes a negative errno */
+ } else {
+ nd_set_link(nd, iov.iov_base);
+ }
+
+ ZFS_EXIT(zsb);
return (error);
}
-EXPORT_SYMBOL(zfs_readlink);
+EXPORT_SYMBOL(zfs_follow_link);
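zfs_follow_link() hands its kmem_zalloc()'d MAXPATHLEN + 1 buffer to nd_set_link(), so the matching put_link hook must free it after the VFS has walked the target. A minimal sketch, assuming a wrapper named zpl_put_link:

    static void
    zpl_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
    {
            char *link = nd_get_link(nd);

            /* on error nd holds an ERR_PTR, not an allocated buffer */
            if (!IS_ERR(link))
                    kmem_free(link, MAXPATHLEN + 1);
    }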
/*
- * Insert a new entry into directory tdvp referencing svp.
+ * Insert a new entry into directory tdip referencing sip.
*
- * IN: tdvp - Directory to contain new entry.
- * svp - vnode of new entry.
+ * IN: tdip - Directory to contain new entry.
+ * sip - inode of new entry.
* name - name of new entry.
* cr - credentials of caller.
- * ct - caller context
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
- * tdvp - ctime|mtime updated
- * svp - ctime updated
+ * tdip - ctime|mtime updated
+ * sip - ctime updated
*/
/* ARGSUSED */
int
-zfs_link(vnode_t *tdvp, vnode_t *svp, char *name, cred_t *cr,
- caller_context_t *ct, int flags)
+zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr)
{
- znode_t *dzp = VTOZ(tdvp);
+ znode_t *dzp = ITOZ(tdip);
znode_t *tzp, *szp;
- zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
+ zfs_sb_t *zsb = ITOZSB(tdip);
zilog_t *zilog;
zfs_dirlock_t *dl;
dmu_tx_t *tx;
- vnode_t *realvp;
int error;
int zf = ZNEW;
uint64_t parent;
uid_t owner;
- ASSERT(tdvp->v_type == VDIR);
+ ASSERT(S_ISDIR(tdip->i_mode));
- ZFS_ENTER(zfsvfs);
+ ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(dzp);
- zilog = zfsvfs->z_log;
-
- if (VOP_REALVP(svp, &realvp, ct) == 0)
- svp = realvp;
+ zilog = zsb->z_log;
/*
* POSIX dictates that we return EPERM here.
* Better choices include ENOTSUP or EISDIR.
*/
- if (svp->v_type == VDIR) {
- ZFS_EXIT(zfsvfs);
+ if (S_ISDIR(sip->i_mode)) {
+ ZFS_EXIT(zsb);
return (EPERM);
}
- if (svp->v_vfsp != tdvp->v_vfsp || zfsctl_is_node(svp)) {
- ZFS_EXIT(zfsvfs);
+ if (sip->i_sb != tdip->i_sb) {
+ ZFS_EXIT(zsb);
return (EXDEV);
}
- szp = VTOZ(svp);
+ szp = ITOZ(sip);
ZFS_VERIFY_ZP(szp);
/* Prevent links to .zfs/shares files */
- if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
+ if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zsb),
&parent, sizeof (uint64_t))) != 0) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
- if (parent == zfsvfs->z_shares_dir) {
- ZFS_EXIT(zfsvfs);
+ if (parent == zsb->z_shares_dir) {
+ ZFS_EXIT(zsb);
return (EPERM);
}
- if (zfsvfs->z_utf8 && u8_validate(name,
+ if (zsb->z_utf8 && u8_validate(name,
strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (EILSEQ);
}
+#ifdef HAVE_PN_UTILS
if (flags & FIGNORECASE)
zf |= ZCILOOK;
+#endif /* HAVE_PN_UTILS */
/*
* We do not support links between attributes and non-attributes
@@ -3999,19 +3324,18 @@ zfs_link(vnode_t *tdvp, vnode_t *svp, char *name, cred_t *cr,
* imposed in attribute space.
*/
if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (EINVAL);
}
-
- owner = zfs_fuid_map_id(zfsvfs, szp->z_uid, cr, ZFS_OWNER);
+ owner = zfs_fuid_map_id(zsb, szp->z_uid, cr, ZFS_OWNER);
if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (EPERM);
}
if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
@@ -4021,11 +3345,11 @@ top:
*/
error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL);
if (error) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
- tx = dmu_tx_create(zfsvfs->z_os);
+ tx = dmu_tx_create(zsb->z_os);
dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
zfs_sa_upgrade_txholds(tx, szp);
@@ -4039,7 +3363,7 @@ top:
goto top;
}
dmu_tx_abort(tx);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
@@ -4047,8 +3371,10 @@ top:
if (error == 0) {
uint64_t txtype = TX_LINK;
+#ifdef HAVE_PN_UTILS
if (flags & FIGNORECASE)
txtype |= TX_CI;
+#endif /* HAVE_PN_UTILS */
zfs_log_link(zilog, tx, txtype, dzp, szp, name);
}
@@ -4056,16 +3382,12 @@ top:
zfs_dirent_unlock(dl);
- if (error == 0) {
- vnevent_link(svp, ct);
- }
-
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
zfs_inode_update(dzp);
zfs_inode_update(szp);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
EXPORT_SYMBOL(zfs_link);
@@ -4355,7 +3677,7 @@ zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
dmu_tx_abort(tx);
} else {
mutex_enter(&zp->z_lock);
- (void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
+ (void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zsb),
(void *)&zp->z_atime, sizeof (zp->z_atime), tx);
zp->z_atime_dirty = 0;
mutex_exit(&zp->z_lock);
@@ -4364,14 +3686,14 @@ zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
}
zfs_zinactive(zp);
- rw_exit(&zfsvfs->z_teardown_inactive_lock);
+ rw_exit(&zsb->z_teardown_inactive_lock);
}
EXPORT_SYMBOL(zfs_inactive);
/*
* Bounds-check the seek operation.
*
- * IN: vp - vnode seeking within
+ * IN: ip - inode seeking within
* ooff - old file offset
* noffp - pointer to new file offset
* ct - caller context
@@ -4380,14 +3702,15 @@ EXPORT_SYMBOL(zfs_inactive);
* EINVAL if new offset invalid
*/
/* ARGSUSED */
-static int
-zfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp,
+int
+zfs_seek(struct inode *ip, offset_t ooff, offset_t *noffp,
caller_context_t *ct)
{
- if (vp->v_type == VDIR)
+ if (S_ISDIR(ip->i_mode))
return (0);
return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
}
+EXPORT_SYMBOL(zfs_seek);
/*
* Pre-filter the generic locking function to trap attempts to place
@@ -4735,50 +4058,97 @@ zfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
}
/*
+ * convoff - re-express the given flock64 offset (l_start, l_whence)
+ * relative to the requested whence.
+ */
+int
+convoff(struct inode *ip, flock64_t *lckdat, int whence, offset_t offset)
+{
+ struct kstat stat;
+ int error;
+
+ if ((lckdat->l_whence == 2) || (whence == 2)) {
+ if ((error = zfs_getattr(ip, &stat, 0, CRED())) != 0)
+ return (error);
+ }
+
+ switch (lckdat->l_whence) {
+ case 1: /* SEEK_CUR: l_start is relative to the current offset */
+ lckdat->l_start += offset;
+ break;
+ case 2: /* SEEK_END: l_start is relative to end of file */
+ lckdat->l_start += stat.size;
+ /* FALLTHRU */
+ case 0: /* SEEK_SET: l_start is already absolute */
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ if (lckdat->l_start < 0)
+ return (EINVAL);
+
+ switch (whence) {
+ case 1: /* make l_start relative to the current offset (SEEK_CUR) */
+ lckdat->l_start -= offset;
+ break;
+ case 2: /* make l_start relative to end of file (SEEK_END) */
+ lckdat->l_start -= stat.size;
+ /* FALLTHRU */
+ case 0: /* leave l_start absolute (SEEK_SET) */
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ lckdat->l_whence = (short)whence;
+ return (0);
+}
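A short usage sketch of convoff(): rebase an flock64 expressed relative to the current file position (whence 1) so that l_start becomes absolute (whence 0). The inode pointer and file offset are illustrative assumptions:

    flock64_t bf;
    offset_t fpos = 4096;           /* caller's current file offset (example) */
    int error;

    bf.l_whence = 1;                /* l_start below is relative to fpos */
    bf.l_start = 512;
    bf.l_len = 1024;

    error = convoff(ip, &bf, 0, fpos);
    /* on success: bf.l_whence == 0 and bf.l_start == 4608 (absolute) */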
+
+/*
* Free or allocate space in a file. Currently, this function only
* supports the `F_FREESP' command. However, this command is somewhat
* misnamed, as its functionality includes the ability to allocate as
* well as free space.
*
- * IN: vp - vnode of file to free data in.
+ * IN: ip - inode of file to free data in.
* cmd - action to take (only F_FREESP supported).
* bfp - section of file to free/alloc.
* flag - current file open mode flags.
* offset - current file offset.
* cr - credentials of caller [UNUSED].
- * ct - caller context.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
- * vp - ctime|mtime updated
+ * ip - ctime|mtime updated
*/
/* ARGSUSED */
int
-zfs_space(vnode_t *vp, int cmd, flock64_t *bfp, int flag,
- offset_t offset, cred_t *cr, caller_context_t *ct)
+zfs_space(struct inode *ip, int cmd, flock64_t *bfp, int flag,
+ offset_t offset, cred_t *cr)
{
- znode_t *zp = VTOZ(vp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ znode_t *zp = ITOZ(ip);
+ zfs_sb_t *zsb = ITOZSB(ip);
uint64_t off, len;
int error;
- ZFS_ENTER(zfsvfs);
+ ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(zp);
if (cmd != F_FREESP) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (EINVAL);
}
- if ((error = convoff(vp, bfp, 0, offset))) {
- ZFS_EXIT(zfsvfs);
+ if ((error = convoff(ip, bfp, 0, offset))) {
+ ZFS_EXIT(zsb);
return (error);
}
if (bfp->l_len < 0) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (EINVAL);
}
@@ -4787,38 +4157,38 @@ zfs_space(vnode_t *vp, int cmd, flock64_t *bfp, int flag,
error = zfs_freesp(zp, off, len, flag, TRUE);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
EXPORT_SYMBOL(zfs_space);
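To make the F_FREESP convention concrete, a sketch of freeing a byte range through the inode-based zfs_space(); the FWRITE open-mode flag and surrounding variables are assumptions, and an l_len of 0 would instead truncate the file to l_start (see zfs_freesp()):

    flock64_t bf;
    int error;

    bf.l_whence = 0;                /* l_start is an absolute offset */
    bf.l_start = 65536;             /* first byte to free */
    bf.l_len = 131072;              /* number of bytes to free */

    error = zfs_space(ip, F_FREESP, &bf, FWRITE, 0, CRED());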
/*ARGSUSED*/
int
-zfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
+zfs_fid(struct inode *ip, fid_t *fidp)
{
- znode_t *zp = VTOZ(vp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ znode_t *zp = ITOZ(ip);
+ zfs_sb_t *zsb = ITOZSB(ip);
uint32_t gen;
uint64_t gen64;
uint64_t object = zp->z_id;
zfid_short_t *zfid;
int size, i, error;
- ZFS_ENTER(zfsvfs);
+ ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(zp);
- if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
+ if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zsb),
&gen64, sizeof (uint64_t))) != 0) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
gen = (uint32_t)gen64;
- size = (zfsvfs->z_parent != zfsvfs) ? LONG_FID_LEN : SHORT_FID_LEN;
+ size = (zsb->z_parent != zsb) ? LONG_FID_LEN : SHORT_FID_LEN;
if (fidp->fid_len < size) {
fidp->fid_len = size;
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (ENOSPC);
}
@@ -4836,7 +4206,7 @@ zfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
if (size == LONG_FID_LEN) {
- uint64_t objsetid = dmu_objset_id(zfsvfs->z_os);
+ uint64_t objsetid = dmu_objset_id(zsb->z_os);
zfid_long_t *zlfid;
zlfid = (zfid_long_t *)fidp;
@@ -4849,95 +4219,24 @@ zfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
zlfid->zf_setgen[i] = 0;
}
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (0);
}
EXPORT_SYMBOL(zfs_fid);
-static int
-zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
- caller_context_t *ct)
-{
- znode_t *zp, *xzp;
- zfsvfs_t *zfsvfs;
- zfs_dirlock_t *dl;
- int error;
-
- switch (cmd) {
- case _PC_LINK_MAX:
- *valp = ULONG_MAX;
- return (0);
-
- case _PC_FILESIZEBITS:
- *valp = 64;
- return (0);
-
- case _PC_XATTR_EXISTS:
- zp = VTOZ(vp);
- zfsvfs = zp->z_zfsvfs;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
- *valp = 0;
- error = zfs_dirent_lock(&dl, zp, "", &xzp,
- ZXATTR | ZEXISTS | ZSHARED, NULL, NULL);
- if (error == 0) {
- zfs_dirent_unlock(dl);
- if (!zfs_dirempty(xzp))
- *valp = 1;
- VN_RELE(ZTOV(xzp));
- } else if (error == ENOENT) {
- /*
- * If there aren't extended attributes, it's the
- * same as having zero of them.
- */
- error = 0;
- }
- ZFS_EXIT(zfsvfs);
- return (error);
-
- case _PC_SATTR_ENABLED:
- case _PC_SATTR_EXISTS:
- *valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
- (vp->v_type == VREG || vp->v_type == VDIR);
- return (0);
-
- case _PC_ACCESS_FILTERING:
- *valp = vfs_has_feature(vp->v_vfsp, VFSFT_ACCESS_FILTER) &&
- vp->v_type == VDIR;
- return (0);
-
- case _PC_ACL_ENABLED:
- *valp = _ACL_ACE_ENABLED;
- return (0);
-
- case _PC_MIN_HOLE_SIZE:
- *valp = (ulong_t)SPA_MINBLOCKSIZE;
- return (0);
-
- case _PC_TIMESTAMP_RESOLUTION:
- /* nanosecond timestamp resolution */
- *valp = 1L;
- return (0);
-
- default:
- return (fs_pathconf(vp, cmd, valp, cr, ct));
- }
-}
-
/*ARGSUSED*/
int
-zfs_getsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
- caller_context_t *ct)
+zfs_getsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
{
- znode_t *zp = VTOZ(vp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ znode_t *zp = ITOZ(ip);
+ zfs_sb_t *zsb = ITOZSB(ip);
int error;
boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
- ZFS_ENTER(zfsvfs);
+ ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(zp);
error = zfs_getacl(zp, vsecp, skipaclchk, cr);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
@@ -4945,46 +4244,45 @@ EXPORT_SYMBOL(zfs_getsecattr);
/*ARGSUSED*/
int
-zfs_setsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
- caller_context_t *ct)
+zfs_setsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
{
- znode_t *zp = VTOZ(vp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ znode_t *zp = ITOZ(ip);
+ zfs_sb_t *zsb = ITOZSB(ip);
int error;
boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
- zilog_t *zilog = zfsvfs->z_log;
+ zilog_t *zilog = zsb->z_log;
- ZFS_ENTER(zfsvfs);
+ ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(zp);
error = zfs_setacl(zp, vsecp, skipaclchk, cr);
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (error);
}
EXPORT_SYMBOL(zfs_setsecattr);
+#ifdef HAVE_UIO_ZEROCOPY
/*
* Tunable, both must be a power of 2.
*
* zcr_blksz_min: the smallest read we may consider to loan out an arcbuf
* zcr_blksz_max: if set to less than the file block size, allow loaning out of
- * an arcbuf for a partial block read
+ * an arcbuf for a partial block read
*/
int zcr_blksz_min = (1 << 10); /* 1K */
int zcr_blksz_max = (1 << 17); /* 128K */
/*ARGSUSED*/
static int
-zfs_reqzcbuf(vnode_t *vp, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr,
- caller_context_t *ct)
+zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr)
{
- znode_t *zp = VTOZ(vp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- int max_blksz = zfsvfs->z_max_blksz;
+ znode_t *zp = ITOZ(ip);
+ zfs_sb_t *zsb = ITOZSB(ip);
+ int max_blksz = zsb->z_max_blksz;
uio_t *uio = &xuio->xu_uio;
ssize_t size = uio->uio_resid;
offset_t offset = uio->uio_loffset;
@@ -4997,7 +4295,7 @@ zfs_reqzcbuf(vnode_t *vp, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr,
if (xuio->xu_type != UIOTYPE_ZEROCOPY)
return (EINVAL);
- ZFS_ENTER(zfsvfs);
+ ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(zp);
switch (ioflag) {
case UIO_WRITE:
@@ -5007,7 +4305,7 @@ zfs_reqzcbuf(vnode_t *vp, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr,
*/
blksz = max_blksz;
if (size < blksz || zp->z_blksz != blksz) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (EINVAL);
}
/*
@@ -5030,9 +4328,6 @@ zfs_reqzcbuf(vnode_t *vp, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr,
fullblk = size / blksz;
(void) dmu_xuio_init(xuio,
(preamble != 0) + fullblk + (postamble != 0));
- DTRACE_PROBE3(zfs_reqzcbuf_align, int, preamble,
- int, postamble, int,
- (preamble != 0) + fullblk + (postamble != 0));
/*
* Have to fix iov base/len for partial buffers. They
@@ -5075,7 +4370,7 @@ zfs_reqzcbuf(vnode_t *vp, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr,
blksz = zcr_blksz_max;
/* avoid potential complexity of dealing with it */
if (blksz > max_blksz) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (EINVAL);
}
@@ -5083,25 +4378,25 @@ zfs_reqzcbuf(vnode_t *vp, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr,
if (size > maxsize)
size = maxsize;
- if (size < blksz || vn_has_cached_data(vp)) {
- ZFS_EXIT(zfsvfs);
+ if (size < blksz) {
+ ZFS_EXIT(zsb);
return (EINVAL);
}
break;
default:
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (EINVAL);
}
uio->uio_extflg = UIO_XUIO;
XUIO_XUZC_RW(xuio) = ioflag;
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (0);
}
/*ARGSUSED*/
static int
-zfs_retzcbuf(vnode_t *vp, xuio_t *xuio, cred_t *cr, caller_context_t *ct)
+zfs_retzcbuf(struct inode *ip, xuio_t *xuio, cred_t *cr)
{
int i;
arc_buf_t *abuf;
@@ -5124,174 +4419,4 @@ zfs_retzcbuf(vnode_t *vp, xuio_t *xuio, cred_t *cr, caller_context_t *ct)
dmu_xuio_fini(xuio);
return (0);
}
-
-/*
- * Predeclare these here so that the compiler assumes that
- * this is an "old style" function declaration that does
- * not include arguments => we won't get type mismatch errors
- * in the initializations that follow.
- */
-static int zfs_inval();
-static int zfs_isdir();
-
-static int
-zfs_inval()
-{
- return (EINVAL);
-}
-
-static int
-zfs_isdir()
-{
- return (EISDIR);
-}
-/*
- * Directory vnode operations template
- */
-vnodeops_t *zfs_dvnodeops;
-const fs_operation_def_t zfs_dvnodeops_template[] = {
- VOPNAME_OPEN, { .vop_open = zfs_open },
- VOPNAME_CLOSE, { .vop_close = zfs_close },
- VOPNAME_READ, { .error = zfs_isdir },
- VOPNAME_WRITE, { .error = zfs_isdir },
- VOPNAME_IOCTL, { .vop_ioctl = zfs_ioctl },
- VOPNAME_GETATTR, { .vop_getattr = zfs_getattr },
- VOPNAME_SETATTR, { .vop_setattr = zfs_setattr },
- VOPNAME_ACCESS, { .vop_access = zfs_access },
- VOPNAME_LOOKUP, { .vop_lookup = zfs_lookup },
- VOPNAME_CREATE, { .vop_create = zfs_create },
- VOPNAME_REMOVE, { .vop_remove = zfs_remove },
- VOPNAME_LINK, { .vop_link = zfs_link },
- VOPNAME_RENAME, { .vop_rename = zfs_rename },
- VOPNAME_MKDIR, { .vop_mkdir = zfs_mkdir },
- VOPNAME_RMDIR, { .vop_rmdir = zfs_rmdir },
- VOPNAME_READDIR, { .vop_readdir = zfs_readdir },
- VOPNAME_SYMLINK, { .vop_symlink = zfs_symlink },
- VOPNAME_FSYNC, { .vop_fsync = zfs_fsync },
- VOPNAME_INACTIVE, { .vop_inactive = zfs_inactive },
- VOPNAME_FID, { .vop_fid = zfs_fid },
- VOPNAME_SEEK, { .vop_seek = zfs_seek },
- VOPNAME_PATHCONF, { .vop_pathconf = zfs_pathconf },
- VOPNAME_GETSECATTR, { .vop_getsecattr = zfs_getsecattr },
- VOPNAME_SETSECATTR, { .vop_setsecattr = zfs_setsecattr },
- VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support },
- NULL, NULL
-};
-
-/*
- * Regular file vnode operations template
- */
-vnodeops_t *zfs_fvnodeops;
-const fs_operation_def_t zfs_fvnodeops_template[] = {
- VOPNAME_OPEN, { .vop_open = zfs_open },
- VOPNAME_CLOSE, { .vop_close = zfs_close },
- VOPNAME_READ, { .vop_read = zfs_read },
- VOPNAME_WRITE, { .vop_write = zfs_write },
- VOPNAME_IOCTL, { .vop_ioctl = zfs_ioctl },
- VOPNAME_GETATTR, { .vop_getattr = zfs_getattr },
- VOPNAME_SETATTR, { .vop_setattr = zfs_setattr },
- VOPNAME_ACCESS, { .vop_access = zfs_access },
- VOPNAME_LOOKUP, { .vop_lookup = zfs_lookup },
- VOPNAME_RENAME, { .vop_rename = zfs_rename },
- VOPNAME_FSYNC, { .vop_fsync = zfs_fsync },
- VOPNAME_INACTIVE, { .vop_inactive = zfs_inactive },
- VOPNAME_FID, { .vop_fid = zfs_fid },
- VOPNAME_SEEK, { .vop_seek = zfs_seek },
- VOPNAME_FRLOCK, { .vop_frlock = zfs_frlock },
- VOPNAME_SPACE, { .vop_space = zfs_space },
- VOPNAME_GETPAGE, { .vop_getpage = zfs_getpage },
- VOPNAME_PUTPAGE, { .vop_putpage = zfs_putpage },
- VOPNAME_MAP, { .vop_map = zfs_map },
- VOPNAME_ADDMAP, { .vop_addmap = zfs_addmap },
- VOPNAME_DELMAP, { .vop_delmap = zfs_delmap },
- VOPNAME_PATHCONF, { .vop_pathconf = zfs_pathconf },
- VOPNAME_GETSECATTR, { .vop_getsecattr = zfs_getsecattr },
- VOPNAME_SETSECATTR, { .vop_setsecattr = zfs_setsecattr },
- VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support },
- VOPNAME_REQZCBUF, { .vop_reqzcbuf = zfs_reqzcbuf },
- VOPNAME_RETZCBUF, { .vop_retzcbuf = zfs_retzcbuf },
- NULL, NULL
-};
-
-/*
- * Symbolic link vnode operations template
- */
-vnodeops_t *zfs_symvnodeops;
-const fs_operation_def_t zfs_symvnodeops_template[] = {
- VOPNAME_GETATTR, { .vop_getattr = zfs_getattr },
- VOPNAME_SETATTR, { .vop_setattr = zfs_setattr },
- VOPNAME_ACCESS, { .vop_access = zfs_access },
- VOPNAME_RENAME, { .vop_rename = zfs_rename },
- VOPNAME_READLINK, { .vop_readlink = zfs_readlink },
- VOPNAME_INACTIVE, { .vop_inactive = zfs_inactive },
- VOPNAME_FID, { .vop_fid = zfs_fid },
- VOPNAME_PATHCONF, { .vop_pathconf = zfs_pathconf },
- VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support },
- NULL, NULL
-};
-
-/*
- * special share hidden files vnode operations template
- */
-vnodeops_t *zfs_sharevnodeops;
-const fs_operation_def_t zfs_sharevnodeops_template[] = {
- VOPNAME_GETATTR, { .vop_getattr = zfs_getattr },
- VOPNAME_ACCESS, { .vop_access = zfs_access },
- VOPNAME_INACTIVE, { .vop_inactive = zfs_inactive },
- VOPNAME_FID, { .vop_fid = zfs_fid },
- VOPNAME_PATHCONF, { .vop_pathconf = zfs_pathconf },
- VOPNAME_GETSECATTR, { .vop_getsecattr = zfs_getsecattr },
- VOPNAME_SETSECATTR, { .vop_setsecattr = zfs_setsecattr },
- VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support },
- NULL, NULL
-};
-
-/*
- * Extended attribute directory vnode operations template
- * This template is identical to the directory vnodes
- * operation template except for restricted operations:
- * VOP_MKDIR()
- * VOP_SYMLINK()
- * Note that there are other restrictions embedded in:
- * zfs_create() - restrict type to VREG
- * zfs_link() - no links into/out of attribute space
- * zfs_rename() - no moves into/out of attribute space
- */
-vnodeops_t *zfs_xdvnodeops;
-const fs_operation_def_t zfs_xdvnodeops_template[] = {
- VOPNAME_OPEN, { .vop_open = zfs_open },
- VOPNAME_CLOSE, { .vop_close = zfs_close },
- VOPNAME_IOCTL, { .vop_ioctl = zfs_ioctl },
- VOPNAME_GETATTR, { .vop_getattr = zfs_getattr },
- VOPNAME_SETATTR, { .vop_setattr = zfs_setattr },
- VOPNAME_ACCESS, { .vop_access = zfs_access },
- VOPNAME_LOOKUP, { .vop_lookup = zfs_lookup },
- VOPNAME_CREATE, { .vop_create = zfs_create },
- VOPNAME_REMOVE, { .vop_remove = zfs_remove },
- VOPNAME_LINK, { .vop_link = zfs_link },
- VOPNAME_RENAME, { .vop_rename = zfs_rename },
- VOPNAME_MKDIR, { .error = zfs_inval },
- VOPNAME_RMDIR, { .vop_rmdir = zfs_rmdir },
- VOPNAME_READDIR, { .vop_readdir = zfs_readdir },
- VOPNAME_SYMLINK, { .error = zfs_inval },
- VOPNAME_FSYNC, { .vop_fsync = zfs_fsync },
- VOPNAME_INACTIVE, { .vop_inactive = zfs_inactive },
- VOPNAME_FID, { .vop_fid = zfs_fid },
- VOPNAME_SEEK, { .vop_seek = zfs_seek },
- VOPNAME_PATHCONF, { .vop_pathconf = zfs_pathconf },
- VOPNAME_GETSECATTR, { .vop_getsecattr = zfs_getsecattr },
- VOPNAME_SETSECATTR, { .vop_setsecattr = zfs_setsecattr },
- VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support },
- NULL, NULL
-};
-
-/*
- * Error vnode operations template
- */
-vnodeops_t *zfs_evnodeops;
-const fs_operation_def_t zfs_evnodeops_template[] = {
- VOPNAME_INACTIVE, { .vop_inactive = zfs_inactive },
- VOPNAME_PATHCONF, { .vop_pathconf = zfs_pathconf },
- NULL, NULL
-};
-#endif /* HAVE_ZPL */
+#endif /* HAVE_UIO_ZEROCOPY */
diff --git a/module/zfs/zfs_znode.c b/module/zfs/zfs_znode.c
index 283b4d511..024668287 100644
--- a/module/zfs/zfs_znode.c
+++ b/module/zfs/zfs_znode.c
@@ -51,9 +51,11 @@
#include <sys/zfs_ioctl.h>
#include <sys/zfs_rlock.h>
#include <sys/zfs_fuid.h>
+#include <sys/zfs_vnops.h>
#include <sys/dnode.h>
#include <sys/fs/zfs.h>
#include <sys/kidmap.h>
+#include <sys/zpl.h>
#endif /* _KERNEL */
#include <sys/dmu.h>
@@ -88,11 +90,6 @@
* (such as VFS logic) that will not compile easily in userland.
*/
#ifdef _KERNEL
-/*
- * Needed to close a small window in zfs_znode_move() that allows the zfsvfs to
- * be freed before it can be safely accessed.
- */
-krwlock_t zfsvfs_lock;
static kmem_cache_t *znode_cache = NULL;
@@ -102,14 +99,7 @@ zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
{
znode_t *zp = buf;
- ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
-
- zp->z_vnode = vn_alloc(kmflags);
- if (zp->z_vnode == NULL) {
- return (-1);
- }
- ZTOV(zp)->v_data = zp;
-
+ inode_init_once(ZTOI(zp));
list_link_init(&zp->z_link_node);
mutex_init(&zp->z_lock, NULL, MUTEX_DEFAULT, NULL);
@@ -133,9 +123,6 @@ zfs_znode_cache_destructor(void *buf, void *arg)
{
znode_t *zp = buf;
- ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
- ASSERT(ZTOV(zp)->v_data == zp);
- vn_free(ZTOV(zp));
ASSERT(!list_link_active(&zp->z_link_node));
mutex_destroy(&zp->z_lock);
rw_destroy(&zp->z_parent_lock);
@@ -154,11 +141,10 @@ zfs_znode_init(void)
/*
* Initialize zcache
*/
- rw_init(&zfsvfs_lock, NULL, RW_DEFAULT, NULL);
ASSERT(znode_cache == NULL);
znode_cache = kmem_cache_create("zfs_znode_cache",
sizeof (znode_t), 0, zfs_znode_cache_constructor,
- zfs_znode_cache_destructor, NULL, NULL, NULL, 0);
+ zfs_znode_cache_destructor, NULL, NULL, NULL, KMC_KMEM);
}
void
@@ -170,12 +156,10 @@ zfs_znode_fini(void)
if (znode_cache)
kmem_cache_destroy(znode_cache);
znode_cache = NULL;
- rw_destroy(&zfsvfs_lock);
}
-#ifdef HAVE_ZPL
int
-zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
+zfs_create_share_dir(zfs_sb_t *zsb, dmu_tx_t *tx)
{
#ifdef HAVE_SHARE
zfs_acl_ids_t acl_ids;
@@ -186,13 +170,11 @@ zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
int error;
vattr.va_mask = AT_MODE|AT_UID|AT_GID|AT_TYPE;
- vattr.va_type = VDIR;
- vattr.va_mode = S_IFDIR|0555;
+ vattr.va_mode = S_IFDIR | 0555;
vattr.va_uid = crgetuid(kcred);
vattr.va_gid = crgetgid(kcred);
sharezp = kmem_cache_alloc(znode_cache, KM_SLEEP);
- ASSERT(!POINTER_IS_VALID(sharezp->z_zfsvfs));
sharezp->z_moved = 0;
sharezp->z_unlinked = 0;
sharezp->z_atime_dirty = 0;
@@ -214,7 +196,7 @@ zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
zfsvfs->z_shares_dir = sharezp->z_id;
zfs_acl_ids_free(&acl_ids);
- ZTOV(sharezp)->v_count = 0;
+ /* ZTOV(sharezp)->v_count = 0; */
sa_handle_destroy(sharezp->z_sa_hdl);
kmem_cache_free(znode_cache, sharezp);
@@ -238,8 +220,6 @@ zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
#define MAXMIN64 0xffffffffUL
#endif
-#endif /* HAVE_ZPL */
-
/*
* Create special expldev for ZFS private use.
* Can't use standard expldev since it doesn't do
@@ -260,42 +240,18 @@ zfs_expldev(dev_t dev)
#endif
}
-/*
- * Special cmpldev for ZFS private use.
- * Can't use standard cmpldev since it takes
- * a long dev_t and compresses it to dev32_t in
- * LP64. We need to do a compaction of a long dev_t
- * to a dev32_t in ILP32.
- */
-dev_t
-zfs_cmpldev(uint64_t dev)
-{
-#ifndef _LP64
- minor_t minor = (minor_t)dev & MAXMIN64;
- major_t major = (major_t)(dev >> NBITSMINOR64) & MAXMAJ64;
-
- if (major > MAXMAJ32 || minor > MAXMIN32)
- return (NODEV32);
-
- return (((dev32_t)major << NBITSMINOR32) | minor);
-#else
- return (dev);
-#endif
-}
-
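For reference, a minimal userland sketch of the 64-bit device encoding that zfs_expldev() produces (major number in the upper 32 bits, minor in the lower 32), assuming the NBITSMINOR64/MAXMIN64 values from zfs_znode.h; the removed zfs_cmpldev() existed only to squeeze this value back into a 32-bit Solaris dev32_t, which the Linux port no longer needs:

#include <stdint.h>
#include <assert.h>

#define NBITSMINOR64    32              /* assumed, per zfs_znode.h */
#define MAXMIN64        0xffffffffUL

static uint64_t
pack_dev64(uint32_t major, uint32_t minor)
{
        return (((uint64_t)major << NBITSMINOR64) | minor);
}

int
main(void)
{
        uint64_t d = pack_dev64(8, 17);         /* e.g. a disk partition */

        assert((uint32_t)(d >> NBITSMINOR64) == 8);
        assert((uint32_t)(d & MAXMIN64) == 17);
        return (0);
}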
static void
-zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
+zfs_znode_sa_init(zfs_sb_t *zsb, znode_t *zp,
dmu_buf_t *db, dmu_object_type_t obj_type, sa_handle_t *sa_hdl)
{
- ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs) || (zfsvfs == zp->z_zfsvfs));
- ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zfsvfs, zp->z_id)));
+ ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zsb, zp->z_id)));
mutex_enter(&zp->z_lock);
ASSERT(zp->z_sa_hdl == NULL);
ASSERT(zp->z_acl_cached == NULL);
if (sa_hdl == NULL) {
- VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, zp,
+ VERIFY(0 == sa_handle_get_from_db(zsb->z_os, db, zp,
SA_HDL_SHARED, &zp->z_sa_hdl));
} else {
zp->z_sa_hdl = sa_hdl;
@@ -304,60 +260,119 @@ zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
zp->z_is_sa = (obj_type == DMU_OT_SA) ? B_TRUE : B_FALSE;
- /*
- * Slap on VROOT if we are the root znode
- */
- if (zp->z_id == zfsvfs->z_root)
- ZTOV(zp)->v_flag |= VROOT;
-
mutex_exit(&zp->z_lock);
- vn_exists(ZTOV(zp));
}
void
zfs_znode_dmu_fini(znode_t *zp)
{
- ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zp->z_zfsvfs, zp->z_id)) ||
+ ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(ZTOZSB(zp), zp->z_id)) ||
zp->z_unlinked ||
- RW_WRITE_HELD(&zp->z_zfsvfs->z_teardown_inactive_lock));
+ RW_WRITE_HELD(&ZTOZSB(zp)->z_teardown_inactive_lock));
sa_handle_destroy(zp->z_sa_hdl);
zp->z_sa_hdl = NULL;
}
/*
- * Construct a new znode+inode and initialize.
+ * Called by new_inode() to allocate a new inode.
+ */
+int
+zfs_inode_alloc(struct super_block *sb, struct inode **ip)
+{
+ znode_t *zp;
+
+ zp = kmem_cache_alloc(znode_cache, KM_SLEEP);
+ *ip = ZTOI(zp);
+
+ return (0);
+}
+
+/*
+ * Called in multiple places when an inode should be destroyed.
+ */
+void
+zfs_inode_destroy(struct inode *ip)
+{
+ znode_t *zp = ITOZ(ip);
+ zfs_sb_t *zsb = ZTOZSB(zp);
+
+ mutex_enter(&zsb->z_znodes_lock);
+ list_remove(&zsb->z_all_znodes, zp);
+ mutex_exit(&zsb->z_znodes_lock);
+
+ if (zp->z_acl_cached) {
+ zfs_acl_free(zp->z_acl_cached);
+ zp->z_acl_cached = NULL;
+ }
+
+ kmem_cache_free(znode_cache, zp);
+}
+
+static void
+zfs_inode_set_ops(zfs_sb_t *zsb, struct inode *ip)
+{
+ uint64_t rdev;
+
+ switch (ip->i_mode & S_IFMT) {
+ case S_IFREG:
+ ip->i_op = &zpl_inode_operations;
+ ip->i_fop = &zpl_file_operations;
+ ip->i_mapping->a_ops = &zpl_address_space_operations;
+ break;
+
+ case S_IFDIR:
+ ip->i_op = &zpl_dir_inode_operations;
+ ip->i_fop = &zpl_dir_file_operations;
+ ITOZ(ip)->z_zn_prefetch = B_TRUE;
+ break;
+
+ case S_IFLNK:
+ ip->i_op = &zpl_symlink_inode_operations;
+ break;
+
+ case S_IFCHR:
+ case S_IFBLK:
+ case S_IFIFO:
+ case S_IFSOCK:
+ VERIFY(sa_lookup(ITOZ(ip)->z_sa_hdl, SA_ZPL_RDEV(zsb),
+ &rdev, sizeof (rdev)) == 0);
+ init_special_inode(ip, ip->i_mode, rdev);
+ ip->i_op = &zpl_special_inode_operations;
+ break;
+
+ default:
+ printk("ZFS: Invalid mode: 0x%x\n", ip->i_mode);
+ VERIFY(0);
+ }
+}
+
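The new zfs_inode_alloc()/zfs_inode_destroy() helpers and the per-type operation tables above are meant to be driven from the Linux VFS rather than called directly; a rough sketch of how a super_operations table could wire them up (the actual zpl glue lives outside this file, so the names below are illustrative only):

#include <linux/fs.h>

/* Illustrative only; not part of this patch. */
static struct inode *
zpl_inode_alloc_sketch(struct super_block *sb)
{
        struct inode *ip;

        if (zfs_inode_alloc(sb, &ip) != 0)
                return (NULL);

        return (ip);
}

static void
zpl_inode_destroy_sketch(struct inode *ip)
{
        zfs_inode_destroy(ip);
}

static const struct super_operations zpl_super_ops_sketch = {
        .alloc_inode    = zpl_inode_alloc_sketch,
        .destroy_inode  = zpl_inode_destroy_sketch,
};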
+/*
+ * Construct a znode+inode and initialize.
*
* This does not do a call to dmu_set_user(); that is
* up to the caller to do, in case you don't want to
* return the znode.
*/
static znode_t *
-zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
- dmu_object_type_t obj_type, sa_handle_t *hdl)
+zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz,
+ dmu_object_type_t obj_type, uint64_t obj, sa_handle_t *hdl)
{
znode_t *zp;
- struct inode *inode;
+ struct inode *ip;
uint64_t parent;
sa_bulk_attr_t bulk[9];
int count = 0;
- ASSERT(zfsvfs != NULL);
- ASSERT(zfsvfs->z_vfs != NULL);
- ASSERT(zfsvfs->z_vfs->mnt_sb != NULL);
+ ASSERT(zsb != NULL);
- inode = iget_locked(zfsvfs->z_vfs->mnt_sb, db->db_object);
- zp = ITOZ(inode);
+ ip = new_inode(zsb->z_sb);
+ if (ip == NULL)
+ return (NULL);
- ASSERT(inode->i_state & I_NEW);
+ zp = ITOZ(ip);
ASSERT(zp->z_dirlocks == NULL);
- ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
zp->z_moved = 0;
-
- /*
- * Defer setting z_zfsvfs until the znode is ready to be a candidate for
- * the zfs_znode_move() callback.
- */
zp->z_sa_hdl = NULL;
zp->z_unlinked = 0;
zp->z_atime_dirty = 0;
@@ -367,59 +382,48 @@ zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
zp->z_seq = 0x7A4653;
zp->z_sync_cnt = 0;
- zfs_znode_sa_init(zfsvfs, zp, db, obj_type, hdl);
-
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
- &zp->z_mode, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
- &zp->z_gen, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
- &zp->z_size, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
- &zp->z_links, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
+ zfs_znode_sa_init(zsb, zp, db, obj_type, hdl);
+
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL, &zp->z_mode, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL, &zp->z_gen, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL, &zp->z_size, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL, &zp->z_links, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
&zp->z_pflags, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zsb), NULL,
&parent, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
&zp->z_atime, 16);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
- &zp->z_uid, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
- &zp->z_gid, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL, &zp->z_uid, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL, &zp->z_gid, 8);
if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || zp->z_gen == 0) {
if (hdl == NULL)
sa_handle_destroy(zp->z_sa_hdl);
- iput(inode);
- return (NULL);
- }
- inode->i_mode = (umode_t)zp->z_mode;
- if ((S_ISCHR(inode->i_mode)) || (S_ISBLK(inode->i_mode))) {
- uint64_t rdev;
- VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_RDEV(zfsvfs),
- &rdev, sizeof (rdev)) == 0);
- inode->i_rdev = zfs_cmpldev(rdev);
+ goto error;
}
- /* zp->z_set_ops_inode() must be set in sb->alloc_inode() */
- ASSERT(zp->z_set_ops_inode != NULL);
- zp->z_set_ops_inode(inode);
- unlock_new_inode(inode);
+ ip->i_ino = obj;
+ ip->i_mode = zp->z_mode;
+ ip->i_mtime = ip->i_atime = ip->i_ctime = CURRENT_TIME_SEC;
+ zfs_inode_set_ops(zsb, ip);
+
+ if (insert_inode_locked(ip))
+ goto error;
- mutex_enter(&zfsvfs->z_znodes_lock);
- list_insert_tail(&zfsvfs->z_all_znodes, zp);
+ mutex_enter(&zsb->z_znodes_lock);
+ list_insert_tail(&zsb->z_all_znodes, zp);
membar_producer();
- /*
- * Everything else must be valid before assigning z_zfsvfs makes the
- * znode eligible for zfs_znode_move().
- */
- zp->z_zfsvfs = zfsvfs;
- mutex_exit(&zfsvfs->z_znodes_lock);
+ mutex_exit(&zsb->z_znodes_lock);
- VFS_HOLD(zfsvfs->z_vfs);
+ unlock_new_inode(ip);
return (zp);
+
+error:
+ unlock_new_inode(ip);
+ iput(ip);
+ return (NULL);
}
/*
@@ -432,35 +436,35 @@ zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
void
zfs_inode_update(znode_t *zp)
{
- zfsvfs_t *zfsvfs;
- struct inode *inode;
+ zfs_sb_t *zsb;
+ struct inode *ip;
uint32_t blksize;
uint64_t atime[2], mtime[2], ctime[2];
ASSERT(zp != NULL);
- zfsvfs = zp->z_zfsvfs;
- inode = ZTOI(zp);
-
- sa_lookup(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs), &atime, 16);
- sa_lookup(zp->z_sa_hdl, SA_ZPL_MTIME(zfsvfs), &mtime, 16);
- sa_lookup(zp->z_sa_hdl, SA_ZPL_CTIME(zfsvfs), &ctime, 16);
-
- spin_lock(&inode->i_lock);
- inode->i_generation = zp->z_gen;
- inode->i_uid = zp->z_uid;
- inode->i_gid = zp->z_gid;
- inode->i_nlink = zp->z_links;
- inode->i_mode = zp->z_mode;
- inode->i_blkbits = SPA_MINBLOCKSHIFT;
+ zsb = ZTOZSB(zp);
+ ip = ZTOI(zp);
+
+ sa_lookup(zp->z_sa_hdl, SA_ZPL_ATIME(zsb), &atime, 16);
+ sa_lookup(zp->z_sa_hdl, SA_ZPL_MTIME(zsb), &mtime, 16);
+ sa_lookup(zp->z_sa_hdl, SA_ZPL_CTIME(zsb), &ctime, 16);
+
+ spin_lock(&ip->i_lock);
+ ip->i_generation = zp->z_gen;
+ ip->i_uid = zp->z_uid;
+ ip->i_gid = zp->z_gid;
+ ip->i_nlink = zp->z_links;
+ ip->i_mode = zp->z_mode;
+ ip->i_blkbits = SPA_MINBLOCKSHIFT;
dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &blksize,
- (u_longlong_t *)&inode->i_blocks);
+ (u_longlong_t *)&ip->i_blocks);
- ZFS_TIME_DECODE(&inode->i_atime, atime);
- ZFS_TIME_DECODE(&inode->i_mtime, mtime);
- ZFS_TIME_DECODE(&inode->i_ctime, ctime);
+ ZFS_TIME_DECODE(&ip->i_atime, atime);
+ ZFS_TIME_DECODE(&ip->i_mtime, mtime);
+ ZFS_TIME_DECODE(&ip->i_ctime, ctime);
- i_size_write(inode, zp->z_size);
- spin_unlock(&inode->i_lock);
+ i_size_write(ip, zp->z_size);
+ spin_unlock(&ip->i_lock);
}
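zfs_inode_update() copies the SA-backed timestamps into the generic inode; each on-disk timestamp is a pair of uint64_t words, which is why the sa_lookup() calls above read 16 bytes. A small sketch of the decode step, assuming the usual ZFS_TIME_DECODE() layout from zfs_znode.h with seconds in word 0 and nanoseconds in word 1:

#include <stdint.h>
#include <time.h>

/* Assumed layout: stmp[0] = seconds, stmp[1] = nanoseconds. */
static void
decode_zfs_time(const uint64_t stmp[2], struct timespec *tp)
{
        tp->tv_sec = (time_t)stmp[0];
        tp->tv_nsec = (long)stmp[1];
}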
static uint64_t empty_xattr;
@@ -491,7 +495,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
uint64_t mode, size, links, parent, pflags;
uint64_t dzp_pflags = 0;
uint64_t rdev = 0;
- zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(dzp);
dmu_buf_t *db;
timestruc_t now;
uint64_t gen, obj;
@@ -503,9 +507,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
int cnt = 0;
zfs_acl_locator_cb_t locate = { 0 };
- ASSERT(vap && (vap->va_mask & (AT_TYPE|AT_MODE)) == (AT_TYPE|AT_MODE));
-
- if (zfsvfs->z_replay) {
+ if (zsb->z_replay) {
obj = vap->va_nodeid;
now = vap->va_ctime; /* see zfs_replay_create() */
gen = vap->va_nblocks; /* ditto */
@@ -515,7 +517,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
gen = dmu_tx_get_txg(tx);
}
- obj_type = zfsvfs->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE;
+ obj_type = zsb->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE;
bonuslen = (obj_type == DMU_OT_SA) ?
DN_MAX_BONUSLEN : ZFS_OLD_ZNODE_PHYS_SIZE;
@@ -528,32 +530,32 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
* that there will be an i/o error and we will fail one of the
* assertions below.
*/
- if (vap->va_type == VDIR) {
- if (zfsvfs->z_replay) {
- err = zap_create_claim_norm(zfsvfs->z_os, obj,
- zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
+ if (S_ISDIR(vap->va_mode)) {
+ if (zsb->z_replay) {
+ err = zap_create_claim_norm(zsb->z_os, obj,
+ zsb->z_norm, DMU_OT_DIRECTORY_CONTENTS,
obj_type, bonuslen, tx);
ASSERT3U(err, ==, 0);
} else {
- obj = zap_create_norm(zfsvfs->z_os,
- zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
+ obj = zap_create_norm(zsb->z_os,
+ zsb->z_norm, DMU_OT_DIRECTORY_CONTENTS,
obj_type, bonuslen, tx);
}
} else {
- if (zfsvfs->z_replay) {
- err = dmu_object_claim(zfsvfs->z_os, obj,
+ if (zsb->z_replay) {
+ err = dmu_object_claim(zsb->z_os, obj,
DMU_OT_PLAIN_FILE_CONTENTS, 0,
obj_type, bonuslen, tx);
ASSERT3U(err, ==, 0);
} else {
- obj = dmu_object_alloc(zfsvfs->z_os,
+ obj = dmu_object_alloc(zsb->z_os,
DMU_OT_PLAIN_FILE_CONTENTS, 0,
obj_type, bonuslen, tx);
}
}
- ZFS_OBJ_HOLD_ENTER(zfsvfs, obj);
- VERIFY(0 == sa_buf_hold(zfsvfs->z_os, obj, NULL, &db));
+ ZFS_OBJ_HOLD_ENTER(zsb, obj);
+ VERIFY(0 == sa_buf_hold(zsb->z_os, obj, NULL, &db));
/*
* If this is the root, fix up the half-initialized parent pointer
@@ -572,21 +574,20 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
flag |= IS_XATTR;
}
- if (zfsvfs->z_use_fuids)
+ if (zsb->z_use_fuids)
pflags = ZFS_ARCHIVE | ZFS_AV_MODIFIED;
else
pflags = 0;
- if (vap->va_type == VDIR) {
+ if (S_ISDIR(vap->va_mode)) {
size = 2; /* contents ("." and "..") */
links = (flag & (IS_ROOT_NODE | IS_XATTR)) ? 2 : 1;
} else {
size = links = 0;
}
- if (vap->va_type == VBLK || vap->va_type == VCHR) {
+ if (S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))
rdev = zfs_expldev(vap->va_rdev);
- }
parent = dzp->z_id;
mode = acl_ids->z_mode;
@@ -603,20 +604,20 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
ZFS_TIME_ENCODE(&now, crtime);
ZFS_TIME_ENCODE(&now, ctime);
- if (vap->va_mask & AT_ATIME) {
+ if (vap->va_mask & ATTR_ATIME) {
ZFS_TIME_ENCODE(&vap->va_atime, atime);
} else {
ZFS_TIME_ENCODE(&now, atime);
}
- if (vap->va_mask & AT_MTIME) {
+ if (vap->va_mask & ATTR_MTIME) {
ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
} else {
ZFS_TIME_ENCODE(&now, mtime);
}
/* Now add in all of the "SA" attributes */
- VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED,
+ VERIFY(0 == sa_handle_get_from_db(zsb->z_os, db, NULL, SA_HDL_SHARED,
&sa_hdl));
/*
@@ -628,75 +629,75 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
sa_attrs = kmem_alloc(sizeof(sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
if (obj_type == DMU_OT_ZNODE) {
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zsb),
NULL, &atime, 16);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zsb),
NULL, &mtime, 16);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zsb),
NULL, &ctime, 16);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zsb),
NULL, &crtime, 16);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zsb),
NULL, &gen, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zsb),
NULL, &mode, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zsb),
NULL, &size, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zsb),
NULL, &parent, 8);
} else {
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zsb),
NULL, &mode, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zsb),
NULL, &size, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zsb),
NULL, &gen, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL,
- &acl_ids->z_fuid, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL,
- &acl_ids->z_fgid, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zsb),
+ NULL, &acl_ids->z_fuid, 8);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zsb),
+ NULL, &acl_ids->z_fgid, 8);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zsb),
NULL, &parent, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zsb),
NULL, &pflags, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zsb),
NULL, &atime, 16);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zsb),
NULL, &mtime, 16);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zsb),
NULL, &ctime, 16);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zsb),
NULL, &crtime, 16);
}
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zsb), NULL, &links, 8);
if (obj_type == DMU_OT_ZNODE) {
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zsb), NULL,
&empty_xattr, 8);
}
if (obj_type == DMU_OT_ZNODE ||
- (vap->va_type == VBLK || vap->va_type == VCHR)) {
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zfsvfs),
+ (S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))) {
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zsb),
NULL, &rdev, 8);
}
if (obj_type == DMU_OT_ZNODE) {
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zsb),
NULL, &pflags, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zsb), NULL,
&acl_ids->z_fuid, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zsb), NULL,
&acl_ids->z_fgid, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zfsvfs), NULL, pad,
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zsb), NULL, pad,
sizeof (uint64_t) * 4);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zsb), NULL,
&acl_phys, sizeof (zfs_acl_phys_t));
} else if (acl_ids->z_aclp->z_version >= ZFS_ACL_VERSION_FUID) {
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zsb), NULL,
&acl_ids->z_aclp->z_acl_count, 8);
locate.cb_aclp = acl_ids->z_aclp;
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zsb),
zfs_acl_data_locator, &locate,
acl_ids->z_aclp->z_acl_bytes);
mode = zfs_mode_compute(mode, acl_ids->z_aclp, &pflags,
@@ -706,8 +707,11 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0);
if (!(flag & IS_ROOT_NODE)) {
- *zpp = zfs_znode_alloc(zfsvfs, db, 0, obj_type, sa_hdl);
+ *zpp = zfs_znode_alloc(zsb, db, 0, obj_type, obj, sa_hdl);
ASSERT(*zpp != NULL);
+ ASSERT(dzp != NULL);
+ err = zpl_xattr_security_init(ZTOI(*zpp), ZTOI(dzp));
+ ASSERT3S(err, ==, 0);
} else {
/*
* If we are creating the root node, the "parent" we
@@ -721,118 +725,17 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
(*zpp)->z_pflags = pflags;
(*zpp)->z_mode = mode;
- if (vap->va_mask & AT_XVATTR)
- zfs_xvattr_set(*zpp, (xvattr_t *)vap, tx);
-
if (obj_type == DMU_OT_ZNODE ||
acl_ids->z_aclp->z_version < ZFS_ACL_VERSION_FUID) {
err = zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx);
ASSERT3S(err, ==, 0);
}
kmem_free(sa_attrs, sizeof(sa_bulk_attr_t) * ZPL_END);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj);
-}
-
-/*
- * zfs_xvattr_set only updates the in-core attributes
- * it is assumed the caller will be doing an sa_bulk_update
- * to push the changes out
- */
-void
-zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
-{
-#ifdef HAVE_XVATTR
- xoptattr_t *xoap;
-
- xoap = xva_getxoptattr(xvap);
- ASSERT(xoap);
-
- if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
- uint64_t times[2];
- ZFS_TIME_ENCODE(&xoap->xoa_createtime, times);
- (void) sa_update(zp->z_sa_hdl, SA_ZPL_CRTIME(zp->z_zfsvfs),
- &times, sizeof (times), tx);
- XVA_SET_RTN(xvap, XAT_CREATETIME);
- }
- if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
- ZFS_ATTR_SET(zp, ZFS_READONLY, xoap->xoa_readonly,
- zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_READONLY);
- }
- if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
- ZFS_ATTR_SET(zp, ZFS_HIDDEN, xoap->xoa_hidden,
- zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_HIDDEN);
- }
- if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
- ZFS_ATTR_SET(zp, ZFS_SYSTEM, xoap->xoa_system,
- zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_SYSTEM);
- }
- if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
- ZFS_ATTR_SET(zp, ZFS_ARCHIVE, xoap->xoa_archive,
- zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_ARCHIVE);
- }
- if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
- ZFS_ATTR_SET(zp, ZFS_IMMUTABLE, xoap->xoa_immutable,
- zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_IMMUTABLE);
- }
- if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
- ZFS_ATTR_SET(zp, ZFS_NOUNLINK, xoap->xoa_nounlink,
- zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_NOUNLINK);
- }
- if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
- ZFS_ATTR_SET(zp, ZFS_APPENDONLY, xoap->xoa_appendonly,
- zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_APPENDONLY);
- }
- if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
- ZFS_ATTR_SET(zp, ZFS_NODUMP, xoap->xoa_nodump,
- zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_NODUMP);
- }
- if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
- ZFS_ATTR_SET(zp, ZFS_OPAQUE, xoap->xoa_opaque,
- zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_OPAQUE);
- }
- if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
- ZFS_ATTR_SET(zp, ZFS_AV_QUARANTINED,
- xoap->xoa_av_quarantined, zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
- }
- if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
- ZFS_ATTR_SET(zp, ZFS_AV_MODIFIED, xoap->xoa_av_modified,
- zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
- }
- if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
- zfs_sa_set_scanstamp(zp, xvap, tx);
- XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP);
- }
- if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
- ZFS_ATTR_SET(zp, ZFS_REPARSE, xoap->xoa_reparse,
- zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_REPARSE);
- }
- if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
- ZFS_ATTR_SET(zp, ZFS_OFFLINE, xoap->xoa_offline,
- zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_OFFLINE);
- }
- if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
- ZFS_ATTR_SET(zp, ZFS_SPARSE, xoap->xoa_sparse,
- zp->z_pflags, tx);
- XVA_SET_RTN(xvap, XAT_SPARSE);
- }
-#endif /* HAVE_XVATTR */
+ ZFS_OBJ_HOLD_EXIT(zsb, obj);
}
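Throughout zfs_mknode() the Solaris va_type checks (VDIR, VBLK, VCHR) have been replaced by the standard mode-bit macros, since on the Linux port the file type travels in vap->va_mode. A trivial userland illustration of the equivalence being relied on:

#include <sys/stat.h>
#include <assert.h>

int
main(void)
{
        unsigned int va_mode = S_IFDIR | 0555;  /* as used for the share dir above */

        assert(S_ISDIR(va_mode));
        assert((va_mode & S_IFMT) == S_IFDIR);
        assert(!S_ISREG(va_mode) && !S_ISBLK(va_mode));
        return (0);
}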
int
-zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
+zfs_zget(zfs_sb_t *zsb, uint64_t obj_num, znode_t **zpp)
{
dmu_object_info_t doi;
dmu_buf_t *db;
@@ -842,11 +745,11 @@ zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
*zpp = NULL;
- ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_ENTER(zsb, obj_num);
- err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
+ err = sa_buf_hold(zsb->z_os, obj_num, NULL, &db);
if (err) {
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (err);
}
@@ -856,7 +759,7 @@ zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t)))) {
sa_buf_rele(db, NULL);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (EINVAL);
}
@@ -878,19 +781,18 @@ zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
if (zp->z_unlinked) {
err = ENOENT;
} else {
- VN_HOLD(ZTOV(zp));
+ igrab(ZTOI(zp));
*zpp = zp;
err = 0;
}
sa_buf_rele(db, NULL);
mutex_exit(&zp->z_lock);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (err);
}
/*
- * Not found create new znode/vnode
- * but only if file exists.
+ * Not found; create a new znode/vnode, but only if the file exists.
*
* There is a small window where zfs_vget() could
* find this object while a file create is still in
@@ -899,21 +801,21 @@ zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
* if zfs_znode_alloc() fails it will drop the hold on the
* bonus buffer.
*/
- zp = zfs_znode_alloc(zfsvfs, db, doi.doi_data_block_size,
- doi.doi_bonus_type, NULL);
+ zp = zfs_znode_alloc(zsb, db, doi.doi_data_block_size,
+ doi.doi_bonus_type, obj_num, NULL);
if (zp == NULL) {
err = ENOENT;
} else {
*zpp = zp;
}
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (err);
}
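With this change zfs_zget() hands back a znode whose lifetime is tracked by the generic inode reference count (igrab()/iput()) instead of VN_HOLD()/VN_RELE(). A hypothetical caller, sketched only to show the new hold/release pairing:

/* Hypothetical; error handling trimmed. */
static int
example_get_size(zfs_sb_t *zsb, uint64_t obj, uint64_t *sizep)
{
        znode_t *zp;
        int error;

        error = zfs_zget(zsb, obj, &zp);
        if (error)
                return (error);

        *sizep = zp->z_size;
        iput(ZTOI(zp));         /* drop the reference zfs_zget() returned */

        return (0);
}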
int
zfs_rezget(znode_t *zp)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
dmu_object_info_t doi;
dmu_buf_t *db;
uint64_t obj_num = zp->z_id;
@@ -923,7 +825,7 @@ zfs_rezget(znode_t *zp)
int count = 0;
uint64_t gen;
- ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_ENTER(zsb, obj_num);
mutex_enter(&zp->z_acl_lock);
if (zp->z_acl_cached) {
@@ -933,9 +835,9 @@ zfs_rezget(znode_t *zp)
mutex_exit(&zp->z_acl_lock);
ASSERT(zp->z_sa_hdl == NULL);
- err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
+ err = sa_buf_hold(zsb->z_os, obj_num, NULL, &db);
if (err) {
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (err);
}
@@ -945,33 +847,33 @@ zfs_rezget(znode_t *zp)
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t)))) {
sa_buf_rele(db, NULL);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (EINVAL);
}
- zfs_znode_sa_init(zfsvfs, zp, db, doi.doi_bonus_type, NULL);
+ zfs_znode_sa_init(zsb, zp, db, doi.doi_bonus_type, NULL);
/* reload cached values */
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL,
&gen, sizeof (gen));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL,
&zp->z_size, sizeof (zp->z_size));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL,
&zp->z_links, sizeof (zp->z_links));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
&zp->z_atime, sizeof (zp->z_atime));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL,
&zp->z_uid, sizeof (zp->z_uid));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL,
&zp->z_gid, sizeof (zp->z_gid));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL,
&mode, sizeof (mode));
if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) {
zfs_znode_dmu_fini(zp);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (EIO);
}
@@ -979,14 +881,14 @@ zfs_rezget(znode_t *zp)
if (gen != zp->z_gen) {
zfs_znode_dmu_fini(zp);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (EIO);
}
zp->z_unlinked = (zp->z_links == 0);
zp->z_blksz = doi.doi_data_block_size;
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (0);
}
@@ -994,27 +896,25 @@ zfs_rezget(znode_t *zp)
void
zfs_znode_delete(znode_t *zp, dmu_tx_t *tx)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- objset_t *os = zfsvfs->z_os;
+ zfs_sb_t *zsb = ZTOZSB(zp);
+ objset_t *os = zsb->z_os;
uint64_t obj = zp->z_id;
uint64_t acl_obj = zfs_external_acl(zp);
- ZFS_OBJ_HOLD_ENTER(zfsvfs, obj);
+ ZFS_OBJ_HOLD_ENTER(zsb, obj);
if (acl_obj) {
VERIFY(!zp->z_is_sa);
VERIFY(0 == dmu_object_free(os, acl_obj, tx));
}
VERIFY(0 == dmu_object_free(os, obj, tx));
zfs_znode_dmu_fini(zp);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj);
- zfs_znode_free(zp);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj);
}
void
zfs_zinactive(znode_t *zp)
{
- vnode_t *vp = ZTOV(zp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
uint64_t z_id = zp->z_id;
ASSERT(zp->z_sa_hdl);
@@ -1022,29 +922,8 @@ zfs_zinactive(znode_t *zp)
/*
* Don't allow a zfs_zget() while we're trying to release this znode
*/
- ZFS_OBJ_HOLD_ENTER(zfsvfs, z_id);
-
+ ZFS_OBJ_HOLD_ENTER(zsb, z_id);
mutex_enter(&zp->z_lock);
- mutex_enter(&vp->v_lock);
- vp->v_count--;
- if (vp->v_count > 0 || vn_has_cached_data(vp)) {
- /*
- * If the hold count is greater than zero, somebody has
- * obtained a new reference on this znode while we were
- * processing it here, so we are done. If we still have
- * mapped pages then we are also done, since we don't
- * want to inactivate the znode until the pages get pushed.
- *
- * XXX - if vn_has_cached_data(vp) is true, but count == 0,
- * this seems like it would leave the znode hanging with
- * no chance to go inactive...
- */
- mutex_exit(&vp->v_lock);
- mutex_exit(&zp->z_lock);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
- return;
- }
- mutex_exit(&vp->v_lock);
/*
* If this was the last reference to a file with no links,
@@ -1052,39 +931,14 @@ zfs_zinactive(znode_t *zp)
*/
if (zp->z_unlinked) {
mutex_exit(&zp->z_lock);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
+ ZFS_OBJ_HOLD_EXIT(zsb, z_id);
zfs_rmnode(zp);
return;
}
mutex_exit(&zp->z_lock);
zfs_znode_dmu_fini(zp);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
- zfs_znode_free(zp);
-}
-
-void
-zfs_znode_free(znode_t *zp)
-{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
-
- vn_invalid(ZTOV(zp));
-
- ASSERT(ZTOV(zp)->v_count == 0);
-
- mutex_enter(&zfsvfs->z_znodes_lock);
- POINTER_INVALIDATE(&zp->z_zfsvfs);
- list_remove(&zfsvfs->z_all_znodes, zp);
- mutex_exit(&zfsvfs->z_znodes_lock);
-
- if (zp->z_acl_cached) {
- zfs_acl_free(zp->z_acl_cached);
- zp->z_acl_cached = NULL;
- }
-
- kmem_cache_free(znode_cache, zp);
-
- VFS_RELE(zfsvfs->z_vfs);
+ ZFS_OBJ_HOLD_EXIT(zsb, z_id);
}
void
@@ -1102,21 +956,21 @@ zfs_tstamp_update_setup(znode_t *zp, uint_t flag, uint64_t mtime[2],
zp->z_atime_dirty = 1;
}
- if (flag & AT_ATIME) {
+ if (flag & ATTR_ATIME) {
ZFS_TIME_ENCODE(&now, zp->z_atime);
}
- if (flag & AT_MTIME) {
+ if (flag & ATTR_MTIME) {
ZFS_TIME_ENCODE(&now, mtime);
- if (zp->z_zfsvfs->z_use_fuids) {
+ if (ZTOZSB(zp)->z_use_fuids) {
zp->z_pflags |= (ZFS_ARCHIVE |
ZFS_AV_MODIFIED);
}
}
- if (flag & AT_CTIME) {
+ if (flag & ATTR_CTIME) {
ZFS_TIME_ENCODE(&now, ctime);
- if (zp->z_zfsvfs->z_use_fuids)
+ if (ZTOZSB(zp)->z_use_fuids)
zp->z_pflags |= ZFS_ARCHIVE;
}
}
@@ -1146,7 +1000,7 @@ zfs_grow_blocksize(znode_t *zp, uint64_t size, dmu_tx_t *tx)
if (zp->z_blksz && zp->z_size > zp->z_blksz)
return;
- error = dmu_object_set_blocksize(zp->z_zfsvfs->z_os, zp->z_id,
+ error = dmu_object_set_blocksize(ZTOZSB(zp)->z_os, zp->z_id,
size, 0, tx);
if (error == ENOTSUP)
@@ -1157,7 +1011,7 @@ zfs_grow_blocksize(znode_t *zp, uint64_t size, dmu_tx_t *tx)
dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &zp->z_blksz, &dummy);
}
-#ifdef HAVE_ZPL
+#ifdef HAVE_MMAP
/*
* This is a dummy interface used when pvn_vplist_dirty() should *not*
* be calling back into the fs for a putpage(). E.g.: when truncating
@@ -1171,7 +1025,7 @@ zfs_no_putpage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
ASSERT(0);
return (0);
}
-#endif /* HAVE_ZPL */
+#endif /* HAVE_MMAP */
/*
* Increase the file length
@@ -1185,7 +1039,7 @@ zfs_no_putpage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
static int
zfs_extend(znode_t *zp, uint64_t end)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
dmu_tx_t *tx;
rl_t *rl;
uint64_t newblksz;
@@ -1204,19 +1058,19 @@ zfs_extend(znode_t *zp, uint64_t end)
return (0);
}
top:
- tx = dmu_tx_create(zfsvfs->z_os);
+ tx = dmu_tx_create(zsb->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
if (end > zp->z_blksz &&
- (!ISP2(zp->z_blksz) || zp->z_blksz < zfsvfs->z_max_blksz)) {
+ (!ISP2(zp->z_blksz) || zp->z_blksz < zsb->z_max_blksz)) {
/*
* We are growing the file past the current block size.
*/
- if (zp->z_blksz > zp->z_zfsvfs->z_max_blksz) {
+ if (zp->z_blksz > ZTOZSB(zp)->z_max_blksz) {
ASSERT(!ISP2(zp->z_blksz));
newblksz = MIN(end, SPA_MAXBLOCKSIZE);
} else {
- newblksz = MIN(end, zp->z_zfsvfs->z_max_blksz);
+ newblksz = MIN(end, ZTOZSB(zp)->z_max_blksz);
}
dmu_tx_hold_write(tx, zp->z_id, 0, newblksz);
} else {
@@ -1240,7 +1094,7 @@ top:
zp->z_size = end;
- VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zp->z_zfsvfs),
+ VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
&zp->z_size, sizeof (zp->z_size), tx));
zfs_range_unlock(rl);
@@ -1257,13 +1111,13 @@ top:
* off - start of section to free.
* len - length of section to free.
*
- * RETURN: 0 if success
+ * RETURN: 0 if success
* error code if failure
*/
static int
zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
rl_t *rl;
int error;
@@ -1283,7 +1137,7 @@ zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
if (off + len > zp->z_size)
len = zp->z_size - off;
- error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, off, len);
+ error = dmu_free_long_range(zsb->z_os, zp->z_id, off, len);
zfs_range_unlock(rl);
@@ -1296,16 +1150,13 @@ zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
* IN: zp - znode of file to free data in.
* end - new end-of-file.
*
- * RETURN: 0 if success
+ * RETURN: 0 if success
* error code if failure
*/
static int
zfs_trunc(znode_t *zp, uint64_t end)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
-#ifdef HAVE_ZPL
- vnode_t *vp = ZTOV(zp);
-#endif /* HAVE_ZPL */
+ zfs_sb_t *zsb = ZTOZSB(zp);
dmu_tx_t *tx;
rl_t *rl;
int error;
@@ -1325,13 +1176,13 @@ zfs_trunc(znode_t *zp, uint64_t end)
return (0);
}
- error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, end, -1);
+ error = dmu_free_long_range(zsb->z_os, zp->z_id, end, -1);
if (error) {
zfs_range_unlock(rl);
return (error);
}
top:
- tx = dmu_tx_create(zfsvfs->z_os);
+ tx = dmu_tx_create(zsb->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_NOWAIT);
@@ -1347,44 +1198,18 @@ top:
}
zp->z_size = end;
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb),
NULL, &zp->z_size, sizeof (zp->z_size));
if (end == 0) {
zp->z_pflags &= ~ZFS_SPARSE;
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb),
NULL, &zp->z_pflags, 8);
}
VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);
dmu_tx_commit(tx);
-#ifdef HAVE_ZPL
- /*
- * Clear any mapped pages in the truncated region. This has to
- * happen outside of the transaction to avoid the possibility of
- * a deadlock with someone trying to push a page that we are
- * about to invalidate.
- */
- if (vn_has_cached_data(vp)) {
- page_t *pp;
- uint64_t start = end & PAGEMASK;
- int poff = end & PAGEOFFSET;
-
- if (poff != 0 && (pp = page_lookup(vp, start, SE_SHARED))) {
- /*
- * We need to zero a partial page.
- */
- pagezero(pp, poff, PAGESIZE - poff);
- start += PAGESIZE;
- page_unlock(pp);
- }
- error = pvn_vplist_dirty(vp, start, zfs_no_putpage,
- B_INVAL | B_TRUNC, NULL);
- ASSERT(error == 0);
- }
-#endif /* HAVE_ZPL */
-
zfs_range_unlock(rl);
return (0);
@@ -1399,25 +1224,25 @@ top:
* flag - current file open mode flags.
* log - TRUE if this action should be logged
*
- * RETURN: 0 if success
+ * RETURN: 0 if success
* error code if failure
*/
int
zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
{
-#ifdef HAVE_ZPL
- vnode_t *vp = ZTOV(zp);
-#endif /* HAVE_ZPL */
+#ifdef HAVE_MANDLOCKS
+ struct inode *ip = ZTOI(zp);
+#endif /* HAVE_MANDLOCKS */
dmu_tx_t *tx;
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- zilog_t *zilog = zfsvfs->z_log;
+ zfs_sb_t *zsb = ZTOZSB(zp);
+ zilog_t *zilog = zsb->z_log;
uint64_t mode;
uint64_t mtime[2], ctime[2];
sa_bulk_attr_t bulk[3];
int count = 0;
int error;
- if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs), &mode,
+ if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zsb), &mode,
sizeof (mode))) != 0)
return (error);
@@ -1429,17 +1254,17 @@ zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
return (error);
}
-#ifdef HAVE_ZPL
+#ifdef HAVE_MANDLOCKS
/*
* Check for any locks in the region to be freed.
*/
- if (MANDLOCK(vp, (mode_t)mode)) {
+ if (MANDLOCK(ip, (mode_t)mode)) {
uint64_t length = (len ? len : zp->z_size - off);
- if (error = chklock(vp, FWRITE, off, length, flag, NULL))
+ if (error = chklock(ip, FWRITE, off, length, flag, NULL))
return (error);
}
-#endif /* HAVE_ZPL */
+#endif /* HAVE_MANDLOCKS */
if (len == 0) {
error = zfs_trunc(zp, off);
@@ -1451,7 +1276,7 @@ zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
if (error || !log)
return (error);
log:
- tx = dmu_tx_create(zfsvfs->z_os);
+ tx = dmu_tx_create(zsb->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_NOWAIT);
@@ -1465,9 +1290,9 @@ log:
return (error);
}
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, mtime, 16);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, ctime, 16);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, mtime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, ctime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb),
NULL, &zp->z_pflags, 8);
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime, B_TRUE);
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
@@ -1589,7 +1414,6 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
ASSERT(error == 0);
dmu_buf_rele(db, FTAG);
-#endif /* HAVE_ZPL */
}
#endif /* _KERNEL */