Diffstat (limited to 'module/zfs/sa.c')
-rw-r--r--  module/zfs/sa.c  189
1 file changed, 186 insertions, 3 deletions
diff --git a/module/zfs/sa.c b/module/zfs/sa.c
index f0a18bad8..4a863f9a5 100644
--- a/module/zfs/sa.c
+++ b/module/zfs/sa.c
@@ -44,6 +44,10 @@
#include <sys/errno.h>
#include <sys/zfs_context.h>
+#ifdef _KERNEL
+#include <sys/zfs_znode.h>
+#endif
+
/*
* ZFS System attributes:
*
@@ -1456,8 +1460,9 @@ sa_lookup_impl(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count)
return (sa_attr_op(hdl, bulk, count, SA_LOOKUP, NULL));
}
-int
-sa_lookup(sa_handle_t *hdl, sa_attr_type_t attr, void *buf, uint32_t buflen)
+static int
+sa_lookup_locked(sa_handle_t *hdl, sa_attr_type_t attr, void *buf,
+ uint32_t buflen)
{
int error;
sa_bulk_attr_t bulk;
@@ -1470,9 +1475,19 @@ sa_lookup(sa_handle_t *hdl, sa_attr_type_t attr, void *buf, uint32_t buflen)
bulk.sa_data_func = NULL;
ASSERT(hdl);
- mutex_enter(&hdl->sa_lock);
error = sa_lookup_impl(hdl, &bulk, 1);
+ return (error);
+}
+
+int
+sa_lookup(sa_handle_t *hdl, sa_attr_type_t attr, void *buf, uint32_t buflen)
+{
+ int error;
+
+ mutex_enter(&hdl->sa_lock);
+ error = sa_lookup_locked(hdl, attr, buf, buflen);
mutex_exit(&hdl->sa_lock);
+
return (error);
}
@@ -1497,6 +1512,173 @@ sa_lookup_uio(sa_handle_t *hdl, sa_attr_type_t attr, uio_t *uio)
mutex_exit(&hdl->sa_lock);
return (error);
}
+
+/*
+ * An existing object upgraded from an old system has no slot for the project
+ * ID attribute in its on-disk layout, but the quota accounting logic needs to
+ * access the related slots directly by offset. So we adjust such old objects'
+ * layouts to place the project ID at a unified, fixed offset.
+ */
+int
+sa_add_projid(sa_handle_t *hdl, dmu_tx_t *tx, uint64_t projid)
+{
+ znode_t *zp = sa_get_userdata(hdl);
+ dmu_buf_t *db = sa_get_db(hdl);
+ zfsvfs_t *zfsvfs = ZTOZSB(zp);
+ int count = 0, err = 0;
+ sa_bulk_attr_t *bulk, *attrs;
+ zfs_acl_locator_cb_t locate = { 0 };
+ uint64_t uid, gid, mode, rdev, xattr = 0, parent, gen, links;
+ uint64_t crtime[2], mtime[2], ctime[2], atime[2];
+ zfs_acl_phys_t znode_acl = { 0 };
+ char scanstamp[AV_SCANSTAMP_SZ];
+
+ if (zp->z_acl_cached == NULL) {
+ zfs_acl_t *aclp;
+
+ mutex_enter(&zp->z_acl_lock);
+ err = zfs_acl_node_read(zp, B_FALSE, &aclp, B_FALSE);
+ mutex_exit(&zp->z_acl_lock);
+ if (err != 0 && err != ENOENT)
+ return (err);
+ }
+
+ bulk = kmem_zalloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
+ attrs = kmem_zalloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
+ mutex_enter(&hdl->sa_lock);
+ mutex_enter(&zp->z_lock);
+
+ err = sa_lookup_locked(hdl, SA_ZPL_PROJID(zfsvfs), &projid,
+ sizeof (uint64_t));
+ if (unlikely(err == 0))
+ /* Someone has added project ID attr by race. */
+ err = EEXIST;
+ if (err != ENOENT)
+ goto out;
+
+ /* First do a bulk query of the attributes that aren't cached */
+ if (zp->z_is_sa) {
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
+ &mode, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
+ &gen, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
+ &uid, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
+ &gid, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
+ &parent, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
+ &atime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
+ &mtime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
+ &ctime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL,
+ &crtime, 16);
+ if (S_ISBLK(ZTOI(zp)->i_mode) || S_ISCHR(ZTOI(zp)->i_mode))
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL,
+ &rdev, 8);
+ } else {
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
+ &atime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
+ &mtime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
+ &ctime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL,
+ &crtime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
+ &gen, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
+ &mode, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
+ &parent, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_XATTR(zfsvfs), NULL,
+ &xattr, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL,
+ &rdev, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
+ &uid, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
+ &gid, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
+ &znode_acl, 88);
+ }
+ err = sa_bulk_lookup_locked(hdl, bulk, count);
+ if (err != 0)
+ goto out;
+
+ err = sa_lookup_locked(hdl, SA_ZPL_XATTR(zfsvfs), &xattr, 8);
+ if (err != 0 && err != ENOENT)
+ goto out;
+
+ zp->z_projid = projid;
+ zp->z_pflags |= ZFS_PROJID;
+ links = ZTOI(zp)->i_nlink;
+ count = 0;
+ err = 0;
+
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_SIZE(zfsvfs), NULL,
+ &zp->z_size, 8);
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_GEN(zfsvfs), NULL, &gen, 8);
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_UID(zfsvfs), NULL, &uid, 8);
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_GID(zfsvfs), NULL, &gid, 8);
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8);
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_FLAGS(zfsvfs), NULL,
+ &zp->z_pflags, 8);
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_CRTIME(zfsvfs), NULL,
+ &crtime, 16);
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_PROJID(zfsvfs), NULL, &projid, 8);
+
+ if (S_ISBLK(ZTOI(zp)->i_mode) || S_ISCHR(ZTOI(zp)->i_mode))
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_RDEV(zfsvfs), NULL,
+ &rdev, 8);
+
+ if (zp->z_acl_cached != NULL) {
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
+ &zp->z_acl_cached->z_acl_count, 8);
+ if (zp->z_acl_cached->z_version < ZFS_ACL_VERSION_FUID)
+ zfs_acl_xform(zp, zp->z_acl_cached, CRED());
+ locate.cb_aclp = zp->z_acl_cached;
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_DACL_ACES(zfsvfs),
+ zfs_acl_data_locator, &locate,
+ zp->z_acl_cached->z_acl_bytes);
+ }
+
+ if (xattr)
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_XATTR(zfsvfs), NULL,
+ &xattr, 8);
+
+ if (zp->z_pflags & ZFS_BONUS_SCANSTAMP) {
+ bcopy((caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
+ scanstamp, AV_SCANSTAMP_SZ);
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_SCANSTAMP(zfsvfs), NULL,
+ scanstamp, AV_SCANSTAMP_SZ);
+ zp->z_pflags &= ~ZFS_BONUS_SCANSTAMP;
+ }
+
+ VERIFY(dmu_set_bonustype(db, DMU_OT_SA, tx) == 0);
+ VERIFY(sa_replace_all_by_template_locked(hdl, attrs, count, tx) == 0);
+ if (znode_acl.z_acl_extern_obj) {
+ VERIFY(0 == dmu_object_free(zfsvfs->z_os,
+ znode_acl.z_acl_extern_obj, tx));
+ }
+
+ zp->z_is_sa = B_TRUE;
+
+out:
+ mutex_exit(&zp->z_lock);
+ mutex_exit(&hdl->sa_lock);
+ kmem_free(attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
+ kmem_free(bulk, sizeof (sa_bulk_attr_t) * ZPL_END);
+ return (err);
+}
#endif
static sa_idx_tab_t *
@@ -2062,4 +2244,5 @@ EXPORT_SYMBOL(sa_hdrsize);
EXPORT_SYMBOL(sa_handle_lock);
EXPORT_SYMBOL(sa_handle_unlock);
EXPORT_SYMBOL(sa_lookup_uio);
+EXPORT_SYMBOL(sa_add_projid);
#endif /* _KERNEL */
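
For context, sa_add_projid() is intended to be called from the setattr path when
a project ID is first assigned to a znode whose on-disk layout predates the
attribute. The sketch below is illustrative only and not part of this patch:
the helper name zfs_upgrade_projid() is hypothetical, error handling is reduced
to the minimum, and it assumes the usual kernel context (sys/dmu.h, sys/sa.h,
sys/zfs_znode.h). It shows the expected transaction pattern around the new call.

/* Hypothetical caller sketch; not part of this patch. */
static int
zfs_upgrade_projid(znode_t *zp, uint64_t projid)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	dmu_tx_t *tx;
	int err;

	tx = dmu_tx_create(zfsvfs->z_os);
	/* B_TRUE: the SA layout may grow when the project ID slot is added. */
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	err = sa_add_projid(zp->z_sa_hdl, tx, projid);
	if (err == EEXIST)	/* Lost a race; the attribute already exists. */
		err = 0;

	dmu_tx_commit(tx);
	return (err);
}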