Diffstat (limited to 'module')
-rw-r--r--  module/zcommon/zfeature_common.c |  12
-rw-r--r--  module/zcommon/zfs_deleg.c       |   4
-rw-r--r--  module/zcommon/zfs_prop.c        |   6
-rw-r--r--  module/zfs/dbuf.c                |   2
-rw-r--r--  module/zfs/dmu.c                 |   4
-rw-r--r--  module/zfs/dmu_objset.c          | 181
-rw-r--r--  module/zfs/dmu_traverse.c        |  26
-rw-r--r--  module/zfs/dnode.c               |  22
-rw-r--r--  module/zfs/dsl_pool.c            |   2
-rw-r--r--  module/zfs/dsl_scan.c            |   6
-rw-r--r--  module/zfs/sa.c                  | 189
-rw-r--r--  module/zfs/spa.c                 |   2
-rw-r--r--  module/zfs/zfs_acl.c             |  14
-rw-r--r--  module/zfs/zfs_dir.c             |   2
-rw-r--r--  module/zfs/zfs_ioctl.c           |  35
-rw-r--r--  module/zfs/zfs_log.c             |  14
-rw-r--r--  module/zfs/zfs_replay.c          |  13
-rw-r--r--  module/zfs/zfs_sa.c              |  22
-rw-r--r--  module/zfs/zfs_vfsops.c          | 322
-rw-r--r--  module/zfs/zfs_vnops.c           | 327
-rw-r--r--  module/zfs/zfs_znode.c           |  54
-rw-r--r--  module/zfs/zpl_file.c            | 148
22 files changed, 1191 insertions, 216 deletions
diff --git a/module/zcommon/zfeature_common.c b/module/zcommon/zfeature_common.c
index 7b782b45d..36d0d9613 100644
--- a/module/zcommon/zfeature_common.c
+++ b/module/zcommon/zfeature_common.c
@@ -321,6 +321,18 @@ zpool_feature_init(void)
"Support for dataset level encryption",
ZFEATURE_FLAG_PER_DATASET, encryption_deps);
}
+
+ {
+ static const spa_feature_t project_quota_deps[] = {
+ SPA_FEATURE_EXTENSIBLE_DATASET,
+ SPA_FEATURE_NONE
+ };
+ zfeature_register(SPA_FEATURE_PROJECT_QUOTA,
+ "org.zfsonlinux:project_quota", "project_quota",
+ "space/object accounting based on project ID.",
+ ZFEATURE_FLAG_READONLY_COMPAT | ZFEATURE_FLAG_PER_DATASET,
+ project_quota_deps);
+ }
}
#if defined(_KERNEL) && defined(HAVE_SPL)
diff --git a/module/zcommon/zfs_deleg.c b/module/zcommon/zfs_deleg.c
index 18e5c11cc..3a51bc49a 100644
--- a/module/zcommon/zfs_deleg.c
+++ b/module/zcommon/zfs_deleg.c
@@ -71,6 +71,10 @@ zfs_deleg_perm_tab_t zfs_deleg_perm_tab[] = {
{ZFS_DELEG_PERM_RELEASE},
{ZFS_DELEG_PERM_LOAD_KEY},
{ZFS_DELEG_PERM_CHANGE_KEY},
+ {ZFS_DELEG_PERM_PROJECTUSED},
+ {ZFS_DELEG_PERM_PROJECTQUOTA},
+ {ZFS_DELEG_PERM_PROJECTOBJUSED},
+ {ZFS_DELEG_PERM_PROJECTOBJQUOTA},
{NULL}
};
diff --git a/module/zcommon/zfs_prop.c b/module/zcommon/zfs_prop.c
index 42af9468c..0d44fd139 100644
--- a/module/zcommon/zfs_prop.c
+++ b/module/zcommon/zfs_prop.c
@@ -58,7 +58,11 @@ const char *zfs_userquota_prop_prefixes[] = {
"userobjused@",
"userobjquota@",
"groupobjused@",
- "groupobjquota@"
+ "groupobjquota@",
+ "projectused@",
+ "projectquota@",
+ "projectobjused@",
+ "projectobjquota@"
};
zprop_desc_t *
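These prefixes are what the per-ID properties are built from: the numeric ID (or a resolvable name) is appended after the '@', so the new project properties surface as projectused@<id>, projectquota@<id>, projectobjused@<id>, and projectobjquota@<id>, mirroring the existing user/group properties. Below is a minimal standalone sketch (not part of the patch) of composing such a property name; the kernel indexes the table via ZFS_PROP_* as seen later in this diff, while this example just uses the string literal, and the project ID 1000 is an arbitrary example value.

/*
 * Standalone sketch: building a per-project property name from one of the
 * prefixes registered above.
 */
#include <stdio.h>
#include <inttypes.h>

int
main(void)
{
	char prop[64];
	uint64_t projid = 1000;	/* example project ID */

	/* yields "projectquota@1000", which can then be set to a byte limit */
	(void) snprintf(prop, sizeof (prop), "projectquota@%" PRIu64, projid);
	(void) printf("%s\n", prop);
	return (0);
}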
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index 3668ea315..51cb0c982 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -2456,7 +2456,7 @@ dbuf_destroy(dmu_buf_impl_t *db)
/*
* Note: While bpp will always be updated if the function returns success,
* parentp will not be updated if the dnode does not have dn_dbuf filled in;
- * this happens when the dnode is the meta-dnode, or a userused or groupused
+ * this happens when the dnode is the meta-dnode, or {user|group|project}used
* object.
*/
__attribute__((always_inline))
diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
index 20ed3ebff..cb86800f4 100644
--- a/module/zfs/dmu.c
+++ b/module/zfs/dmu.c
@@ -113,8 +113,8 @@ const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
{ DMU_BSWAP_UINT64, TRUE, FALSE, "FUID table size" },
{ DMU_BSWAP_ZAP, TRUE, FALSE, "DSL dataset next clones"},
{ DMU_BSWAP_ZAP, TRUE, FALSE, "scan work queue" },
- { DMU_BSWAP_ZAP, TRUE, TRUE, "ZFS user/group used" },
- { DMU_BSWAP_ZAP, TRUE, TRUE, "ZFS user/group quota" },
+ { DMU_BSWAP_ZAP, TRUE, TRUE, "ZFS user/group/project used" },
+ { DMU_BSWAP_ZAP, TRUE, TRUE, "ZFS user/group/project quota"},
{ DMU_BSWAP_ZAP, TRUE, FALSE, "snapshot refcount tags"},
{ DMU_BSWAP_ZAP, TRUE, FALSE, "DDT ZAP algorithm" },
{ DMU_BSWAP_ZAP, TRUE, FALSE, "DDT statistics" },
diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c
index befce9be6..0d9273fbb 100644
--- a/module/zfs/dmu_objset.c
+++ b/module/zfs/dmu_objset.c
@@ -58,6 +58,7 @@
#include <sys/policy.h>
#include <sys/spa_impl.h>
#include <sys/dmu_send.h>
+#include <sys/zfs_project.h>
/*
* Needed to close a window in dnode_move() that allows the objset to be freed
@@ -336,14 +337,17 @@ dmu_objset_byteswap(void *buf, size_t size)
{
objset_phys_t *osp = buf;
- ASSERT(size == OBJSET_OLD_PHYS_SIZE || size == sizeof (objset_phys_t));
+ ASSERT(size == OBJSET_PHYS_SIZE_V1 || size == OBJSET_PHYS_SIZE_V2 ||
+ size == sizeof (objset_phys_t));
dnode_byteswap(&osp->os_meta_dnode);
byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
osp->os_type = BSWAP_64(osp->os_type);
osp->os_flags = BSWAP_64(osp->os_flags);
- if (size == sizeof (objset_phys_t)) {
+ if (size >= OBJSET_PHYS_SIZE_V2) {
dnode_byteswap(&osp->os_userused_dnode);
dnode_byteswap(&osp->os_groupused_dnode);
+ if (size >= sizeof (objset_phys_t))
+ dnode_byteswap(&osp->os_projectused_dnode);
}
}
@@ -395,6 +399,7 @@ dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
if (!BP_IS_HOLE(os->os_rootbp)) {
arc_flags_t aflags = ARC_FLAG_WAIT;
zbookmark_phys_t zb;
+ int size;
enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
@@ -420,12 +425,19 @@ dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
return (err);
}
+ if (spa_version(spa) < SPA_VERSION_USERSPACE)
+ size = OBJSET_PHYS_SIZE_V1;
+ else if (!spa_feature_is_enabled(spa,
+ SPA_FEATURE_PROJECT_QUOTA))
+ size = OBJSET_PHYS_SIZE_V2;
+ else
+ size = sizeof (objset_phys_t);
+
/* Increase the blocksize if we are permitted. */
- if (spa_version(spa) >= SPA_VERSION_USERSPACE &&
- arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) {
+ if (arc_buf_size(os->os_phys_buf) < size) {
arc_buf_t *buf = arc_alloc_buf(spa, &os->os_phys_buf,
- ARC_BUFC_METADATA, sizeof (objset_phys_t));
- bzero(buf->b_data, sizeof (objset_phys_t));
+ ARC_BUFC_METADATA, size);
+ bzero(buf->b_data, size);
bcopy(os->os_phys_buf->b_data, buf->b_data,
arc_buf_size(os->os_phys_buf));
arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
@@ -436,7 +448,7 @@ dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
os->os_flags = os->os_phys->os_flags;
} else {
int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
- sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
+ sizeof (objset_phys_t) : OBJSET_PHYS_SIZE_V1;
os->os_phys_buf = arc_alloc_buf(spa, &os->os_phys_buf,
ARC_BUFC_METADATA, size);
os->os_phys = os->os_phys_buf->b_data;
@@ -568,11 +580,15 @@ dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
dnode_special_open(os, &os->os_phys->os_meta_dnode,
DMU_META_DNODE_OBJECT, &os->os_meta_dnode);
- if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) {
+ if (OBJSET_BUF_HAS_USERUSED(os->os_phys_buf)) {
dnode_special_open(os, &os->os_phys->os_userused_dnode,
DMU_USERUSED_OBJECT, &os->os_userused_dnode);
dnode_special_open(os, &os->os_phys->os_groupused_dnode,
DMU_GROUPUSED_OBJECT, &os->os_groupused_dnode);
+ if (OBJSET_BUF_HAS_PROJECTUSED(os->os_phys_buf))
+ dnode_special_open(os,
+ &os->os_phys->os_projectused_dnode,
+ DMU_PROJECTUSED_OBJECT, &os->os_projectused_dnode);
}
mutex_init(&os->os_upgrade_lock, NULL, MUTEX_DEFAULT, NULL);
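The open path above (and the traversal and scan code later in this diff) keys off two helper macros, OBJSET_BUF_HAS_USERUSED() and OBJSET_BUF_HAS_PROJECTUSED(), instead of comparing arc_buf_size() against sizeof (objset_phys_t) directly as the old code did. Their definitions live in the objset header and are not part of this diff; based on the byteswap logic above, they presumably compare the ARC buffer size against the V2 and full on-disk sizes, roughly along these lines (a sketch, not the actual header text):

/*
 * Sketch only -- the real definitions are in sys/dmu_objset.h. The intent:
 * a V2-or-larger buffer carries the user/group accounting dnodes, and only
 * a full-size buffer also carries the project accounting dnode.
 */
#define	OBJSET_BUF_HAS_USERUSED(buf)	\
	(arc_buf_size(buf) >= OBJSET_PHYS_SIZE_V2)
#define	OBJSET_BUF_HAS_PROJECTUSED(buf)	\
	(arc_buf_size(buf) >= sizeof (objset_phys_t))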
@@ -711,9 +727,10 @@ dmu_objset_own(const char *name, dmu_objset_type_t type,
}
/* user accounting requires the dataset to be decrypted */
- if (dmu_objset_userobjspace_upgradable(*osp) &&
+ if ((dmu_objset_userobjspace_upgradable(*osp) ||
+ dmu_objset_projectquota_upgradable(*osp)) &&
(ds->ds_dir->dd_crypto_obj == 0 || decrypt))
- dmu_objset_userobjspace_upgrade(*osp);
+ dmu_objset_id_quota_upgrade(*osp);
dsl_pool_rele(dp, FTAG);
return (0);
@@ -835,6 +852,8 @@ dmu_objset_evict_dbufs(objset_t *os)
kmem_free(dn_marker, sizeof (dnode_t));
if (DMU_USERUSED_DNODE(os) != NULL) {
+ if (DMU_PROJECTUSED_DNODE(os) != NULL)
+ dnode_evict_dbufs(DMU_PROJECTUSED_DNODE(os));
dnode_evict_dbufs(DMU_GROUPUSED_DNODE(os));
dnode_evict_dbufs(DMU_USERUSED_DNODE(os));
}
@@ -889,6 +908,8 @@ dmu_objset_evict_done(objset_t *os)
dnode_special_close(&os->os_meta_dnode);
if (DMU_USERUSED_DNODE(os)) {
+ if (DMU_PROJECTUSED_DNODE(os))
+ dnode_special_close(&os->os_projectused_dnode);
dnode_special_close(&os->os_userused_dnode);
dnode_special_close(&os->os_groupused_dnode);
}
@@ -1004,6 +1025,12 @@ dmu_objset_create_impl_dnstats(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
os->os_phys->os_flags |=
OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE;
}
+ if (dmu_objset_projectquota_enabled(os)) {
+ ds->ds_feature_activation_needed[
+ SPA_FEATURE_PROJECT_QUOTA] = B_TRUE;
+ os->os_phys->os_flags |=
+ OBJSET_FLAG_PROJECTQUOTA_COMPLETE;
+ }
os->os_flags = os->os_phys->os_flags;
}
@@ -1408,7 +1435,7 @@ dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
* Update rootbp fill count: it should be the number of objects
* allocated in the object set (not counting the "special"
* objects that are stored in the objset_phys_t -- the meta
- * dnode and user/group accounting objects).
+ * dnode and user/group/project accounting objects).
*/
for (int i = 0; i < dnp->dn_nblkptr; i++)
fill += BP_GET_FILL(&dnp->dn_blkptr[i]);
@@ -1537,6 +1564,12 @@ dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
dnode_sync(DMU_GROUPUSED_DNODE(os), tx);
}
+ if (DMU_PROJECTUSED_DNODE(os) &&
+ DMU_PROJECTUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
+ DMU_PROJECTUSED_DNODE(os)->dn_zio = zio;
+ dnode_sync(DMU_PROJECTUSED_DNODE(os), tx);
+ }
+
txgoff = tx->tx_txg & TXG_MASK;
if (dmu_objset_userused_enabled(os) &&
@@ -1620,6 +1653,14 @@ dmu_objset_userobjused_enabled(objset_t *os)
spa_feature_is_enabled(os->os_spa, SPA_FEATURE_USEROBJ_ACCOUNTING));
}
+boolean_t
+dmu_objset_projectquota_enabled(objset_t *os)
+{
+ return (used_cbs[os->os_phys->os_type] != NULL &&
+ DMU_PROJECTUSED_DNODE(os) != NULL &&
+ spa_feature_is_enabled(os->os_spa, SPA_FEATURE_PROJECT_QUOTA));
+}
+
typedef struct userquota_node {
/* must be in the first field, see userquota_update_cache() */
char uqn_id[20 + DMU_OBJACCT_PREFIX_LEN];
@@ -1630,6 +1671,7 @@ typedef struct userquota_node {
typedef struct userquota_cache {
avl_tree_t uqc_user_deltas;
avl_tree_t uqc_group_deltas;
+ avl_tree_t uqc_project_deltas;
} userquota_cache_t;
static int
@@ -1682,6 +1724,19 @@ do_userquota_cacheflush(objset_t *os, userquota_cache_t *cache, dmu_tx_t *tx)
kmem_free(uqn, sizeof (*uqn));
}
avl_destroy(&cache->uqc_group_deltas);
+
+ if (dmu_objset_projectquota_enabled(os)) {
+ cookie = NULL;
+ while ((uqn = avl_destroy_nodes(&cache->uqc_project_deltas,
+ &cookie)) != NULL) {
+ mutex_enter(&os->os_userused_lock);
+ VERIFY0(zap_increment(os, DMU_PROJECTUSED_OBJECT,
+ uqn->uqn_id, uqn->uqn_delta, tx));
+ mutex_exit(&os->os_userused_lock);
+ kmem_free(uqn, sizeof (*uqn));
+ }
+ avl_destroy(&cache->uqc_project_deltas);
+ }
}
static void
@@ -1706,10 +1761,11 @@ userquota_update_cache(avl_tree_t *avl, const char *id, int64_t delta)
}
static void
-do_userquota_update(userquota_cache_t *cache, uint64_t used, uint64_t flags,
- uint64_t user, uint64_t group, boolean_t subtract)
+do_userquota_update(objset_t *os, userquota_cache_t *cache, uint64_t used,
+ uint64_t flags, uint64_t user, uint64_t group, uint64_t project,
+ boolean_t subtract)
{
- if ((flags & DNODE_FLAG_USERUSED_ACCOUNTED)) {
+ if (flags & DNODE_FLAG_USERUSED_ACCOUNTED) {
int64_t delta = DNODE_MIN_SIZE + used;
char name[20];
@@ -1721,12 +1777,18 @@ do_userquota_update(userquota_cache_t *cache, uint64_t used, uint64_t flags,
(void) sprintf(name, "%llx", (longlong_t)group);
userquota_update_cache(&cache->uqc_group_deltas, name, delta);
+
+ if (dmu_objset_projectquota_enabled(os)) {
+ (void) sprintf(name, "%llx", (longlong_t)project);
+ userquota_update_cache(&cache->uqc_project_deltas,
+ name, delta);
+ }
}
}
static void
-do_userobjquota_update(userquota_cache_t *cache, uint64_t flags,
- uint64_t user, uint64_t group, boolean_t subtract)
+do_userobjquota_update(objset_t *os, userquota_cache_t *cache, uint64_t flags,
+ uint64_t user, uint64_t group, uint64_t project, boolean_t subtract)
{
if (flags & DNODE_FLAG_USEROBJUSED_ACCOUNTED) {
char name[20 + DMU_OBJACCT_PREFIX_LEN];
@@ -1739,6 +1801,13 @@ do_userobjquota_update(userquota_cache_t *cache, uint64_t flags,
(void) snprintf(name, sizeof (name), DMU_OBJACCT_PREFIX "%llx",
(longlong_t)group);
userquota_update_cache(&cache->uqc_group_deltas, name, delta);
+
+ if (dmu_objset_projectquota_enabled(os)) {
+ (void) snprintf(name, sizeof (name),
+ DMU_OBJACCT_PREFIX "%llx", (longlong_t)project);
+ userquota_update_cache(&cache->uqc_project_deltas,
+ name, delta);
+ }
}
}
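Both helpers above address the per-ID ZAP entries by a text key: the byte-usage entry is the 64-bit ID printed as lowercase hex ("%llx"), and the object-count entry is the same hex string with DMU_OBJACCT_PREFIX prepended, which is how the later zap_increment()/zap_lookup() calls find them. A self-contained sketch of the key construction follows; the literal value of DMU_OBJACCT_PREFIX comes from dmu.h and is assumed here, not taken from this diff.

/* Standalone sketch of the accounting ZAP key format used above. */
#include <stdio.h>

#define	DMU_OBJACCT_PREFIX	"obj-"	/* assumed literal; defined in dmu.h */

int
main(void)
{
	unsigned long long id = 0x3e9;	/* e.g. project ID 1001 */
	char bytes_key[32], objs_key[32];

	/* key for the space (bytes) accounting entry */
	(void) snprintf(bytes_key, sizeof (bytes_key), "%llx", id);
	/* key for the object-count accounting entry */
	(void) snprintf(objs_key, sizeof (objs_key),
	    DMU_OBJACCT_PREFIX "%llx", id);

	(void) printf("%s %s\n", bytes_key, objs_key);	/* "3e9 obj-3e9" */
	return (0);
}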
@@ -1766,6 +1835,10 @@ userquota_updates_task(void *arg)
sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node));
avl_create(&cache.uqc_group_deltas, userquota_compare,
sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node));
+ if (dmu_objset_projectquota_enabled(os))
+ avl_create(&cache.uqc_project_deltas, userquota_compare,
+ sizeof (userquota_node_t), offsetof(userquota_node_t,
+ uqn_node));
while ((dn = multilist_sublist_head(list)) != NULL) {
int flags;
@@ -1777,18 +1850,21 @@ userquota_updates_task(void *arg)
flags = dn->dn_id_flags;
ASSERT(flags);
if (flags & DN_ID_OLD_EXIST) {
- do_userquota_update(&cache,
- dn->dn_oldused, dn->dn_oldflags,
- dn->dn_olduid, dn->dn_oldgid, B_TRUE);
- do_userobjquota_update(&cache, dn->dn_oldflags,
- dn->dn_olduid, dn->dn_oldgid, B_TRUE);
+ do_userquota_update(os, &cache, dn->dn_oldused,
+ dn->dn_oldflags, dn->dn_olduid, dn->dn_oldgid,
+ dn->dn_oldprojid, B_TRUE);
+ do_userobjquota_update(os, &cache, dn->dn_oldflags,
+ dn->dn_olduid, dn->dn_oldgid,
+ dn->dn_oldprojid, B_TRUE);
}
if (flags & DN_ID_NEW_EXIST) {
- do_userquota_update(&cache,
+ do_userquota_update(os, &cache,
DN_USED_BYTES(dn->dn_phys), dn->dn_phys->dn_flags,
- dn->dn_newuid, dn->dn_newgid, B_FALSE);
- do_userobjquota_update(&cache, dn->dn_phys->dn_flags,
- dn->dn_newuid, dn->dn_newgid, B_FALSE);
+ dn->dn_newuid, dn->dn_newgid,
+ dn->dn_newprojid, B_FALSE);
+ do_userobjquota_update(os, &cache,
+ dn->dn_phys->dn_flags, dn->dn_newuid, dn->dn_newgid,
+ dn->dn_newprojid, B_FALSE);
}
mutex_enter(&dn->dn_mtx);
@@ -1797,6 +1873,7 @@ userquota_updates_task(void *arg)
if (dn->dn_id_flags & DN_ID_NEW_EXIST) {
dn->dn_olduid = dn->dn_newuid;
dn->dn_oldgid = dn->dn_newgid;
+ dn->dn_oldprojid = dn->dn_newprojid;
dn->dn_id_flags |= DN_ID_OLD_EXIST;
if (dn->dn_bonuslen == 0)
dn->dn_id_flags |= DN_ID_CHKED_SPILL;
@@ -1824,7 +1901,7 @@ dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx)
if (os->os_encrypted && dmu_objset_is_receiving(os))
return;
- /* Allocate the user/groupused objects if necessary. */
+ /* Allocate the user/group/project used objects if necessary. */
if (DMU_USERUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
VERIFY0(zap_create_claim(os,
DMU_USERUSED_OBJECT,
@@ -1834,6 +1911,12 @@ dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx)
DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
}
+ if (dmu_objset_projectquota_enabled(os) &&
+ DMU_PROJECTUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
+ VERIFY0(zap_create_claim(os, DMU_PROJECTUSED_OBJECT,
+ DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
+ }
+
for (int i = 0;
i < multilist_get_num_sublists(os->os_synced_dnodes); i++) {
userquota_updates_arg_t *uua =
@@ -1896,6 +1979,7 @@ dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
dmu_buf_impl_t *db = NULL;
uint64_t *user = NULL;
uint64_t *group = NULL;
+ uint64_t *project = NULL;
int flags = dn->dn_id_flags;
int error;
boolean_t have_spill = B_FALSE;
@@ -1953,9 +2037,11 @@ dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
ASSERT(data);
user = &dn->dn_olduid;
group = &dn->dn_oldgid;
+ project = &dn->dn_oldprojid;
} else if (data) {
user = &dn->dn_newuid;
group = &dn->dn_newgid;
+ project = &dn->dn_newprojid;
}
/*
@@ -1963,7 +2049,7 @@ dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
* type has changed and that type isn't an object type to track
*/
error = used_cbs[os->os_phys->os_type](dn->dn_bonustype, data,
- user, group);
+ user, group, project);
/*
* Preserve existing uid/gid when the callback can't determine
@@ -1976,9 +2062,11 @@ dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
if (flags & DN_ID_OLD_EXIST) {
dn->dn_newuid = dn->dn_olduid;
dn->dn_newgid = dn->dn_oldgid;
+ dn->dn_newprojid = dn->dn_oldprojid;
} else {
dn->dn_newuid = 0;
dn->dn_newgid = 0;
+ dn->dn_newprojid = ZFS_DEFAULT_PROJID;
}
error = 0;
}
@@ -2016,6 +2104,13 @@ dmu_objset_userobjspace_present(objset_t *os)
OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE);
}
+boolean_t
+dmu_objset_projectquota_present(objset_t *os)
+{
+ return (os->os_phys->os_flags &
+ OBJSET_FLAG_PROJECTQUOTA_COMPLETE);
+}
+
static int
dmu_objset_space_upgrade(objset_t *os)
{
@@ -2085,33 +2180,43 @@ dmu_objset_userspace_upgrade(objset_t *os)
}
static int
-dmu_objset_userobjspace_upgrade_cb(objset_t *os)
+dmu_objset_id_quota_upgrade_cb(objset_t *os)
{
int err = 0;
- if (dmu_objset_userobjspace_present(os))
+ if (dmu_objset_userobjspace_present(os) &&
+ dmu_objset_projectquota_present(os))
return (0);
if (dmu_objset_is_snapshot(os))
return (SET_ERROR(EINVAL));
if (!dmu_objset_userobjused_enabled(os))
return (SET_ERROR(ENOTSUP));
+ if (!dmu_objset_projectquota_enabled(os) &&
+ dmu_objset_userobjspace_present(os))
+ return (SET_ERROR(ENOTSUP));
dmu_objset_ds(os)->ds_feature_activation_needed[
SPA_FEATURE_USEROBJ_ACCOUNTING] = B_TRUE;
+ if (dmu_objset_projectquota_enabled(os))
+ dmu_objset_ds(os)->ds_feature_activation_needed[
+ SPA_FEATURE_PROJECT_QUOTA] = B_TRUE;
err = dmu_objset_space_upgrade(os);
if (err)
return (err);
os->os_flags |= OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE;
+ if (dmu_objset_projectquota_enabled(os))
+ os->os_flags |= OBJSET_FLAG_PROJECTQUOTA_COMPLETE;
+
txg_wait_synced(dmu_objset_pool(os), 0);
return (0);
}
void
-dmu_objset_userobjspace_upgrade(objset_t *os)
+dmu_objset_id_quota_upgrade(objset_t *os)
{
- dmu_objset_upgrade(os, dmu_objset_userobjspace_upgrade_cb);
+ dmu_objset_upgrade(os, dmu_objset_id_quota_upgrade_cb);
}
boolean_t
@@ -2123,6 +2228,15 @@ dmu_objset_userobjspace_upgradable(objset_t *os)
!dmu_objset_userobjspace_present(os));
}
+boolean_t
+dmu_objset_projectquota_upgradable(objset_t *os)
+{
+ return (dmu_objset_type(os) == DMU_OST_ZFS &&
+ !dmu_objset_is_snapshot(os) &&
+ dmu_objset_projectquota_enabled(os) &&
+ !dmu_objset_projectquota_present(os));
+}
+
void
dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
uint64_t *usedobjsp, uint64_t *availobjsp)
@@ -2731,7 +2845,10 @@ EXPORT_SYMBOL(dmu_objset_userused_enabled);
EXPORT_SYMBOL(dmu_objset_userspace_upgrade);
EXPORT_SYMBOL(dmu_objset_userspace_present);
EXPORT_SYMBOL(dmu_objset_userobjused_enabled);
-EXPORT_SYMBOL(dmu_objset_userobjspace_upgrade);
EXPORT_SYMBOL(dmu_objset_userobjspace_upgradable);
EXPORT_SYMBOL(dmu_objset_userobjspace_present);
+EXPORT_SYMBOL(dmu_objset_projectquota_enabled);
+EXPORT_SYMBOL(dmu_objset_projectquota_present);
+EXPORT_SYMBOL(dmu_objset_projectquota_upgradable);
+EXPORT_SYMBOL(dmu_objset_id_quota_upgrade);
#endif
diff --git a/module/zfs/dmu_traverse.c b/module/zfs/dmu_traverse.c
index 15d29198f..5407e4817 100644
--- a/module/zfs/dmu_traverse.c
+++ b/module/zfs/dmu_traverse.c
@@ -386,7 +386,11 @@ traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
if (osp->os_meta_dnode.dn_maxblkid == 0)
td->td_realloc_possible = B_FALSE;
- if (arc_buf_size(buf) >= sizeof (objset_phys_t)) {
+ if (OBJSET_BUF_HAS_USERUSED(buf)) {
+ if (OBJSET_BUF_HAS_PROJECTUSED(buf))
+ prefetch_dnode_metadata(td,
+ &osp->os_projectused_dnode,
+ zb->zb_objset, DMU_PROJECTUSED_OBJECT);
prefetch_dnode_metadata(td, &osp->os_groupused_dnode,
zb->zb_objset, DMU_GROUPUSED_OBJECT);
prefetch_dnode_metadata(td, &osp->os_userused_dnode,
@@ -395,13 +399,19 @@ traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
err = traverse_dnode(td, &osp->os_meta_dnode, zb->zb_objset,
DMU_META_DNODE_OBJECT);
- if (err == 0 && arc_buf_size(buf) >= sizeof (objset_phys_t)) {
- err = traverse_dnode(td, &osp->os_groupused_dnode,
- zb->zb_objset, DMU_GROUPUSED_OBJECT);
- }
- if (err == 0 && arc_buf_size(buf) >= sizeof (objset_phys_t)) {
- err = traverse_dnode(td, &osp->os_userused_dnode,
- zb->zb_objset, DMU_USERUSED_OBJECT);
+ if (err == 0 && OBJSET_BUF_HAS_USERUSED(buf)) {
+ if (OBJSET_BUF_HAS_PROJECTUSED(buf))
+ err = traverse_dnode(td,
+ &osp->os_projectused_dnode, zb->zb_objset,
+ DMU_PROJECTUSED_OBJECT);
+ if (err == 0)
+ err = traverse_dnode(td,
+ &osp->os_groupused_dnode, zb->zb_objset,
+ DMU_GROUPUSED_OBJECT);
+ if (err == 0)
+ err = traverse_dnode(td,
+ &osp->os_userused_dnode, zb->zb_objset,
+ DMU_USERUSED_OBJECT);
}
}
diff --git a/module/zfs/dnode.c b/module/zfs/dnode.c
index b4c131e98..596983b47 100644
--- a/module/zfs/dnode.c
+++ b/module/zfs/dnode.c
@@ -38,6 +38,7 @@
#include <sys/dmu_zfetch.h>
#include <sys/range_tree.h>
#include <sys/trace_dnode.h>
+#include <sys/zfs_project.h>
dnode_stats_t dnode_stats = {
{ "dnode_hold_dbuf_hold", KSTAT_DATA_UINT64 },
@@ -157,8 +158,10 @@ dnode_cons(void *arg, void *unused, int kmflag)
dn->dn_oldflags = 0;
dn->dn_olduid = 0;
dn->dn_oldgid = 0;
+ dn->dn_oldprojid = ZFS_DEFAULT_PROJID;
dn->dn_newuid = 0;
dn->dn_newgid = 0;
+ dn->dn_newprojid = ZFS_DEFAULT_PROJID;
dn->dn_id_flags = 0;
dn->dn_dbufs_count = 0;
@@ -210,8 +213,10 @@ dnode_dest(void *arg, void *unused)
ASSERT0(dn->dn_oldflags);
ASSERT0(dn->dn_olduid);
ASSERT0(dn->dn_oldgid);
+ ASSERT0(dn->dn_oldprojid);
ASSERT0(dn->dn_newuid);
ASSERT0(dn->dn_newgid);
+ ASSERT0(dn->dn_newprojid);
ASSERT0(dn->dn_id_flags);
ASSERT0(dn->dn_dbufs_count);
@@ -543,8 +548,10 @@ dnode_destroy(dnode_t *dn)
dn->dn_oldflags = 0;
dn->dn_olduid = 0;
dn->dn_oldgid = 0;
+ dn->dn_oldprojid = ZFS_DEFAULT_PROJID;
dn->dn_newuid = 0;
dn->dn_newgid = 0;
+ dn->dn_newprojid = ZFS_DEFAULT_PROJID;
dn->dn_id_flags = 0;
dmu_zfetch_fini(&dn->dn_zfetch);
@@ -799,8 +806,10 @@ dnode_move_impl(dnode_t *odn, dnode_t *ndn)
ndn->dn_oldflags = odn->dn_oldflags;
ndn->dn_olduid = odn->dn_olduid;
ndn->dn_oldgid = odn->dn_oldgid;
+ ndn->dn_oldprojid = odn->dn_oldprojid;
ndn->dn_newuid = odn->dn_newuid;
ndn->dn_newgid = odn->dn_newgid;
+ ndn->dn_newprojid = odn->dn_newprojid;
ndn->dn_id_flags = odn->dn_id_flags;
dmu_zfetch_init(&ndn->dn_zfetch, NULL);
list_move_tail(&ndn->dn_zfetch.zf_stream, &odn->dn_zfetch.zf_stream);
@@ -859,8 +868,10 @@ dnode_move_impl(dnode_t *odn, dnode_t *ndn)
odn->dn_oldflags = 0;
odn->dn_olduid = 0;
odn->dn_oldgid = 0;
+ odn->dn_oldprojid = ZFS_DEFAULT_PROJID;
odn->dn_newuid = 0;
odn->dn_newgid = 0;
+ odn->dn_newprojid = ZFS_DEFAULT_PROJID;
odn->dn_id_flags = 0;
/*
@@ -1265,9 +1276,14 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots,
(spa_is_root(os->os_spa) &&
spa_config_held(os->os_spa, SCL_STATE, RW_WRITER)));
- if (object == DMU_USERUSED_OBJECT || object == DMU_GROUPUSED_OBJECT) {
- dn = (object == DMU_USERUSED_OBJECT) ?
- DMU_USERUSED_DNODE(os) : DMU_GROUPUSED_DNODE(os);
+ if (object == DMU_USERUSED_OBJECT || object == DMU_GROUPUSED_OBJECT ||
+ object == DMU_PROJECTUSED_OBJECT) {
+ if (object == DMU_USERUSED_OBJECT)
+ dn = DMU_USERUSED_DNODE(os);
+ else if (object == DMU_GROUPUSED_OBJECT)
+ dn = DMU_GROUPUSED_DNODE(os);
+ else
+ dn = DMU_PROJECTUSED_DNODE(os);
if (dn == NULL)
return (SET_ERROR(ENOENT));
type = dn->dn_type;
diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c
index 86863fad8..db2e67742 100644
--- a/module/zfs/dsl_pool.c
+++ b/module/zfs/dsl_pool.c
@@ -580,7 +580,7 @@ dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
/*
* After the data blocks have been written (ensured by the zio_wait()
- * above), update the user/group space accounting. This happens
+ * above), update the user/group/project space accounting. This happens
* in tasks dispatched to dp_sync_taskq, so wait for them before
* continuing.
*/
diff --git a/module/zfs/dsl_scan.c b/module/zfs/dsl_scan.c
index fc0c24e1c..776f8f239 100644
--- a/module/zfs/dsl_scan.c
+++ b/module/zfs/dsl_scan.c
@@ -1684,11 +1684,15 @@ dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
if (OBJSET_BUF_HAS_USERUSED(buf)) {
/*
- * We also always visit user/group accounting
+ * We also always visit user/group/project accounting
* objects, and never skip them, even if we are
* suspending. This is necessary so that the
* space deltas from this txg get integrated.
*/
+ if (OBJSET_BUF_HAS_PROJECTUSED(buf))
+ dsl_scan_visitdnode(scn, ds, osp->os_type,
+ &osp->os_projectused_dnode,
+ DMU_PROJECTUSED_OBJECT, tx);
dsl_scan_visitdnode(scn, ds, osp->os_type,
&osp->os_groupused_dnode,
DMU_GROUPUSED_OBJECT, tx);
diff --git a/module/zfs/sa.c b/module/zfs/sa.c
index f0a18bad8..4a863f9a5 100644
--- a/module/zfs/sa.c
+++ b/module/zfs/sa.c
@@ -44,6 +44,10 @@
#include <sys/errno.h>
#include <sys/zfs_context.h>
+#ifdef _KERNEL
+#include <sys/zfs_znode.h>
+#endif
+
/*
* ZFS System attributes:
*
@@ -1456,8 +1460,9 @@ sa_lookup_impl(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count)
return (sa_attr_op(hdl, bulk, count, SA_LOOKUP, NULL));
}
-int
-sa_lookup(sa_handle_t *hdl, sa_attr_type_t attr, void *buf, uint32_t buflen)
+static int
+sa_lookup_locked(sa_handle_t *hdl, sa_attr_type_t attr, void *buf,
+ uint32_t buflen)
{
int error;
sa_bulk_attr_t bulk;
@@ -1470,9 +1475,19 @@ sa_lookup(sa_handle_t *hdl, sa_attr_type_t attr, void *buf, uint32_t buflen)
bulk.sa_data_func = NULL;
ASSERT(hdl);
- mutex_enter(&hdl->sa_lock);
error = sa_lookup_impl(hdl, &bulk, 1);
+ return (error);
+}
+
+int
+sa_lookup(sa_handle_t *hdl, sa_attr_type_t attr, void *buf, uint32_t buflen)
+{
+ int error;
+
+ mutex_enter(&hdl->sa_lock);
+ error = sa_lookup_locked(hdl, attr, buf, buflen);
mutex_exit(&hdl->sa_lock);
+
return (error);
}
@@ -1497,6 +1512,173 @@ sa_lookup_uio(sa_handle_t *hdl, sa_attr_type_t attr, uio_t *uio)
mutex_exit(&hdl->sa_lock);
return (error);
}
+
+/*
+ * For an existing object upgraded from an old system, the on-disk layout
+ * has no slot for the project ID attribute. But the quota accounting logic
+ * needs to access related slots by offset directly, so we have to adjust
+ * such old objects' layout to put the project ID at a unified, fixed offset.
+ */
+int
+sa_add_projid(sa_handle_t *hdl, dmu_tx_t *tx, uint64_t projid)
+{
+ znode_t *zp = sa_get_userdata(hdl);
+ dmu_buf_t *db = sa_get_db(hdl);
+ zfsvfs_t *zfsvfs = ZTOZSB(zp);
+ int count = 0, err = 0;
+ sa_bulk_attr_t *bulk, *attrs;
+ zfs_acl_locator_cb_t locate = { 0 };
+ uint64_t uid, gid, mode, rdev, xattr = 0, parent, gen, links;
+ uint64_t crtime[2], mtime[2], ctime[2], atime[2];
+ zfs_acl_phys_t znode_acl = { 0 };
+ char scanstamp[AV_SCANSTAMP_SZ];
+
+ if (zp->z_acl_cached == NULL) {
+ zfs_acl_t *aclp;
+
+ mutex_enter(&zp->z_acl_lock);
+ err = zfs_acl_node_read(zp, B_FALSE, &aclp, B_FALSE);
+ mutex_exit(&zp->z_acl_lock);
+ if (err != 0 && err != ENOENT)
+ return (err);
+ }
+
+ bulk = kmem_zalloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
+ attrs = kmem_zalloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
+ mutex_enter(&hdl->sa_lock);
+ mutex_enter(&zp->z_lock);
+
+ err = sa_lookup_locked(hdl, SA_ZPL_PROJID(zfsvfs), &projid,
+ sizeof (uint64_t));
+ if (unlikely(err == 0))
+ /* Someone else added the project ID attr in a race. */
+ err = EEXIST;
+ if (err != ENOENT)
+ goto out;
+
+ /* First do a bulk query of the attributes that aren't cached */
+ if (zp->z_is_sa) {
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
+ &mode, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
+ &gen, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
+ &uid, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
+ &gid, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
+ &parent, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
+ &atime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
+ &mtime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
+ &ctime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL,
+ &crtime, 16);
+ if (S_ISBLK(ZTOI(zp)->i_mode) || S_ISCHR(ZTOI(zp)->i_mode))
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL,
+ &rdev, 8);
+ } else {
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
+ &atime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
+ &mtime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
+ &ctime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL,
+ &crtime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
+ &gen, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
+ &mode, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
+ &parent, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_XATTR(zfsvfs), NULL,
+ &xattr, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL,
+ &rdev, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
+ &uid, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
+ &gid, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
+ &znode_acl, 88);
+ }
+ err = sa_bulk_lookup_locked(hdl, bulk, count);
+ if (err != 0)
+ goto out;
+
+ err = sa_lookup_locked(hdl, SA_ZPL_XATTR(zfsvfs), &xattr, 8);
+ if (err != 0 && err != ENOENT)
+ goto out;
+
+ zp->z_projid = projid;
+ zp->z_pflags |= ZFS_PROJID;
+ links = ZTOI(zp)->i_nlink;
+ count = 0;
+ err = 0;
+
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_SIZE(zfsvfs), NULL,
+ &zp->z_size, 8);
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_GEN(zfsvfs), NULL, &gen, 8);
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_UID(zfsvfs), NULL, &uid, 8);
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_GID(zfsvfs), NULL, &gid, 8);
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8);
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_FLAGS(zfsvfs), NULL,
+ &zp->z_pflags, 8);
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_CRTIME(zfsvfs), NULL,
+ &crtime, 16);
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_PROJID(zfsvfs), NULL, &projid, 8);
+
+ if (S_ISBLK(ZTOI(zp)->i_mode) || S_ISCHR(ZTOI(zp)->i_mode))
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_RDEV(zfsvfs), NULL,
+ &rdev, 8);
+
+ if (zp->z_acl_cached != NULL) {
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
+ &zp->z_acl_cached->z_acl_count, 8);
+ if (zp->z_acl_cached->z_version < ZFS_ACL_VERSION_FUID)
+ zfs_acl_xform(zp, zp->z_acl_cached, CRED());
+ locate.cb_aclp = zp->z_acl_cached;
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_DACL_ACES(zfsvfs),
+ zfs_acl_data_locator, &locate,
+ zp->z_acl_cached->z_acl_bytes);
+ }
+
+ if (xattr)
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_XATTR(zfsvfs), NULL,
+ &xattr, 8);
+
+ if (zp->z_pflags & ZFS_BONUS_SCANSTAMP) {
+ bcopy((caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
+ scanstamp, AV_SCANSTAMP_SZ);
+ SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_SCANSTAMP(zfsvfs), NULL,
+ scanstamp, AV_SCANSTAMP_SZ);
+ zp->z_pflags &= ~ZFS_BONUS_SCANSTAMP;
+ }
+
+ VERIFY(dmu_set_bonustype(db, DMU_OT_SA, tx) == 0);
+ VERIFY(sa_replace_all_by_template_locked(hdl, attrs, count, tx) == 0);
+ if (znode_acl.z_acl_extern_obj) {
+ VERIFY(0 == dmu_object_free(zfsvfs->z_os,
+ znode_acl.z_acl_extern_obj, tx));
+ }
+
+ zp->z_is_sa = B_TRUE;
+
+out:
+ mutex_exit(&zp->z_lock);
+ mutex_exit(&hdl->sa_lock);
+ kmem_free(attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
+ kmem_free(bulk, sizeof (sa_bulk_attr_t) * ZPL_END);
+ return (err);
+}
#endif
static sa_idx_tab_t *
@@ -2062,4 +2244,5 @@ EXPORT_SYMBOL(sa_hdrsize);
EXPORT_SYMBOL(sa_handle_lock);
EXPORT_SYMBOL(sa_handle_unlock);
EXPORT_SYMBOL(sa_lookup_uio);
+EXPORT_SYMBOL(sa_add_projid);
#endif /* _KERNEL */
diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index dac042464..736b51fea 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -1188,7 +1188,7 @@ spa_activate(spa_t *spa, int mode)
/*
* The taskq to upgrade datasets in this pool. Currently used by
- * feature SPA_FEATURE_USEROBJ_ACCOUNTING.
+ * feature SPA_FEATURE_USEROBJ_ACCOUNTING/SPA_FEATURE_PROJECT_QUOTA.
*/
spa->spa_upgrade_taskq = taskq_create("z_upgrade", boot_ncpus,
defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC);
diff --git a/module/zfs/zfs_acl.c b/module/zfs/zfs_acl.c
index 5ef20f088..b366e8f1c 100644
--- a/module/zfs/zfs_acl.c
+++ b/module/zfs/zfs_acl.c
@@ -1054,8 +1054,8 @@ zfs_mode_compute(uint64_t fmode, zfs_acl_t *aclp,
* Read an external acl object. If the intent is to modify, always
* create a new acl and leave any cached acl in place.
*/
-static int
-zfs_acl_node_read(znode_t *zp, boolean_t have_lock, zfs_acl_t **aclpp,
+int
+zfs_acl_node_read(struct znode *zp, boolean_t have_lock, zfs_acl_t **aclpp,
boolean_t will_modify)
{
zfs_acl_t *aclp;
@@ -1883,12 +1883,12 @@ zfs_acl_ids_free(zfs_acl_ids_t *acl_ids)
}
boolean_t
-zfs_acl_ids_overquota(zfsvfs_t *zfsvfs, zfs_acl_ids_t *acl_ids)
+zfs_acl_ids_overquota(zfsvfs_t *zv, zfs_acl_ids_t *acl_ids, uint64_t projid)
{
- return (zfs_fuid_overquota(zfsvfs, B_FALSE, acl_ids->z_fuid) ||
- zfs_fuid_overquota(zfsvfs, B_TRUE, acl_ids->z_fgid) ||
- zfs_fuid_overobjquota(zfsvfs, B_FALSE, acl_ids->z_fuid) ||
- zfs_fuid_overobjquota(zfsvfs, B_TRUE, acl_ids->z_fgid));
+ return (zfs_id_overquota(zv, DMU_USERUSED_OBJECT, acl_ids->z_fuid) ||
+ zfs_id_overquota(zv, DMU_GROUPUSED_OBJECT, acl_ids->z_fgid) ||
+ (projid != ZFS_DEFAULT_PROJID && projid != ZFS_INVALID_PROJID &&
+ zfs_id_overquota(zv, DMU_PROJECTUSED_OBJECT, projid)));
}
/*
diff --git a/module/zfs/zfs_dir.c b/module/zfs/zfs_dir.c
index 6398a1d15..7eb426b78 100644
--- a/module/zfs/zfs_dir.c
+++ b/module/zfs/zfs_dir.c
@@ -1036,7 +1036,7 @@ zfs_make_xattrdir(znode_t *zp, vattr_t *vap, struct inode **xipp, cred_t *cr)
if ((error = zfs_acl_ids_create(zp, IS_XATTR, vap, cr, NULL,
&acl_ids)) != 0)
return (error);
- if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
+ if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, zp->z_projid)) {
zfs_acl_ids_free(&acl_ids);
return (SET_ERROR(EDQUOT));
}
diff --git a/module/zfs/zfs_ioctl.c b/module/zfs/zfs_ioctl.c
index fcd2fca12..6bee042a0 100644
--- a/module/zfs/zfs_ioctl.c
+++ b/module/zfs/zfs_ioctl.c
@@ -260,10 +260,14 @@ static const char *userquota_perms[] = {
ZFS_DELEG_PERM_USEROBJQUOTA,
ZFS_DELEG_PERM_GROUPOBJUSED,
ZFS_DELEG_PERM_GROUPOBJQUOTA,
+ ZFS_DELEG_PERM_PROJECTUSED,
+ ZFS_DELEG_PERM_PROJECTQUOTA,
+ ZFS_DELEG_PERM_PROJECTOBJUSED,
+ ZFS_DELEG_PERM_PROJECTOBJQUOTA,
};
static int zfs_ioc_userspace_upgrade(zfs_cmd_t *zc);
-static int zfs_ioc_userobjspace_upgrade(zfs_cmd_t *zc);
+static int zfs_ioc_id_quota_upgrade(zfs_cmd_t *zc);
static int zfs_check_settable(const char *name, nvpair_t *property,
cred_t *cr);
static int zfs_check_clearable(char *dataset, nvlist_t *props,
@@ -1200,10 +1204,14 @@ zfs_secpolicy_userspace_one(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
zc->zc_objset_type == ZFS_PROP_USEROBJQUOTA) {
if (zc->zc_guid == crgetuid(cr))
return (0);
- } else {
+ } else if (zc->zc_objset_type == ZFS_PROP_GROUPUSED ||
+ zc->zc_objset_type == ZFS_PROP_GROUPQUOTA ||
+ zc->zc_objset_type == ZFS_PROP_GROUPOBJUSED ||
+ zc->zc_objset_type == ZFS_PROP_GROUPOBJQUOTA) {
if (groupmember(zc->zc_guid, cr))
return (0);
}
+ /* else is for project quota/used */
}
return (zfs_secpolicy_write_perms(zc->zc_name,
@@ -2516,7 +2524,7 @@ zfs_prop_set_special(const char *dsname, zprop_source_t source,
zc = kmem_zalloc(sizeof (zfs_cmd_t), KM_SLEEP);
(void) strcpy(zc->zc_name, dsname);
(void) zfs_ioc_userspace_upgrade(zc);
- (void) zfs_ioc_userobjspace_upgrade(zc);
+ (void) zfs_ioc_id_quota_upgrade(zc);
kmem_free(zc, sizeof (zfs_cmd_t));
}
break;
@@ -3897,6 +3905,10 @@ zfs_check_settable(const char *dsname, nvpair_t *pair, cred_t *cr)
zfs_userquota_prop_prefixes[ZFS_PROP_USEROBJQUOTA];
const char *giq_prefix =
zfs_userquota_prop_prefixes[ZFS_PROP_GROUPOBJQUOTA];
+ const char *pq_prefix =
+ zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTQUOTA];
+ const char *piq_prefix = zfs_userquota_prop_prefixes[\
+ ZFS_PROP_PROJECTOBJQUOTA];
if (strncmp(propname, uq_prefix,
strlen(uq_prefix)) == 0) {
@@ -3910,8 +3922,14 @@ zfs_check_settable(const char *dsname, nvpair_t *pair, cred_t *cr)
} else if (strncmp(propname, giq_prefix,
strlen(giq_prefix)) == 0) {
perm = ZFS_DELEG_PERM_GROUPOBJQUOTA;
+ } else if (strncmp(propname, pq_prefix,
+ strlen(pq_prefix)) == 0) {
+ perm = ZFS_DELEG_PERM_PROJECTQUOTA;
+ } else if (strncmp(propname, piq_prefix,
+ strlen(piq_prefix)) == 0) {
+ perm = ZFS_DELEG_PERM_PROJECTOBJQUOTA;
} else {
- /* USERUSED and GROUPUSED are read-only */
+ /* {USER|GROUP|PROJECT}USED are read-only */
return (SET_ERROR(EINVAL));
}
@@ -5180,7 +5198,7 @@ zfs_ioc_promote(zfs_cmd_t *zc)
}
/*
- * Retrieve a single {user|group}{used|quota}@... property.
+ * Retrieve a single {user|group|project}{used|quota}@... property.
*
* inputs:
* zc_name name of filesystem
@@ -5306,7 +5324,7 @@ zfs_ioc_userspace_upgrade(zfs_cmd_t *zc)
* none
*/
static int
-zfs_ioc_userobjspace_upgrade(zfs_cmd_t *zc)
+zfs_ioc_id_quota_upgrade(zfs_cmd_t *zc)
{
objset_t *os;
int error;
@@ -5315,14 +5333,15 @@ zfs_ioc_userobjspace_upgrade(zfs_cmd_t *zc)
if (error != 0)
return (error);
- if (dmu_objset_userobjspace_upgradable(os)) {
+ if (dmu_objset_userobjspace_upgradable(os) ||
+ dmu_objset_projectquota_upgradable(os)) {
mutex_enter(&os->os_upgrade_lock);
if (os->os_upgrade_id == 0) {
/* clear potential error code and retry */
os->os_upgrade_status = 0;
mutex_exit(&os->os_upgrade_lock);
- dmu_objset_userobjspace_upgrade(os);
+ dmu_objset_id_quota_upgrade(os);
} else {
mutex_exit(&os->os_upgrade_lock);
}
diff --git a/module/zfs/zfs_log.c b/module/zfs/zfs_log.c
index 8887f037a..ce7b84927 100644
--- a/module/zfs/zfs_log.c
+++ b/module/zfs/zfs_log.c
@@ -166,8 +166,17 @@ zfs_log_xvattr(lr_attr_t *lrattr, xvattr_t *xvap)
XAT0_AV_MODIFIED;
if (XVA_ISSET_REQ(xvap, XAT_CREATETIME))
ZFS_TIME_ENCODE(&xoap->xoa_createtime, crtime);
- if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
+ if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
+ ASSERT(!XVA_ISSET_REQ(xvap, XAT_PROJID));
+
bcopy(xoap->xoa_av_scanstamp, scanstamp, AV_SCANSTAMP_SZ);
+ } else if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
+ /*
+ * XAT_PROJID and XAT_AV_SCANSTAMP will never be valid
+ * at the same time, so we can share the same space.
+ */
+ bcopy(&xoap->xoa_projid, scanstamp, sizeof (uint64_t));
+ }
if (XVA_ISSET_REQ(xvap, XAT_REPARSE))
*attrs |= (xoap->xoa_reparse == 0) ? 0 :
XAT0_REPARSE;
@@ -177,6 +186,9 @@ zfs_log_xvattr(lr_attr_t *lrattr, xvattr_t *xvap)
if (XVA_ISSET_REQ(xvap, XAT_SPARSE))
*attrs |= (xoap->xoa_sparse == 0) ? 0 :
XAT0_SPARSE;
+ if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT))
+ *attrs |= (xoap->xoa_projinherit == 0) ? 0 :
+ XAT0_PROJINHERIT;
}
static void *
diff --git a/module/zfs/zfs_replay.c b/module/zfs/zfs_replay.c
index bfba2fea0..e2ff00789 100644
--- a/module/zfs/zfs_replay.c
+++ b/module/zfs/zfs_replay.c
@@ -128,14 +128,25 @@ zfs_replay_xvattr(lr_attr_t *lrattr, xvattr_t *xvap)
((*attrs & XAT0_AV_QUARANTINED) != 0);
if (XVA_ISSET_REQ(xvap, XAT_CREATETIME))
ZFS_TIME_DECODE(&xoap->xoa_createtime, crtime);
- if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
+ if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
+ ASSERT(!XVA_ISSET_REQ(xvap, XAT_PROJID));
+
bcopy(scanstamp, xoap->xoa_av_scanstamp, AV_SCANSTAMP_SZ);
+ } else if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
+ /*
+ * XAT_PROJID and XAT_AV_SCANSTAMP will never be valid
+ * at the same time, so we can share the same space.
+ */
+ bcopy(scanstamp, &xoap->xoa_projid, sizeof (uint64_t));
+ }
if (XVA_ISSET_REQ(xvap, XAT_REPARSE))
xoap->xoa_reparse = ((*attrs & XAT0_REPARSE) != 0);
if (XVA_ISSET_REQ(xvap, XAT_OFFLINE))
xoap->xoa_offline = ((*attrs & XAT0_OFFLINE) != 0);
if (XVA_ISSET_REQ(xvap, XAT_SPARSE))
xoap->xoa_sparse = ((*attrs & XAT0_SPARSE) != 0);
+ if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT))
+ xoap->xoa_projinherit = ((*attrs & XAT0_PROJINHERIT) != 0);
}
static int
diff --git a/module/zfs/zfs_sa.c b/module/zfs/zfs_sa.c
index 3eff6acc6..bd21ba896 100644
--- a/module/zfs/zfs_sa.c
+++ b/module/zfs/zfs_sa.c
@@ -27,6 +27,8 @@
#include <sys/sa.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_sa.h>
+#include <sys/dmu_objset.h>
+#include <sys/sa_impl.h>
/*
* ZPL attribute registration table.
@@ -63,6 +65,7 @@ sa_attr_reg_t zfs_attr_table[ZPL_END+1] = {
{"ZPL_SCANSTAMP", 32, SA_UINT8_ARRAY, 0},
{"ZPL_DACL_ACES", 0, SA_ACL, 0},
{"ZPL_DXATTR", 0, SA_UINT8_ARRAY, 0},
+ {"ZPL_PROJID", sizeof (uint64_t), SA_UINT64_ARRAY, 0},
{NULL, 0, 0, 0}
};
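The new ZPL_PROJID entry registers an 8-byte SA_UINT64_ARRAY attribute at the end of the ZPL table, so existing layouts without it remain valid while new or upgraded files carry the project ID as a regular system attribute. Once present, it is read like any other SA. The following is a hedged sketch of the typical in-kernel lookup pattern (the helper name zfs_get_projid_example is made up for illustration; SA_ZPL_PROJID(), sa_lookup(), and the ZFS_PROJID flag are the ones used elsewhere in this diff, e.g. in sa_add_projid()):

/*
 * Sketch of reading the project ID back through the SA interface once the
 * attribute exists. Error handling abbreviated; in the actual patch the
 * value is cached in zp->z_projid rather than re-read each time.
 */
static int
zfs_get_projid_example(znode_t *zp, uint64_t *projidp)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);

	if (!(zp->z_pflags & ZFS_PROJID))	/* old layout, never upgraded */
		return (ENOENT);

	return (sa_lookup(zp->z_sa_hdl, SA_ZPL_PROJID(zfsvfs),
	    projidp, sizeof (*projidp)));
}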
@@ -317,7 +320,7 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
}
/* First do a bulk query of the attributes that aren't cached */
- bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * 20, KM_SLEEP);
+ bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
@@ -332,9 +335,13 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
&znode_acl, 88);
- if (sa_bulk_lookup_locked(hdl, bulk, count) != 0) {
- kmem_free(bulk, sizeof (sa_bulk_attr_t) * 20);
+ if (sa_bulk_lookup_locked(hdl, bulk, count) != 0)
goto done;
+
+ if (dmu_objset_projectquota_enabled(hdl->sa_os) &&
+ !(zp->z_pflags & ZFS_PROJID)) {
+ zp->z_pflags |= ZFS_PROJID;
+ zp->z_projid = ZFS_DEFAULT_PROJID;
}
/*
@@ -342,7 +349,7 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
* it is such a way to pick up an already existing layout number
*/
count = 0;
- sa_attrs = kmem_zalloc(sizeof (sa_bulk_attr_t) * 20, KM_SLEEP);
+ sa_attrs = kmem_zalloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, 8);
@@ -365,6 +372,9 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
links = ZTOI(zp)->i_nlink;
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_LINKS(zfsvfs), NULL,
&links, 8);
+ if (dmu_objset_projectquota_enabled(hdl->sa_os))
+ SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_PROJID(zfsvfs), NULL,
+ &zp->z_projid, 8);
if (S_ISBLK(ZTOI(zp)->i_mode) || S_ISCHR(ZTOI(zp)->i_mode))
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_RDEV(zfsvfs), NULL,
&rdev, 8);
@@ -400,9 +410,9 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
znode_acl.z_acl_extern_obj, tx));
zp->z_is_sa = B_TRUE;
- kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * 20);
- kmem_free(bulk, sizeof (sa_bulk_attr_t) * 20);
+ kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
done:
+ kmem_free(bulk, sizeof (sa_bulk_attr_t) * ZPL_END);
if (drop_lock)
mutex_exit(&zp->z_lock);
}
diff --git a/module/zfs/zfs_vfsops.c b/module/zfs/zfs_vfsops.c
index bb380c920..971fd54bc 100644
--- a/module/zfs/zfs_vfsops.c
+++ b/module/zfs/zfs_vfsops.c
@@ -536,8 +536,14 @@ unregister:
static int
zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
- uint64_t *userp, uint64_t *groupp)
+ uint64_t *userp, uint64_t *groupp, uint64_t *projectp)
{
+ sa_hdr_phys_t sa;
+ sa_hdr_phys_t *sap = data;
+ uint64_t flags;
+ int hdrsize;
+ boolean_t swap = B_FALSE;
+
/*
* Is it a valid type of object to track?
*/
@@ -557,42 +563,49 @@ zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
znode_phys_t *znp = data;
*userp = znp->zp_uid;
*groupp = znp->zp_gid;
+ *projectp = ZFS_DEFAULT_PROJID;
+ return (0);
+ }
+
+ if (sap->sa_magic == 0) {
+ /*
+ * This should only happen for newly created files
+ * that haven't had the znode data filled in yet.
+ */
+ *userp = 0;
+ *groupp = 0;
+ *projectp = ZFS_DEFAULT_PROJID;
+ return (0);
+ }
+
+ sa = *sap;
+ if (sa.sa_magic == BSWAP_32(SA_MAGIC)) {
+ sa.sa_magic = SA_MAGIC;
+ sa.sa_layout_info = BSWAP_16(sa.sa_layout_info);
+ swap = B_TRUE;
} else {
- int hdrsize;
- sa_hdr_phys_t *sap = data;
- sa_hdr_phys_t sa = *sap;
- boolean_t swap = B_FALSE;
-
- ASSERT(bonustype == DMU_OT_SA);
-
- if (sa.sa_magic == 0) {
- /*
- * This should only happen for newly created
- * files that haven't had the znode data filled
- * in yet.
- */
- *userp = 0;
- *groupp = 0;
- return (0);
- }
- if (sa.sa_magic == BSWAP_32(SA_MAGIC)) {
- sa.sa_magic = SA_MAGIC;
- sa.sa_layout_info = BSWAP_16(sa.sa_layout_info);
- swap = B_TRUE;
- } else {
- VERIFY3U(sa.sa_magic, ==, SA_MAGIC);
- }
+ VERIFY3U(sa.sa_magic, ==, SA_MAGIC);
+ }
- hdrsize = sa_hdrsize(&sa);
- VERIFY3U(hdrsize, >=, sizeof (sa_hdr_phys_t));
- *userp = *((uint64_t *)((uintptr_t)data + hdrsize +
- SA_UID_OFFSET));
- *groupp = *((uint64_t *)((uintptr_t)data + hdrsize +
- SA_GID_OFFSET));
- if (swap) {
- *userp = BSWAP_64(*userp);
- *groupp = BSWAP_64(*groupp);
- }
+ hdrsize = sa_hdrsize(&sa);
+ VERIFY3U(hdrsize, >=, sizeof (sa_hdr_phys_t));
+
+ *userp = *((uint64_t *)((uintptr_t)data + hdrsize + SA_UID_OFFSET));
+ *groupp = *((uint64_t *)((uintptr_t)data + hdrsize + SA_GID_OFFSET));
+ flags = *((uint64_t *)((uintptr_t)data + hdrsize + SA_FLAGS_OFFSET));
+ if (swap)
+ flags = BSWAP_64(flags);
+
+ if (flags & ZFS_PROJID)
+ *projectp = *((uint64_t *)((uintptr_t)data + hdrsize +
+ SA_PROJID_OFFSET));
+ else
+ *projectp = ZFS_DEFAULT_PROJID;
+
+ if (swap) {
+ *userp = BSWAP_64(*userp);
+ *groupp = BSWAP_64(*groupp);
+ *projectp = BSWAP_64(*projectp);
}
return (0);
}
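zfs_space_delta_cb() runs in the DMU sync path without an SA handle, so it pulls uid/gid/flags (and, when ZFS_PROJID is set, the project ID) straight out of the bonus or spill buffer at fixed byte offsets past the variable-size SA header. This is exactly why sa_add_projid() earlier in this diff must rewrite old layouts so the project ID lands at a known offset. A standalone illustration of that access pattern follows; the EX_*_OFFSET values and the 16-byte header are placeholders, not the real SA layout constants from zfs_znode.h/sa_impl.h.

/*
 * Standalone sketch: "field at fixed offset past a variable-size header",
 * with optional byteswap for a foreign-endian objset.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define	EX_UID_OFFSET		0	/* placeholder */
#define	EX_PROJID_OFFSET	8	/* placeholder */

static uint64_t
read_u64_at(const void *data, int hdrsize, int offset, int swap)
{
	uint64_t v;

	memcpy(&v, (const uint8_t *)data + hdrsize + offset, sizeof (v));
	return (swap ? __builtin_bswap64(v) : v);
}

int
main(void)
{
	uint8_t bonus[64] = { 0 };
	int hdrsize = 16;	/* pretend sa_hdrsize() returned 16 */
	uint64_t uid = 1000, projid = 42;

	memcpy(bonus + hdrsize + EX_UID_OFFSET, &uid, sizeof (uid));
	memcpy(bonus + hdrsize + EX_PROJID_OFFSET, &projid, sizeof (projid));

	printf("uid=%llu projid=%llu\n",
	    (unsigned long long)read_u64_at(bonus, hdrsize, EX_UID_OFFSET, 0),
	    (unsigned long long)read_u64_at(bonus, hdrsize,
	    EX_PROJID_OFFSET, 0));
	return (0);
}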
@@ -624,6 +637,9 @@ zfs_userquota_prop_to_obj(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type)
case ZFS_PROP_GROUPUSED:
case ZFS_PROP_GROUPOBJUSED:
return (DMU_GROUPUSED_OBJECT);
+ case ZFS_PROP_PROJECTUSED:
+ case ZFS_PROP_PROJECTOBJUSED:
+ return (DMU_PROJECTUSED_OBJECT);
case ZFS_PROP_USERQUOTA:
return (zfsvfs->z_userquota_obj);
case ZFS_PROP_GROUPQUOTA:
@@ -632,6 +648,10 @@ zfs_userquota_prop_to_obj(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type)
return (zfsvfs->z_userobjquota_obj);
case ZFS_PROP_GROUPOBJQUOTA:
return (zfsvfs->z_groupobjquota_obj);
+ case ZFS_PROP_PROJECTQUOTA:
+ return (zfsvfs->z_projectquota_obj);
+ case ZFS_PROP_PROJECTOBJQUOTA:
+ return (zfsvfs->z_projectobjquota_obj);
default:
return (ZFS_NO_OBJECT);
}
@@ -651,8 +671,16 @@ zfs_userspace_many(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
if (!dmu_objset_userspace_present(zfsvfs->z_os))
return (SET_ERROR(ENOTSUP));
+ if ((type == ZFS_PROP_PROJECTQUOTA || type == ZFS_PROP_PROJECTUSED ||
+ type == ZFS_PROP_PROJECTOBJQUOTA ||
+ type == ZFS_PROP_PROJECTOBJUSED) &&
+ !dmu_objset_projectquota_present(zfsvfs->z_os))
+ return (SET_ERROR(ENOTSUP));
+
if ((type == ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED ||
- type == ZFS_PROP_USEROBJQUOTA || type == ZFS_PROP_GROUPOBJQUOTA) &&
+ type == ZFS_PROP_USEROBJQUOTA || type == ZFS_PROP_GROUPOBJQUOTA ||
+ type == ZFS_PROP_PROJECTOBJUSED ||
+ type == ZFS_PROP_PROJECTOBJQUOTA) &&
!dmu_objset_userobjspace_present(zfsvfs->z_os))
return (SET_ERROR(ENOTSUP));
@@ -662,7 +690,8 @@ zfs_userspace_many(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
return (0);
}
- if (type == ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED)
+ if (type == ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED ||
+ type == ZFS_PROP_PROJECTOBJUSED)
offset = DMU_OBJACCT_PREFIX_LEN;
for (zap_cursor_init_serialized(&zc, zfsvfs->z_os, obj, *cookiep);
@@ -731,15 +760,27 @@ zfs_userspace_one(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
return (SET_ERROR(ENOTSUP));
if ((type == ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED ||
- type == ZFS_PROP_USEROBJQUOTA || type == ZFS_PROP_GROUPOBJQUOTA) &&
+ type == ZFS_PROP_USEROBJQUOTA || type == ZFS_PROP_GROUPOBJQUOTA ||
+ type == ZFS_PROP_PROJECTOBJUSED ||
+ type == ZFS_PROP_PROJECTOBJQUOTA) &&
!dmu_objset_userobjspace_present(zfsvfs->z_os))
return (SET_ERROR(ENOTSUP));
+ if (type == ZFS_PROP_PROJECTQUOTA || type == ZFS_PROP_PROJECTUSED ||
+ type == ZFS_PROP_PROJECTOBJQUOTA ||
+ type == ZFS_PROP_PROJECTOBJUSED) {
+ if (!dmu_objset_projectquota_present(zfsvfs->z_os))
+ return (SET_ERROR(ENOTSUP));
+ if (!zpl_is_valid_projid(rid))
+ return (SET_ERROR(EINVAL));
+ }
+
obj = zfs_userquota_prop_to_obj(zfsvfs, type);
if (obj == ZFS_NO_OBJECT)
return (0);
- if (type == ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED) {
+ if (type == ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED ||
+ type == ZFS_PROP_PROJECTOBJUSED) {
strlcpy(buf, DMU_OBJACCT_PREFIX, DMU_OBJACCT_PREFIX_LEN + 1);
offset = DMU_OBJACCT_PREFIX_LEN;
}
@@ -780,6 +821,22 @@ zfs_set_userquota(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
case ZFS_PROP_GROUPOBJQUOTA:
objp = &zfsvfs->z_groupobjquota_obj;
break;
+ case ZFS_PROP_PROJECTQUOTA:
+ if (!dmu_objset_projectquota_enabled(zfsvfs->z_os))
+ return (SET_ERROR(ENOTSUP));
+ if (!zpl_is_valid_projid(rid))
+ return (SET_ERROR(EINVAL));
+
+ objp = &zfsvfs->z_projectquota_obj;
+ break;
+ case ZFS_PROP_PROJECTOBJQUOTA:
+ if (!dmu_objset_projectquota_enabled(zfsvfs->z_os))
+ return (SET_ERROR(ENOTSUP));
+ if (!zpl_is_valid_projid(rid))
+ return (SET_ERROR(EINVAL));
+
+ objp = &zfsvfs->z_projectobjquota_obj;
+ break;
default:
return (SET_ERROR(EINVAL));
}
@@ -827,35 +884,51 @@ zfs_set_userquota(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
}
boolean_t
-zfs_fuid_overobjquota(zfsvfs_t *zfsvfs, boolean_t isgroup, uint64_t fuid)
+zfs_id_overobjquota(zfsvfs_t *zfsvfs, uint64_t usedobj, uint64_t id)
{
char buf[20 + DMU_OBJACCT_PREFIX_LEN];
- uint64_t used, quota, usedobj, quotaobj;
+ uint64_t used, quota, quotaobj;
int err;
if (!dmu_objset_userobjspace_present(zfsvfs->z_os)) {
if (dmu_objset_userobjspace_upgradable(zfsvfs->z_os)) {
dsl_pool_config_enter(
dmu_objset_pool(zfsvfs->z_os), FTAG);
- dmu_objset_userobjspace_upgrade(zfsvfs->z_os);
+ dmu_objset_id_quota_upgrade(zfsvfs->z_os);
dsl_pool_config_exit(
dmu_objset_pool(zfsvfs->z_os), FTAG);
}
return (B_FALSE);
}
- usedobj = isgroup ? DMU_GROUPUSED_OBJECT : DMU_USERUSED_OBJECT;
- quotaobj = isgroup ? zfsvfs->z_groupobjquota_obj :
- zfsvfs->z_userobjquota_obj;
+ if (usedobj == DMU_PROJECTUSED_OBJECT) {
+ if (!dmu_objset_projectquota_present(zfsvfs->z_os)) {
+ if (dmu_objset_projectquota_upgradable(zfsvfs->z_os)) {
+ dsl_pool_config_enter(
+ dmu_objset_pool(zfsvfs->z_os), FTAG);
+ dmu_objset_id_quota_upgrade(zfsvfs->z_os);
+ dsl_pool_config_exit(
+ dmu_objset_pool(zfsvfs->z_os), FTAG);
+ }
+ return (B_FALSE);
+ }
+ quotaobj = zfsvfs->z_projectobjquota_obj;
+ } else if (usedobj == DMU_USERUSED_OBJECT) {
+ quotaobj = zfsvfs->z_userobjquota_obj;
+ } else if (usedobj == DMU_GROUPUSED_OBJECT) {
+ quotaobj = zfsvfs->z_groupobjquota_obj;
+ } else {
+ return (B_FALSE);
+ }
if (quotaobj == 0 || zfsvfs->z_replay)
return (B_FALSE);
- (void) sprintf(buf, "%llx", (longlong_t)fuid);
+ (void) sprintf(buf, "%llx", (longlong_t)id);
err = zap_lookup(zfsvfs->z_os, quotaobj, buf, 8, 1, &quota);
if (err != 0)
return (B_FALSE);
- (void) sprintf(buf, DMU_OBJACCT_PREFIX "%llx", (longlong_t)fuid);
+ (void) sprintf(buf, DMU_OBJACCT_PREFIX "%llx", (longlong_t)id);
err = zap_lookup(zfsvfs->z_os, usedobj, buf, 8, 1, &used);
if (err != 0)
return (B_FALSE);
@@ -863,19 +936,35 @@ zfs_fuid_overobjquota(zfsvfs_t *zfsvfs, boolean_t isgroup, uint64_t fuid)
}
boolean_t
-zfs_fuid_overquota(zfsvfs_t *zfsvfs, boolean_t isgroup, uint64_t fuid)
+zfs_id_overblockquota(zfsvfs_t *zfsvfs, uint64_t usedobj, uint64_t id)
{
char buf[20];
- uint64_t used, quota, usedobj, quotaobj;
+ uint64_t used, quota, quotaobj;
int err;
- usedobj = isgroup ? DMU_GROUPUSED_OBJECT : DMU_USERUSED_OBJECT;
- quotaobj = isgroup ? zfsvfs->z_groupquota_obj : zfsvfs->z_userquota_obj;
-
+ if (usedobj == DMU_PROJECTUSED_OBJECT) {
+ if (!dmu_objset_projectquota_present(zfsvfs->z_os)) {
+ if (dmu_objset_projectquota_upgradable(zfsvfs->z_os)) {
+ dsl_pool_config_enter(
+ dmu_objset_pool(zfsvfs->z_os), FTAG);
+ dmu_objset_id_quota_upgrade(zfsvfs->z_os);
+ dsl_pool_config_exit(
+ dmu_objset_pool(zfsvfs->z_os), FTAG);
+ }
+ return (B_FALSE);
+ }
+ quotaobj = zfsvfs->z_projectquota_obj;
+ } else if (usedobj == DMU_USERUSED_OBJECT) {
+ quotaobj = zfsvfs->z_userquota_obj;
+ } else if (usedobj == DMU_GROUPUSED_OBJECT) {
+ quotaobj = zfsvfs->z_groupquota_obj;
+ } else {
+ return (B_FALSE);
+ }
if (quotaobj == 0 || zfsvfs->z_replay)
return (B_FALSE);
- (void) sprintf(buf, "%llx", (longlong_t)fuid);
+ (void) sprintf(buf, "%llx", (longlong_t)id);
err = zap_lookup(zfsvfs->z_os, quotaobj, buf, 8, 1, &quota);
if (err != 0)
return (B_FALSE);
@@ -887,20 +976,10 @@ zfs_fuid_overquota(zfsvfs_t *zfsvfs, boolean_t isgroup, uint64_t fuid)
}
boolean_t
-zfs_owner_overquota(zfsvfs_t *zfsvfs, znode_t *zp, boolean_t isgroup)
+zfs_id_overquota(zfsvfs_t *zfsvfs, uint64_t usedobj, uint64_t id)
{
- uint64_t fuid;
- uint64_t quotaobj;
- struct inode *ip = ZTOI(zp);
-
- quotaobj = isgroup ? zfsvfs->z_groupquota_obj : zfsvfs->z_userquota_obj;
-
- fuid = isgroup ? KGID_TO_SGID(ip->i_gid) : KUID_TO_SUID(ip->i_uid);
-
- if (quotaobj == 0 || zfsvfs->z_replay)
- return (B_FALSE);
-
- return (zfs_fuid_overquota(zfsvfs, isgroup, fuid));
+ return (zfs_id_overblockquota(zfsvfs, usedobj, id) ||
+ zfs_id_overobjquota(zfsvfs, usedobj, id));
}
/*
@@ -1008,6 +1087,14 @@ zfsvfs_init(zfsvfs_t *zfsvfs, objset_t *os)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
+ zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTQUOTA],
+ 8, 1, &zfsvfs->z_projectquota_obj);
+ if (error == ENOENT)
+ zfsvfs->z_projectquota_obj = 0;
+ else if (error != 0)
+ return (error);
+
+ error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_USEROBJQUOTA],
8, 1, &zfsvfs->z_userobjquota_obj);
if (error == ENOENT)
@@ -1023,6 +1110,14 @@ zfsvfs_init(zfsvfs_t *zfsvfs, objset_t *os)
else if (error != 0)
return (error);
+ error = zap_lookup(os, MASTER_NODE_OBJ,
+ zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTOBJQUOTA],
+ 8, 1, &zfsvfs->z_projectobjquota_obj);
+ if (error == ENOENT)
+ zfsvfs->z_projectobjquota_obj = 0;
+ else if (error != 0)
+ return (error);
+
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
&zfsvfs->z_fuid_obj);
if (error == ENOENT)
@@ -1264,6 +1359,83 @@ zfs_check_global_label(const char *dsname, const char *hexsl)
}
#endif /* HAVE_MLSLABEL */
+static int
+zfs_statfs_project(zfsvfs_t *zfsvfs, znode_t *zp, struct kstatfs *statp,
+ uint32_t bshift)
+{
+ char buf[20 + DMU_OBJACCT_PREFIX_LEN];
+ uint64_t offset = DMU_OBJACCT_PREFIX_LEN;
+ uint64_t quota;
+ uint64_t used;
+ int err;
+
+ strlcpy(buf, DMU_OBJACCT_PREFIX, DMU_OBJACCT_PREFIX_LEN + 1);
+ err = id_to_fuidstr(zfsvfs, NULL, zp->z_projid, buf + offset, B_FALSE);
+ if (err)
+ return (err);
+
+ if (zfsvfs->z_projectquota_obj == 0)
+ goto objs;
+
+ err = zap_lookup(zfsvfs->z_os, zfsvfs->z_projectquota_obj,
+ buf + offset, 8, 1, &quota);
+ if (err == ENOENT)
+ goto objs;
+ else if (err)
+ return (err);
+
+ err = zap_lookup(zfsvfs->z_os, DMU_PROJECTUSED_OBJECT,
+ buf + offset, 8, 1, &used);
+ if (unlikely(err == ENOENT)) {
+ uint32_t blksize;
+ u_longlong_t nblocks;
+
+ /*
+ * Quota accounting is async, so this can race: there
+ * is at least one object with the given project ID.
+ */
+ sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
+ if (unlikely(zp->z_blksz == 0))
+ blksize = zfsvfs->z_max_blksz;
+
+ used = blksize * nblocks;
+ } else if (err) {
+ return (err);
+ }
+
+ statp->f_blocks = quota >> bshift;
+ statp->f_bfree = (quota > used) ? ((quota - used) >> bshift) : 0;
+ statp->f_bavail = statp->f_bfree;
+
+objs:
+ if (zfsvfs->z_projectobjquota_obj == 0)
+ return (0);
+
+ err = zap_lookup(zfsvfs->z_os, zfsvfs->z_projectobjquota_obj,
+ buf + offset, 8, 1, &quota);
+ if (err == ENOENT)
+ return (0);
+ else if (err)
+ return (err);
+
+ err = zap_lookup(zfsvfs->z_os, DMU_PROJECTUSED_OBJECT,
+ buf, 8, 1, &used);
+ if (unlikely(err == ENOENT)) {
+ /*
+ * Quota accounting is async, so this can race: there
+ * is at least one object with the given project ID.
+ */
+ used = 1;
+ } else if (err) {
+ return (err);
+ }
+
+ statp->f_files = quota;
+ statp->f_ffree = (quota > used) ? (quota - used) : 0;
+
+ return (0);
+}
+
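zfs_statfs_project() clamps the reported filesystem numbers to the project's own limits: block counts are expressed in units of 2^bshift bytes, so the quota and usage are shifted down by bshift, and the free space/object counts are quota minus used, floored at zero. A quick standalone check of that arithmetic follows; the 1 GiB quota, 300 MiB usage, and bshift = 17 (128 KiB units) are example values only.

/* Standalone check of the f_blocks/f_bfree math above with example values. */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t quota = 1ULL << 30;	/* 1 GiB project block quota */
	uint64_t used = 300ULL << 20;	/* 300 MiB charged to the project */
	uint32_t bshift = 17;		/* statvfs unit: 128 KiB blocks */
	uint64_t f_blocks, f_bfree;

	f_blocks = quota >> bshift;				/* 8192 */
	f_bfree = (quota > used) ? ((quota - used) >> bshift) : 0; /* 5792 */

	printf("f_blocks=%llu f_bfree=%llu\n",
	    (unsigned long long)f_blocks, (unsigned long long)f_bfree);
	return (0);
}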
int
zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
{
@@ -1271,6 +1443,7 @@ zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
uint64_t refdbytes, availbytes, usedobjs, availobjs;
uint64_t fsid;
uint32_t bshift;
+ int err = 0;
ZFS_ENTER(zfsvfs);
@@ -1322,8 +1495,17 @@ zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
*/
bzero(statp->f_spare, sizeof (statp->f_spare));
+ if (dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
+ dmu_objset_projectquota_present(zfsvfs->z_os)) {
+ znode_t *zp = ITOZ(dentry->d_inode);
+
+ if (zp->z_pflags & ZFS_PROJINHERIT && zp->z_projid &&
+ zpl_is_valid_projid(zp->z_projid))
+ err = zfs_statfs_project(zfsvfs, zp, statp, bshift);
+ }
+
ZFS_EXIT(zfsvfs);
- return (0);
+ return (err);
}
int
@@ -2170,9 +2352,9 @@ EXPORT_SYMBOL(zfs_resume_fs);
EXPORT_SYMBOL(zfs_userspace_one);
EXPORT_SYMBOL(zfs_userspace_many);
EXPORT_SYMBOL(zfs_set_userquota);
-EXPORT_SYMBOL(zfs_owner_overquota);
-EXPORT_SYMBOL(zfs_fuid_overquota);
-EXPORT_SYMBOL(zfs_fuid_overobjquota);
+EXPORT_SYMBOL(zfs_id_overblockquota);
+EXPORT_SYMBOL(zfs_id_overobjquota);
+EXPORT_SYMBOL(zfs_id_overquota);
EXPORT_SYMBOL(zfs_set_version);
EXPORT_SYMBOL(zfsvfs_create);
EXPORT_SYMBOL(zfsvfs_free);
diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
index e21fe1aca..f35165de3 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -79,6 +79,7 @@
#include <sys/attr.h>
#include <sys/zpl.h>
#include <sys/zil.h>
+#include <sys/sa_impl.h>
/*
* Programming rules.
@@ -728,8 +729,13 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
while (n > 0) {
abuf = NULL;
woff = uio->uio_loffset;
- if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
- zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
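+ /*
+ * Refuse the write up front if the owner's user, group, or
+ * (non-default) project block quota is already exhausted.
+ */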
+ if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT,
+ KUID_TO_SUID(ip->i_uid)) ||
+ zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT,
+ KGID_TO_SGID(ip->i_gid)) ||
+ (zp->z_projid != ZFS_DEFAULT_PROJID &&
+ zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
+ zp->z_projid))) {
if (abuf != NULL)
dmu_return_arcbuf(abuf);
error = SET_ERROR(EDQUOT);
@@ -1380,6 +1386,7 @@ top:
if (zp == NULL) {
uint64_t txtype;
+ uint64_t projid = ZFS_DEFAULT_PROJID;
/*
* Create a new file object and update the directory
@@ -1408,7 +1415,9 @@ top:
goto out;
have_acl = B_TRUE;
- if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
+ if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
+ projid = zfs_inherit_projid(dzp);
+ if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
zfs_acl_ids_free(&acl_ids);
error = SET_ERROR(EDQUOT);
goto out;
@@ -1552,6 +1561,7 @@ zfs_tmpfile(struct inode *dip, vattr_t *vap, int excl,
uid_t uid;
gid_t gid;
zfs_acl_ids_t acl_ids;
+ uint64_t projid = ZFS_DEFAULT_PROJID;
boolean_t fuid_dirtied;
boolean_t have_acl = B_FALSE;
boolean_t waited = B_FALSE;
@@ -1598,7 +1608,9 @@ top:
goto out;
have_acl = B_TRUE;
- if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
+ if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
+ projid = zfs_inherit_projid(dzp);
+ if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
zfs_acl_ids_free(&acl_ids);
error = SET_ERROR(EDQUOT);
goto out;
@@ -2009,7 +2021,7 @@ top:
return (error);
}
- if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
+ if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, zfs_inherit_projid(dzp))) {
zfs_acl_ids_free(&acl_ids);
zfs_dirent_unlock(dl);
ZFS_EXIT(zfsvfs);
@@ -2591,6 +2603,17 @@ zfs_getattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
((zp->z_pflags & ZFS_SPARSE) != 0);
XVA_SET_RTN(xvap, XAT_SPARSE);
}
+
+ if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
+ xoap->xoa_projinherit =
+ ((zp->z_pflags & ZFS_PROJINHERIT) != 0);
+ XVA_SET_RTN(xvap, XAT_PROJINHERIT);
+ }
+
+ if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
+ xoap->xoa_projid = zp->z_projid;
+ XVA_SET_RTN(xvap, XAT_PROJID);
+ }
}
ZFS_TIME_DECODE(&vap->va_atime, atime);
@@ -2669,6 +2692,125 @@ zfs_getattr_fast(struct inode *ip, struct kstat *sp)
}
/*
+ * When changing a file's user, group, or project, we must update not
+ * only the object assigned to the file itself, but also the objects
+ * reachable through its hidden xattr directory.
+ *
+ * The xattr directory may contain many EA entries, so it may be
+ * impossible to change all of them within the single transaction that
+ * updates the main object's user/group/project attributes. Instead we
+ * update them one by one in separate, independent transactions. This
+ * is not an ideal solution, but we have no better approach yet.
+ */
+static int
+zfs_setattr_dir(znode_t *dzp)
+{
+ struct inode *dxip = ZTOI(dzp);
+ struct inode *xip = NULL;
+ zfsvfs_t *zfsvfs = ITOZSB(dxip);
+ objset_t *os = zfsvfs->z_os;
+ zap_cursor_t zc;
+ zap_attribute_t zap;
+ zfs_dirlock_t *dl;
+ znode_t *zp;
+ dmu_tx_t *tx = NULL;
+ uint64_t uid, gid;
+ sa_bulk_attr_t bulk[4];
+ int count = 0;
+ int err;
+
+ zap_cursor_init(&zc, os, dzp->z_id);
+ while ((err = zap_cursor_retrieve(&zc, &zap)) == 0) {
+ if (zap.za_integer_length != 8 || zap.za_num_integers != 1) {
+ err = ENXIO;
+ break;
+ }
+
+ err = zfs_dirent_lock(&dl, dzp, (char *)zap.za_name, &zp,
+ ZEXISTS, NULL, NULL);
+ if (err == ENOENT)
+ goto next;
+ if (err)
+ break;
+
+ xip = ZTOI(zp);
+ if (KUID_TO_SUID(xip->i_uid) == KUID_TO_SUID(dxip->i_uid) &&
+ KGID_TO_SGID(xip->i_gid) == KGID_TO_SGID(dxip->i_gid) &&
+ zp->z_projid == dzp->z_projid)
+ goto next;
+
+ tx = dmu_tx_create(os);
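+ /*
+ * Objects without a project ID slot need room for the SA to grow
+ * when the new attribute is added.
+ */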
+ if (!(zp->z_pflags & ZFS_PROJID))
+ dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
+ else
+ dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
+
+ err = dmu_tx_assign(tx, TXG_WAIT);
+ if (err)
+ break;
+
+ mutex_enter(&dzp->z_lock);
+
+ if (KUID_TO_SUID(xip->i_uid) != KUID_TO_SUID(dxip->i_uid)) {
+ xip->i_uid = dxip->i_uid;
+ uid = zfs_uid_read(dxip);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
+ &uid, sizeof (uid));
+ }
+
+ if (KGID_TO_SGID(xip->i_gid) != KGID_TO_SGID(dxip->i_gid)) {
+ xip->i_gid = dxip->i_gid;
+ gid = zfs_gid_read(dxip);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
+ &gid, sizeof (gid));
+ }
+
+ if (zp->z_projid != dzp->z_projid) {
+ if (!(zp->z_pflags & ZFS_PROJID)) {
+ zp->z_pflags |= ZFS_PROJID;
+ SA_ADD_BULK_ATTR(bulk, count,
+ SA_ZPL_FLAGS(zfsvfs), NULL, &zp->z_pflags,
+ sizeof (zp->z_pflags));
+ }
+
+ zp->z_projid = dzp->z_projid;
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PROJID(zfsvfs),
+ NULL, &zp->z_projid, sizeof (zp->z_projid));
+ }
+
+ mutex_exit(&dzp->z_lock);
+
+ if (likely(count > 0)) {
+ err = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
+ dmu_tx_commit(tx);
+ } else {
+ dmu_tx_abort(tx);
+ }
+ tx = NULL;
+ if (err != 0 && err != ENOENT)
+ break;
+
+next:
+ if (xip) {
+ iput(xip);
+ xip = NULL;
+ zfs_dirent_unlock(dl);
+ }
+ zap_cursor_advance(&zc);
+ }
+
+ if (tx)
+ dmu_tx_abort(tx);
+ if (xip) {
+ iput(xip);
+ zfs_dirent_unlock(dl);
+ }
+ zap_cursor_fini(&zc);
+
+ return (err == ENOENT ? 0 : err);
+}
+
+/*
* Set the file attributes to the values contained in the
* vattr structure.
*
@@ -2691,6 +2833,7 @@ zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
+ objset_t *os = zfsvfs->z_os;
zilog_t *zilog;
dmu_tx_t *tx;
vattr_t oldva;
@@ -2702,17 +2845,19 @@ zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
uint64_t new_kuid = 0, new_kgid = 0, new_uid, new_gid;
uint64_t xattr_obj;
uint64_t mtime[2], ctime[2], atime[2];
+ uint64_t projid = ZFS_INVALID_PROJID;
znode_t *attrzp;
int need_policy = FALSE;
- int err, err2;
+ int err, err2 = 0;
zfs_fuid_info_t *fuidp = NULL;
xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
xoptattr_t *xoap;
zfs_acl_t *aclp;
boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
boolean_t fuid_dirtied = B_FALSE;
+ boolean_t handle_eadir = B_FALSE;
sa_bulk_attr_t *bulk, *xattr_bulk;
- int count = 0, xattr_count = 0;
+ int count = 0, xattr_count = 0, bulks = 8;
if (mask == 0)
return (0);
@@ -2720,6 +2865,39 @@ zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
+ /*
+ * If this is a xvattr_t, then get a pointer to the structure of
+ * optional attributes. If this is NULL, then we have a vattr_t.
+ */
+ xoap = xva_getxoptattr(xvap);
+ if (xoap != NULL && (mask & ATTR_XVATTR)) {
+ if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
+ if (!dmu_objset_projectquota_enabled(os) ||
+ (!S_ISREG(ip->i_mode) && !S_ISDIR(ip->i_mode))) {
+ ZFS_EXIT(zfsvfs);
+ return (SET_ERROR(ENOTSUP));
+ }
+
+ projid = xoap->xoa_projid;
+ if (unlikely(projid == ZFS_INVALID_PROJID)) {
+ ZFS_EXIT(zfsvfs);
+ return (SET_ERROR(EINVAL));
+ }
+
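+ /*
+ * If the ID already matches and is stored on disk, treat this
+ * as a no-op; otherwise a policy check is required.
+ */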
+ if (projid == zp->z_projid && zp->z_pflags & ZFS_PROJID)
+ projid = ZFS_INVALID_PROJID;
+ else
+ need_policy = TRUE;
+ }
+
+ if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT) &&
+ (!dmu_objset_projectquota_enabled(os) ||
+ (!S_ISREG(ip->i_mode) && !S_ISDIR(ip->i_mode)))) {
+ ZFS_EXIT(zfsvfs);
+ return (SET_ERROR(ENOTSUP));
+ }
+ }
+
zilog = zfsvfs->z_log;
/*
@@ -2745,17 +2923,11 @@ zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
return (SET_ERROR(EINVAL));
}
- /*
- * If this is an xvattr_t, then get a pointer to the structure of
- * optional attributes. If this is NULL, then we have a vattr_t.
- */
- xoap = xva_getxoptattr(xvap);
-
tmpxvattr = kmem_alloc(sizeof (xvattr_t), KM_SLEEP);
xva_init(tmpxvattr);
- bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * 7, KM_SLEEP);
- xattr_bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * 7, KM_SLEEP);
+ bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * bulks, KM_SLEEP);
+ xattr_bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * bulks, KM_SLEEP);
/*
* Immutable files can only alter immutable bit and atime
@@ -2901,6 +3073,16 @@ top:
}
}
+ if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
+ if (xoap->xoa_projinherit !=
+ ((zp->z_pflags & ZFS_PROJINHERIT) != 0)) {
+ need_policy = TRUE;
+ } else {
+ XVA_CLR_REQ(xvap, XAT_PROJINHERIT);
+ XVA_SET_REQ(tmpxvattr, XAT_PROJINHERIT);
+ }
+ }
+
if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
if (xoap->xoa_nounlink !=
((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
@@ -3009,7 +3191,8 @@ top:
*/
mask = vap->va_mask;
- if ((mask & (ATTR_UID | ATTR_GID))) {
+ if ((mask & (ATTR_UID | ATTR_GID)) || projid != ZFS_INVALID_PROJID) {
+ handle_eadir = B_TRUE;
err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
&xattr_obj, sizeof (xattr_obj));
@@ -3022,7 +3205,8 @@ top:
new_kuid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
if (new_kuid != KUID_TO_SUID(ZTOI(zp)->i_uid) &&
- zfs_fuid_overquota(zfsvfs, B_FALSE, new_kuid)) {
+ zfs_id_overquota(zfsvfs, DMU_USERUSED_OBJECT,
+ new_kuid)) {
if (attrzp)
iput(ZTOI(attrzp));
err = SET_ERROR(EDQUOT);
@@ -3034,15 +3218,24 @@ top:
new_kgid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_gid, cr, ZFS_GROUP, &fuidp);
if (new_kgid != KGID_TO_SGID(ZTOI(zp)->i_gid) &&
- zfs_fuid_overquota(zfsvfs, B_TRUE, new_kgid)) {
+ zfs_id_overquota(zfsvfs, DMU_GROUPUSED_OBJECT,
+ new_kgid)) {
if (attrzp)
iput(ZTOI(attrzp));
err = SET_ERROR(EDQUOT);
goto out2;
}
}
+
+ if (projid != ZFS_INVALID_PROJID &&
+ zfs_id_overquota(zfsvfs, DMU_PROJECTUSED_OBJECT, projid)) {
+ if (attrzp)
+ iput(ZTOI(attrzp));
+ err = SET_ERROR(EDQUOT);
+ goto out2;
+ }
}
- tx = dmu_tx_create(zfsvfs->z_os);
+ tx = dmu_tx_create(os);
if (mask & ATTR_MODE) {
uint64_t pmode = zp->z_mode;
@@ -3075,8 +3268,10 @@ top:
mutex_exit(&zp->z_lock);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
} else {
- if ((mask & ATTR_XVATTR) &&
- XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
+ if (((mask & ATTR_XVATTR) &&
+ XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) ||
+ (projid != ZFS_INVALID_PROJID &&
+ !(zp->z_pflags & ZFS_PROJID)))
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
else
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
@@ -3105,6 +3300,26 @@ top:
* updated as a side-effect of calling this function.
*/
+ if (projid != ZFS_INVALID_PROJID && !(zp->z_pflags & ZFS_PROJID)) {
+ /*
+ * An existing object upgraded from an older system has no slot in
+ * its on-disk layout for the project ID attribute, yet the quota
+ * accounting logic accesses that slot directly by offset. Adjust
+ * the old object's layout so the project ID lives at a unified,
+ * fixed offset. On success sa_add_projid() has already stored the
+ * new ID, so projid is reset below to skip the bulk update.
+ */
+ if (attrzp)
+ err = sa_add_projid(attrzp->z_sa_hdl, tx, projid);
+ if (err == 0)
+ err = sa_add_projid(zp->z_sa_hdl, tx, projid);
+
+ if (unlikely(err == EEXIST))
+ err = 0;
+ else if (err != 0)
+ goto out;
+ else
+ projid = ZFS_INVALID_PROJID;
+ }
if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
mutex_enter(&zp->z_acl_lock);
@@ -3120,6 +3335,12 @@ top:
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
sizeof (attrzp->z_pflags));
+ if (projid != ZFS_INVALID_PROJID) {
+ attrzp->z_projid = projid;
+ SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
+ SA_ZPL_PROJID(zfsvfs), NULL, &attrzp->z_projid,
+ sizeof (attrzp->z_projid));
+ }
}
if (mask & (ATTR_UID|ATTR_GID)) {
@@ -3199,6 +3420,13 @@ top:
ctime, sizeof (ctime));
}
+ if (projid != ZFS_INVALID_PROJID) {
+ zp->z_projid = projid;
+ SA_ADD_BULK_ATTR(bulk, count,
+ SA_ZPL_PROJID(zfsvfs), NULL, &zp->z_projid,
+ sizeof (zp->z_projid));
+ }
+
if (attrzp && mask) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_CTIME(zfsvfs), NULL, &ctime,
@@ -3235,6 +3463,9 @@ top:
if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_QUARANTINED)) {
XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
}
+ if (XVA_ISSET_REQ(tmpxvattr, XAT_PROJINHERIT)) {
+ XVA_SET_REQ(xvap, XAT_PROJINHERIT);
+ }
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
ASSERT(S_ISREG(ip->i_mode));
@@ -3258,7 +3489,7 @@ top:
mutex_exit(&attrzp->z_lock);
}
out:
- if (err == 0 && attrzp) {
+ if (err == 0 && xattr_count > 0) {
err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
xattr_count, tx);
ASSERT(err2 == 0);
@@ -3279,20 +3510,24 @@ out:
if (err == ERESTART)
goto top;
} else {
- err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
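+ /*
+ * count can be zero, e.g. when the only change was a project
+ * ID already applied via sa_add_projid().
+ */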
+ if (count > 0)
+ err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
dmu_tx_commit(tx);
- if (attrzp)
+ if (attrzp) {
+ if (err2 == 0 && handle_eadir)
+ err2 = zfs_setattr_dir(attrzp);
iput(ZTOI(attrzp));
+ }
zfs_inode_update(zp);
}
out2:
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ if (os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
out3:
- kmem_free(xattr_bulk, sizeof (sa_bulk_attr_t) * 7);
- kmem_free(bulk, sizeof (sa_bulk_attr_t) * 7);
+ kmem_free(xattr_bulk, sizeof (sa_bulk_attr_t) * bulks);
+ kmem_free(bulk, sizeof (sa_bulk_attr_t) * bulks);
kmem_free(tmpxvattr, sizeof (xvattr_t));
ZFS_EXIT(zfsvfs);
return (err);
@@ -3586,6 +3821,19 @@ top:
}
/*
+ * Project inheritance means that if the directory has ZFS_PROJINHERIT
+ * set, its descendants inherit not only the project ID but also the
+ * ZFS_PROJINHERIT flag itself. In that case we only allow renames
+ * into our tree when the project IDs match.
+ */
+ if (tdzp->z_pflags & ZFS_PROJINHERIT &&
+ tdzp->z_projid != szp->z_projid) {
+ error = SET_ERROR(EXDEV);
+ goto out;
+ }
+
+ /*
* Must have write access at the source to remove the old entry
* and write access at the target to create the new entry.
* Note that if target and source are the same, this can be
@@ -3683,6 +3931,8 @@ top:
error = zfs_link_create(tdl, szp, tx, ZRENAMING);
if (error == 0) {
szp->z_pflags |= ZFS_AV_MODIFIED;
+ if (tdzp->z_pflags & ZFS_PROJINHERIT)
+ szp->z_pflags |= ZFS_PROJINHERIT;
error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
(void *)&szp->z_pflags, sizeof (uint64_t), tx);
@@ -3829,7 +4079,7 @@ top:
return (error);
}
- if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
+ if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, ZFS_DEFAULT_PROJID)) {
zfs_acl_ids_free(&acl_ids);
zfs_dirent_unlock(dl);
ZFS_EXIT(zfsvfs);
@@ -4013,6 +4263,18 @@ zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr,
ZFS_VERIFY_ZP(szp);
/*
+ * Project inheritance means that if the directory has ZFS_PROJINHERIT
+ * set, its descendants inherit not only the project ID but also the
+ * ZFS_PROJINHERIT flag itself. In that case we only allow hard link
+ * creation in our tree when the project IDs match.
+ */
+ if (dzp->z_pflags & ZFS_PROJINHERIT && dzp->z_projid != szp->z_projid) {
+ ZFS_EXIT(zfsvfs);
+ return (SET_ERROR(EXDEV));
+ }
+
+ /*
* We check i_sb because snapshots and the ctldir must have different
* super blocks.
*/
@@ -4206,8 +4468,13 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
* is to register a page_mkwrite() handler to count the page
* against its quota when it is about to be dirtied.
*/
- if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
- zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
+ if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT,
+ KUID_TO_SUID(ip->i_uid)) ||
+ zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT,
+ KGID_TO_SGID(ip->i_gid)) ||
+ (zp->z_projid != ZFS_DEFAULT_PROJID &&
+ zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
+ zp->z_projid))) {
err = EDQUOT;
}
#endif
diff --git a/module/zfs/zfs_znode.c b/module/zfs/zfs_znode.c
index d3b68403b..5288c9c68 100644
--- a/module/zfs/zfs_znode.c
+++ b/module/zfs/zfs_znode.c
@@ -328,6 +328,7 @@ zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
sharezp->z_atime_dirty = 0;
sharezp->z_zfsvfs = zfsvfs;
sharezp->z_is_sa = zfsvfs->z_use_sa;
+ sharezp->z_pflags = 0;
vp = ZTOV(sharezp);
vn_reinit(vp);
@@ -558,6 +559,7 @@ zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
uint64_t links;
uint64_t z_uid, z_gid;
uint64_t atime[2], mtime[2], ctime[2];
+ uint64_t projid = ZFS_DEFAULT_PROJID;
sa_bulk_attr_t bulk[11];
int count = 0;
@@ -604,13 +606,17 @@ zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
- if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || tmp_gen == 0) {
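+ /*
+ * If the znode advertises ZFS_PROJID, the project ID attribute
+ * must also be readable from the SA.
+ */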
+ if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || tmp_gen == 0 ||
+ (dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
+ (zp->z_pflags & ZFS_PROJID) &&
+ sa_lookup(zp->z_sa_hdl, SA_ZPL_PROJID(zfsvfs), &projid, 8) != 0)) {
if (hdl == NULL)
sa_handle_destroy(zp->z_sa_hdl);
zp->z_sa_hdl = NULL;
goto error;
}
+ zp->z_projid = projid;
zp->z_mode = ip->i_mode = mode;
ip->i_generation = (uint32_t)tmp_gen;
ip->i_blkbits = SPA_MINBLOCKSHIFT;
@@ -696,7 +702,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
{
uint64_t crtime[2], atime[2], mtime[2], ctime[2];
uint64_t mode, size, links, parent, pflags;
- uint64_t dzp_pflags = 0;
+ uint64_t projid = ZFS_DEFAULT_PROJID;
uint64_t rdev = 0;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
dmu_buf_t *db;
@@ -771,14 +777,12 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
*/
if (flag & IS_ROOT_NODE) {
dzp->z_id = obj;
- } else {
- dzp_pflags = dzp->z_pflags;
}
/*
* If parent is an xattr, so am I.
*/
- if (dzp_pflags & ZFS_XATTR) {
+ if (dzp->z_pflags & ZFS_XATTR) {
flag |= IS_XATTR;
}
@@ -803,6 +807,23 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
if (flag & IS_XATTR)
pflags |= ZFS_XATTR;
+ if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode)) {
+ /*
+ * The ZFS_PROJID flag tells us whether a project ID is stored
+ * on disk for this object. See zfs_space_delta_cb().
+ */
+ if (obj_type != DMU_OT_ZNODE &&
+ dmu_objset_projectquota_enabled(zfsvfs->z_os))
+ pflags |= ZFS_PROJID;
+
+ /*
+ * Inherit project ID from parent if required.
+ */
+ projid = zfs_inherit_projid(dzp);
+ if (dzp->z_pflags & ZFS_PROJINHERIT)
+ pflags |= ZFS_PROJINHERIT;
+ }
+
/*
* No execs denied will be determined when zfs_mode_compute() is called.
*/
@@ -884,6 +905,10 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
if (obj_type == DMU_OT_ZNODE) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zfsvfs), NULL,
&empty_xattr, 8);
+ } else if (dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
+ pflags & ZFS_PROJID) {
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PROJID(zfsvfs),
+ NULL, &projid, 8);
}
if (obj_type == DMU_OT_ZNODE ||
(S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))) {
@@ -942,6 +967,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
(*zpp)->z_pflags = pflags;
(*zpp)->z_mode = ZTOI(*zpp)->i_mode = mode;
(*zpp)->z_dnodesize = dnodesize;
+ (*zpp)->z_projid = projid;
if (obj_type == DMU_OT_ZNODE ||
acl_ids->z_aclp->z_version < ZFS_ACL_VERSION_FUID) {
@@ -1049,6 +1075,11 @@ zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_SPARSE);
}
+ if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
+ ZFS_ATTR_SET(zp, ZFS_PROJINHERIT, xoap->xoa_projinherit,
+ zp->z_pflags, tx);
+ XVA_SET_RTN(xvap, XAT_PROJINHERIT);
+ }
if (update_inode)
zfs_set_inode_flags(zp, ZTOI(zp));
@@ -1166,6 +1197,7 @@ zfs_rezget(znode_t *zp)
uint64_t gen;
uint64_t z_uid, z_gid;
uint64_t atime[2], mtime[2], ctime[2];
+ uint64_t projid = ZFS_DEFAULT_PROJID;
znode_hold_t *zh;
/*
@@ -1241,6 +1273,17 @@ zfs_rezget(znode_t *zp)
return (SET_ERROR(EIO));
}
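+ /*
+ * Objects created before the project quota upgrade have no project
+ * ID attribute; ENOENT simply leaves the default project ID.
+ */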
+ if (dmu_objset_projectquota_enabled(zfsvfs->z_os)) {
+ err = sa_lookup(zp->z_sa_hdl, SA_ZPL_PROJID(zfsvfs),
+ &projid, 8);
+ if (err != 0 && err != ENOENT) {
+ zfs_znode_dmu_fini(zp);
+ zfs_znode_hold_exit(zfsvfs, zh);
+ return (SET_ERROR(err));
+ }
+ }
+
+ zp->z_projid = projid;
zp->z_mode = ZTOI(zp)->i_mode = mode;
zfs_uid_write(ZTOI(zp), z_uid);
zfs_gid_write(ZTOI(zp), z_gid);
@@ -1861,6 +1904,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
rootzp->z_unlinked = 0;
rootzp->z_atime_dirty = 0;
rootzp->z_is_sa = USE_SA(version, os);
+ rootzp->z_pflags = 0;
zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
zfsvfs->z_os = os;
diff --git a/module/zfs/zpl_file.c b/module/zfs/zpl_file.c
index 4dfa0dea4..1c5f5e409 100644
--- a/module/zfs/zpl_file.c
+++ b/module/zfs/zpl_file.c
@@ -31,7 +31,7 @@
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_znode.h>
-#include <sys/zpl.h>
+#include <sys/zfs_project.h>
static int
@@ -720,17 +720,14 @@ zpl_fallocate(struct file *filp, int mode, loff_t offset, loff_t len)
}
#endif /* HAVE_FILE_FALLOCATE */
-/*
- * Map zfs file z_pflags (xvattr_t) to linux file attributes. Only file
- * attributes common to both Linux and Solaris are mapped.
- */
-static int
-zpl_ioctl_getflags(struct file *filp, void __user *arg)
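+/*
+ * Expose and accept the project-inherit bit alongside the standard
+ * Linux FS_*_FL attribute flags.
+ */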
+#define ZFS_FL_USER_VISIBLE (FS_FL_USER_VISIBLE | ZFS_PROJINHERIT_FL)
+#define ZFS_FL_USER_MODIFIABLE (FS_FL_USER_MODIFIABLE | ZFS_PROJINHERIT_FL)
+
+static uint32_t
+__zpl_ioctl_getflags(struct inode *ip)
{
- struct inode *ip = file_inode(filp);
- unsigned int ioctl_flags = 0;
uint64_t zfs_flags = ITOZ(ip)->z_pflags;
- int error;
+ uint32_t ioctl_flags = 0;
if (zfs_flags & ZFS_IMMUTABLE)
ioctl_flags |= FS_IMMUTABLE_FL;
@@ -741,11 +738,26 @@ zpl_ioctl_getflags(struct file *filp, void __user *arg)
if (zfs_flags & ZFS_NODUMP)
ioctl_flags |= FS_NODUMP_FL;
- ioctl_flags &= FS_FL_USER_VISIBLE;
+ if (zfs_flags & ZFS_PROJINHERIT)
+ ioctl_flags |= ZFS_PROJINHERIT_FL;
- error = copy_to_user(arg, &ioctl_flags, sizeof (ioctl_flags));
+ return (ioctl_flags & ZFS_FL_USER_VISIBLE);
+}
- return (error);
+/*
+ * Map zfs file z_pflags (xvattr_t) to linux file attributes. Only file
+ * attributes common to both Linux and Solaris are mapped.
+ */
+static int
+zpl_ioctl_getflags(struct file *filp, void __user *arg)
+{
+ uint32_t flags;
+ int err;
+
+ flags = __zpl_ioctl_getflags(file_inode(filp));
+ err = copy_to_user(arg, &flags, sizeof (flags));
+
+ return (err);
}
/*
@@ -760,24 +772,16 @@ zpl_ioctl_getflags(struct file *filp, void __user *arg)
#define fchange(f0, f1, b0, b1) (!((f0) & (b0)) != !((f1) & (b1)))
static int
-zpl_ioctl_setflags(struct file *filp, void __user *arg)
+__zpl_ioctl_setflags(struct inode *ip, uint32_t ioctl_flags, xvattr_t *xva)
{
- struct inode *ip = file_inode(filp);
- uint64_t zfs_flags = ITOZ(ip)->z_pflags;
- unsigned int ioctl_flags;
- cred_t *cr = CRED();
- xvattr_t xva;
- xoptattr_t *xoap;
- int error;
- fstrans_cookie_t cookie;
-
- if (copy_from_user(&ioctl_flags, arg, sizeof (ioctl_flags)))
- return (-EFAULT);
+ uint64_t zfs_flags = ITOZ(ip)->z_pflags;
+ xoptattr_t *xoap;
- if ((ioctl_flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL)))
+ if (ioctl_flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL |
+ ZFS_PROJINHERIT_FL))
return (-EOPNOTSUPP);
- if ((ioctl_flags & ~(FS_FL_USER_MODIFIABLE)))
+ if (ioctl_flags & ~ZFS_FL_USER_MODIFIABLE)
return (-EACCES);
if ((fchange(ioctl_flags, zfs_flags, FS_IMMUTABLE_FL, ZFS_IMMUTABLE) ||
@@ -788,28 +792,100 @@ zpl_ioctl_setflags(struct file *filp, void __user *arg)
if (!zpl_inode_owner_or_capable(ip))
return (-EACCES);
- xva_init(&xva);
- xoap = xva_getxoptattr(&xva);
+ xva_init(xva);
+ xoap = xva_getxoptattr(xva);
- XVA_SET_REQ(&xva, XAT_IMMUTABLE);
+ XVA_SET_REQ(xva, XAT_IMMUTABLE);
if (ioctl_flags & FS_IMMUTABLE_FL)
xoap->xoa_immutable = B_TRUE;
- XVA_SET_REQ(&xva, XAT_APPENDONLY);
+ XVA_SET_REQ(xva, XAT_APPENDONLY);
if (ioctl_flags & FS_APPEND_FL)
xoap->xoa_appendonly = B_TRUE;
- XVA_SET_REQ(&xva, XAT_NODUMP);
+ XVA_SET_REQ(xva, XAT_NODUMP);
if (ioctl_flags & FS_NODUMP_FL)
xoap->xoa_nodump = B_TRUE;
+ XVA_SET_REQ(xva, XAT_PROJINHERIT);
+ if (ioctl_flags & ZFS_PROJINHERIT_FL)
+ xoap->xoa_projinherit = B_TRUE;
+
+ return (0);
+}
+
+static int
+zpl_ioctl_setflags(struct file *filp, void __user *arg)
+{
+ struct inode *ip = file_inode(filp);
+ uint32_t flags;
+ cred_t *cr = CRED();
+ xvattr_t xva;
+ int err;
+ fstrans_cookie_t cookie;
+
+ if (copy_from_user(&flags, arg, sizeof (flags)))
+ return (-EFAULT);
+
+ err = __zpl_ioctl_setflags(ip, flags, &xva);
+ if (err)
+ return (err);
+
crhold(cr);
cookie = spl_fstrans_mark();
- error = -zfs_setattr(ip, (vattr_t *)&xva, 0, cr);
+ err = -zfs_setattr(ip, (vattr_t *)&xva, 0, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
- return (error);
+ return (err);
+}
+
+static int
+zpl_ioctl_getxattr(struct file *filp, void __user *arg)
+{
+ zfsxattr_t fsx = { 0 };
+ struct inode *ip = file_inode(filp);
+ int err;
+
+ fsx.fsx_xflags = __zpl_ioctl_getflags(ip);
+ fsx.fsx_projid = ITOZ(ip)->z_projid;
+ err = copy_to_user(arg, &fsx, sizeof (fsx));
+
+ return (err);
+}
+
+static int
+zpl_ioctl_setxattr(struct file *filp, void __user *arg)
+{
+ struct inode *ip = file_inode(filp);
+ zfsxattr_t fsx;
+ cred_t *cr = CRED();
+ xvattr_t xva;
+ xoptattr_t *xoap;
+ int err;
+ fstrans_cookie_t cookie;
+
+ if (copy_from_user(&fsx, arg, sizeof (fsx)))
+ return (-EFAULT);
+
+ if (!zpl_is_valid_projid(fsx.fsx_projid))
+ return (-EINVAL);
+
+ err = __zpl_ioctl_setflags(ip, fsx.fsx_xflags, &xva);
+ if (err)
+ return (err);
+
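+ /* In addition to the flag bits, request the project ID update. */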
+ xoap = xva_getxoptattr(&xva);
+ XVA_SET_REQ(&xva, XAT_PROJID);
+ xoap->xoa_projid = fsx.fsx_projid;
+
+ crhold(cr);
+ cookie = spl_fstrans_mark();
+ err = -zfs_setattr(ip, (vattr_t *)&xva, 0, cr);
+ spl_fstrans_unmark(cookie);
+ crfree(cr);
+
+ return (err);
}
static long
@@ -820,6 +896,10 @@ zpl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return (zpl_ioctl_getflags(filp, (void *)arg));
case FS_IOC_SETFLAGS:
return (zpl_ioctl_setflags(filp, (void *)arg));
+ case ZFS_IOC_FSGETXATTR:
+ return (zpl_ioctl_getxattr(filp, (void *)arg));
+ case ZFS_IOC_FSSETXATTR:
+ return (zpl_ioctl_setxattr(filp, (void *)arg));
default:
return (-ENOTTY);
}