author    | Jinshan Xiong <[email protected]>      | 2016-10-04 11:46:10 -0700
committer | Brian Behlendorf <[email protected]> | 2016-10-07 09:45:13 -0700
commit    | 1de321e6260f5b83eb943b6ce2166a3879f42df4 (patch)
tree      | 5af1627e1f4b9efc0bf67eff965d480e6d603396 /module
parent    | af322debaa11b22c4fe7b6bc8941e562694eabb2 (diff)
Add support for user/group dnode accounting & quota
This patch tracks dnode usage for each user/group in the
DMU_USER/GROUPUSED_OBJECT ZAPs. ZAP entries dedicated to dnode
accounting have their key prefixed with "obj-" followed by the UID/GID
in string form, mirroring the scheme used for block accounting.
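
For illustration, the per-ID update in the dmu_objset.c hunk further down reduces to the sketch below: render the numeric ID in hex, prepend the dnode-accounting prefix, and bump the matching entry in the USERUSED/GROUPUSED ZAP by one in either direction. The wrapper name sketch_userobj_update is invented here, and DMU_OBJACCT_PREFIX is assumed to expand to the "obj-" string described above (its definition lives outside the 'module' tree shown in this diff).

```c
/*
 * Simplified sketch of do_userobjquota_update() from the dmu_objset.c
 * hunk below.  The function name is illustrative only; DMU_OBJACCT_PREFIX
 * is assumed to be "obj-" (defined outside this diff).
 */
static void
sketch_userobj_update(objset_t *os, uint64_t uid, uint64_t gid,
    boolean_t subtract, dmu_tx_t *tx)
{
	char name[20 + DMU_OBJACCT_PREFIX_LEN];

	/* "obj-<uid in hex>" sits next to the block-accounting key "<uid>" */
	(void) snprintf(name, sizeof (name), DMU_OBJACCT_PREFIX "%llx",
	    (longlong_t)uid);
	VERIFY0(zap_increment(os, DMU_USERUSED_OBJECT, name,
	    subtract ? -1 : 1, tx));

	/* same for the group ID, charged to the GROUPUSED ZAP */
	(void) snprintf(name, sizeof (name), DMU_OBJACCT_PREFIX "%llx",
	    (longlong_t)gid);
	VERIFY0(zap_increment(os, DMU_GROUPUSED_OBJECT, name,
	    subtract ? -1 : 1, tx));
}
```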
A new SPA feature has been added for dnode accounting, along with
a new ZPL version. The SPA feature must be enabled on the pool
before the ZFS filesystem can be upgraded. During the ZFS version
upgrade, a "quotacheck" is performed by marking every dnode dirty.
ZoL-bug-id: https://github.com/zfsonlinux/zfs/issues/3500
Signed-off-by: Jinshan Xiong <[email protected]>
Signed-off-by: Johann Lombardi <[email protected]>
Diffstat (limited to 'module')
-rw-r--r-- | module/zcommon/zfs_deleg.c   |   4
-rw-r--r-- | module/zcommon/zfs_prop.c    |   6
-rw-r--r-- | module/zfs/dmu_objset.c      | 180
-rw-r--r-- | module/zfs/dnode_sync.c      |   5
-rw-r--r-- | module/zfs/spa.c             |  12
-rw-r--r-- | module/zfs/zfeature_common.c |  11
-rw-r--r-- | module/zfs/zfs_acl.c         |   4
-rw-r--r-- | module/zfs/zfs_ioctl.c       |  62
-rw-r--r-- | module/zfs/zfs_vfsops.c      | 112
9 files changed, 371 insertions, 25 deletions
diff --git a/module/zcommon/zfs_deleg.c b/module/zcommon/zfs_deleg.c index f6e41da9d..647a24e5f 100644 --- a/module/zcommon/zfs_deleg.c +++ b/module/zcommon/zfs_deleg.c @@ -62,6 +62,10 @@ zfs_deleg_perm_tab_t zfs_deleg_perm_tab[] = { {ZFS_DELEG_PERM_GROUPQUOTA}, {ZFS_DELEG_PERM_USERUSED}, {ZFS_DELEG_PERM_GROUPUSED}, + {ZFS_DELEG_PERM_USEROBJQUOTA}, + {ZFS_DELEG_PERM_GROUPOBJQUOTA}, + {ZFS_DELEG_PERM_USEROBJUSED}, + {ZFS_DELEG_PERM_GROUPOBJUSED}, {ZFS_DELEG_PERM_HOLD}, {ZFS_DELEG_PERM_RELEASE}, {NULL} diff --git a/module/zcommon/zfs_prop.c b/module/zcommon/zfs_prop.c index 029075ebe..1802750f9 100644 --- a/module/zcommon/zfs_prop.c +++ b/module/zcommon/zfs_prop.c @@ -52,7 +52,11 @@ const char *zfs_userquota_prop_prefixes[] = { "userused@", "userquota@", "groupused@", - "groupquota@" + "groupquota@", + "userobjused@", + "userobjquota@", + "groupobjused@", + "groupobjquota@" }; zprop_desc_t * diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c index 970ee4f08..c6b6eb745 100644 --- a/module/zfs/dmu_objset.c +++ b/module/zfs/dmu_objset.c @@ -31,6 +31,7 @@ /* Portions Copyright 2010 Robert Milkowski */ +#include <sys/zfeature.h> #include <sys/cred.h> #include <sys/zfs_context.h> #include <sys/dmu_objset.h> @@ -53,6 +54,7 @@ #include <sys/dsl_destroy.h> #include <sys/vdev.h> #include <sys/policy.h> +#include <sys/spa_impl.h> /* * Needed to close a window in dnode_move() that allows the objset to be freed @@ -77,6 +79,9 @@ int dmu_rescan_dnode_threshold = 1 << DN_MAX_INDBLKSHIFT; static void dmu_objset_find_dp_cb(void *arg); +static void dmu_objset_upgrade(objset_t *os, dmu_objset_upgrade_cb_t cb); +static void dmu_objset_upgrade_stop(objset_t *os); + void dmu_objset_init(void) { @@ -519,6 +524,8 @@ dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp, DMU_GROUPUSED_OBJECT, &os->os_groupused_dnode); } + mutex_init(&os->os_upgrade_lock, NULL, MUTEX_DEFAULT, NULL); + *osp = os; return (0); } @@ -625,6 +632,9 @@ dmu_objset_own(const char *name, dmu_objset_type_t type, err = dmu_objset_own_impl(ds, type, readonly, tag, osp); dsl_pool_rele(dp, FTAG); + if (err == 0 && dmu_objset_userobjspace_upgradable(*osp)) + dmu_objset_userobjspace_upgrade(*osp); + return (err); } @@ -685,6 +695,10 @@ dmu_objset_refresh_ownership(objset_t *os, void *tag) void dmu_objset_disown(objset_t *os, void *tag) { + /* + * Stop upgrading thread + */ + dmu_objset_upgrade_stop(os); dsl_dataset_disown(os->os_dsl_dataset, tag); } @@ -859,6 +873,12 @@ dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp, os->os_phys->os_type = type; if (dmu_objset_userused_enabled(os)) { os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE; + if (dmu_objset_userobjused_enabled(os)) { + ds->ds_feature_activation_needed[ + SPA_FEATURE_USEROBJ_ACCOUNTING] = B_TRUE; + os->os_phys->os_flags |= + OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE; + } os->os_flags = os->os_phys->os_flags; } @@ -1068,6 +1088,60 @@ dmu_objset_snapshot_one(const char *fsname, const char *snapname) } static void +dmu_objset_upgrade_task_cb(void *data) +{ + objset_t *os = data; + + mutex_enter(&os->os_upgrade_lock); + os->os_upgrade_status = EINTR; + if (!os->os_upgrade_exit) { + mutex_exit(&os->os_upgrade_lock); + + os->os_upgrade_status = os->os_upgrade_cb(os); + mutex_enter(&os->os_upgrade_lock); + } + os->os_upgrade_exit = B_TRUE; + os->os_upgrade_id = 0; + mutex_exit(&os->os_upgrade_lock); +} + +static void +dmu_objset_upgrade(objset_t *os, dmu_objset_upgrade_cb_t cb) +{ + if (os->os_upgrade_id != 0) + return; + + 
mutex_enter(&os->os_upgrade_lock); + if (os->os_upgrade_id == 0 && os->os_upgrade_status == 0) { + os->os_upgrade_exit = B_FALSE; + os->os_upgrade_cb = cb; + os->os_upgrade_id = taskq_dispatch( + os->os_spa->spa_upgrade_taskq, + dmu_objset_upgrade_task_cb, os, TQ_SLEEP); + if (os->os_upgrade_id == 0) + os->os_upgrade_status = ENOMEM; + } + mutex_exit(&os->os_upgrade_lock); +} + +static void +dmu_objset_upgrade_stop(objset_t *os) +{ + mutex_enter(&os->os_upgrade_lock); + os->os_upgrade_exit = B_TRUE; + if (os->os_upgrade_id != 0) { + taskqid_t id = os->os_upgrade_id; + + os->os_upgrade_id = 0; + mutex_exit(&os->os_upgrade_lock); + + taskq_cancel_id(os->os_spa->spa_upgrade_taskq, id); + } else { + mutex_exit(&os->os_upgrade_lock); + } +} + +static void dmu_objset_sync_dnodes(list_t *list, list_t *newlist, dmu_tx_t *tx) { dnode_t *dn; @@ -1257,6 +1331,13 @@ dmu_objset_userused_enabled(objset_t *os) DMU_USERUSED_DNODE(os) != NULL); } +boolean_t +dmu_objset_userobjused_enabled(objset_t *os) +{ + return (dmu_objset_userused_enabled(os) && + spa_feature_is_enabled(os->os_spa, SPA_FEATURE_USEROBJ_ACCOUNTING)); +} + static void do_userquota_update(objset_t *os, uint64_t used, uint64_t flags, uint64_t user, uint64_t group, boolean_t subtract, dmu_tx_t *tx) @@ -1272,6 +1353,25 @@ do_userquota_update(objset_t *os, uint64_t used, uint64_t flags, } } +static void +do_userobjquota_update(objset_t *os, uint64_t flags, uint64_t user, + uint64_t group, boolean_t subtract, dmu_tx_t *tx) +{ + if (flags & DNODE_FLAG_USEROBJUSED_ACCOUNTED) { + char name[20 + DMU_OBJACCT_PREFIX_LEN]; + + (void) snprintf(name, sizeof (name), DMU_OBJACCT_PREFIX "%llx", + (longlong_t)user); + VERIFY0(zap_increment(os, DMU_USERUSED_OBJECT, name, + subtract ? -1 : 1, tx)); + + (void) snprintf(name, sizeof (name), DMU_OBJACCT_PREFIX "%llx", + (longlong_t)group); + VERIFY0(zap_increment(os, DMU_GROUPUSED_OBJECT, name, + subtract ? -1 : 1, tx)); + } +} + void dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx) { @@ -1310,11 +1410,15 @@ dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx) if (flags & DN_ID_OLD_EXIST) { do_userquota_update(os, dn->dn_oldused, dn->dn_oldflags, dn->dn_olduid, dn->dn_oldgid, B_TRUE, tx); + do_userobjquota_update(os, dn->dn_oldflags, + dn->dn_olduid, dn->dn_oldgid, B_TRUE, tx); } if (flags & DN_ID_NEW_EXIST) { do_userquota_update(os, DN_USED_BYTES(dn->dn_phys), dn->dn_phys->dn_flags, dn->dn_newuid, dn->dn_newgid, B_FALSE, tx); + do_userobjquota_update(os, dn->dn_phys->dn_flags, + dn->dn_newuid, dn->dn_newgid, B_FALSE, tx); } mutex_enter(&dn->dn_mtx); @@ -1486,19 +1590,19 @@ dmu_objset_userspace_present(objset_t *os) OBJSET_FLAG_USERACCOUNTING_COMPLETE); } -int -dmu_objset_userspace_upgrade(objset_t *os) +boolean_t +dmu_objset_userobjspace_present(objset_t *os) +{ + return (os->os_phys->os_flags & + OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE); +} + +static int +dmu_objset_space_upgrade(objset_t *os) { uint64_t obj; int err = 0; - if (dmu_objset_userspace_present(os)) - return (0); - if (!dmu_objset_userused_enabled(os)) - return (SET_ERROR(ENOTSUP)); - if (dmu_objset_is_snapshot(os)) - return (SET_ERROR(EINVAL)); - /* * We simply need to mark every object dirty, so that it will be * synced out and now accounted. 
If this is called @@ -1512,6 +1616,13 @@ dmu_objset_userspace_upgrade(objset_t *os) dmu_buf_t *db; int objerr; + mutex_enter(&os->os_upgrade_lock); + if (os->os_upgrade_exit) + err = SET_ERROR(EINTR); + mutex_exit(&os->os_upgrade_lock); + if (err != 0) + return (err); + if (issig(JUSTLOOKING) && issig(FORREAL)) return (SET_ERROR(EINTR)); @@ -1529,12 +1640,60 @@ dmu_objset_userspace_upgrade(objset_t *os) dmu_buf_rele(db, FTAG); dmu_tx_commit(tx); } + return (0); +} + +int +dmu_objset_userspace_upgrade(objset_t *os) +{ + int err = 0; + + if (dmu_objset_userspace_present(os)) + return (0); + if (dmu_objset_is_snapshot(os)) + return (SET_ERROR(EINVAL)); + if (!dmu_objset_userused_enabled(os)) + return (SET_ERROR(ENOTSUP)); + + err = dmu_objset_space_upgrade(os); + if (err) + return (err); os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE; txg_wait_synced(dmu_objset_pool(os), 0); return (0); } +static int +dmu_objset_userobjspace_upgrade_cb(objset_t *os) +{ + int err = 0; + + if (dmu_objset_userobjspace_present(os)) + return (0); + if (dmu_objset_is_snapshot(os)) + return (SET_ERROR(EINVAL)); + if (!dmu_objset_userobjused_enabled(os)) + return (SET_ERROR(ENOTSUP)); + + dmu_objset_ds(os)->ds_feature_activation_needed[ + SPA_FEATURE_USEROBJ_ACCOUNTING] = B_TRUE; + + err = dmu_objset_space_upgrade(os); + if (err) + return (err); + + os->os_flags |= OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE; + txg_wait_synced(dmu_objset_pool(os), 0); + return (0); +} + +void +dmu_objset_userobjspace_upgrade(objset_t *os) +{ + dmu_objset_upgrade(os, dmu_objset_userobjspace_upgrade_cb); +} + void dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp, uint64_t *usedobjsp, uint64_t *availobjsp) @@ -2096,4 +2255,7 @@ EXPORT_SYMBOL(dmu_objset_userquota_get_ids); EXPORT_SYMBOL(dmu_objset_userused_enabled); EXPORT_SYMBOL(dmu_objset_userspace_upgrade); EXPORT_SYMBOL(dmu_objset_userspace_present); +EXPORT_SYMBOL(dmu_objset_userobjused_enabled); +EXPORT_SYMBOL(dmu_objset_userobjspace_upgrade); +EXPORT_SYMBOL(dmu_objset_userobjspace_present); #endif diff --git a/module/zfs/dnode_sync.c b/module/zfs/dnode_sync.c index b19f50af9..6d1fa3339 100644 --- a/module/zfs/dnode_sync.c +++ b/module/zfs/dnode_sync.c @@ -570,12 +570,17 @@ dnode_sync(dnode_t *dn, dmu_tx_t *tx) dn->dn_oldused = DN_USED_BYTES(dn->dn_phys); dn->dn_oldflags = dn->dn_phys->dn_flags; dn->dn_phys->dn_flags |= DNODE_FLAG_USERUSED_ACCOUNTED; + if (dmu_objset_userobjused_enabled(dn->dn_objset)) + dn->dn_phys->dn_flags |= + DNODE_FLAG_USEROBJUSED_ACCOUNTED; mutex_exit(&dn->dn_mtx); dmu_objset_userquota_get_ids(dn, B_FALSE, tx); } else { /* Once we account for it, we should always account for it. */ ASSERT(!(dn->dn_phys->dn_flags & DNODE_FLAG_USERUSED_ACCOUNTED)); + ASSERT(!(dn->dn_phys->dn_flags & + DNODE_FLAG_USEROBJUSED_ACCOUNTED)); } mutex_enter(&dn->dn_mtx); diff --git a/module/zfs/spa.c b/module/zfs/spa.c index c2f914e11..0a480d3ec 100644 --- a/module/zfs/spa.c +++ b/module/zfs/spa.c @@ -1167,6 +1167,13 @@ spa_activate(spa_t *spa, int mode) */ spa->spa_zvol_taskq = taskq_create("z_zvol", 1, defclsyspri, 1, INT_MAX, 0); + + /* + * The taskq to upgrade datasets in this pool. Currently used by + * feature SPA_FEATURE_USEROBJ_ACCOUNTING. 
+ */ + spa->spa_upgrade_taskq = taskq_create("z_upgrade", boot_ncpus, + defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC); } /* @@ -1190,6 +1197,11 @@ spa_deactivate(spa_t *spa) spa->spa_zvol_taskq = NULL; } + if (spa->spa_upgrade_taskq) { + taskq_destroy(spa->spa_upgrade_taskq); + spa->spa_upgrade_taskq = NULL; + } + txg_list_destroy(&spa->spa_vdev_txg_list); list_destroy(&spa->spa_config_dirty_list); diff --git a/module/zfs/zfeature_common.c b/module/zfs/zfeature_common.c index 9beb4903e..ccd65a7b7 100644 --- a/module/zfs/zfeature_common.c +++ b/module/zfs/zfeature_common.c @@ -285,4 +285,15 @@ zpool_feature_init(void) "Edon-R hash algorithm.", ZFEATURE_FLAG_PER_DATASET, edonr_deps); } + { + static const spa_feature_t userobj_accounting_deps[] = { + SPA_FEATURE_EXTENSIBLE_DATASET, + SPA_FEATURE_NONE + }; + zfeature_register(SPA_FEATURE_USEROBJ_ACCOUNTING, + "org.zfsonlinux:userobj_accounting", "userobj_accounting", + "User/Group object accounting.", + ZFEATURE_FLAG_READONLY_COMPAT | ZFEATURE_FLAG_PER_DATASET, + userobj_accounting_deps); + } } diff --git a/module/zfs/zfs_acl.c b/module/zfs/zfs_acl.c index 451000010..7198c7ebf 100644 --- a/module/zfs/zfs_acl.c +++ b/module/zfs/zfs_acl.c @@ -1886,7 +1886,9 @@ boolean_t zfs_acl_ids_overquota(zfs_sb_t *zsb, zfs_acl_ids_t *acl_ids) { return (zfs_fuid_overquota(zsb, B_FALSE, acl_ids->z_fuid) || - zfs_fuid_overquota(zsb, B_TRUE, acl_ids->z_fgid)); + zfs_fuid_overquota(zsb, B_TRUE, acl_ids->z_fgid) || + zfs_fuid_overobjquota(zsb, B_FALSE, acl_ids->z_fuid) || + zfs_fuid_overobjquota(zsb, B_TRUE, acl_ids->z_fgid)); } /* diff --git a/module/zfs/zfs_ioctl.c b/module/zfs/zfs_ioctl.c index e5704e258..549a83116 100644 --- a/module/zfs/zfs_ioctl.c +++ b/module/zfs/zfs_ioctl.c @@ -244,9 +244,14 @@ static const char *userquota_perms[] = { ZFS_DELEG_PERM_USERQUOTA, ZFS_DELEG_PERM_GROUPUSED, ZFS_DELEG_PERM_GROUPQUOTA, + ZFS_DELEG_PERM_USEROBJUSED, + ZFS_DELEG_PERM_USEROBJQUOTA, + ZFS_DELEG_PERM_GROUPOBJUSED, + ZFS_DELEG_PERM_GROUPOBJQUOTA, }; static int zfs_ioc_userspace_upgrade(zfs_cmd_t *zc); +static int zfs_ioc_userobjspace_upgrade(zfs_cmd_t *zc); static int zfs_check_settable(const char *name, nvpair_t *property, cred_t *cr); static int zfs_check_clearable(char *dataset, nvlist_t *props, @@ -1171,7 +1176,9 @@ zfs_secpolicy_userspace_one(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) * themself, allow it. 
*/ if (zc->zc_objset_type == ZFS_PROP_USERUSED || - zc->zc_objset_type == ZFS_PROP_USERQUOTA) { + zc->zc_objset_type == ZFS_PROP_USERQUOTA || + zc->zc_objset_type == ZFS_PROP_USEROBJUSED || + zc->zc_objset_type == ZFS_PROP_USEROBJQUOTA) { if (zc->zc_guid == crgetuid(cr)) return (0); } else { @@ -2426,6 +2433,7 @@ zfs_prop_set_special(const char *dsname, zprop_source_t source, zc = kmem_zalloc(sizeof (zfs_cmd_t), KM_SLEEP); (void) strcpy(zc->zc_name, dsname); (void) zfs_ioc_userspace_upgrade(zc); + (void) zfs_ioc_userobjspace_upgrade(zc); kmem_free(zc, sizeof (zfs_cmd_t)); } break; @@ -3720,13 +3728,23 @@ zfs_check_settable(const char *dsname, nvpair_t *pair, cred_t *cr) zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA]; const char *gq_prefix = zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA]; + const char *uiq_prefix = + zfs_userquota_prop_prefixes[ZFS_PROP_USEROBJQUOTA]; + const char *giq_prefix = + zfs_userquota_prop_prefixes[ZFS_PROP_GROUPOBJQUOTA]; if (strncmp(propname, uq_prefix, strlen(uq_prefix)) == 0) { perm = ZFS_DELEG_PERM_USERQUOTA; + } else if (strncmp(propname, uiq_prefix, + strlen(uiq_prefix)) == 0) { + perm = ZFS_DELEG_PERM_USEROBJQUOTA; } else if (strncmp(propname, gq_prefix, strlen(gq_prefix)) == 0) { perm = ZFS_DELEG_PERM_GROUPQUOTA; + } else if (strncmp(propname, giq_prefix, + strlen(giq_prefix)) == 0) { + perm = ZFS_DELEG_PERM_GROUPOBJQUOTA; } else { /* USERUSED and GROUPUSED are read-only */ return (SET_ERROR(EINVAL)); @@ -4927,6 +4945,48 @@ zfs_ioc_userspace_upgrade(zfs_cmd_t *zc) return (error); } +/* + * inputs: + * zc_name name of filesystem + * + * outputs: + * none + */ +static int +zfs_ioc_userobjspace_upgrade(zfs_cmd_t *zc) +{ + objset_t *os; + int error; + + error = dmu_objset_hold(zc->zc_name, FTAG, &os); + if (error != 0) + return (error); + + dsl_dataset_long_hold(dmu_objset_ds(os), FTAG); + dsl_pool_rele(dmu_objset_pool(os), FTAG); + + if (dmu_objset_userobjspace_upgradable(os)) { + mutex_enter(&os->os_upgrade_lock); + if (os->os_upgrade_id == 0) { + /* clear potential error code and retry */ + os->os_upgrade_status = 0; + mutex_exit(&os->os_upgrade_lock); + + dmu_objset_userobjspace_upgrade(os); + } else { + mutex_exit(&os->os_upgrade_lock); + } + + taskq_wait_id(os->os_spa->spa_upgrade_taskq, os->os_upgrade_id); + error = os->os_upgrade_status; + } + + dsl_dataset_long_rele(dmu_objset_ds(os), FTAG); + dsl_dataset_rele(dmu_objset_ds(os), FTAG); + + return (error); +} + static int zfs_ioc_share(zfs_cmd_t *zc) { diff --git a/module/zfs/zfs_vfsops.c b/module/zfs/zfs_vfsops.c index d8b27461a..63b7f9230 100644 --- a/module/zfs/zfs_vfsops.c +++ b/module/zfs/zfs_vfsops.c @@ -431,17 +431,22 @@ zfs_userquota_prop_to_obj(zfs_sb_t *zsb, zfs_userquota_prop_t type) { switch (type) { case ZFS_PROP_USERUSED: + case ZFS_PROP_USEROBJUSED: return (DMU_USERUSED_OBJECT); case ZFS_PROP_GROUPUSED: + case ZFS_PROP_GROUPOBJUSED: return (DMU_GROUPUSED_OBJECT); case ZFS_PROP_USERQUOTA: return (zsb->z_userquota_obj); case ZFS_PROP_GROUPQUOTA: return (zsb->z_groupquota_obj); + case ZFS_PROP_USEROBJQUOTA: + return (zsb->z_userobjquota_obj); + case ZFS_PROP_GROUPOBJQUOTA: + return (zsb->z_groupobjquota_obj); default: - return (SET_ERROR(ENOTSUP)); + return (ZFS_NO_OBJECT); } - return (0); } int @@ -453,16 +458,25 @@ zfs_userspace_many(zfs_sb_t *zsb, zfs_userquota_prop_t type, zap_attribute_t za; zfs_useracct_t *buf = vbuf; uint64_t obj; + int offset = 0; if (!dmu_objset_userspace_present(zsb->z_os)) return (SET_ERROR(ENOTSUP)); + if ((type == ZFS_PROP_USEROBJUSED || type == 
ZFS_PROP_GROUPOBJUSED || + type == ZFS_PROP_USEROBJQUOTA || type == ZFS_PROP_GROUPOBJQUOTA) && + !dmu_objset_userobjspace_present(zsb->z_os)) + return (SET_ERROR(ENOTSUP)); + obj = zfs_userquota_prop_to_obj(zsb, type); - if (obj == 0) { + if (obj == ZFS_NO_OBJECT) { *bufsizep = 0; return (0); } + if (type == ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED) + offset = DMU_OBJACCT_PREFIX_LEN; + for (zap_cursor_init_serialized(&zc, zsb->z_os, obj, *cookiep); (error = zap_cursor_retrieve(&zc, &za)) == 0; zap_cursor_advance(&zc)) { @@ -470,7 +484,15 @@ zfs_userspace_many(zfs_sb_t *zsb, zfs_userquota_prop_t type, *bufsizep) break; - fuidstr_to_sid(zsb, za.za_name, + /* + * skip object quota (with zap name prefix DMU_OBJACCT_PREFIX) + * when dealing with block quota and vice versa. + */ + if ((offset > 0) != (strncmp(za.za_name, DMU_OBJACCT_PREFIX, + DMU_OBJACCT_PREFIX_LEN) == 0)) + continue; + + fuidstr_to_sid(zsb, za.za_name + offset, buf->zu_domain, sizeof (buf->zu_domain), &buf->zu_rid); buf->zu_space = za.za_first_integer; @@ -511,7 +533,8 @@ int zfs_userspace_one(zfs_sb_t *zsb, zfs_userquota_prop_t type, const char *domain, uint64_t rid, uint64_t *valp) { - char buf[32]; + char buf[20 + DMU_OBJACCT_PREFIX_LEN]; + int offset = 0; int err; uint64_t obj; @@ -520,11 +543,21 @@ zfs_userspace_one(zfs_sb_t *zsb, zfs_userquota_prop_t type, if (!dmu_objset_userspace_present(zsb->z_os)) return (SET_ERROR(ENOTSUP)); + if ((type == ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED || + type == ZFS_PROP_USEROBJQUOTA || type == ZFS_PROP_GROUPOBJQUOTA) && + !dmu_objset_userobjspace_present(zsb->z_os)) + return (SET_ERROR(ENOTSUP)); + obj = zfs_userquota_prop_to_obj(zsb, type); - if (obj == 0) + if (obj == ZFS_NO_OBJECT) return (0); - err = id_to_fuidstr(zsb, domain, rid, buf, B_FALSE); + if (type == ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED) { + strncpy(buf, DMU_OBJACCT_PREFIX, DMU_OBJACCT_PREFIX_LEN); + offset = DMU_OBJACCT_PREFIX_LEN; + } + + err = id_to_fuidstr(zsb, domain, rid, buf + offset, B_FALSE); if (err) return (err); @@ -545,14 +578,25 @@ zfs_set_userquota(zfs_sb_t *zsb, zfs_userquota_prop_t type, uint64_t *objp; boolean_t fuid_dirtied; - if (type != ZFS_PROP_USERQUOTA && type != ZFS_PROP_GROUPQUOTA) - return (SET_ERROR(EINVAL)); - if (zsb->z_version < ZPL_VERSION_USERSPACE) return (SET_ERROR(ENOTSUP)); - objp = (type == ZFS_PROP_USERQUOTA) ? &zsb->z_userquota_obj : - &zsb->z_groupquota_obj; + switch (type) { + case ZFS_PROP_USERQUOTA: + objp = &zsb->z_userquota_obj; + break; + case ZFS_PROP_GROUPQUOTA: + objp = &zsb->z_groupquota_obj; + break; + case ZFS_PROP_USEROBJQUOTA: + objp = &zsb->z_userobjquota_obj; + break; + case ZFS_PROP_GROUPOBJQUOTA: + objp = &zsb->z_groupobjquota_obj; + break; + default: + return (SET_ERROR(EINVAL)); + } err = id_to_fuidstr(zsb, domain, rid, buf, B_TRUE); if (err) @@ -598,9 +642,39 @@ zfs_set_userquota(zfs_sb_t *zsb, zfs_userquota_prop_t type, EXPORT_SYMBOL(zfs_set_userquota); boolean_t +zfs_fuid_overobjquota(zfs_sb_t *zsb, boolean_t isgroup, uint64_t fuid) +{ + char buf[20 + DMU_OBJACCT_PREFIX_LEN]; + uint64_t used, quota, usedobj, quotaobj; + int err; + + if (!dmu_objset_userobjspace_present(zsb->z_os)) { + if (dmu_objset_userobjspace_upgradable(zsb->z_os)) + dmu_objset_userobjspace_upgrade(zsb->z_os); + return (B_FALSE); + } + + usedobj = isgroup ? DMU_GROUPUSED_OBJECT : DMU_USERUSED_OBJECT; + quotaobj = isgroup ? 
zsb->z_groupobjquota_obj : zsb->z_userobjquota_obj; + if (quotaobj == 0 || zsb->z_replay) + return (B_FALSE); + + (void) sprintf(buf, "%llx", (longlong_t)fuid); + err = zap_lookup(zsb->z_os, quotaobj, buf, 8, 1, &quota); + if (err != 0) + return (B_FALSE); + + (void) sprintf(buf, DMU_OBJACCT_PREFIX "%llx", (longlong_t)fuid); + err = zap_lookup(zsb->z_os, usedobj, buf, 8, 1, &used); + if (err != 0) + return (B_FALSE); + return (used >= quota); +} + +boolean_t zfs_fuid_overquota(zfs_sb_t *zsb, boolean_t isgroup, uint64_t fuid) { - char buf[32]; + char buf[20]; uint64_t used, quota, usedobj, quotaobj; int err; @@ -777,6 +851,18 @@ zfs_sb_create(const char *osname, zfs_mntopts_t *zmo, zfs_sb_t **zsbp) if (error && error != ENOENT) goto out; + error = zap_lookup(os, MASTER_NODE_OBJ, + zfs_userquota_prop_prefixes[ZFS_PROP_USEROBJQUOTA], + 8, 1, &zsb->z_userobjquota_obj); + if (error && error != ENOENT) + goto out; + + error = zap_lookup(os, MASTER_NODE_OBJ, + zfs_userquota_prop_prefixes[ZFS_PROP_GROUPOBJQUOTA], + 8, 1, &zsb->z_groupobjquota_obj); + if (error && error != ENOENT) + goto out; + error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1, &zsb->z_fuid_obj); if (error && error != ENOENT)