diff options
Diffstat (limited to 'module/zfs/zfs_fuid.c')
-rw-r--r-- | module/zfs/zfs_fuid.c | 163 |
1 file changed, 82 insertions(+), 81 deletions(-)
diff --git a/module/zfs/zfs_fuid.c b/module/zfs/zfs_fuid.c index 3ab1c7ba0..5cfb0c975 100644 --- a/module/zfs/zfs_fuid.c +++ b/module/zfs/zfs_fuid.c @@ -46,7 +46,7 @@ * two AVL trees are created. One tree is keyed by the index number * and the other by the domain string. Nodes are never removed from * trees, but new entries may be added. If a new entry is added then - * the zsb->z_fuid_dirty flag is set to true and the caller will then + * the zfsvfs->z_fuid_dirty flag is set to true and the caller will then * be responsible for calling zfs_fuid_sync() to sync the changes to disk. * */ @@ -191,34 +191,34 @@ zfs_fuid_idx_domain(avl_tree_t *idx_tree, uint32_t idx) * Load the fuid table(s) into memory. */ static void -zfs_fuid_init(zfs_sb_t *zsb) +zfs_fuid_init(zfsvfs_t *zfsvfs) { - rw_enter(&zsb->z_fuid_lock, RW_WRITER); + rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER); - if (zsb->z_fuid_loaded) { - rw_exit(&zsb->z_fuid_lock); + if (zfsvfs->z_fuid_loaded) { + rw_exit(&zfsvfs->z_fuid_lock); return; } - zfs_fuid_avl_tree_create(&zsb->z_fuid_idx, &zsb->z_fuid_domain); + zfs_fuid_avl_tree_create(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain); - (void) zap_lookup(zsb->z_os, MASTER_NODE_OBJ, - ZFS_FUID_TABLES, 8, 1, &zsb->z_fuid_obj); - if (zsb->z_fuid_obj != 0) { - zsb->z_fuid_size = zfs_fuid_table_load(zsb->z_os, - zsb->z_fuid_obj, &zsb->z_fuid_idx, - &zsb->z_fuid_domain); + (void) zap_lookup(zfsvfs->z_os, MASTER_NODE_OBJ, + ZFS_FUID_TABLES, 8, 1, &zfsvfs->z_fuid_obj); + if (zfsvfs->z_fuid_obj != 0) { + zfsvfs->z_fuid_size = zfs_fuid_table_load(zfsvfs->z_os, + zfsvfs->z_fuid_obj, &zfsvfs->z_fuid_idx, + &zfsvfs->z_fuid_domain); } - zsb->z_fuid_loaded = B_TRUE; - rw_exit(&zsb->z_fuid_lock); + zfsvfs->z_fuid_loaded = B_TRUE; + rw_exit(&zfsvfs->z_fuid_lock); } /* * sync out AVL trees to persistent storage. 
*/ void -zfs_fuid_sync(zfs_sb_t *zsb, dmu_tx_t *tx) +zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t *tx) { nvlist_t *nvp; nvlist_t **fuids; @@ -229,30 +229,30 @@ zfs_fuid_sync(zfs_sb_t *zsb, dmu_tx_t *tx) int numnodes; int i; - if (!zsb->z_fuid_dirty) { + if (!zfsvfs->z_fuid_dirty) { return; } - rw_enter(&zsb->z_fuid_lock, RW_WRITER); + rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER); /* * First see if table needs to be created? */ - if (zsb->z_fuid_obj == 0) { - zsb->z_fuid_obj = dmu_object_alloc(zsb->z_os, + if (zfsvfs->z_fuid_obj == 0) { + zfsvfs->z_fuid_obj = dmu_object_alloc(zfsvfs->z_os, DMU_OT_FUID, 1 << 14, DMU_OT_FUID_SIZE, sizeof (uint64_t), tx); - VERIFY(zap_add(zsb->z_os, MASTER_NODE_OBJ, + VERIFY(zap_add(zfsvfs->z_os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, sizeof (uint64_t), 1, - &zsb->z_fuid_obj, tx) == 0); + &zfsvfs->z_fuid_obj, tx) == 0); } VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0); - numnodes = avl_numnodes(&zsb->z_fuid_idx); + numnodes = avl_numnodes(&zfsvfs->z_fuid_idx); fuids = kmem_alloc(numnodes * sizeof (void *), KM_SLEEP); - for (i = 0, domnode = avl_first(&zsb->z_fuid_domain); domnode; i++, - domnode = AVL_NEXT(&zsb->z_fuid_domain, domnode)) { + for (i = 0, domnode = avl_first(&zfsvfs->z_fuid_domain); domnode; i++, + domnode = AVL_NEXT(&zfsvfs->z_fuid_domain, domnode)) { VERIFY(nvlist_alloc(&fuids[i], NV_UNIQUE_NAME, KM_SLEEP) == 0); VERIFY(nvlist_add_uint64(fuids[i], FUID_IDX, domnode->f_idx) == 0); @@ -270,29 +270,30 @@ zfs_fuid_sync(zfs_sb_t *zsb, dmu_tx_t *tx) VERIFY(nvlist_pack(nvp, &packed, &nvsize, NV_ENCODE_XDR, KM_SLEEP) == 0); nvlist_free(nvp); - zsb->z_fuid_size = nvsize; - dmu_write(zsb->z_os, zsb->z_fuid_obj, 0, zsb->z_fuid_size, packed, tx); - kmem_free(packed, zsb->z_fuid_size); - VERIFY(0 == dmu_bonus_hold(zsb->z_os, zsb->z_fuid_obj, + zfsvfs->z_fuid_size = nvsize; + dmu_write(zfsvfs->z_os, zfsvfs->z_fuid_obj, 0, + zfsvfs->z_fuid_size, packed, tx); + kmem_free(packed, zfsvfs->z_fuid_size); + VERIFY(0 == 
dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj, FTAG, &db)); dmu_buf_will_dirty(db, tx); - *(uint64_t *)db->db_data = zsb->z_fuid_size; + *(uint64_t *)db->db_data = zfsvfs->z_fuid_size; dmu_buf_rele(db, FTAG); - zsb->z_fuid_dirty = B_FALSE; - rw_exit(&zsb->z_fuid_lock); + zfsvfs->z_fuid_dirty = B_FALSE; + rw_exit(&zfsvfs->z_fuid_lock); } /* * Query domain table for a given domain. * * If domain isn't found and addok is set, it is added to AVL trees and - * the zsb->z_fuid_dirty flag will be set to TRUE. It will then be + * the zfsvfs->z_fuid_dirty flag will be set to TRUE. It will then be * necessary for the caller or another thread to detect the dirty table * and sync out the changes. */ int -zfs_fuid_find_by_domain(zfs_sb_t *zsb, const char *domain, +zfs_fuid_find_by_domain(zfsvfs_t *zfsvfs, const char *domain, char **retdomain, boolean_t addok) { fuid_domain_t searchnode, *findnode; @@ -313,23 +314,23 @@ zfs_fuid_find_by_domain(zfs_sb_t *zsb, const char *domain, searchnode.f_ksid = ksid_lookupdomain(domain); if (retdomain) *retdomain = searchnode.f_ksid->kd_name; - if (!zsb->z_fuid_loaded) - zfs_fuid_init(zsb); + if (!zfsvfs->z_fuid_loaded) + zfs_fuid_init(zfsvfs); retry: - rw_enter(&zsb->z_fuid_lock, rw); - findnode = avl_find(&zsb->z_fuid_domain, &searchnode, &loc); + rw_enter(&zfsvfs->z_fuid_lock, rw); + findnode = avl_find(&zfsvfs->z_fuid_domain, &searchnode, &loc); if (findnode) { - rw_exit(&zsb->z_fuid_lock); + rw_exit(&zfsvfs->z_fuid_lock); ksiddomain_rele(searchnode.f_ksid); return (findnode->f_idx); } else if (addok) { fuid_domain_t *domnode; uint64_t retidx; - if (rw == RW_READER && !rw_tryupgrade(&zsb->z_fuid_lock)) { - rw_exit(&zsb->z_fuid_lock); + if (rw == RW_READER && !rw_tryupgrade(&zfsvfs->z_fuid_lock)) { + rw_exit(&zfsvfs->z_fuid_lock); rw = RW_WRITER; goto retry; } @@ -337,15 +338,15 @@ retry: domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP); domnode->f_ksid = searchnode.f_ksid; - retidx = domnode->f_idx = avl_numnodes(&zsb->z_fuid_idx) 
+ 1; + retidx = domnode->f_idx = avl_numnodes(&zfsvfs->z_fuid_idx) + 1; - avl_add(&zsb->z_fuid_domain, domnode); - avl_add(&zsb->z_fuid_idx, domnode); - zsb->z_fuid_dirty = B_TRUE; - rw_exit(&zsb->z_fuid_lock); + avl_add(&zfsvfs->z_fuid_domain, domnode); + avl_add(&zfsvfs->z_fuid_idx, domnode); + zfsvfs->z_fuid_dirty = B_TRUE; + rw_exit(&zfsvfs->z_fuid_lock); return (retidx); } else { - rw_exit(&zsb->z_fuid_lock); + rw_exit(&zfsvfs->z_fuid_lock); return (-1); } } @@ -357,23 +358,23 @@ retry: * */ const char * -zfs_fuid_find_by_idx(zfs_sb_t *zsb, uint32_t idx) +zfs_fuid_find_by_idx(zfsvfs_t *zfsvfs, uint32_t idx) { char *domain; - if (idx == 0 || !zsb->z_use_fuids) + if (idx == 0 || !zfsvfs->z_use_fuids) return (NULL); - if (!zsb->z_fuid_loaded) - zfs_fuid_init(zsb); + if (!zfsvfs->z_fuid_loaded) + zfs_fuid_init(zfsvfs); - rw_enter(&zsb->z_fuid_lock, RW_READER); + rw_enter(&zfsvfs->z_fuid_lock, RW_READER); - if (zsb->z_fuid_obj || zsb->z_fuid_dirty) - domain = zfs_fuid_idx_domain(&zsb->z_fuid_idx, idx); + if (zfsvfs->z_fuid_obj || zfsvfs->z_fuid_dirty) + domain = zfs_fuid_idx_domain(&zfsvfs->z_fuid_idx, idx); else domain = nulldomain; - rw_exit(&zsb->z_fuid_lock); + rw_exit(&zfsvfs->z_fuid_lock); ASSERT(domain); return (domain); @@ -389,7 +390,7 @@ zfs_fuid_map_ids(znode_t *zp, cred_t *cr, uid_t *uidp, uid_t *gidp) } uid_t -zfs_fuid_map_id(zfs_sb_t *zsb, uint64_t fuid, +zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid, cred_t *cr, zfs_fuid_type_t type) { #ifdef HAVE_KSID @@ -400,7 +401,7 @@ zfs_fuid_map_id(zfs_sb_t *zsb, uint64_t fuid, if (index == 0) return (fuid); - domain = zfs_fuid_find_by_idx(zsb, index); + domain = zfs_fuid_find_by_idx(zfsvfs, index); ASSERT(domain != NULL); if (type == ZFS_OWNER || type == ZFS_ACE_USER) { @@ -495,7 +496,7 @@ zfs_fuid_node_add(zfs_fuid_info_t **fuidpp, const char *domain, uint32_t rid, * be used if it exists. 
*/ uint64_t -zfs_fuid_create_cred(zfs_sb_t *zsb, zfs_fuid_type_t type, +zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type, cred_t *cr, zfs_fuid_info_t **fuidp) { uint64_t idx; @@ -509,7 +510,7 @@ zfs_fuid_create_cred(zfs_sb_t *zsb, zfs_fuid_type_t type, ksid = crgetsid(cr, (type == ZFS_OWNER) ? KSID_OWNER : KSID_GROUP); - if (!zsb->z_use_fuids || (ksid == NULL)) { + if (!zfsvfs->z_use_fuids || (ksid == NULL)) { id = (type == ZFS_OWNER) ? crgetuid(cr) : crgetgid(cr); if (IS_EPHEMERAL(id)) @@ -532,7 +533,7 @@ zfs_fuid_create_cred(zfs_sb_t *zsb, zfs_fuid_type_t type, rid = ksid_getrid(ksid); domain = ksid_getdomain(ksid); - idx = zfs_fuid_find_by_domain(zsb, domain, &kdomain, B_TRUE); + idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE); zfs_fuid_node_add(fuidp, kdomain, rid, idx, id, type); @@ -550,10 +551,10 @@ zfs_fuid_create_cred(zfs_sb_t *zsb, zfs_fuid_type_t type, * * During replay operations the domain+rid information is * found in the zfs_fuid_info_t that the replay code has - * attached to the zsb of the file system. + * attached to the zfsvfs of the file system. */ uint64_t -zfs_fuid_create(zfs_sb_t *zsb, uint64_t id, cred_t *cr, +zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr, zfs_fuid_type_t type, zfs_fuid_info_t **fuidpp) { #ifdef HAVE_KSID @@ -574,11 +575,11 @@ zfs_fuid_create(zfs_sb_t *zsb, uint64_t id, cred_t *cr, * chmod. 
*/ - if (!zsb->z_use_fuids || !IS_EPHEMERAL(id) || fuid_idx != 0) + if (!zfsvfs->z_use_fuids || !IS_EPHEMERAL(id) || fuid_idx != 0) return (id); - if (zsb->z_replay) { - fuidp = zsb->z_fuid_replay; + if (zfsvfs->z_replay) { + fuidp = zfsvfs->z_fuid_replay; /* * If we are passed an ephemeral id, but no @@ -628,9 +629,9 @@ zfs_fuid_create(zfs_sb_t *zsb, uint64_t id, cred_t *cr, } } - idx = zfs_fuid_find_by_domain(zsb, domain, &kdomain, B_TRUE); + idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE); - if (!zsb->z_replay) + if (!zfsvfs->z_replay) zfs_fuid_node_add(fuidpp, kdomain, rid, idx, id, type); else if (zfuid != NULL) { @@ -647,15 +648,15 @@ zfs_fuid_create(zfs_sb_t *zsb, uint64_t id, cred_t *cr, } void -zfs_fuid_destroy(zfs_sb_t *zsb) +zfs_fuid_destroy(zfsvfs_t *zfsvfs) { - rw_enter(&zsb->z_fuid_lock, RW_WRITER); - if (!zsb->z_fuid_loaded) { - rw_exit(&zsb->z_fuid_lock); + rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER); + if (!zfsvfs->z_fuid_loaded) { + rw_exit(&zfsvfs->z_fuid_lock); return; } - zfs_fuid_table_destroy(&zsb->z_fuid_idx, &zsb->z_fuid_domain); - rw_exit(&zsb->z_fuid_lock); + zfs_fuid_table_destroy(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain); + rw_exit(&zfsvfs->z_fuid_lock); } /* @@ -709,7 +710,7 @@ zfs_fuid_info_free(zfs_fuid_info_t *fuidp) * Will use a straight FUID compare when possible. 
*/ boolean_t -zfs_groupmember(zfs_sb_t *zsb, uint64_t id, cred_t *cr) +zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr) { #ifdef HAVE_KSID ksid_t *ksid = crgetsid(cr, KSID_GROUP); @@ -733,7 +734,7 @@ zfs_groupmember(zfs_sb_t *zsb, uint64_t id, cred_t *cr) } else { const char *domain; - domain = zfs_fuid_find_by_idx(zsb, idx); + domain = zfs_fuid_find_by_idx(zfsvfs, idx); ASSERT(domain != NULL); if (strcmp(domain, @@ -751,7 +752,7 @@ zfs_groupmember(zfs_sb_t *zsb, uint64_t id, cred_t *cr) /* * Not found in ksidlist, check posix groups */ - gid = zfs_fuid_map_id(zsb, id, cr, ZFS_GROUP); + gid = zfs_fuid_map_id(zfsvfs, id, cr, ZFS_GROUP); return (groupmember(gid, cr)); #else return (B_TRUE); @@ -759,17 +760,17 @@ zfs_groupmember(zfs_sb_t *zsb, uint64_t id, cred_t *cr) } void -zfs_fuid_txhold(zfs_sb_t *zsb, dmu_tx_t *tx) +zfs_fuid_txhold(zfsvfs_t *zfsvfs, dmu_tx_t *tx) { - if (zsb->z_fuid_obj == 0) { + if (zfsvfs->z_fuid_obj == 0) { dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, - FUID_SIZE_ESTIMATE(zsb)); + FUID_SIZE_ESTIMATE(zfsvfs)); dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL); } else { - dmu_tx_hold_bonus(tx, zsb->z_fuid_obj); - dmu_tx_hold_write(tx, zsb->z_fuid_obj, 0, - FUID_SIZE_ESTIMATE(zsb)); + dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj); + dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0, + FUID_SIZE_ESTIMATE(zfsvfs)); } } #endif |