aboutsummaryrefslogtreecommitdiffstats
path: root/module/zfs/zfs_fuid.c
diff options
context:
space:
mode:
authorBrian Behlendorf <[email protected]>2011-02-08 11:16:06 -0800
committerBrian Behlendorf <[email protected]>2011-02-10 09:27:21 -0800
commit3558fd73b5d863304102f6745c26e0b592aca60a (patch)
treeb22e26afbf6c494d34032876fb9be4d21d4e8ed7 /module/zfs/zfs_fuid.c
parent6149f4c45fc905761a6f636ea9e14ff76ce6c842 (diff)
Prototype/structure update for Linux
I apologize in advance that so many things ended up in this commit. While it could be separated into a whole series of commits, teasing it all apart now would take considerable time and I'm not sure there's much merit in it. As such I'll just summarize the intent of the changes which are all (or partly) in this commit. Broadly the intent is to remove as much Solaris specific code as possible and replace it with native Linux equivalents. More specifically: 1) Replace all instances of zfsvfs_t with zfs_sb_t. While the type is largely the same, calling it private super block data rather than a zfsvfs is more consistent with how Linux names this. While non-critical, it makes the code easier to read when you're thinking in Linux friendly VFS terms. 2) Replace vnode_t with struct inode. The Linux VFS doesn't have the notion of a vnode and there's absolutely no good reason to create one. There are in fact several good reasons to remove it. It just adds overhead on Linux if we were to manage one, it complicates the code, and it likely will lead to bugs, so there's a good chance it will be out of date. The code has been updated to remove all need for this type. 3) Replace all vtype_t's with umode types. Along with this, shift all uses of types to mode bits. The Solaris code would pass a vtype which is redundant with the Linux mode. Just update all the code to use the Linux mode macros and remove this redundancy. 4) Remove usage of vn_* helpers and replace where needed with inode helpers. The big example here is creating iput_async to replace vn_rele_async. Other vn helpers will be addressed as needed, but they should not be emulated. They are a Solaris VFS'ism and should simply be replaced with Linux equivalents. 5) Update znode alloc/free code. Under Linux it's common to embed the inode specific data with the inode itself. This removes the need for an extra memory allocation. In zfs this information is called a znode and it now embeds the inode with it. 
Allocators have been updated accordingly. 6) Minimal integration with the vfs flags for setting up the super block and handling mount options has been added; this code will need to be refined, but functionally it's all there. This will be the first and last of these too-large-to-review commits.
Diffstat (limited to 'module/zfs/zfs_fuid.c')
-rw-r--r--module/zfs/zfs_fuid.c171
1 files changed, 85 insertions, 86 deletions
diff --git a/module/zfs/zfs_fuid.c b/module/zfs/zfs_fuid.c
index a5741185a..f1e071fc7 100644
--- a/module/zfs/zfs_fuid.c
+++ b/module/zfs/zfs_fuid.c
@@ -46,7 +46,7 @@
* two AVL trees are created. One tree is keyed by the index number
* and the other by the domain string. Nodes are never removed from
* trees, but new entries may be added. If a new entry is added then
- * the zfsvfs->z_fuid_dirty flag is set to true and the caller will then
+ * the zsb->z_fuid_dirty flag is set to true and the caller will then
* be responsible for calling zfs_fuid_sync() to sync the changes to disk.
*
*/
@@ -196,34 +196,34 @@ zfs_fuid_idx_domain(avl_tree_t *idx_tree, uint32_t idx)
* Load the fuid table(s) into memory.
*/
static void
-zfs_fuid_init(zfsvfs_t *zfsvfs)
+zfs_fuid_init(zfs_sb_t *zsb)
{
- rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
+ rw_enter(&zsb->z_fuid_lock, RW_WRITER);
- if (zfsvfs->z_fuid_loaded) {
- rw_exit(&zfsvfs->z_fuid_lock);
+ if (zsb->z_fuid_loaded) {
+ rw_exit(&zsb->z_fuid_lock);
return;
}
- zfs_fuid_avl_tree_create(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain);
+ zfs_fuid_avl_tree_create(&zsb->z_fuid_idx, &zsb->z_fuid_domain);
- (void) zap_lookup(zfsvfs->z_os, MASTER_NODE_OBJ,
- ZFS_FUID_TABLES, 8, 1, &zfsvfs->z_fuid_obj);
- if (zfsvfs->z_fuid_obj != 0) {
- zfsvfs->z_fuid_size = zfs_fuid_table_load(zfsvfs->z_os,
- zfsvfs->z_fuid_obj, &zfsvfs->z_fuid_idx,
- &zfsvfs->z_fuid_domain);
+ (void) zap_lookup(zsb->z_os, MASTER_NODE_OBJ,
+ ZFS_FUID_TABLES, 8, 1, &zsb->z_fuid_obj);
+ if (zsb->z_fuid_obj != 0) {
+ zsb->z_fuid_size = zfs_fuid_table_load(zsb->z_os,
+ zsb->z_fuid_obj, &zsb->z_fuid_idx,
+ &zsb->z_fuid_domain);
}
- zfsvfs->z_fuid_loaded = B_TRUE;
- rw_exit(&zfsvfs->z_fuid_lock);
+ zsb->z_fuid_loaded = B_TRUE;
+ rw_exit(&zsb->z_fuid_lock);
}
/*
* sync out AVL trees to persistent storage.
*/
void
-zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
+zfs_fuid_sync(zfs_sb_t *zsb, dmu_tx_t *tx)
{
nvlist_t *nvp;
nvlist_t **fuids;
@@ -234,30 +234,30 @@ zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
int numnodes;
int i;
- if (!zfsvfs->z_fuid_dirty) {
+ if (!zsb->z_fuid_dirty) {
return;
}
- rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
+ rw_enter(&zsb->z_fuid_lock, RW_WRITER);
/*
* First see if table needs to be created?
*/
- if (zfsvfs->z_fuid_obj == 0) {
- zfsvfs->z_fuid_obj = dmu_object_alloc(zfsvfs->z_os,
+ if (zsb->z_fuid_obj == 0) {
+ zsb->z_fuid_obj = dmu_object_alloc(zsb->z_os,
DMU_OT_FUID, 1 << 14, DMU_OT_FUID_SIZE,
sizeof (uint64_t), tx);
- VERIFY(zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
+ VERIFY(zap_add(zsb->z_os, MASTER_NODE_OBJ,
ZFS_FUID_TABLES, sizeof (uint64_t), 1,
- &zfsvfs->z_fuid_obj, tx) == 0);
+ &zsb->z_fuid_obj, tx) == 0);
}
VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
- numnodes = avl_numnodes(&zfsvfs->z_fuid_idx);
+ numnodes = avl_numnodes(&zsb->z_fuid_idx);
fuids = kmem_alloc(numnodes * sizeof (void *), KM_SLEEP);
- for (i = 0, domnode = avl_first(&zfsvfs->z_fuid_domain); domnode; i++,
- domnode = AVL_NEXT(&zfsvfs->z_fuid_domain, domnode)) {
+ for (i = 0, domnode = avl_first(&zsb->z_fuid_domain); domnode; i++,
+ domnode = AVL_NEXT(&zsb->z_fuid_domain, domnode)) {
VERIFY(nvlist_alloc(&fuids[i], NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64(fuids[i], FUID_IDX,
domnode->f_idx) == 0);
@@ -275,30 +275,29 @@ zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
VERIFY(nvlist_pack(nvp, &packed, &nvsize,
NV_ENCODE_XDR, KM_SLEEP) == 0);
nvlist_free(nvp);
- zfsvfs->z_fuid_size = nvsize;
- dmu_write(zfsvfs->z_os, zfsvfs->z_fuid_obj, 0,
- zfsvfs->z_fuid_size, packed, tx);
- kmem_free(packed, zfsvfs->z_fuid_size);
- VERIFY(0 == dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj,
+ zsb->z_fuid_size = nvsize;
+ dmu_write(zsb->z_os, zsb->z_fuid_obj, 0, zsb->z_fuid_size, packed, tx);
+ kmem_free(packed, zsb->z_fuid_size);
+ VERIFY(0 == dmu_bonus_hold(zsb->z_os, zsb->z_fuid_obj,
FTAG, &db));
dmu_buf_will_dirty(db, tx);
- *(uint64_t *)db->db_data = zfsvfs->z_fuid_size;
+ *(uint64_t *)db->db_data = zsb->z_fuid_size;
dmu_buf_rele(db, FTAG);
- zfsvfs->z_fuid_dirty = B_FALSE;
- rw_exit(&zfsvfs->z_fuid_lock);
+ zsb->z_fuid_dirty = B_FALSE;
+ rw_exit(&zsb->z_fuid_lock);
}
/*
* Query domain table for a given domain.
*
* If domain isn't found and addok is set, it is added to AVL trees and
- * the zfsvfs->z_fuid_dirty flag will be set to TRUE. It will then be
+ * the zsb->z_fuid_dirty flag will be set to TRUE. It will then be
* necessary for the caller or another thread to detect the dirty table
* and sync out the changes.
*/
int
-zfs_fuid_find_by_domain(zfsvfs_t *zfsvfs, const char *domain,
+zfs_fuid_find_by_domain(zfs_sb_t *zsb, const char *domain,
char **retdomain, boolean_t addok)
{
fuid_domain_t searchnode, *findnode;
@@ -319,23 +318,23 @@ zfs_fuid_find_by_domain(zfsvfs_t *zfsvfs, const char *domain,
searchnode.f_ksid = ksid_lookupdomain(domain);
if (retdomain)
*retdomain = searchnode.f_ksid->kd_name;
- if (!zfsvfs->z_fuid_loaded)
- zfs_fuid_init(zfsvfs);
+ if (!zsb->z_fuid_loaded)
+ zfs_fuid_init(zsb);
retry:
- rw_enter(&zfsvfs->z_fuid_lock, rw);
- findnode = avl_find(&zfsvfs->z_fuid_domain, &searchnode, &loc);
+ rw_enter(&zsb->z_fuid_lock, rw);
+ findnode = avl_find(&zsb->z_fuid_domain, &searchnode, &loc);
if (findnode) {
- rw_exit(&zfsvfs->z_fuid_lock);
+ rw_exit(&zsb->z_fuid_lock);
ksiddomain_rele(searchnode.f_ksid);
return (findnode->f_idx);
} else if (addok) {
fuid_domain_t *domnode;
uint64_t retidx;
- if (rw == RW_READER && !rw_tryupgrade(&zfsvfs->z_fuid_lock)) {
- rw_exit(&zfsvfs->z_fuid_lock);
+ if (rw == RW_READER && !rw_tryupgrade(&zsb->z_fuid_lock)) {
+ rw_exit(&zsb->z_fuid_lock);
rw = RW_WRITER;
goto retry;
}
@@ -343,15 +342,15 @@ retry:
domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP);
domnode->f_ksid = searchnode.f_ksid;
- retidx = domnode->f_idx = avl_numnodes(&zfsvfs->z_fuid_idx) + 1;
+ retidx = domnode->f_idx = avl_numnodes(&zsb->z_fuid_idx) + 1;
- avl_add(&zfsvfs->z_fuid_domain, domnode);
- avl_add(&zfsvfs->z_fuid_idx, domnode);
- zfsvfs->z_fuid_dirty = B_TRUE;
- rw_exit(&zfsvfs->z_fuid_lock);
+ avl_add(&zsb->z_fuid_domain, domnode);
+ avl_add(&zsb->z_fuid_idx, domnode);
+ zsb->z_fuid_dirty = B_TRUE;
+ rw_exit(&zsb->z_fuid_lock);
return (retidx);
} else {
- rw_exit(&zfsvfs->z_fuid_lock);
+ rw_exit(&zsb->z_fuid_lock);
return (-1);
}
}
@@ -363,23 +362,23 @@ retry:
*
*/
const char *
-zfs_fuid_find_by_idx(zfsvfs_t *zfsvfs, uint32_t idx)
+zfs_fuid_find_by_idx(zfs_sb_t *zsb, uint32_t idx)
{
char *domain;
- if (idx == 0 || !zfsvfs->z_use_fuids)
+ if (idx == 0 || !zsb->z_use_fuids)
return (NULL);
- if (!zfsvfs->z_fuid_loaded)
- zfs_fuid_init(zfsvfs);
+ if (!zsb->z_fuid_loaded)
+ zfs_fuid_init(zsb);
- rw_enter(&zfsvfs->z_fuid_lock, RW_READER);
+ rw_enter(&zsb->z_fuid_lock, RW_READER);
- if (zfsvfs->z_fuid_obj || zfsvfs->z_fuid_dirty)
- domain = zfs_fuid_idx_domain(&zfsvfs->z_fuid_idx, idx);
+ if (zsb->z_fuid_obj || zsb->z_fuid_dirty)
+ domain = zfs_fuid_idx_domain(&zsb->z_fuid_idx, idx);
else
domain = nulldomain;
- rw_exit(&zfsvfs->z_fuid_lock);
+ rw_exit(&zsb->z_fuid_lock);
ASSERT(domain);
return (domain);
@@ -388,12 +387,12 @@ zfs_fuid_find_by_idx(zfsvfs_t *zfsvfs, uint32_t idx)
void
zfs_fuid_map_ids(znode_t *zp, cred_t *cr, uid_t *uidp, uid_t *gidp)
{
- *uidp = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_uid, cr, ZFS_OWNER);
- *gidp = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_gid, cr, ZFS_GROUP);
+ *uidp = zfs_fuid_map_id(ZTOZSB(zp), zp->z_uid, cr, ZFS_OWNER);
+ *gidp = zfs_fuid_map_id(ZTOZSB(zp), zp->z_gid, cr, ZFS_GROUP);
}
uid_t
-zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid,
+zfs_fuid_map_id(zfs_sb_t *zsb, uint64_t fuid,
cred_t *cr, zfs_fuid_type_t type)
{
#ifdef HAVE_KSID
@@ -404,7 +403,7 @@ zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid,
if (index == 0)
return (fuid);
- domain = zfs_fuid_find_by_idx(zfsvfs, index);
+ domain = zfs_fuid_find_by_idx(zsb, index);
ASSERT(domain != NULL);
if (type == ZFS_OWNER || type == ZFS_ACE_USER) {
@@ -499,13 +498,13 @@ zfs_fuid_node_add(zfs_fuid_info_t **fuidpp, const char *domain, uint32_t rid,
* be used if it exists.
*/
uint64_t
-zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type,
+zfs_fuid_create_cred(zfs_sb_t *zsb, zfs_fuid_type_t type,
cred_t *cr, zfs_fuid_info_t **fuidp)
{
uint64_t idx;
ksid_t *ksid;
uint32_t rid;
- char *kdomain;
+ char *kdomain;
const char *domain;
uid_t id;
@@ -513,7 +512,7 @@ zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type,
ksid = crgetsid(cr, (type == ZFS_OWNER) ? KSID_OWNER : KSID_GROUP);
- if (!zfsvfs->z_use_fuids || (ksid == NULL)) {
+ if (!zsb->z_use_fuids || (ksid == NULL)) {
id = (type == ZFS_OWNER) ? crgetuid(cr) : crgetgid(cr);
if (IS_EPHEMERAL(id))
@@ -536,7 +535,7 @@ zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type,
rid = ksid_getrid(ksid);
domain = ksid_getdomain(ksid);
- idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE);
+ idx = zfs_fuid_find_by_domain(zsb, domain, &kdomain, B_TRUE);
zfs_fuid_node_add(fuidp, kdomain, rid, idx, id, type);
@@ -554,10 +553,10 @@ zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type,
*
* During replay operations the domain+rid information is
* found in the zfs_fuid_info_t that the replay code has
- * attached to the zfsvfs of the file system.
+ * attached to the zsb of the file system.
*/
uint64_t
-zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr,
+zfs_fuid_create(zfs_sb_t *zsb, uint64_t id, cred_t *cr,
zfs_fuid_type_t type, zfs_fuid_info_t **fuidpp)
{
#ifdef HAVE_KSID
@@ -578,11 +577,11 @@ zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr,
* chmod.
*/
- if (!zfsvfs->z_use_fuids || !IS_EPHEMERAL(id) || fuid_idx != 0)
+ if (!zsb->z_use_fuids || !IS_EPHEMERAL(id) || fuid_idx != 0)
return (id);
- if (zfsvfs->z_replay) {
- fuidp = zfsvfs->z_fuid_replay;
+ if (zsb->z_replay) {
+ fuidp = zsb->z_fuid_replay;
/*
* If we are passed an ephemeral id, but no
@@ -629,9 +628,9 @@ zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr,
}
}
- idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE);
+ idx = zfs_fuid_find_by_domain(zsb, domain, &kdomain, B_TRUE);
- if (!zfsvfs->z_replay)
+ if (!zsb->z_replay)
zfs_fuid_node_add(fuidpp, kdomain,
rid, idx, id, type);
else if (zfuid != NULL) {
@@ -648,15 +647,15 @@ zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr,
}
void
-zfs_fuid_destroy(zfsvfs_t *zfsvfs)
+zfs_fuid_destroy(zfs_sb_t *zsb)
{
- rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
- if (!zfsvfs->z_fuid_loaded) {
- rw_exit(&zfsvfs->z_fuid_lock);
+ rw_enter(&zsb->z_fuid_lock, RW_WRITER);
+ if (!zsb->z_fuid_loaded) {
+ rw_exit(&zsb->z_fuid_lock);
return;
}
- zfs_fuid_table_destroy(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain);
- rw_exit(&zfsvfs->z_fuid_lock);
+ zfs_fuid_table_destroy(&zsb->z_fuid_idx, &zsb->z_fuid_domain);
+ rw_exit(&zsb->z_fuid_lock);
}
/*
@@ -710,7 +709,7 @@ zfs_fuid_info_free(zfs_fuid_info_t *fuidp)
* Will use a straight FUID compare when possible.
*/
boolean_t
-zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
+zfs_groupmember(zfs_sb_t *zsb, uint64_t id, cred_t *cr)
{
#ifdef HAVE_KSID
ksid_t *ksid = crgetsid(cr, KSID_GROUP);
@@ -718,7 +717,7 @@ zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
uid_t gid;
if (ksid && ksidlist) {
- int i;
+ int i;
ksid_t *ksid_groups;
uint32_t idx = FUID_INDEX(id);
uint32_t rid = FUID_RID(id);
@@ -734,7 +733,7 @@ zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
} else {
const char *domain;
- domain = zfs_fuid_find_by_idx(zfsvfs, idx);
+ domain = zfs_fuid_find_by_idx(zsb, idx);
ASSERT(domain != NULL);
if (strcmp(domain,
@@ -752,7 +751,7 @@ zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
/*
* Not found in ksidlist, check posix groups
*/
- gid = zfs_fuid_map_id(zfsvfs, id, cr, ZFS_GROUP);
+ gid = zfs_fuid_map_id(zsb, id, cr, ZFS_GROUP);
return (groupmember(gid, cr));
#else
return (B_TRUE);
@@ -760,17 +759,17 @@ zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
}
void
-zfs_fuid_txhold(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
+zfs_fuid_txhold(zfs_sb_t *zsb, dmu_tx_t *tx)
{
- if (zfsvfs->z_fuid_obj == 0) {
+ if (zsb->z_fuid_obj == 0) {
dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
- FUID_SIZE_ESTIMATE(zfsvfs));
+ FUID_SIZE_ESTIMATE(zsb));
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL);
} else {
- dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj);
- dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0,
- FUID_SIZE_ESTIMATE(zfsvfs));
+ dmu_tx_hold_bonus(tx, zsb->z_fuid_obj);
+ dmu_tx_hold_write(tx, zsb->z_fuid_obj, 0,
+ FUID_SIZE_ESTIMATE(zsb));
}
}
#endif