author    | Pawel Jakub Dawidek <[email protected]> | 2022-02-03 14:37:57 -0800
committer | GitHub <[email protected]>              | 2022-02-03 14:37:57 -0800
commit    | 3d244b488155e04ec059e66752d7138aa75e7e48 (patch)
tree      | ea29349eea084539d30180d5638874e66e17d1f8 /module
parent    | 63652e154643cfe596fe077c13de0e7be34dd863 (diff)
Fix clearing set-uid and set-gid bits on a file when replaying a write
POSIX requires that the set-uid and set-gid bits be removed when an
unprivileged user writes to a file, and ZFS does that during normal
operation.
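
For illustration only (not part of this change): a minimal user-space sketch
of that POSIX rule. The path /tank/suidfile and its initial 04755 mode are
assumptions made up for the example; the file is presumed writable by the
unprivileged caller.

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int
main(void)
{
	struct stat st;
	int fd = open("/tank/suidfile", O_WRONLY);

	if (fd == -1)
		return (1);

	(void) fstat(fd, &st);
	printf("before write: mode %04o\n", (unsigned)(st.st_mode & 07777));

	/* An unprivileged write; the filesystem clears S_ISUID/S_ISGID. */
	(void) write(fd, "x", 1);

	(void) fstat(fd, &st);
	printf("after write:  mode %04o\n", (unsigned)(st.st_mode & 07777));

	(void) close(fd);
	return (0);
}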
The problem arises when the write is stored in the ZIL and later replayed.
During replay we have no access to the original credentials of the process
that did the write, so zfs_write() is performed with root credentials. When
root does the write, the set-uid and set-gid bits are not removed from the
file.
To correct that, log a separate TX_SETATTR entry that removes those bits
on the first write to such a file.
Idea from: Christian Schwarz
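
As a hedged, standalone sketch of what that entry carries (using an example
mode rather than the znode's z_mode): the logged mode is simply the current
mode with the set-id bits dropped, and it is logged only when an execute bit
and a set-id bit are both present.

#include <stdio.h>
#include <sys/stat.h>

int
main(void)
{
	mode_t mode = 06755;	/* rwsr-sr-x, for example */

	/* Act only when an execute bit and a set-id bit are both set. */
	if ((mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 &&
	    (mode & (S_ISUID | S_ISGID)) != 0) {
		mode_t newmode = mode & ~(S_ISUID | S_ISGID);

		/* This is the mode the TX_SETATTR record would carry. */
		printf("TX_SETATTR mode %04o\n", (unsigned)newmode);
	}
	return (0);
}

During replay the setattr record is applied before the write records, so the
bits stay cleared even though the replayed write runs with root credentials.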
Add test for ZIL replay of setuid/setgid clearing.
Improve various edge cases when clearing setid bits:
- The setid bits can be re-added during a single write, so make sure to check
  for them on every chunk write.
- Log the TX_SETATTR record at most once per transaction group (in case the
  setid bits keep coming back); see the sketch after this list.
- Move zfs_log_setattr() outside of zp->z_acl_lock.
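
A minimal standalone sketch of that once-per-transaction-group guard; the
names maybe_log_setattr and log_setattr_record are illustrative only, while
the patch itself uses *clear_setid_bits_txgp, dmu_tx_get_txg(tx) and
zfs_log_setattr().

#include <stdint.h>
#include <stdio.h>

static void
log_setattr_record(uint64_t txg)
{
	/* Stand-in for emitting one TX_SETATTR record to the ZIL. */
	printf("TX_SETATTR logged in txg %llu\n", (unsigned long long)txg);
}

/* Called for every chunk written while the set-id bits keep reappearing. */
static void
maybe_log_setattr(uint64_t *clear_setid_bits_txgp, uint64_t txg)
{
	/* Log at most one TX_SETATTR per transaction group. */
	if (*clear_setid_bits_txgp != txg) {
		log_setattr_record(txg);
		*clear_setid_bits_txgp = txg;
	}
}

int
main(void)
{
	uint64_t last_txg = 0;

	/* Two chunks land in txg 7, one in txg 8: only two records result. */
	maybe_log_setattr(&last_txg, 7);
	maybe_log_setattr(&last_txg, 7);
	maybe_log_setattr(&last_txg, 8);
	return (0);
}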
Reviewed-by: Dan McDonald <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Co-authored-by: Christian Schwarz <[email protected]>
Signed-off-by: Pawel Jakub Dawidek <[email protected]>
Closes #13027
Diffstat (limited to 'module')
-rw-r--r-- | module/zfs/zfs_vnops.c | 105
1 file changed, 81 insertions, 24 deletions
diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
index b7fdae926..918938d62 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -315,6 +315,62 @@ out:
 	return (error);
 }
 
+static void
+zfs_clear_setid_bits_if_necessary(zfsvfs_t *zfsvfs, znode_t *zp, cred_t *cr,
+    uint64_t *clear_setid_bits_txgp, dmu_tx_t *tx)
+{
+	zilog_t *zilog = zfsvfs->z_log;
+	const uint64_t uid = KUID_TO_SUID(ZTOUID(zp));
+
+	ASSERT(clear_setid_bits_txgp != NULL);
+	ASSERT(tx != NULL);
+
+	/*
+	 * Clear Set-UID/Set-GID bits on successful write if not
+	 * privileged and at least one of the execute bits is set.
+	 *
+	 * It would be nice to do this after all writes have
+	 * been done, but that would still expose the ISUID/ISGID
+	 * to another app after the partial write is committed.
+	 *
+	 * Note: we don't call zfs_fuid_map_id() here because
+	 * user 0 is not an ephemeral uid.
+	 */
+	mutex_enter(&zp->z_acl_lock);
+	if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) | (S_IXUSR >> 6))) != 0 &&
+	    (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
+	    secpolicy_vnode_setid_retain(zp, cr,
+	    ((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
+		uint64_t newmode;
+
+		zp->z_mode &= ~(S_ISUID | S_ISGID);
+		newmode = zp->z_mode;
+		(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
+		    (void *)&newmode, sizeof (uint64_t), tx);
+
+		mutex_exit(&zp->z_acl_lock);
+
+		/*
+		 * Make sure SUID/SGID bits will be removed when we replay the
+		 * log. If the setid bits are keep coming back, don't log more
+		 * than one TX_SETATTR per transaction group.
+		 */
+		if (*clear_setid_bits_txgp != dmu_tx_get_txg(tx)) {
+			vattr_t va;
+
+			bzero(&va, sizeof (va));
+			va.va_mask = AT_MODE;
+			va.va_nodeid = zp->z_id;
+			va.va_mode = newmode;
+			zfs_log_setattr(zilog, tx, TX_SETATTR, zp, &va, AT_MODE,
+			    NULL);
+			*clear_setid_bits_txgp = dmu_tx_get_txg(tx);
+		}
+	} else {
+		mutex_exit(&zp->z_acl_lock);
+	}
+}
+
 /*
  * Write the bytes to a file.
  *
@@ -340,6 +396,7 @@ zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
 {
 	int error = 0, error1;
 	ssize_t start_resid = zfs_uio_resid(uio);
+	uint64_t clear_setid_bits_txg = 0;
 
 	/*
 	 * Fasttrack empty write
@@ -519,6 +576,11 @@ zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
 		}
 
 		/*
+		 * NB: We must call zfs_clear_setid_bits_if_necessary before
+		 * committing the transaction!
+		 */
+
+		/*
 		 * If rangelock_enter() over-locked we grow the blocksize
 		 * and then reduce the lock range. This will only happen
 		 * on the first iteration since rangelock_reduce() will
@@ -559,6 +621,8 @@ zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
 			zfs_uio_fault_disable(uio, B_FALSE);
 #ifdef __linux__
 			if (error == EFAULT) {
+				zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
+				    cr, &clear_setid_bits_txg, tx);
 				dmu_tx_commit(tx);
 				/*
 				 * Account for partial writes before
@@ -581,6 +645,8 @@ zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
 			 * VFS, which will handle faulting and will retry.
 			 */
 			if (error != 0 && error != EFAULT) {
+				zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
+				    cr, &clear_setid_bits_txg, tx);
 				dmu_tx_commit(tx);
 				break;
 			}
@@ -605,6 +671,13 @@ zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
 			error = dmu_assign_arcbuf_by_dbuf(
 			    sa_get_db(zp->z_sa_hdl), woff, abuf, tx);
 			if (error != 0) {
+				/*
+				 * XXX This might not be necessary if
+				 * dmu_assign_arcbuf_by_dbuf is guaranteed
+				 * to be atomic.
+				 */
+				zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
+				    cr, &clear_setid_bits_txg, tx);
 				dmu_return_arcbuf(abuf);
 				dmu_tx_commit(tx);
 				break;
@@ -630,30 +703,8 @@ zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
 			break;
 		}
 
-		/*
-		 * Clear Set-UID/Set-GID bits on successful write if not
-		 * privileged and at least one of the execute bits is set.
-		 *
-		 * It would be nice to do this after all writes have
-		 * been done, but that would still expose the ISUID/ISGID
-		 * to another app after the partial write is committed.
-		 *
-		 * Note: we don't call zfs_fuid_map_id() here because
-		 * user 0 is not an ephemeral uid.
-		 */
-		mutex_enter(&zp->z_acl_lock);
-		if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
-		    (S_IXUSR >> 6))) != 0 &&
-		    (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
-		    secpolicy_vnode_setid_retain(zp, cr,
-		    ((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
-			uint64_t newmode;
-			zp->z_mode &= ~(S_ISUID | S_ISGID);
-			newmode = zp->z_mode;
-			(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
-			    (void *)&newmode, sizeof (uint64_t), tx);
-		}
-		mutex_exit(&zp->z_acl_lock);
+		zfs_clear_setid_bits_if_necessary(zfsvfs, zp, cr,
+		    &clear_setid_bits_txg, tx);
 
 		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
 
@@ -679,8 +730,14 @@ zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
 			/* Avoid clobbering EFAULT. */
 			error = error1;
 
+		/*
+		 * NB: During replay, the TX_SETATTR record logged by
+		 * zfs_clear_setid_bits_if_necessary must precede any of
+		 * the TX_WRITE records logged here.
+		 */
 		zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag,
 		    NULL, NULL);
+
 		dmu_tx_commit(tx);
 
 		if (error != 0)