diff options
author | Jorgen Lundman <[email protected]> | 2020-06-15 02:09:55 +0900 |
---|---|---|
committer | GitHub <[email protected]> | 2020-06-14 10:09:55 -0700 |
commit | 883a40fff427d200be41d3faabab1dca9a84b353 (patch) | |
tree | ce4521d15b4abcb6712baf0de6a840bfa884fd20 /module | |
parent | 4f73576ea15fcf38b344b008eaf355480a08bbff (diff) |
Add convenience wrappers for common uio usage
The macOS uio struct is opaque, so its accessor API must be used; this
change keeps the modifications to the shared code minimal for all platforms.
Reviewed-by: Matt Macy <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Jorgen Lundman <[email protected]>
Closes #10412
Diffstat (limited to 'module')
-rw-r--r-- | module/icp/algs/modes/modes.c | 30 | ||||
-rw-r--r-- | module/icp/core/kcf_prov_lib.c | 35 | ||||
-rw-r--r-- | module/icp/io/sha1_mod.c | 62 | ||||
-rw-r--r-- | module/icp/io/sha2_mod.c | 64 | ||||
-rw-r--r-- | module/icp/io/skein_mod.c | 42 | ||||
-rw-r--r-- | module/zfs/dmu.c | 14 | ||||
-rw-r--r-- | module/zfs/sa.c | 2 | ||||
-rw-r--r-- | module/zfs/zfs_sa.c | 4 |
8 files changed, 105 insertions, 148 deletions
diff --git a/module/icp/algs/modes/modes.c b/module/icp/algs/modes/modes.c index 1d33c4268..f07876a47 100644 --- a/module/icp/algs/modes/modes.c +++ b/module/icp/algs/modes/modes.c @@ -44,16 +44,13 @@ crypto_init_ptrs(crypto_data_t *out, void **iov_or_mp, offset_t *current_offset) case CRYPTO_DATA_UIO: { uio_t *uiop = out->cd_uio; - uintptr_t vec_idx; + uint_t vec_idx; offset = out->cd_offset; - for (vec_idx = 0; vec_idx < uiop->uio_iovcnt && - offset >= uiop->uio_iov[vec_idx].iov_len; - offset -= uiop->uio_iov[vec_idx++].iov_len) - ; + offset = uio_index_at_offset(uiop, offset, &vec_idx); *current_offset = offset; - *iov_or_mp = (void *)vec_idx; + *iov_or_mp = (void *)(uintptr_t)vec_idx; break; } } /* end switch */ @@ -89,33 +86,34 @@ crypto_get_ptrs(crypto_data_t *out, void **iov_or_mp, offset_t *current_offset, case CRYPTO_DATA_UIO: { uio_t *uio = out->cd_uio; - iovec_t *iov; offset_t offset; - uintptr_t vec_idx; + uint_t vec_idx; uint8_t *p; + uint64_t iov_len; + void *iov_base; offset = *current_offset; vec_idx = (uintptr_t)(*iov_or_mp); - iov = (iovec_t *)&uio->uio_iov[vec_idx]; - p = (uint8_t *)iov->iov_base + offset; + uio_iov_at_index(uio, vec_idx, &iov_base, &iov_len); + p = (uint8_t *)iov_base + offset; *out_data_1 = p; - if (offset + amt <= iov->iov_len) { + if (offset + amt <= iov_len) { /* can fit one block into this iov */ *out_data_1_len = amt; *out_data_2 = NULL; *current_offset = offset + amt; } else { /* one block spans two iovecs */ - *out_data_1_len = iov->iov_len - offset; - if (vec_idx == uio->uio_iovcnt) + *out_data_1_len = iov_len - offset; + if (vec_idx == uio_iovcnt(uio)) return; vec_idx++; - iov = (iovec_t *)&uio->uio_iov[vec_idx]; - *out_data_2 = (uint8_t *)iov->iov_base; + uio_iov_at_index(uio, vec_idx, &iov_base, &iov_len); + *out_data_2 = (uint8_t *)iov_base; *current_offset = amt - *out_data_1_len; } - *iov_or_mp = (void *)vec_idx; + *iov_or_mp = (void *)(uintptr_t)vec_idx; break; } } /* end switch */ diff --git 
a/module/icp/core/kcf_prov_lib.c b/module/icp/core/kcf_prov_lib.c index 6a60eb3db..905ef6657 100644 --- a/module/icp/core/kcf_prov_lib.c +++ b/module/icp/core/kcf_prov_lib.c @@ -48,7 +48,7 @@ crypto_uio_data(crypto_data_t *data, uchar_t *buf, int len, cmd_type_t cmd, uchar_t *datap; ASSERT(data->cd_format == CRYPTO_DATA_UIO); - if (uiop->uio_segflg != UIO_SYSSPACE) { + if (uio_segflg(uiop) != UIO_SYSSPACE) { return (CRYPTO_ARGUMENTS_BAD); } @@ -56,12 +56,9 @@ crypto_uio_data(crypto_data_t *data, uchar_t *buf, int len, cmd_type_t cmd, * Jump to the first iovec containing data to be * processed. */ - for (vec_idx = 0; vec_idx < uiop->uio_iovcnt && - offset >= uiop->uio_iov[vec_idx].iov_len; - offset -= uiop->uio_iov[vec_idx++].iov_len) - ; + offset = uio_index_at_offset(uiop, offset, &vec_idx); - if (vec_idx == uiop->uio_iovcnt && length > 0) { + if (vec_idx == uio_iovcnt(uiop) && length > 0) { /* * The caller specified an offset that is larger than * the total size of the buffers it provided. @@ -69,12 +66,11 @@ crypto_uio_data(crypto_data_t *data, uchar_t *buf, int len, cmd_type_t cmd, return (CRYPTO_DATA_LEN_RANGE); } - while (vec_idx < uiop->uio_iovcnt && length > 0) { - cur_len = MIN(uiop->uio_iov[vec_idx].iov_len - + while (vec_idx < uio_iovcnt(uiop) && length > 0) { + cur_len = MIN(uio_iovlen(uiop, vec_idx) - offset, length); - datap = (uchar_t *)(uiop->uio_iov[vec_idx].iov_base + - offset); + datap = (uchar_t *)(uio_iovbase(uiop, vec_idx) + offset); switch (cmd) { case COPY_FROM_DATA: bcopy(datap, buf, cur_len); @@ -101,7 +97,7 @@ crypto_uio_data(crypto_data_t *data, uchar_t *buf, int len, cmd_type_t cmd, offset = 0; } - if (vec_idx == uiop->uio_iovcnt && length > 0) { + if (vec_idx == uio_iovcnt(uiop) && length > 0) { /* * The end of the specified iovec's was reached but * the length requested could not be processed. 
@@ -182,7 +178,7 @@ crypto_update_uio(void *ctx, crypto_data_t *input, crypto_data_t *output, &common_ctx->cc_iv[0]); } - if (input->cd_uio->uio_segflg != UIO_SYSSPACE) { + if (uio_segflg(input->cd_uio) != UIO_SYSSPACE) { return (CRYPTO_ARGUMENTS_BAD); } @@ -190,11 +186,8 @@ crypto_update_uio(void *ctx, crypto_data_t *input, crypto_data_t *output, * Jump to the first iovec containing data to be * processed. */ - for (vec_idx = 0; vec_idx < uiop->uio_iovcnt && - offset >= uiop->uio_iov[vec_idx].iov_len; - offset -= uiop->uio_iov[vec_idx++].iov_len) - ; - if (vec_idx == uiop->uio_iovcnt && length > 0) { + offset = uio_index_at_offset(uiop, offset, &vec_idx); + if (vec_idx == uio_iovcnt(uiop) && length > 0) { /* * The caller specified an offset that is larger than the * total size of the buffers it provided. @@ -205,11 +198,11 @@ crypto_update_uio(void *ctx, crypto_data_t *input, crypto_data_t *output, /* * Now process the iovecs. */ - while (vec_idx < uiop->uio_iovcnt && length > 0) { - cur_len = MIN(uiop->uio_iov[vec_idx].iov_len - + while (vec_idx < uio_iovcnt(uiop) && length > 0) { + cur_len = MIN(uio_iovlen(uiop, vec_idx) - offset, length); - int rv = (cipher)(ctx, uiop->uio_iov[vec_idx].iov_base + offset, + int rv = (cipher)(ctx, uio_iovbase(uiop, vec_idx) + offset, cur_len, output); if (rv != CRYPTO_SUCCESS) { @@ -220,7 +213,7 @@ crypto_update_uio(void *ctx, crypto_data_t *input, crypto_data_t *output, offset = 0; } - if (vec_idx == uiop->uio_iovcnt && length > 0) { + if (vec_idx == uio_iovcnt(uiop) && length > 0) { /* * The end of the specified iovec's was reached but * the length requested could not be processed, i.e. 
diff --git a/module/icp/io/sha1_mod.c b/module/icp/io/sha1_mod.c index e7c38542a..d0b23cb78 100644 --- a/module/icp/io/sha1_mod.c +++ b/module/icp/io/sha1_mod.c @@ -270,19 +270,15 @@ sha1_digest_update_uio(SHA1_CTX *sha1_ctx, crypto_data_t *data) size_t cur_len; /* we support only kernel buffer */ - if (data->cd_uio->uio_segflg != UIO_SYSSPACE) + if (uio_segflg(data->cd_uio) != UIO_SYSSPACE) return (CRYPTO_ARGUMENTS_BAD); /* * Jump to the first iovec containing data to be * digested. */ - while (vec_idx < data->cd_uio->uio_iovcnt && - offset >= data->cd_uio->uio_iov[vec_idx].iov_len) { - offset -= data->cd_uio->uio_iov[vec_idx].iov_len; - vec_idx++; - } - if (vec_idx == data->cd_uio->uio_iovcnt) { + offset = uio_index_at_offset(data->cd_uio, offset, &vec_idx); + if (vec_idx == uio_iovcnt(data->cd_uio)) { /* * The caller specified an offset that is larger than the * total size of the buffers it provided. @@ -293,12 +289,12 @@ sha1_digest_update_uio(SHA1_CTX *sha1_ctx, crypto_data_t *data) /* * Now do the digesting on the iovecs. */ - while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) { - cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len - + while (vec_idx < uio_iovcnt(data->cd_uio) && length > 0) { + cur_len = MIN(uio_iovlen(data->cd_uio, vec_idx) - offset, length); SHA1Update(sha1_ctx, - (uint8_t *)data->cd_uio->uio_iov[vec_idx].iov_base + offset, + (uint8_t *)uio_iovbase(data->cd_uio, vec_idx) + offset, cur_len); length -= cur_len; @@ -306,7 +302,7 @@ sha1_digest_update_uio(SHA1_CTX *sha1_ctx, crypto_data_t *data) offset = 0; } - if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) { + if (vec_idx == uio_iovcnt(data->cd_uio) && length > 0) { /* * The end of the specified iovec's was reached but * the length requested could not be processed, i.e. 
@@ -333,19 +329,15 @@ sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest, uint_t vec_idx = 0; /* we support only kernel buffer */ - if (digest->cd_uio->uio_segflg != UIO_SYSSPACE) + if (uio_segflg(digest->cd_uio) != UIO_SYSSPACE) return (CRYPTO_ARGUMENTS_BAD); /* * Jump to the first iovec containing ptr to the digest to * be returned. */ - while (vec_idx < digest->cd_uio->uio_iovcnt && - offset >= digest->cd_uio->uio_iov[vec_idx].iov_len) { - offset -= digest->cd_uio->uio_iov[vec_idx].iov_len; - vec_idx++; - } - if (vec_idx == digest->cd_uio->uio_iovcnt) { + offset = uio_index_at_offset(digest->cd_uio, offset, &vec_idx); + if (vec_idx == uio_iovcnt(digest->cd_uio)) { /* * The caller specified an offset that is * larger than the total size of the buffers @@ -355,7 +347,7 @@ sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest, } if (offset + digest_len <= - digest->cd_uio->uio_iov[vec_idx].iov_len) { + uio_iovlen(digest->cd_uio, vec_idx)) { /* * The computed SHA1 digest will fit in the current * iovec. @@ -367,12 +359,12 @@ sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest, * the user only what was requested. 
*/ SHA1Final(digest_scratch, sha1_ctx); - bcopy(digest_scratch, (uchar_t *)digest-> - cd_uio->uio_iov[vec_idx].iov_base + offset, + bcopy(digest_scratch, (uchar_t *)uio_iovbase(digest-> + cd_uio, vec_idx) + offset, digest_len); } else { - SHA1Final((uchar_t *)digest-> - cd_uio->uio_iov[vec_idx].iov_base + offset, + SHA1Final((uchar_t *)uio_iovbase(digest-> + cd_uio, vec_idx) + offset, sha1_ctx); } } else { @@ -389,11 +381,11 @@ sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest, SHA1Final(digest_tmp, sha1_ctx); - while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) { - cur_len = MIN(digest->cd_uio->uio_iov[vec_idx].iov_len - + while (vec_idx < uio_iovcnt(digest->cd_uio) && length > 0) { + cur_len = MIN(uio_iovlen(digest->cd_uio, vec_idx) - offset, length); bcopy(digest_tmp + scratch_offset, - digest->cd_uio->uio_iov[vec_idx].iov_base + offset, + uio_iovbase(digest->cd_uio, vec_idx) + offset, cur_len); length -= cur_len; @@ -402,7 +394,7 @@ sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest, offset = 0; } - if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) { + if (vec_idx == uio_iovcnt(digest->cd_uio) && length > 0) { /* * The end of the specified iovec's was reached but * the length requested could not be processed, i.e. 
@@ -1103,16 +1095,12 @@ sha1_mac_verify_atomic(crypto_provider_handle_t provider, size_t cur_len; /* we support only kernel buffer */ - if (mac->cd_uio->uio_segflg != UIO_SYSSPACE) + if (uio_segflg(mac->cd_uio) != UIO_SYSSPACE) return (CRYPTO_ARGUMENTS_BAD); /* jump to the first iovec containing the expected digest */ - while (vec_idx < mac->cd_uio->uio_iovcnt && - offset >= mac->cd_uio->uio_iov[vec_idx].iov_len) { - offset -= mac->cd_uio->uio_iov[vec_idx].iov_len; - vec_idx++; - } - if (vec_idx == mac->cd_uio->uio_iovcnt) { + offset = uio_index_at_offset(mac->cd_uio, offset, &vec_idx); + if (vec_idx == uio_iovcnt(mac->cd_uio)) { /* * The caller specified an offset that is * larger than the total size of the buffers @@ -1123,12 +1111,12 @@ sha1_mac_verify_atomic(crypto_provider_handle_t provider, } /* do the comparison of computed digest vs specified one */ - while (vec_idx < mac->cd_uio->uio_iovcnt && length > 0) { - cur_len = MIN(mac->cd_uio->uio_iov[vec_idx].iov_len - + while (vec_idx < uio_iovcnt(mac->cd_uio) && length > 0) { + cur_len = MIN(uio_iovlen(mac->cd_uio, vec_idx) - offset, length); if (bcmp(digest + scratch_offset, - mac->cd_uio->uio_iov[vec_idx].iov_base + offset, + uio_iovbase(mac->cd_uio, vec_idx) + offset, cur_len) != 0) { ret = CRYPTO_INVALID_MAC; break; diff --git a/module/icp/io/sha2_mod.c b/module/icp/io/sha2_mod.c index 3254f5597..a4a5c6041 100644 --- a/module/icp/io/sha2_mod.c +++ b/module/icp/io/sha2_mod.c @@ -296,19 +296,15 @@ sha2_digest_update_uio(SHA2_CTX *sha2_ctx, crypto_data_t *data) size_t cur_len; /* we support only kernel buffer */ - if (data->cd_uio->uio_segflg != UIO_SYSSPACE) + if (uio_segflg(data->cd_uio) != UIO_SYSSPACE) return (CRYPTO_ARGUMENTS_BAD); /* * Jump to the first iovec containing data to be * digested. 
*/ - while (vec_idx < data->cd_uio->uio_iovcnt && - offset >= data->cd_uio->uio_iov[vec_idx].iov_len) { - offset -= data->cd_uio->uio_iov[vec_idx].iov_len; - vec_idx++; - } - if (vec_idx == data->cd_uio->uio_iovcnt) { + offset = uio_index_at_offset(data->cd_uio, offset, &vec_idx); + if (vec_idx == uio_iovcnt(data->cd_uio)) { /* * The caller specified an offset that is larger than the * total size of the buffers it provided. @@ -319,18 +315,18 @@ sha2_digest_update_uio(SHA2_CTX *sha2_ctx, crypto_data_t *data) /* * Now do the digesting on the iovecs. */ - while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) { - cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len - + while (vec_idx < uio_iovcnt(data->cd_uio) && length > 0) { + cur_len = MIN(uio_iovlen(data->cd_uio, vec_idx) - offset, length); - SHA2Update(sha2_ctx, (uint8_t *)data->cd_uio-> - uio_iov[vec_idx].iov_base + offset, cur_len); + SHA2Update(sha2_ctx, (uint8_t *)uio_iovbase(data->cd_uio, + vec_idx) + offset, cur_len); length -= cur_len; vec_idx++; offset = 0; } - if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) { + if (vec_idx == uio_iovcnt(data->cd_uio) && length > 0) { /* * The end of the specified iovec's was reached but * the length requested could not be processed, i.e. @@ -357,19 +353,15 @@ sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest, uint_t vec_idx = 0; /* we support only kernel buffer */ - if (digest->cd_uio->uio_segflg != UIO_SYSSPACE) + if (uio_segflg(digest->cd_uio) != UIO_SYSSPACE) return (CRYPTO_ARGUMENTS_BAD); /* * Jump to the first iovec containing ptr to the digest to * be returned. 
*/ - while (vec_idx < digest->cd_uio->uio_iovcnt && - offset >= digest->cd_uio->uio_iov[vec_idx].iov_len) { - offset -= digest->cd_uio->uio_iov[vec_idx].iov_len; - vec_idx++; - } - if (vec_idx == digest->cd_uio->uio_iovcnt) { + offset = uio_index_at_offset(digest->cd_uio, offset, &vec_idx); + if (vec_idx == uio_iovcnt(digest->cd_uio)) { /* * The caller specified an offset that is * larger than the total size of the buffers @@ -379,7 +371,7 @@ sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest, } if (offset + digest_len <= - digest->cd_uio->uio_iov[vec_idx].iov_len) { + uio_iovlen(digest->cd_uio, vec_idx)) { /* * The computed SHA2 digest will fit in the current * iovec. @@ -395,12 +387,12 @@ sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest, */ SHA2Final(digest_scratch, sha2_ctx); - bcopy(digest_scratch, (uchar_t *)digest-> - cd_uio->uio_iov[vec_idx].iov_base + offset, + bcopy(digest_scratch, (uchar_t *)uio_iovbase(digest-> + cd_uio, vec_idx) + offset, digest_len); } else { - SHA2Final((uchar_t *)digest-> - cd_uio->uio_iov[vec_idx].iov_base + offset, + SHA2Final((uchar_t *)uio_iovbase(digest-> + cd_uio, vec_idx) + offset, sha2_ctx); } @@ -418,12 +410,12 @@ sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest, SHA2Final(digest_tmp, sha2_ctx); - while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) { + while (vec_idx < uio_iovcnt(digest->cd_uio) && length > 0) { cur_len = - MIN(digest->cd_uio->uio_iov[vec_idx].iov_len - + MIN(uio_iovlen(digest->cd_uio, vec_idx) - offset, length); bcopy(digest_tmp + scratch_offset, - digest->cd_uio->uio_iov[vec_idx].iov_base + offset, + uio_iovbase(digest->cd_uio, vec_idx) + offset, cur_len); length -= cur_len; @@ -432,7 +424,7 @@ sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest, offset = 0; } - if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) { + if (vec_idx == uio_iovcnt(digest->cd_uio) && length > 0) { /* * The end of the specified iovec's was reached but * the length 
requested could not be processed, i.e. @@ -1259,16 +1251,12 @@ sha2_mac_verify_atomic(crypto_provider_handle_t provider, size_t cur_len; /* we support only kernel buffer */ - if (mac->cd_uio->uio_segflg != UIO_SYSSPACE) + if (uio_segflg(mac->cd_uio) != UIO_SYSSPACE) return (CRYPTO_ARGUMENTS_BAD); /* jump to the first iovec containing the expected digest */ - while (vec_idx < mac->cd_uio->uio_iovcnt && - offset >= mac->cd_uio->uio_iov[vec_idx].iov_len) { - offset -= mac->cd_uio->uio_iov[vec_idx].iov_len; - vec_idx++; - } - if (vec_idx == mac->cd_uio->uio_iovcnt) { + offset = uio_index_at_offset(mac->cd_uio, offset, &vec_idx); + if (vec_idx == uio_iovcnt(mac->cd_uio)) { /* * The caller specified an offset that is * larger than the total size of the buffers @@ -1279,12 +1267,12 @@ sha2_mac_verify_atomic(crypto_provider_handle_t provider, } /* do the comparison of computed digest vs specified one */ - while (vec_idx < mac->cd_uio->uio_iovcnt && length > 0) { - cur_len = MIN(mac->cd_uio->uio_iov[vec_idx].iov_len - + while (vec_idx < uio_iovcnt(mac->cd_uio) && length > 0) { + cur_len = MIN(uio_iovlen(mac->cd_uio, vec_idx) - offset, length); if (bcmp(digest + scratch_offset, - mac->cd_uio->uio_iov[vec_idx].iov_base + offset, + uio_iovbase(mac->cd_uio, vec_idx) + offset, cur_len) != 0) { ret = CRYPTO_INVALID_MAC; break; diff --git a/module/icp/io/skein_mod.c b/module/icp/io/skein_mod.c index afd7f5680..621fa6158 100644 --- a/module/icp/io/skein_mod.c +++ b/module/icp/io/skein_mod.c @@ -271,22 +271,18 @@ skein_digest_update_uio(skein_ctx_t *ctx, const crypto_data_t *data) size_t length = data->cd_length; uint_t vec_idx = 0; size_t cur_len; - const uio_t *uio = data->cd_uio; + uio_t *uio = data->cd_uio; /* we support only kernel buffer */ - if (uio->uio_segflg != UIO_SYSSPACE) + if (uio_segflg(uio) != UIO_SYSSPACE) return (CRYPTO_ARGUMENTS_BAD); /* * Jump to the first iovec containing data to be * digested. 
*/ - while (vec_idx < uio->uio_iovcnt && - offset >= uio->uio_iov[vec_idx].iov_len) { - offset -= uio->uio_iov[vec_idx].iov_len; - vec_idx++; - } - if (vec_idx == uio->uio_iovcnt) { + offset = uio_index_at_offset(uio, offset, &vec_idx); + if (vec_idx == uio_iovcnt(uio)) { /* * The caller specified an offset that is larger than the * total size of the buffers it provided. @@ -297,16 +293,16 @@ skein_digest_update_uio(skein_ctx_t *ctx, const crypto_data_t *data) /* * Now do the digesting on the iovecs. */ - while (vec_idx < uio->uio_iovcnt && length > 0) { - cur_len = MIN(uio->uio_iov[vec_idx].iov_len - offset, length); - SKEIN_OP(ctx, Update, (uint8_t *)uio->uio_iov[vec_idx].iov_base + while (vec_idx < uio_iovcnt(uio) && length > 0) { + cur_len = MIN(uio_iovlen(uio, vec_idx) - offset, length); + SKEIN_OP(ctx, Update, (uint8_t *)uio_iovbase(uio, vec_idx) + offset, cur_len); length -= cur_len; vec_idx++; offset = 0; } - if (vec_idx == uio->uio_iovcnt && length > 0) { + if (vec_idx == uio_iovcnt(uio) && length > 0) { /* * The end of the specified iovec's was reached but * the length requested could not be processed, i.e. @@ -330,18 +326,14 @@ skein_digest_final_uio(skein_ctx_t *ctx, crypto_data_t *digest, uio_t *uio = digest->cd_uio; /* we support only kernel buffer */ - if (uio->uio_segflg != UIO_SYSSPACE) + if (uio_segflg(uio) != UIO_SYSSPACE) return (CRYPTO_ARGUMENTS_BAD); /* * Jump to the first iovec containing ptr to the digest to be returned. */ - while (vec_idx < uio->uio_iovcnt && - offset >= uio->uio_iov[vec_idx].iov_len) { - offset -= uio->uio_iov[vec_idx].iov_len; - vec_idx++; - } - if (vec_idx == uio->uio_iovcnt) { + offset = uio_index_at_offset(uio, offset, &vec_idx); + if (vec_idx == uio_iovcnt(uio)) { /* * The caller specified an offset that is larger than the * total size of the buffers it provided. 
@@ -349,10 +341,10 @@ skein_digest_final_uio(skein_ctx_t *ctx, crypto_data_t *digest, return (CRYPTO_DATA_LEN_RANGE); } if (offset + CRYPTO_BITS2BYTES(ctx->sc_digest_bitlen) <= - uio->uio_iov[vec_idx].iov_len) { + uio_iovlen(uio, vec_idx)) { /* The computed digest will fit in the current iovec. */ SKEIN_OP(ctx, Final, - (uchar_t *)uio->uio_iov[vec_idx].iov_base + offset); + (uchar_t *)uio_iovbase(uio, vec_idx) + offset); } else { uint8_t *digest_tmp; off_t scratch_offset = 0; @@ -364,11 +356,11 @@ skein_digest_final_uio(skein_ctx_t *ctx, crypto_data_t *digest, if (digest_tmp == NULL) return (CRYPTO_HOST_MEMORY); SKEIN_OP(ctx, Final, digest_tmp); - while (vec_idx < uio->uio_iovcnt && length > 0) { - cur_len = MIN(uio->uio_iov[vec_idx].iov_len - offset, + while (vec_idx < uio_iovcnt(uio) && length > 0) { + cur_len = MIN(uio_iovlen(uio, vec_idx) - offset, length); bcopy(digest_tmp + scratch_offset, - uio->uio_iov[vec_idx].iov_base + offset, cur_len); + uio_iovbase(uio, vec_idx) + offset, cur_len); length -= cur_len; vec_idx++; @@ -377,7 +369,7 @@ skein_digest_final_uio(skein_ctx_t *ctx, crypto_data_t *digest, } kmem_free(digest_tmp, CRYPTO_BITS2BYTES(ctx->sc_digest_bitlen)); - if (vec_idx == uio->uio_iovcnt && length > 0) { + if (vec_idx == uio_iovcnt(uio) && length > 0) { /* * The end of the specified iovec's was reached but * the length requested could not be processed, i.e. diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c index a21ac8d74..3958a6f97 100644 --- a/module/zfs/dmu.c +++ b/module/zfs/dmu.c @@ -1327,7 +1327,7 @@ dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size) * NB: we could do this block-at-a-time, but it's nice * to be reading in parallel. 
*/ - err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size, + err = dmu_buf_hold_array_by_dnode(dn, uio_offset(uio), size, TRUE, FTAG, &numbufs, &dbp, 0); if (err) return (err); @@ -1339,7 +1339,7 @@ dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size) ASSERT(size > 0); - bufoff = uio->uio_loffset - db->db_offset; + bufoff = uio_offset(uio) - db->db_offset; tocpy = MIN(db->db_size - bufoff, size); #ifdef HAVE_UIO_ZEROCOPY @@ -1348,10 +1348,8 @@ dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size) arc_buf_t *dbuf_abuf = dbi->db_buf; arc_buf_t *abuf = dbuf_loan_arcbuf(dbi); err = dmu_xuio_add(xuio, abuf, bufoff, tocpy); - if (!err) { - uio->uio_resid -= tocpy; - uio->uio_loffset += tocpy; - } + if (!err) + uio_advance(uio, tocpy); if (abuf == dbuf_abuf) XUIOSTAT_BUMP(xuiostat_rbuf_nocopy); @@ -1436,7 +1434,7 @@ dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx) int err = 0; int i; - err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size, + err = dmu_buf_hold_array_by_dnode(dn, uio_offset(uio), size, FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH); if (err) return (err); @@ -1448,7 +1446,7 @@ dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx) ASSERT(size > 0); - bufoff = uio->uio_loffset - db->db_offset; + bufoff = uio_offset(uio) - db->db_offset; tocpy = MIN(db->db_size - bufoff, size); ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size); diff --git a/module/zfs/sa.c b/module/zfs/sa.c index 557ab0949..ed531892e 100644 --- a/module/zfs/sa.c +++ b/module/zfs/sa.c @@ -1517,7 +1517,7 @@ sa_lookup_uio(sa_handle_t *hdl, sa_attr_type_t attr, uio_t *uio) mutex_enter(&hdl->sa_lock); if ((error = sa_attr_op(hdl, &bulk, 1, SA_LOOKUP, NULL)) == 0) { error = uiomove((void *)bulk.sa_addr, MIN(bulk.sa_size, - uio->uio_resid), UIO_READ, uio); + uio_resid(uio)), UIO_READ, uio); } mutex_exit(&hdl->sa_lock); return (error); diff --git a/module/zfs/zfs_sa.c b/module/zfs/zfs_sa.c index e20e92f12..cbb773ffb 100644 
--- a/module/zfs/zfs_sa.c +++ b/module/zfs/zfs_sa.c @@ -81,13 +81,13 @@ zfs_sa_readlink(znode_t *zp, uio_t *uio) if (bufsz + ZFS_OLD_ZNODE_PHYS_SIZE <= db->db_size) { error = uiomove((caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE, - MIN((size_t)bufsz, uio->uio_resid), UIO_READ, uio); + MIN((size_t)bufsz, uio_resid(uio)), UIO_READ, uio); } else { dmu_buf_t *dbp; if ((error = dmu_buf_hold(ZTOZSB(zp)->z_os, zp->z_id, 0, FTAG, &dbp, DMU_READ_NO_PREFETCH)) == 0) { error = uiomove(dbp->db_data, - MIN((size_t)bufsz, uio->uio_resid), UIO_READ, uio); + MIN((size_t)bufsz, uio_resid(uio)), UIO_READ, uio); dmu_buf_rele(dbp, FTAG); } } |