authorChunwei Chen <[email protected]>2014-03-29 19:49:55 +0800
committerBrian Behlendorf <[email protected]>2014-04-10 14:28:43 -0700
commit215b4634c77b7914078baad9260ea854af85ec33 (patch)
treef7c7e3d103efa74725da45e7c184656e68b7392d
parentd4541210f3e07930dfefb6723d21cd8b313bb156 (diff)
Refactor dmu_req_copy for immutable biovec changes
Originally, dmu_req_copy modified bv_len and bv_offset in each bio_vec so that it could continue where it left off on subsequent passes. After the immutable biovec changes in Linux 3.14, modifying a bio_vec in place is no longer allowed. Instead, the caller now tells dmu_req_copy how many bytes have already been copied, and it skips to the right spot accordingly.

Signed-off-by: Chunwei Chen <[email protected]>
Signed-off-by: Richard Yao <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Issue #2124
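The same resume-by-offset pattern is sketched below outside the kernel: instead of mutating the segment descriptors to record progress, the copy routine takes a running byte count and skips past whatever earlier passes already consumed. This is only an illustrative, standalone sketch; struct seg, copy_from_segs(), and the example segments are hypothetical stand-ins, not the kernel's bio_vec/request types or the actual dmu_req_copy() interface.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for a request's segment list (not a kernel type). */
struct seg {
	const char *buf;	/* segment data */
	size_t len;		/* segment length */
};

/*
 * Copy up to size bytes from the segments into arg_buf, skipping the first
 * req_offset bytes, which a previous pass already consumed.  The segments
 * themselves are never modified, mirroring the immutable-biovec approach.
 * Returns the number of bytes copied.
 */
static size_t
copy_from_segs(char *arg_buf, size_t size, const struct seg *segs, int nsegs,
    size_t req_offset)
{
	size_t offset = 0;
	int i;

	for (i = 0; i < nsegs; i++) {
		size_t seg_len, seg_off, tocpy;

		if (offset == size)
			break;

		/* Skip segments that earlier passes fully copied. */
		if (req_offset >= segs[i].len) {
			req_offset -= segs[i].len;
			continue;
		}

		/* Resume partway through a partially copied segment. */
		seg_off = req_offset;
		seg_len = segs[i].len - req_offset;
		req_offset = 0;

		tocpy = seg_len < size - offset ? seg_len : size - offset;
		memcpy(arg_buf + offset, segs[i].buf + seg_off, tocpy);
		offset += tocpy;
	}

	return (offset);
}

int
main(void)
{
	struct seg segs[] = {
		{ "abcd", 4 },
		{ "efgh", 4 },
	};
	char out[8] = { 0 };
	size_t done = 0;

	/* Two passes of 5 and 3 bytes; the second resumes via req_offset. */
	done += copy_from_segs(out, 5, segs, 2, done);
	done += copy_from_segs(out + done, 3, segs, 2, done);

	assert(done == 8 && memcmp(out, "abcdefgh", 8) == 0);
	printf("%.*s\n", (int)done, out);
	return (0);
}

The key point mirrors the patch: the segment list is treated as read-only, and all bookkeeping lives in the offset the caller carries between passes and advances by the return value of each copy.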
-rw-r--r-- module/zfs/dmu.c | 67
1 file changed, 39 insertions, 28 deletions
diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
index 9e99558a3..81b7a870b 100644
--- a/module/zfs/dmu.c
+++ b/module/zfs/dmu.c
@@ -1004,48 +1004,53 @@ xuio_stat_wbuf_nocopy()
/*
* Copy up to size bytes between arg_buf and req based on the data direction
- * described by the req. If an entire req's data cannot be transfered the
- * req's is updated such that it's current index and bv offsets correctly
- * reference any residual data which could not be copied. The return value
- * is the number of bytes successfully copied to arg_buf.
+ * described by the req. If an entire req's data cannot be transfered in one
+ * pass, you should pass in @req_offset to indicate where to continue. The
+ * return value is the number of bytes successfully copied to arg_buf.
*/
static int
-dmu_req_copy(void *arg_buf, int size, int *offset, struct request *req)
+dmu_req_copy(void *arg_buf, int size, struct request *req, size_t req_offset)
{
struct bio_vec *bv;
struct req_iterator iter;
char *bv_buf;
- int tocpy;
+ int tocpy, bv_len, bv_offset;
+ int offset = 0;
- *offset = 0;
rq_for_each_segment(bv, req, iter) {
+ /*
+ * Fully consumed the passed arg_buf. We use goto here because
+ * rq_for_each_segment is a double loop
+ */
+ ASSERT3S(offset, <=, size);
+ if (size == offset)
+ goto out;
- /* Fully consumed the passed arg_buf */
- ASSERT3S(*offset, <=, size);
- if (size == *offset)
- break;
-
- /* Skip fully consumed bv's */
- if (bv->bv_len == 0)
+ /* Skip already copied bv */
+ if (req_offset >= bv->bv_len) {
+ req_offset -= bv->bv_len;
continue;
+ }
+
+ bv_len = bv->bv_len - req_offset;
+ bv_offset = bv->bv_offset + req_offset;
+ req_offset = 0;
- tocpy = MIN(bv->bv_len, size - *offset);
+ tocpy = MIN(bv_len, size - offset);
ASSERT3S(tocpy, >=, 0);
- bv_buf = page_address(bv->bv_page) + bv->bv_offset;
+ bv_buf = page_address(bv->bv_page) + bv_offset;
ASSERT3P(bv_buf, !=, NULL);
if (rq_data_dir(req) == WRITE)
- memcpy(arg_buf + *offset, bv_buf, tocpy);
+ memcpy(arg_buf + offset, bv_buf, tocpy);
else
- memcpy(bv_buf, arg_buf + *offset, tocpy);
+ memcpy(bv_buf, arg_buf + offset, tocpy);
- *offset += tocpy;
- bv->bv_offset += tocpy;
- bv->bv_len -= tocpy;
+ offset += tocpy;
}
-
- return (0);
+out:
+ return (offset);
}
static void
@@ -1101,6 +1106,7 @@ dmu_read_req(objset_t *os, uint64_t object, struct request *req)
struct bio *bio_saved = req->bio;
dmu_buf_t **dbp;
int numbufs, i, err;
+ size_t req_offset;
/*
* NB: we could do this block-at-a-time, but it's nice
@@ -1122,6 +1128,7 @@ dmu_read_req(objset_t *os, uint64_t object, struct request *req)
if (err)
goto error;
+ req_offset = 0;
for (i = 0; i < numbufs; i++) {
int tocpy, didcpy, bufoff;
dmu_buf_t *db = dbp[i];
@@ -1133,7 +1140,8 @@ dmu_read_req(objset_t *os, uint64_t object, struct request *req)
if (tocpy == 0)
break;
- err = dmu_req_copy(db->db_data + bufoff, tocpy, &didcpy, req);
+ didcpy = dmu_req_copy(db->db_data + bufoff, tocpy, req,
+ req_offset);
if (didcpy < tocpy)
err = EIO;
@@ -1143,6 +1151,7 @@ dmu_read_req(objset_t *os, uint64_t object, struct request *req)
size -= tocpy;
offset += didcpy;
+ req_offset += didcpy;
err = 0;
}
@@ -1161,9 +1170,8 @@ dmu_write_req(objset_t *os, uint64_t object, struct request *req, dmu_tx_t *tx)
uint64_t offset = blk_rq_pos(req) << 9;
struct bio *bio_saved = req->bio;
dmu_buf_t **dbp;
- int numbufs;
- int err = 0;
- int i;
+ int numbufs, i, err;
+ size_t req_offset;
if (size == 0)
return (0);
@@ -1184,6 +1192,7 @@ dmu_write_req(objset_t *os, uint64_t object, struct request *req, dmu_tx_t *tx)
if (err)
goto error;
+ req_offset = 0;
for (i = 0; i < numbufs; i++) {
int tocpy, didcpy, bufoff;
dmu_buf_t *db = dbp[i];
@@ -1202,7 +1211,8 @@ dmu_write_req(objset_t *os, uint64_t object, struct request *req, dmu_tx_t *tx)
else
dmu_buf_will_dirty(db, tx);
- err = dmu_req_copy(db->db_data + bufoff, tocpy, &didcpy, req);
+ didcpy = dmu_req_copy(db->db_data + bufoff, tocpy, req,
+ req_offset);
if (tocpy == db->db_size)
dmu_buf_fill_done(db, tx);
@@ -1215,6 +1225,7 @@ dmu_write_req(objset_t *os, uint64_t object, struct request *req, dmu_tx_t *tx)
size -= tocpy;
offset += didcpy;
+ req_offset += didcpy;
err = 0;
}