author    Tom Caputi <[email protected]>    2017-09-28 11:49:13 -0400
committer Tom Caputi <[email protected]>    2017-10-11 16:55:50 -0400
commit    440a3eb939441a42ab5029e5e64498d802fa276b (patch)
tree      5b2b958c6d4d96d4f4f9a930b0c291318fad51ff /cmd/ztest
parent    4807c0badb130ae70cf6f0887b4be1648f217f1a (diff)
Fixes for #6639
Several issues were uncovered by running stress tests with zfs encryption and raw sends in particular. The issues and their associated fixes are as follows:

* arc_read_done() has the ability to chain several requests for the same block of data via the arc_callback_t struct. In these cases, the ARC would only use the first request's dsobj from the bookmark to decrypt the data. This is problematic because the first request might be a prefetch zio which is able to handle the key not being loaded, while the second might use a different key that it is sure will work. The fix here is to pass the dsobj with each individual arc_callback_t so that each request can attempt to decrypt the data separately.

* DRR_FREE and DRR_FREEOBJECT records in a send file were not having their transactions properly tagged as raw during raw sends, which caused a panic when the dbuf code attempted to decrypt these blocks.

* traverse_prefetch_metadata() did not properly set ZIO_FLAG_SPECULATIVE when issuing prefetch IOs.

* Added a few asserts and code cleanups to ensure these issues are more detectable in the future.

Signed-off-by: Tom Caputi <[email protected]>
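The first item is the subtle one. Below is a minimal sketch of the idea using hypothetical names (my_arc_callback_t, acb_dsobj, my_decrypt(); the real arc_callback_t in OpenZFS carries considerably more state): each callback chained on an in-flight read records the dataset object number from its own bookmark, so decryption is attempted per requester instead of reusing the first caller's dsobj.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical decryption hook keyed by dataset object number. */
extern int my_decrypt(uint64_t dsobj, const void *cipher, size_t len);

/* Sketch only: each chained callback carries its own dsobj. */
typedef struct my_arc_callback {
	void (*acb_done)(int err, void *priv);
	void *acb_private;
	uint64_t acb_dsobj;		/* per-request key source */
	struct my_arc_callback *acb_next;
} my_arc_callback_t;

static void
my_read_done(my_arc_callback_t *acbs, const void *cipher, size_t len)
{
	/* Decrypt with each request's own dataset key, not the first one's. */
	for (my_arc_callback_t *acb = acbs; acb != NULL; acb = acb->acb_next)
		acb->acb_done(my_decrypt(acb->acb_dsobj, cipher, len),
		    acb->acb_private);
}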
Diffstat (limited to 'cmd/ztest')
-rw-r--r--  cmd/ztest/ztest.c  16
1 file changed, 8 insertions(+), 8 deletions(-)
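The ztest changes below are a mechanical rename of dmu_assign_arcbuf() to dmu_assign_arcbuf_by_dbuf(). For orientation, here is a minimal sketch of the zero-copy write pattern the test exercises, assuming the post-rename DMU interfaces and kernel/libzpool headers; the wrapper function and memcpy are illustrative (ztest itself uses bcopy).

#include <string.h>
#include <sys/dmu.h>
#include <sys/arc.h>

/*
 * Sketch: "db" is an already-held dbuf handle and "tx" an assigned
 * transaction. Loan a buffer from the ARC, fill it in place, then
 * hand ownership to the DMU through the dbuf.
 */
static void
zcopy_write_sketch(dmu_buf_t *db, uint64_t offset, const void *data,
    uint64_t length, dmu_tx_t *tx)
{
	arc_buf_t *abuf = dmu_request_arcbuf(db, length);

	memcpy(abuf->b_data, data, length);
	dmu_assign_arcbuf_by_dbuf(db, offset, abuf, tx);
}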
diff --git a/cmd/ztest/ztest.c b/cmd/ztest/ztest.c
index d397d7279..248d04fc4 100644
--- a/cmd/ztest/ztest.c
+++ b/cmd/ztest/ztest.c
@@ -1958,7 +1958,7 @@ ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
 		dmu_write(os, lr->lr_foid, offset, length, data, tx);
 	} else {
 		bcopy(data, abuf->b_data, length);
-		dmu_assign_arcbuf(db, offset, abuf, tx);
+		dmu_assign_arcbuf_by_dbuf(db, offset, abuf, tx);
 	}
 
 	(void) ztest_log_write(zd, tx, lr);
@@ -4346,7 +4346,7 @@ ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
 	 * bigobj, at the tail of the nth chunk
 	 *
 	 * The chunk size is set equal to bigobj block size so that
-	 * dmu_assign_arcbuf() can be tested for object updates.
+	 * dmu_assign_arcbuf_by_dbuf() can be tested for object updates.
 	 */
 
 	/*
@@ -4408,7 +4408,7 @@ ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
 			/*
 			 * In iteration 5 (i == 5) use arcbufs
 			 * that don't match bigobj blksz to test
-			 * dmu_assign_arcbuf() when it can't directly
+			 * dmu_assign_arcbuf_by_dbuf() when it can't directly
 			 * assign an arcbuf to a dbuf.
 			 */
 			for (j = 0; j < s; j++) {
@@ -4454,8 +4454,8 @@ ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
 
 	/*
 	 * 50% of the time don't read objects in the 1st iteration to
-	 * test dmu_assign_arcbuf() for the case when there're no
-	 * existing dbufs for the specified offsets.
+	 * test dmu_assign_arcbuf_by_dbuf() for the case when there are
+	 * no existing dbufs for the specified offsets.
 	 */
 	if (i != 0 || ztest_random(2) != 0) {
 		error = dmu_read(os, packobj, packoff,
@@ -4500,12 +4500,12 @@ ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
 				    FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0);
 			}
 			if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) {
-				dmu_assign_arcbuf(bonus_db, off,
+				dmu_assign_arcbuf_by_dbuf(bonus_db, off,
 				    bigbuf_arcbufs[j], tx);
 			} else {
-				dmu_assign_arcbuf(bonus_db, off,
+				dmu_assign_arcbuf_by_dbuf(bonus_db, off,
 				    bigbuf_arcbufs[2 * j], tx);
-				dmu_assign_arcbuf(bonus_db,
+				dmu_assign_arcbuf_by_dbuf(bonus_db,
 				    off + chunksize / 2,
 				    bigbuf_arcbufs[2 * j + 1], tx);
 			}
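The traverse_prefetch_metadata() fix lives outside cmd/ztest and is not shown in this diffstat-limited view. As a hedged sketch of what it amounts to, assuming the arc_read() signature of this era (the function below is illustrative, not the actual patch): a fire-and-forget metadata prefetch is issued with ZIO_FLAG_SPECULATIVE alongside ZIO_FLAG_CANFAIL, so lower layers, decryption included, may fail it harmlessly when a key is not yet loaded.

#include <sys/arc.h>
#include <sys/zio.h>

/* Illustrative only; mirrors the shape of traverse_prefetch_metadata(). */
static void
prefetch_metadata_sketch(spa_t *spa, const blkptr_t *bp,
    const zbookmark_phys_t *zb)
{
	arc_flags_t aflags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;

	/* No done callback: nothing waits on a speculative prefetch. */
	(void) arc_read(NULL, spa, bp, NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &aflags, zb);
}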