author    Brian Atkinson <[email protected]>    2020-05-20 19:06:09 -0600
committer GitHub <[email protected]>    2020-05-20 18:06:09 -0700
commit    fb822260b19921985a5312f7306b0ee0e30eb3b0 (patch)
tree      1dacef1716b894d163b41948c28295a941c7708c /module
parent    501a1511aeaaf8f7b50410ef7e64e06647aa8dfb (diff)
Gang ABD Type
Adding the gang ABD type, which allows for linear and scatter ABDs to
be chained together into a single ABD. This can be used to avoid doing
memory copies to/from ABDs. An example of this can be found in
vdev_queue.c in the vdev_queue_aggregate() function.

Reviewed-by: Matthew Ahrens <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Co-authored-by: Brian <[email protected]>
Co-authored-by: Mark Maybee <[email protected]>
Signed-off-by: Brian Atkinson <[email protected]>
Closes #10069
Diffstat (limited to 'module')
-rw-r--r--  module/os/freebsd/zfs/abd_os.c    62
-rw-r--r--  module/os/linux/zfs/abd_os.c     194
-rw-r--r--  module/os/linux/zfs/vdev_disk.c   50
-rw-r--r--  module/zfs/abd.c                 342
-rw-r--r--  module/zfs/vdev_queue.c           66
5 files changed, 611 insertions(+), 103 deletions(-)
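
Before diving into the diff, a user-space sketch of the gang idea may help: a gang ABD is a chain of child buffers presented as one logical buffer, so data is referenced rather than copied. This is not ZFS code; every name in it (gang_buf_t, gang_add, gang_get_offset) is invented for illustration and only mirrors the shape of abd_alloc_gang_abd(), abd_gang_add(), and abd_gang_get_offset() introduced below.

/*
 * Hypothetical user-space analogue of a gang ABD: a chain of child
 * buffers viewed as one logical buffer, with no data copies.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

typedef struct child_buf {
	char *data;
	size_t size;
	struct child_buf *next;
} child_buf_t;

typedef struct gang_buf {
	child_buf_t *head, *tail;
	size_t size;			/* sum of all child sizes */
} gang_buf_t;

static void
gang_add(gang_buf_t *gang, child_buf_t *child)
{
	child->next = NULL;
	if (gang->tail != NULL)
		gang->tail->next = child;
	else
		gang->head = child;
	gang->tail = child;
	gang->size += child->size;	/* mirrors abd_gang_add() */
}

/*
 * Locate the child holding *off and rewrite *off to be relative to
 * that child (mirrors abd_gang_get_offset()).
 */
static child_buf_t *
gang_get_offset(gang_buf_t *gang, size_t *off)
{
	assert(*off < gang->size);
	for (child_buf_t *c = gang->head; c != NULL; c = c->next) {
		if (*off >= c->size)
			*off -= c->size;
		else
			return (c);
	}
	return (NULL);	/* unreachable given the assert above */
}

int
main(void)
{
	char a[] = "hello ", b[] = "world";
	child_buf_t ca = { a, 6, NULL }, cb = { b, 5, NULL };
	gang_buf_t g = { NULL, NULL, 0 };

	gang_add(&g, &ca);
	gang_add(&g, &cb);

	size_t off = 8;	/* lands inside the second child */
	child_buf_t *c = gang_get_offset(&g, &off);
	printf("byte 8 of the gang is '%c'\n", c->data[off]);	/* 'r' */
	return (0);
}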
diff --git a/module/os/freebsd/zfs/abd_os.c b/module/os/freebsd/zfs/abd_os.c
index 6b967bc07..e87981815 100644
--- a/module/os/freebsd/zfs/abd_os.c
+++ b/module/os/freebsd/zfs/abd_os.c
@@ -90,6 +90,15 @@ SYSCTL_ULONG(_vfs_zfs, OID_AUTO, abd_chunk_size, CTLFLAG_RDTUN,
kmem_cache_t *abd_chunk_cache;
static kstat_t *abd_ksp;
+/*
+ * We use a scattered SPA_MAXBLOCKSIZE sized ABD whose chunks are
+ * all a single zero'd buffer of zfs_abd_chunk_size bytes. This
+ * allows us to conserve memory by only using a single zero buffer
+ * for all of the scatter chunks.
+ */
+abd_t *abd_zero_scatter = NULL;
+static char *abd_zero_buf = NULL;
+
static void
abd_free_chunk(void *c)
{
@@ -193,6 +202,8 @@ abd_alloc_struct(size_t size)
abd_u.abd_scatter.abd_chunks[chunkcnt]);
abd_t *abd = kmem_alloc(abd_size, KM_PUSHPAGE);
ASSERT3P(abd, !=, NULL);
+ list_link_init(&abd->abd_gang_link);
+ mutex_init(&abd->abd_mtx, NULL, MUTEX_DEFAULT, NULL);
ABDSTAT_INCR(abdstat_struct_size, abd_size);
return (abd);
@@ -203,10 +214,53 @@ abd_free_struct(abd_t *abd)
{
size_t chunkcnt = abd_is_linear(abd) ? 0 : abd_scatter_chunkcnt(abd);
int size = offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]);
+ mutex_destroy(&abd->abd_mtx);
+ ASSERT(!list_link_active(&abd->abd_gang_link));
kmem_free(abd, size);
ABDSTAT_INCR(abdstat_struct_size, -size);
}
+/*
+ * Allocate a scatter ABD of size SPA_MAXBLOCKSIZE, where
+ * each chunk in the scatterlist will be set to abd_zero_buf.
+ */
+static void
+abd_alloc_zero_scatter(void)
+{
+ size_t n = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
+ abd_zero_buf = kmem_zalloc(zfs_abd_chunk_size, KM_SLEEP);
+ abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
+
+ abd_zero_scatter->abd_flags = ABD_FLAG_OWNER | ABD_FLAG_ZEROS;
+ abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
+ abd_zero_scatter->abd_parent = NULL;
+ zfs_refcount_create(&abd_zero_scatter->abd_children);
+
+ ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
+ ABD_SCATTER(abd_zero_scatter).abd_chunk_size =
+ zfs_abd_chunk_size;
+
+ for (int i = 0; i < n; i++) {
+ ABD_SCATTER(abd_zero_scatter).abd_chunks[i] =
+ abd_zero_buf;
+ }
+
+ ABDSTAT_BUMP(abdstat_scatter_cnt);
+ ABDSTAT_INCR(abdstat_scatter_data_size, zfs_abd_chunk_size);
+}
+
+static void
+abd_free_zero_scatter(void)
+{
+ zfs_refcount_destroy(&abd_zero_scatter->abd_children);
+ ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
+ ABDSTAT_INCR(abdstat_scatter_data_size, -(int)zfs_abd_chunk_size);
+
+ abd_free_struct(abd_zero_scatter);
+ abd_zero_scatter = NULL;
+ kmem_free(abd_zero_buf, zfs_abd_chunk_size);
+}
+
void
abd_init(void)
{
@@ -219,11 +273,15 @@ abd_init(void)
abd_ksp->ks_data = &abd_stats;
kstat_install(abd_ksp);
}
+
+ abd_alloc_zero_scatter();
}
void
abd_fini(void)
{
+ abd_free_zero_scatter();
+
if (abd_ksp != NULL) {
kstat_delete(abd_ksp);
abd_ksp = NULL;
@@ -271,12 +329,13 @@ abd_alloc_scatter_offset_chunkcnt(size_t chunkcnt)
abd_u.abd_scatter.abd_chunks[chunkcnt]);
abd_t *abd = kmem_alloc(abd_size, KM_PUSHPAGE);
ASSERT3P(abd, !=, NULL);
+ list_link_init(&abd->abd_gang_link);
+ mutex_init(&abd->abd_mtx, NULL, MUTEX_DEFAULT, NULL);
ABDSTAT_INCR(abdstat_struct_size, abd_size);
return (abd);
}
-
abd_t *
abd_get_offset_scatter(abd_t *sabd, size_t off)
{
@@ -332,6 +391,7 @@ abd_iter_scatter_chunk_index(struct abd_iter *aiter)
void
abd_iter_init(struct abd_iter *aiter, abd_t *abd)
{
+ ASSERT(!abd_is_gang(abd));
abd_verify(abd);
aiter->iter_abd = abd;
aiter->iter_pos = 0;
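
The FreeBSD hunks above build abd_zero_scatter by pointing every chunk of a SPA_MAXBLOCKSIZE scatter ABD at the same zero'd zfs_abd_chunk_size buffer. A minimal stand-alone sketch of that aliasing trick follows; CHUNK_SIZE and NCHUNKS are placeholder values, not the real constants.

/*
 * Sketch of the shared-zero-chunk trick: N chunk pointers backed by
 * one zero'd allocation. Illustrative only, not the ZFS API.
 */
#include <stdio.h>
#include <stdlib.h>

#define	CHUNK_SIZE	4096
#define	NCHUNKS		32	/* stand-in for SPA_MAXBLOCKSIZE / chunk */

int
main(void)
{
	char *zero_buf = calloc(1, CHUNK_SIZE);	/* the only real buffer */
	char *chunks[NCHUNKS];

	for (int i = 0; i < NCHUNKS; i++)
		chunks[i] = zero_buf;		/* every chunk aliases it */

	/* A reader walking the "scatter list" still sees all zeros. */
	for (int i = 0; i < NCHUNKS; i++)
		if (chunks[i][123] != 0)
			abort();

	printf("%d chunks backed by %d bytes\n", NCHUNKS, CHUNK_SIZE);
	free(zero_buf);
	return (0);
}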
diff --git a/module/os/linux/zfs/abd_os.c b/module/os/linux/zfs/abd_os.c
index a8e8f404d..9ad40d69c 100644
--- a/module/os/linux/zfs/abd_os.c
+++ b/module/os/linux/zfs/abd_os.c
@@ -24,7 +24,7 @@
*/
/*
- * See abd.c for an general overview of the arc buffered data (ABD).
+ * See abd.c for a general overview of the arc buffered data (ABD).
*
* Linear buffers act exactly like normal buffers and are always mapped into the
* kernel's virtual memory space, while scattered ABD data chunks are allocated
@@ -48,7 +48,7 @@
*
* If we are not using HIGHMEM, scattered buffers which have only one chunk
* can be treated as linear buffers, because they are contiguous in the
- * kernel's virtual address space. See abd_alloc_chunks() for details.
+ * kernel's virtual address space. See abd_alloc_chunks() for details.
*/
#include <sys/abd_impl.h>
@@ -160,6 +160,13 @@ unsigned zfs_abd_scatter_max_order = MAX_ORDER - 1;
*/
int zfs_abd_scatter_min_size = 512 * 3;
+/*
+ * We use a scattered SPA_MAXBLOCKSIZE sized ABD whose pages are
+ * backed by a single zero'd page. This allows us to conserve
+ * memory by only using a single zero page for the scatterlist.
+ */
+abd_t *abd_zero_scatter = NULL;
+
static kmem_cache_t *abd_cache = NULL;
static kstat_t *abd_ksp;
@@ -178,6 +185,8 @@ abd_alloc_struct(size_t size)
*/
abd_t *abd = kmem_cache_alloc(abd_cache, KM_PUSHPAGE);
ASSERT3P(abd, !=, NULL);
+ list_link_init(&abd->abd_gang_link);
+ mutex_init(&abd->abd_mtx, NULL, MUTEX_DEFAULT, NULL);
ABDSTAT_INCR(abdstat_struct_size, sizeof (abd_t));
return (abd);
@@ -186,6 +195,8 @@ abd_alloc_struct(size_t size)
void
abd_free_struct(abd_t *abd)
{
+ mutex_destroy(&abd->abd_mtx);
+ ASSERT(!list_link_active(&abd->abd_gang_link));
kmem_cache_free(abd_cache, abd);
ABDSTAT_INCR(abdstat_struct_size, -(int)sizeof (abd_t));
}
@@ -426,14 +437,59 @@ abd_free_chunks(abd_t *abd)
abd_free_sg_table(abd);
}
+/*
+ * Allocate a scatter ABD of size SPA_MAXBLOCKSIZE, where each page in
+ * the scatterlist will be set to ZERO_PAGE(0). ZERO_PAGE(0) returns
+ * a global shared page that is always zero'd out.
+ */
+static void
+abd_alloc_zero_scatter(void)
+{
+ struct scatterlist *sg = NULL;
+ struct sg_table table;
+ gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
+ int nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
+ int i = 0;
+
+ while (sg_alloc_table(&table, nr_pages, gfp)) {
+ ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
+ schedule_timeout_interruptible(1);
+ }
+ ASSERT3U(table.nents, ==, nr_pages);
+
+ abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
+ abd_zero_scatter->abd_flags = ABD_FLAG_OWNER;
+ ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
+ ABD_SCATTER(abd_zero_scatter).abd_sgl = table.sgl;
+ ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
+ abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
+ abd_zero_scatter->abd_parent = NULL;
+ abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;
+ zfs_refcount_create(&abd_zero_scatter->abd_children);
+
+ abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
+ sg_set_page(sg, ZERO_PAGE(0), PAGESIZE, 0);
+ }
+
+ ABDSTAT_BUMP(abdstat_scatter_cnt);
+ ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
+ ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
+}
+
#else /* _KERNEL */
+struct page;
+
+/*
+ * In user space, abd_zero_page will be an allocated zero'd PAGESIZE
+ * buffer, which is used to back each of the pages of abd_zero_scatter.
+ */
+static struct page *abd_zero_page = NULL;
+
#ifndef PAGE_SHIFT
#define PAGE_SHIFT (highbit64(PAGESIZE)-1)
#endif
-struct page;
-
#define zfs_kmap_atomic(chunk, km) ((void *)chunk)
#define zfs_kunmap_atomic(addr, km) do { (void)(addr); } while (0)
#define local_irq_save(flags) do { (void)(flags); } while (0)
@@ -527,6 +583,37 @@ abd_free_chunks(abd_t *abd)
abd_free_sg_table(abd);
}
+static void
+abd_alloc_zero_scatter(void)
+{
+ unsigned nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
+ struct scatterlist *sg;
+ int i;
+
+ abd_zero_page = umem_alloc_aligned(PAGESIZE, 64, KM_SLEEP);
+ memset(abd_zero_page, 0, PAGESIZE);
+ abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
+ abd_zero_scatter->abd_flags = ABD_FLAG_OWNER;
+ abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;
+ ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
+ ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
+ abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
+ abd_zero_scatter->abd_parent = NULL;
+ zfs_refcount_create(&abd_zero_scatter->abd_children);
+ ABD_SCATTER(abd_zero_scatter).abd_sgl = vmem_alloc(nr_pages *
+ sizeof (struct scatterlist), KM_SLEEP);
+
+ sg_init_table(ABD_SCATTER(abd_zero_scatter).abd_sgl, nr_pages);
+
+ abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
+ sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
+ }
+
+ ABDSTAT_BUMP(abdstat_scatter_cnt);
+ ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
+ ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
+}
+
#endif /* _KERNEL */
boolean_t
@@ -582,6 +669,22 @@ abd_verify_scatter(abd_t *abd)
}
}
+static void
+abd_free_zero_scatter(void)
+{
+ zfs_refcount_destroy(&abd_zero_scatter->abd_children);
+ ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
+ ABDSTAT_INCR(abdstat_scatter_data_size, -(int)PAGESIZE);
+ ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);
+
+ abd_free_sg_table(abd_zero_scatter);
+ abd_free_struct(abd_zero_scatter);
+ abd_zero_scatter = NULL;
+#if !defined(_KERNEL)
+ umem_free(abd_zero_page, PAGESIZE);
+#endif /* !_KERNEL */
+}
+
void
abd_init(void)
{
@@ -602,11 +705,15 @@ abd_init(void)
abd_ksp->ks_data = &abd_stats;
kstat_install(abd_ksp);
}
+
+ abd_alloc_zero_scatter();
}
void
abd_fini(void)
{
+ abd_free_zero_scatter();
+
if (abd_ksp != NULL) {
kstat_delete(abd_ksp);
abd_ksp = NULL;
@@ -692,6 +799,7 @@ abd_get_offset_scatter(abd_t *sabd, size_t off)
void
abd_iter_init(struct abd_iter *aiter, abd_t *abd)
{
+ ASSERT(!abd_is_gang(abd));
abd_verify(abd);
aiter->iter_abd = abd;
aiter->iter_mapaddr = NULL;
@@ -813,6 +921,10 @@ abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off)
{
unsigned long pos;
+ while (abd_is_gang(abd))
+ abd = abd_gang_get_offset(abd, &off);
+
+ ASSERT(!abd_is_gang(abd));
if (abd_is_linear(abd))
pos = (unsigned long)abd_to_buf(abd) + off;
else
@@ -822,20 +934,88 @@ abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off)
(pos >> PAGE_SHIFT);
}
+static unsigned int
+bio_map(struct bio *bio, void *buf_ptr, unsigned int bio_size)
+{
+ unsigned int offset, size, i;
+ struct page *page;
+
+ offset = offset_in_page(buf_ptr);
+ for (i = 0; i < bio->bi_max_vecs; i++) {
+ size = PAGE_SIZE - offset;
+
+ if (bio_size <= 0)
+ break;
+
+ if (size > bio_size)
+ size = bio_size;
+
+ if (is_vmalloc_addr(buf_ptr))
+ page = vmalloc_to_page(buf_ptr);
+ else
+ page = virt_to_page(buf_ptr);
+
+ /*
+ * Some network-related block devices use tcp_sendpage, which
+ * doesn't behave well when given a 0-count page; this is a
+ * safety net to catch them.
+ */
+ ASSERT3S(page_count(page), >, 0);
+
+ if (bio_add_page(bio, page, size, offset) != size)
+ break;
+
+ buf_ptr += size;
+ bio_size -= size;
+ offset = 0;
+ }
+
+ return (bio_size);
+}
+
/*
- * bio_map for scatter ABD.
+ * bio_map for gang ABD.
+ */
+static unsigned int
+abd_gang_bio_map_off(struct bio *bio, abd_t *abd,
+ unsigned int io_size, size_t off)
+{
+ ASSERT(abd_is_gang(abd));
+
+ for (abd_t *cabd = abd_gang_get_offset(abd, &off);
+ cabd != NULL;
+ cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
+ ASSERT3U(off, <, cabd->abd_size);
+ int size = MIN(io_size, cabd->abd_size - off);
+ int remainder = abd_bio_map_off(bio, cabd, size, off);
+ io_size -= (size - remainder);
+ if (io_size == 0 || remainder > 0)
+ return (io_size);
+ off = 0;
+ }
+ ASSERT0(io_size);
+ return (io_size);
+}
+
+/*
+ * bio_map for ABD.
* @off is the offset in @abd
* Remaining IO size is returned
*/
unsigned int
-abd_scatter_bio_map_off(struct bio *bio, abd_t *abd,
+abd_bio_map_off(struct bio *bio, abd_t *abd,
unsigned int io_size, size_t off)
{
int i;
struct abd_iter aiter;
- ASSERT(!abd_is_linear(abd));
ASSERT3U(io_size, <=, abd->abd_size - off);
+ if (abd_is_linear(abd))
+ return (bio_map(bio, ((char *)abd_to_buf(abd)) + off, io_size));
+
+ ASSERT(!abd_is_linear(abd));
+ if (abd_is_gang(abd))
+ return (abd_gang_bio_map_off(bio, abd, io_size, off));
abd_iter_init(&aiter, abd);
abd_iter_advance(&aiter, off);
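
A note on the bio-mapping hunks above: abd_bio_map_off() returns the number of bytes that did not fit in the bio, and abd_gang_bio_map_off() stops walking children as soon as a child mapping comes back partial, leaving the caller to construct another bio for the rest. Below is a hedged sketch of that remainder protocol against a toy fixed-capacity sink; toy_bio_t, map_child, and map_gang are invented names, not the kernel API.

/*
 * Toy model of the remainder protocol used by abd_bio_map_off():
 * map_child() returns the bytes that did NOT fit, and the gang walk
 * stops early on any partial mapping. Invented names, not kernel API.
 */
#include <stdio.h>

typedef struct toy_bio {
	unsigned int capacity;	/* bytes this bio can still accept */
} toy_bio_t;

/* Map up to size bytes of one child; return the unmapped remainder. */
static unsigned int
map_child(toy_bio_t *bio, unsigned int size)
{
	unsigned int mapped = size < bio->capacity ? size : bio->capacity;

	bio->capacity -= mapped;
	return (size - mapped);
}

/* Walk the children of a "gang", mirroring abd_gang_bio_map_off(). */
static unsigned int
map_gang(toy_bio_t *bio, const unsigned int *child_sizes, int nchildren,
    unsigned int io_size)
{
	for (int i = 0; i < nchildren && io_size > 0; i++) {
		unsigned int size = child_sizes[i] < io_size ?
		    child_sizes[i] : io_size;
		unsigned int remainder = map_child(bio, size);

		io_size -= (size - remainder);
		if (remainder > 0)	/* bio is full; stop early */
			break;
	}
	return (io_size);	/* caller builds another bio for this */
}

int
main(void)
{
	toy_bio_t bio = { .capacity = 10000 };
	unsigned int children[] = { 4096, 4096, 4096 };

	unsigned int left = map_gang(&bio, children, 3, 12288);
	printf("unmapped bytes: %u\n", left);	/* 2288 */
	return (0);
}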
diff --git a/module/os/linux/zfs/vdev_disk.c b/module/os/linux/zfs/vdev_disk.c
index 66e408c6c..b514df3bc 100644
--- a/module/os/linux/zfs/vdev_disk.c
+++ b/module/os/linux/zfs/vdev_disk.c
@@ -396,54 +396,6 @@ BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, error)
rc = vdev_disk_dio_put(dr);
}
-static unsigned int
-bio_map(struct bio *bio, void *bio_ptr, unsigned int bio_size)
-{
- unsigned int offset, size, i;
- struct page *page;
-
- offset = offset_in_page(bio_ptr);
- for (i = 0; i < bio->bi_max_vecs; i++) {
- size = PAGE_SIZE - offset;
-
- if (bio_size <= 0)
- break;
-
- if (size > bio_size)
- size = bio_size;
-
- if (is_vmalloc_addr(bio_ptr))
- page = vmalloc_to_page(bio_ptr);
- else
- page = virt_to_page(bio_ptr);
-
- /*
- * Some network related block device uses tcp_sendpage, which
- * doesn't behave well when using 0-count page, this is a
- * safety net to catch them.
- */
- ASSERT3S(page_count(page), >, 0);
-
- if (bio_add_page(bio, page, size, offset) != size)
- break;
-
- bio_ptr += size;
- bio_size -= size;
- offset = 0;
- }
-
- return (bio_size);
-}
-
-static unsigned int
-bio_map_abd_off(struct bio *bio, abd_t *abd, unsigned int size, size_t off)
-{
- if (abd_is_linear(abd))
- return (bio_map(bio, ((char *)abd_to_buf(abd)) + off, size));
-
- return (abd_scatter_bio_map_off(bio, abd, size, off));
-}
-
static inline void
vdev_submit_bio_impl(struct bio *bio)
{
@@ -603,7 +555,7 @@ retry:
bio_set_op_attrs(dr->dr_bio[i], rw, flags);
/* Remaining size is returned to become the new size */
- bio_size = bio_map_abd_off(dr->dr_bio[i], zio->io_abd,
+ bio_size = abd_bio_map_off(dr->dr_bio[i], zio->io_abd,
bio_size, abd_offset);
/* Advance in buffer and construct another bio if needed */
diff --git a/module/zfs/abd.c b/module/zfs/abd.c
index abb5d5f2e..a3e58ebc5 100644
--- a/module/zfs/abd.c
+++ b/module/zfs/abd.c
@@ -88,6 +88,10 @@
* function which progressively accesses the whole ABD, use the abd_iterate_*
* functions.
*
+ * As an additional feature, linear and scatter ABDs can be stitched
+ * together by using the gang ABD type (abd_alloc_gang_abd()). This
+ * allows multiple ABDs to be viewed as a single ABD.
+ *
* It is possible to make all ABDs linear by setting zfs_abd_scatter_enabled to
* B_FALSE.
*/
@@ -114,6 +118,13 @@ abd_is_linear_page(abd_t *abd)
B_TRUE : B_FALSE);
}
+boolean_t
+abd_is_gang(abd_t *abd)
+{
+ return ((abd->abd_flags & ABD_FLAG_GANG) != 0 ? B_TRUE :
+ B_FALSE);
+}
+
void
abd_verify(abd_t *abd)
{
@@ -121,11 +132,18 @@ abd_verify(abd_t *abd)
ASSERT3U(abd->abd_size, <=, SPA_MAXBLOCKSIZE);
ASSERT3U(abd->abd_flags, ==, abd->abd_flags & (ABD_FLAG_LINEAR |
ABD_FLAG_OWNER | ABD_FLAG_META | ABD_FLAG_MULTI_ZONE |
- ABD_FLAG_MULTI_CHUNK | ABD_FLAG_LINEAR_PAGE));
+ ABD_FLAG_MULTI_CHUNK | ABD_FLAG_LINEAR_PAGE | ABD_FLAG_GANG |
+ ABD_FLAG_GANG_FREE | ABD_FLAG_ZEROS));
IMPLY(abd->abd_parent != NULL, !(abd->abd_flags & ABD_FLAG_OWNER));
IMPLY(abd->abd_flags & ABD_FLAG_META, abd->abd_flags & ABD_FLAG_OWNER);
if (abd_is_linear(abd)) {
ASSERT3P(ABD_LINEAR_BUF(abd), !=, NULL);
+ } else if (abd_is_gang(abd)) {
+ for (abd_t *cabd = list_head(&ABD_GANG(abd).abd_gang_chain);
+ cabd != NULL;
+ cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
+ abd_verify(cabd);
+ }
} else {
abd_verify_scatter(abd);
}
@@ -177,6 +195,22 @@ abd_free_scatter(abd_t *abd)
abd_free_struct(abd);
}
+static void
+abd_put_gang_abd(abd_t *abd)
+{
+ ASSERT(abd_is_gang(abd));
+ abd_t *cabd;
+
+ while ((cabd = list_remove_head(&ABD_GANG(abd).abd_gang_chain))
+ != NULL) {
+ ASSERT0(cabd->abd_flags & ABD_FLAG_GANG_FREE);
+ abd->abd_size -= cabd->abd_size;
+ abd_put(cabd);
+ }
+ ASSERT0(abd->abd_size);
+ list_destroy(&ABD_GANG(abd).abd_gang_chain);
+}
+
/*
* Free an ABD allocated from abd_get_offset() or abd_get_from_buf(). Will not
* free the underlying scatterlist or buffer.
@@ -195,6 +229,9 @@ abd_put(abd_t *abd)
abd->abd_size, abd);
}
+ if (abd_is_gang(abd))
+ abd_put_gang_abd(abd);
+
zfs_refcount_destroy(&abd->abd_children);
abd_free_struct(abd);
}
@@ -249,9 +286,31 @@ abd_free_linear(abd_t *abd)
abd_free_struct(abd);
}
+static void
+abd_free_gang_abd(abd_t *abd)
+{
+ ASSERT(abd_is_gang(abd));
+ abd_t *cabd;
+
+ while ((cabd = list_remove_head(&ABD_GANG(abd).abd_gang_chain))
+ != NULL) {
+ abd->abd_size -= cabd->abd_size;
+ if (cabd->abd_flags & ABD_FLAG_GANG_FREE) {
+ if (cabd->abd_flags & ABD_FLAG_OWNER)
+ abd_free(cabd);
+ else
+ abd_put(cabd);
+ }
+ }
+ ASSERT0(abd->abd_size);
+ list_destroy(&ABD_GANG(abd).abd_gang_chain);
+ zfs_refcount_destroy(&abd->abd_children);
+ abd_free_struct(abd);
+}
+
/*
- * Free an ABD. Only use this on ABDs allocated with abd_alloc() or
- * abd_alloc_linear().
+ * Free an ABD. Only use this on ABDs allocated with abd_alloc(),
+ * abd_alloc_linear(), or abd_alloc_gang_abd().
*/
void
abd_free(abd_t *abd)
@@ -264,6 +323,8 @@ abd_free(abd_t *abd)
ASSERT(abd->abd_flags & ABD_FLAG_OWNER);
if (abd_is_linear(abd))
abd_free_linear(abd);
+ else if (abd_is_gang(abd))
+ abd_free_gang_abd(abd);
else
abd_free_scatter(abd);
}
@@ -284,6 +345,109 @@ abd_alloc_sametype(abd_t *sabd, size_t size)
}
}
+
+/*
+ * Create a gang ABD that will be the head of a list of ABDs. This is
+ * used to "chain" scatter/gather lists together when constructing
+ * aggregated I/Os. To free this ABD, abd_free() must be called.
+ */
+abd_t *
+abd_alloc_gang_abd(void)
+{
+ abd_t *abd;
+
+ abd = abd_alloc_struct(0);
+ abd->abd_flags = ABD_FLAG_GANG | ABD_FLAG_OWNER;
+ abd->abd_size = 0;
+ abd->abd_parent = NULL;
+ list_create(&ABD_GANG(abd).abd_gang_chain,
+ sizeof (abd_t), offsetof(abd_t, abd_gang_link));
+ zfs_refcount_create(&abd->abd_children);
+ return (abd);
+}
+
+/*
+ * Add a child ABD to a gang ABD's chained list.
+ */
+void
+abd_gang_add(abd_t *pabd, abd_t *cabd, boolean_t free_on_free)
+{
+ ASSERT(abd_is_gang(pabd));
+ abd_t *child_abd = NULL;
+
+ /*
+ * In order to verify that an ABD is not already part of
+ * another gang ABD, we must lock the child ABD's abd_mtx
+ * to check its abd_gang_link status. We unlock the abd_mtx
+ * only after it has been added to a gang ABD, which
+ * will update the abd_gang_link's status. See comment below
+ * for how an ABD can be in multiple gang ABDs simultaneously.
+ */
+ mutex_enter(&cabd->abd_mtx);
+ if (list_link_active(&cabd->abd_gang_link)) {
+ /*
+ * If the child ABD is already part of another
+ * gang ABD then we must allocate a new
+ * ABD to use a seperate link. We mark the newly
+ * allocated ABD with ABD_FLAG_GANG_FREE, before
+ * adding it to the gang ABD's list, to make the
+ * gang ABD aware that it is responsible to call
+ * abd_put(). We use abd_get_offset() in order
+ * to just allocate a new ABD but avoid copying the
+ * data over into the newly allocated ABD.
+ *
+ * An ABD may become part of multiple gang ABDs. For
+ * example, when writing ditto blocks, the same ABD
+ * is used to write 2 or 3 locations with 2 or 3
+ * zio_t's. Each of the zio's may be aggregated with
+ * different adjacent zio's. zio aggregation uses gang
+ * ABDs, so the single ABD can become part of multiple
+ * gang ABDs.
+ *
+ * The ASSERT below is to make sure that if
+ * free_on_free is passed as B_TRUE, the ABD can
+ * not be in multiple gang ABDs. The gang ABD
+ * cannot be responsible for cleaning up the child
+ * ABD memory allocation if the ABD can be in
+ * multiple gang ABDs at one time.
+ */
+ ASSERT3B(free_on_free, ==, B_FALSE);
+ child_abd = abd_get_offset(cabd, 0);
+ child_abd->abd_flags |= ABD_FLAG_GANG_FREE;
+ } else {
+ child_abd = cabd;
+ if (free_on_free)
+ child_abd->abd_flags |= ABD_FLAG_GANG_FREE;
+ }
+ ASSERT3P(child_abd, !=, NULL);
+
+ list_insert_tail(&ABD_GANG(pabd).abd_gang_chain, child_abd);
+ mutex_exit(&cabd->abd_mtx);
+ pabd->abd_size += child_abd->abd_size;
+}
+
+/*
+ * Locate the ABD for the supplied offset in the gang ABD.
+ * Return a new offset relative to the returned ABD.
+ */
+abd_t *
+abd_gang_get_offset(abd_t *abd, size_t *off)
+{
+ abd_t *cabd;
+
+ ASSERT(abd_is_gang(abd));
+ ASSERT3U(*off, <, abd->abd_size);
+ for (cabd = list_head(&ABD_GANG(abd).abd_gang_chain); cabd != NULL;
+ cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
+ if (*off >= cabd->abd_size)
+ *off -= cabd->abd_size;
+ else
+ return (cabd);
+ }
+ VERIFY3P(cabd, !=, NULL);
+ return (cabd);
+}
+
/*
* Allocate a new ABD to point to offset off of sabd. It shares the underlying
* buffer data with sabd. Use abd_put() to free. sabd must not be freed while
@@ -308,6 +472,21 @@ abd_get_offset_impl(abd_t *sabd, size_t off, size_t size)
abd->abd_flags = ABD_FLAG_LINEAR;
ABD_LINEAR_BUF(abd) = (char *)ABD_LINEAR_BUF(sabd) + off;
+ } else if (abd_is_gang(sabd)) {
+ size_t left = size;
+ abd = abd_alloc_gang_abd();
+ abd->abd_flags &= ~ABD_FLAG_OWNER;
+ for (abd_t *cabd = abd_gang_get_offset(sabd, &off);
+ cabd != NULL && left > 0;
+ cabd = list_next(&ABD_GANG(sabd).abd_gang_chain, cabd)) {
+ int csize = MIN(left, cabd->abd_size - off);
+
+ abd_t *nabd = abd_get_offset_impl(cabd, off, csize);
+ abd_gang_add(abd, nabd, B_FALSE);
+ left -= csize;
+ off = 0;
+ }
+ ASSERT3U(left, ==, 0);
} else {
abd = abd_get_offset_scatter(sabd, off);
}
@@ -335,6 +514,18 @@ abd_get_offset_size(abd_t *sabd, size_t off, size_t size)
}
/*
+ * Return a zero-filled scatter ABD of the requested size. In order
+ * to free the returned ABD, abd_put() must be called.
+ */
+abd_t *
+abd_get_zeros(size_t size)
+{
+ ASSERT3P(abd_zero_scatter, !=, NULL);
+ ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
+ return (abd_get_offset_size(abd_zero_scatter, 0, size));
+}
+
+/*
* Allocate a linear ABD structure for buf. You must free this with abd_put()
* since the resulting ABD doesn't own its own buffer.
*/
@@ -477,20 +668,69 @@ abd_take_ownership_of_buf(abd_t *abd, boolean_t is_metadata)
abd_update_linear_stats(abd, ABDSTAT_INCR);
}
+/*
+ * Initializes an abd_iter based on whether the abd is a gang ABD
+ * or just a single ABD.
+ */
+static inline abd_t *
+abd_init_abd_iter(abd_t *abd, struct abd_iter *aiter, size_t off)
+{
+ abd_t *cabd = NULL;
+
+ if (abd_is_gang(abd)) {
+ cabd = abd_gang_get_offset(abd, &off);
+ if (cabd) {
+ abd_iter_init(aiter, cabd);
+ abd_iter_advance(aiter, off);
+ }
+ } else {
+ abd_iter_init(aiter, abd);
+ abd_iter_advance(aiter, off);
+ }
+ return (cabd);
+}
+
+/*
+ * Advances an abd_iter. We have to be careful with gang ABDs, as
+ * advancing could mean that we are at the end of a particular child
+ * ABD and must grab the next ABD from the gang ABD's list.
+ */
+static inline abd_t *
+abd_advance_abd_iter(abd_t *abd, abd_t *cabd, struct abd_iter *aiter,
+ size_t len)
+{
+ abd_iter_advance(aiter, len);
+ if (abd_is_gang(abd) && abd_iter_at_end(aiter)) {
+ ASSERT3P(cabd, !=, NULL);
+ cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd);
+ if (cabd) {
+ abd_iter_init(aiter, cabd);
+ abd_iter_advance(aiter, 0);
+ }
+ }
+ return (cabd);
+}
+
int
abd_iterate_func(abd_t *abd, size_t off, size_t size,
abd_iter_func_t *func, void *private)
{
int ret = 0;
struct abd_iter aiter;
+ boolean_t abd_multi;
+ abd_t *c_abd;
abd_verify(abd);
ASSERT3U(off + size, <=, abd->abd_size);
- abd_iter_init(&aiter, abd);
- abd_iter_advance(&aiter, off);
+ abd_multi = abd_is_gang(abd);
+ c_abd = abd_init_abd_iter(abd, &aiter, off);
while (size > 0) {
+ /* If we are at the end of the gang ABD we are done */
+ if (abd_multi && !c_abd)
+ break;
+
abd_iter_map(&aiter);
size_t len = MIN(aiter.iter_mapsize, size);
@@ -504,7 +744,7 @@ abd_iterate_func(abd_t *abd, size_t off, size_t size,
break;
size -= len;
- abd_iter_advance(&aiter, len);
+ c_abd = abd_advance_abd_iter(abd, c_abd, &aiter, len);
}
return (ret);
@@ -611,6 +851,8 @@ abd_iterate_func2(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff,
{
int ret = 0;
struct abd_iter daiter, saiter;
+ boolean_t dabd_is_gang_abd, sabd_is_gang_abd;
+ abd_t *c_dabd, *c_sabd;
abd_verify(dabd);
abd_verify(sabd);
@@ -618,12 +860,17 @@ abd_iterate_func2(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff,
ASSERT3U(doff + size, <=, dabd->abd_size);
ASSERT3U(soff + size, <=, sabd->abd_size);
- abd_iter_init(&daiter, dabd);
- abd_iter_init(&saiter, sabd);
- abd_iter_advance(&daiter, doff);
- abd_iter_advance(&saiter, soff);
+ dabd_is_gang_abd = abd_is_gang(dabd);
+ sabd_is_gang_abd = abd_is_gang(sabd);
+ c_dabd = abd_init_abd_iter(dabd, &daiter, doff);
+ c_sabd = abd_init_abd_iter(sabd, &saiter, soff);
while (size > 0) {
+ /* if we are at the end of the gang ABD we are done */
+ if ((dabd_is_gang_abd && !c_dabd) ||
+ (sabd_is_gang_abd && !c_sabd))
+ break;
+
abd_iter_map(&daiter);
abd_iter_map(&saiter);
@@ -642,8 +889,10 @@ abd_iterate_func2(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff,
break;
size -= len;
- abd_iter_advance(&daiter, len);
- abd_iter_advance(&saiter, len);
+ c_dabd =
+ abd_advance_abd_iter(dabd, c_dabd, &daiter, len);
+ c_sabd =
+ abd_advance_abd_iter(sabd, c_sabd, &saiter, len);
}
return (ret);
@@ -704,29 +953,46 @@ abd_raidz_gen_iterate(abd_t **cabds, abd_t *dabd,
struct abd_iter daiter = {0};
void *caddrs[3];
unsigned long flags __maybe_unused = 0;
+ abd_t *c_cabds[3];
+ abd_t *c_dabd = NULL;
+ boolean_t cabds_is_gang_abd[3];
+ boolean_t dabd_is_gang_abd = B_FALSE;
ASSERT3U(parity, <=, 3);
- for (i = 0; i < parity; i++)
- abd_iter_init(&caiters[i], cabds[i]);
+ for (i = 0; i < parity; i++) {
+ cabds_is_gang_abd[i] = abd_is_gang(cabds[i]);
+ c_cabds[i] = abd_init_abd_iter(cabds[i], &caiters[i], 0);
+ }
- if (dabd)
- abd_iter_init(&daiter, dabd);
+ if (dabd) {
+ dabd_is_gang_abd = abd_is_gang(dabd);
+ c_dabd = abd_init_abd_iter(dabd, &daiter, 0);
+ }
ASSERT3S(dsize, >=, 0);
abd_enter_critical(flags);
while (csize > 0) {
- len = csize;
-
- if (dabd && dsize > 0)
- abd_iter_map(&daiter);
+ /* if we are at the end of the gang ABD we are done */
+ if (dabd_is_gang_abd && !c_dabd)
+ break;
for (i = 0; i < parity; i++) {
+ /*
+ * If we are at the end of the gang ABD we are
+ * done.
+ */
+ if (cabds_is_gang_abd[i] && !c_cabds[i])
+ break;
abd_iter_map(&caiters[i]);
caddrs[i] = caiters[i].iter_mapaddr;
}
+ len = csize;
+
+ if (dabd && dsize > 0)
+ abd_iter_map(&daiter);
switch (parity) {
case 3:
@@ -761,12 +1027,16 @@ abd_raidz_gen_iterate(abd_t **cabds, abd_t *dabd,
for (i = parity-1; i >= 0; i--) {
abd_iter_unmap(&caiters[i]);
- abd_iter_advance(&caiters[i], len);
+ c_cabds[i] =
+ abd_advance_abd_iter(cabds[i], c_cabds[i],
+ &caiters[i], len);
}
if (dabd && dsize > 0) {
abd_iter_unmap(&daiter);
- abd_iter_advance(&daiter, dlen);
+ c_dabd =
+ abd_advance_abd_iter(dabd, c_dabd, &daiter,
+ dlen);
dsize -= dlen;
}
@@ -801,18 +1071,34 @@ abd_raidz_rec_iterate(abd_t **cabds, abd_t **tabds,
struct abd_iter xiters[3];
void *caddrs[3], *xaddrs[3];
unsigned long flags __maybe_unused = 0;
+ boolean_t cabds_is_gang_abd[3];
+ boolean_t tabds_is_gang_abd[3];
+ abd_t *c_cabds[3];
+ abd_t *c_tabds[3];
ASSERT3U(parity, <=, 3);
for (i = 0; i < parity; i++) {
- abd_iter_init(&citers[i], cabds[i]);
- abd_iter_init(&xiters[i], tabds[i]);
+ cabds_is_gang_abd[i] = abd_is_gang(cabds[i]);
+ tabds_is_gang_abd[i] = abd_is_gang(tabds[i]);
+ c_cabds[i] =
+ abd_init_abd_iter(cabds[i], &citers[i], 0);
+ c_tabds[i] =
+ abd_init_abd_iter(tabds[i], &xiters[i], 0);
}
abd_enter_critical(flags);
while (tsize > 0) {
for (i = 0; i < parity; i++) {
+ /*
+ * If we are at the end of the gang ABD we
+ * are done.
+ */
+ if (cabds_is_gang_abd[i] && !c_cabds[i])
+ break;
+ if (tabds_is_gang_abd[i] && !c_tabds[i])
+ break;
abd_iter_map(&citers[i]);
abd_iter_map(&xiters[i]);
caddrs[i] = citers[i].iter_mapaddr;
@@ -846,8 +1132,12 @@ abd_raidz_rec_iterate(abd_t **cabds, abd_t **tabds,
for (i = parity-1; i >= 0; i--) {
abd_iter_unmap(&xiters[i]);
abd_iter_unmap(&citers[i]);
- abd_iter_advance(&xiters[i], len);
- abd_iter_advance(&citers[i], len);
+ c_tabds[i] =
+ abd_advance_abd_iter(tabds[i], c_tabds[i],
+ &xiters[i], len);
+ c_cabds[i] =
+ abd_advance_abd_iter(cabds[i], c_cabds[i],
+ &citers[i], len);
}
tsize -= len;
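
The iterator changes in abd.c above funnel through abd_init_abd_iter() and abd_advance_abd_iter(): an iterator always points into a single child ABD, and advancing past the end of that child swaps in the next one from the gang's chain. A minimal user-space sketch of that advance-and-hop pattern follows; seg_t, iter_t, and iter_advance are invented names.

/*
 * Sketch of the gang-aware iterator hop: when an advance exhausts the
 * current segment, move to the next one. Invented names, not ZFS API.
 */
#include <stdio.h>

typedef struct seg {
	const char *data;
	size_t size;
	const struct seg *next;
} seg_t;

typedef struct iter {
	const seg_t *seg;	/* current child segment */
	size_t pos;		/* offset within that segment */
} iter_t;

/* Mirrors abd_advance_abd_iter(): hop when the segment is exhausted. */
static void
iter_advance(iter_t *it, size_t len)
{
	it->pos += len;
	while (it->seg != NULL && it->pos >= it->seg->size) {
		it->pos -= it->seg->size;
		it->seg = it->seg->next;
	}
}

int
main(void)
{
	seg_t b = { "world", 5, NULL };
	seg_t a = { "hello ", 6, &b };
	iter_t it = { &a, 0 };
	char out[12];
	size_t n = 0;

	/* Copy one byte at a time across the segment boundary. */
	while (it.seg != NULL) {
		out[n++] = it.seg->data[it.pos];
		iter_advance(&it, 1);
	}
	out[n] = '\0';
	printf("%s\n", out);	/* hello world */
	return (0);
}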
diff --git a/module/zfs/vdev_queue.c b/module/zfs/vdev_queue.c
index e156e2b01..b61ba39d7 100644
--- a/module/zfs/vdev_queue.c
+++ b/module/zfs/vdev_queue.c
@@ -535,15 +535,6 @@ vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
static void
vdev_queue_agg_io_done(zio_t *aio)
{
- if (aio->io_type == ZIO_TYPE_READ) {
- zio_t *pio;
- zio_link_t *zl = NULL;
- while ((pio = zio_walk_parents(aio, &zl)) != NULL) {
- abd_copy_off(pio->io_abd, aio->io_abd,
- 0, pio->io_offset - aio->io_offset, pio->io_size);
- }
- }
-
abd_free(aio->io_abd);
}
@@ -556,6 +547,14 @@ vdev_queue_agg_io_done(zio_t *aio)
#define IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
#define IO_GAP(fio, lio) (-IO_SPAN(lio, fio))
+/*
+ * Sufficiently adjacent io_offset's in ZIOs will be aggregated. We do this
+ * by creating a gang ABD from the adjacent ZIOs' io_abd's. By using
+ * a gang ABD we avoid doing memory copies to and from the parent and
+ * child ZIOs. The gang ABD also accounts for gaps between adjacent
+ * io_offsets by simply getting the zero ABD for writes or allocating
+ * a new ABD for reads and placing them in the gang ABD as well.
+ */
static zio_t *
vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
{
@@ -568,6 +567,7 @@ vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
boolean_t stretch = B_FALSE;
avl_tree_t *t = vdev_queue_type_tree(vq, zio->io_type);
enum zio_flag flags = zio->io_flags & ZIO_FLAG_AGG_INHERIT;
+ uint64_t next_offset;
abd_t *abd;
maxblocksize = spa_maxblocksize(vq->vq_vdev->vdev_spa);
@@ -695,7 +695,7 @@ vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
size = IO_SPAN(first, last);
ASSERT3U(size, <=, maxblocksize);
- abd = abd_alloc_for_io(size, B_TRUE);
+ abd = abd_alloc_gang_abd();
if (abd == NULL)
return (NULL);
@@ -706,32 +706,58 @@ vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
aio->io_timestamp = first->io_timestamp;
nio = first;
+ next_offset = first->io_offset;
do {
dio = nio;
nio = AVL_NEXT(t, dio);
zio_add_child(dio, aio);
vdev_queue_io_remove(vq, dio);
+
+ if (dio->io_offset != next_offset) {
+ /* allocate a buffer for a read gap */
+ ASSERT3U(dio->io_type, ==, ZIO_TYPE_READ);
+ ASSERT3U(dio->io_offset, >, next_offset);
+ abd = abd_alloc_for_io(
+ dio->io_offset - next_offset, B_TRUE);
+ abd_gang_add(aio->io_abd, abd, B_TRUE);
+ }
+ if (dio->io_abd &&
+ (dio->io_size != abd_get_size(dio->io_abd))) {
+ /* abd size not the same as IO size */
+ ASSERT3U(abd_get_size(dio->io_abd), >, dio->io_size);
+ abd = abd_get_offset_size(dio->io_abd, 0, dio->io_size);
+ abd_gang_add(aio->io_abd, abd, B_TRUE);
+ } else {
+ if (dio->io_flags & ZIO_FLAG_NODATA) {
+ /* allocate a buffer for a write gap */
+ ASSERT3U(dio->io_type, ==, ZIO_TYPE_WRITE);
+ ASSERT3P(dio->io_abd, ==, NULL);
+ abd_gang_add(aio->io_abd,
+ abd_get_zeros(dio->io_size), B_TRUE);
+ } else {
+ /*
+ * We pass B_FALSE to abd_gang_add()
+ * because we did not allocate a new
+ * ABD, so it is assumed the caller
+ * will free this ABD.
+ */
+ abd_gang_add(aio->io_abd, dio->io_abd,
+ B_FALSE);
+ }
+ }
+ next_offset = dio->io_offset + dio->io_size;
} while (dio != last);
+ ASSERT3U(abd_get_size(aio->io_abd), ==, aio->io_size);
/*
* We need to drop the vdev queue's lock during zio_execute() to
* avoid a deadlock that we could encounter due to lock order
* reversal between vq_lock and io_lock in zio_change_priority().
- * Use the dropped lock to do memory copy without congestion.
*/
mutex_exit(&vq->vq_lock);
while ((dio = zio_walk_parents(aio, &zl)) != NULL) {
ASSERT3U(dio->io_type, ==, aio->io_type);
- if (dio->io_flags & ZIO_FLAG_NODATA) {
- ASSERT3U(dio->io_type, ==, ZIO_TYPE_WRITE);
- abd_zero_off(aio->io_abd,
- dio->io_offset - aio->io_offset, dio->io_size);
- } else if (dio->io_type == ZIO_TYPE_WRITE) {
- abd_copy_off(aio->io_abd, dio->io_abd,
- dio->io_offset - aio->io_offset, 0, dio->io_size);
- }
-
zio_vdev_io_bypass(dio);
zio_execute(dio);
}
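
Tying the vdev_queue.c changes together: the aggregate I/O's io_abd is now a gang whose children are the child ZIOs' buffers plus filler segments for holes (a zero ABD for write gaps, a throwaway allocation for read gaps), so no data is copied in either direction. Below is a hedged sketch of that assembly loop over I/Os sorted by offset; io_rec_t and build_gang are invented names, not the ZFS API.

/*
 * Sketch of the gap-filling assembly in vdev_queue_aggregate():
 * walk I/Os sorted by offset and emit one gang child per I/O, plus a
 * filler child for any hole between them. Invented names, not ZFS API.
 */
#include <stdio.h>

typedef struct io_rec {
	unsigned long long offset;
	unsigned long long size;
} io_rec_t;

static void
build_gang(const io_rec_t *ios, int nios)
{
	unsigned long long next_offset = ios[0].offset;
	unsigned long long gang_size = 0;

	for (int i = 0; i < nios; i++) {
		if (ios[i].offset != next_offset) {
			/* a gap: zeros for writes, fresh buf for reads */
			unsigned long long gap =
			    ios[i].offset - next_offset;
			printf("filler child: %llu bytes\n", gap);
			gang_size += gap;
		}
		printf("data child:   %llu bytes @ %llu\n",
		    ios[i].size, ios[i].offset);
		gang_size += ios[i].size;
		next_offset = ios[i].offset + ios[i].size;
	}
	/* mirrors ASSERT3U(abd_get_size(aio->io_abd), ==, aio->io_size) */
	printf("gang size:    %llu bytes\n", gang_size);
}

int
main(void)
{
	io_rec_t ios[] = {
		{ 0, 4096 }, { 4096, 8192 }, { 16384, 4096 },
	};

	build_gang(ios, 3);	/* one 4096-byte filler at offset 12288 */
	return (0);
}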