path: root/module/zfs/zio.c
author     Brian Behlendorf <[email protected]>    2012-02-23 15:32:51 -0800
committer  Brian Behlendorf <[email protected]>    2012-03-07 16:06:23 -0800
commit     2c6d0b1e07b0265f0661ed7851d3aa8d3e75e7a9 (patch)
tree       41827e1429cda5f2467ad75037f5914e1ca35f1a /module/zfs/zio.c
parent     76c2b24c613ddac896ee67250b467fd40d6a5689 (diff)
Add zio constructor/destructor
Add a standard zio constructor and destructor. Normally this is done to
reduce the cost of allocating a new structure by avoiding repeated
expensive operations such as memory allocations. However, in this case
none of the operations moved out of zio_create() were particularly
expensive.

This change was principally made as a debug patch (and workaround) for a
zio_destroy() race. There is good evidence that zio_create() is
reinitializing a mutex which is still in use by another thread, which
would completely explain the observed symptoms in the issue report. This
patch doesn't fix the root cause of the race, but it should make it less
likely by initializing the mutex only once in the constructor. This
particular flaw may also have gone unnoticed in other ZFS implementations
due to the specific implementation details of Linux ticket spinlocks.

Once the real root cause is determined and resolved this change can be
safely reverted. Until then this should help work around the issue.

Signed-off-by: Brian Behlendorf <[email protected]>
Issue #496
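The essence of the patch: kmem_cache_create() now registers zio_cons() and
zio_dest(), so the mutex, condition variable, and parent/child lists are set
up once per cached object and torn down only when the allocator releases the
backing memory, rather than on every zio allocation and free. A minimal
sketch of the resulting object lifecycle, assuming the zio_cache declaration
from zio.c (example_lifecycle() is a hypothetical caller, not part of the
patch):

/*
 * Illustrative sketch only.  With the constructor/destructor pair
 * registered on zio_cache, kmem_cache_alloc() hands back an object whose
 * io_lock, io_cv, and parent/child lists are already initialized, and
 * kmem_cache_free() returns it to the cache without destroying them.
 */
static void
example_lifecycle(void)
{
	zio_t *zio;

	/* zio_cons() has already run for this buffer. */
	zio = kmem_cache_alloc(zio_cache, KM_PUSHPAGE);

	/* zio_create() now only resets the plain fields by assignment. */

	/*
	 * zio_dest() is NOT called here; io_lock remains valid while the
	 * object sits in the cache, so mutex_init() is never re-run over
	 * memory another thread may still touch in the racy zio_destroy()
	 * path.
	 */
	kmem_cache_free(zio_cache, zio);
}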
Diffstat (limited to 'module/zfs/zio.c')
-rw-r--r--    module/zfs/zio.c    77
1 file changed, 62 insertions(+), 15 deletions(-)
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index c96442d0b..69a661201 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -99,6 +99,35 @@ int zio_buf_debug_limit = 0;
static inline void __zio_execute(zio_t *zio);
+static int
+zio_cons(void *arg, void *unused, int kmflag)
+{
+ zio_t *zio = arg;
+
+ bzero(zio, sizeof (zio_t));
+
+ mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL);
+ cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);
+
+ list_create(&zio->io_parent_list, sizeof (zio_link_t),
+ offsetof(zio_link_t, zl_parent_node));
+ list_create(&zio->io_child_list, sizeof (zio_link_t),
+ offsetof(zio_link_t, zl_child_node));
+
+ return (0);
+}
+
+static void
+zio_dest(void *arg, void *unused)
+{
+ zio_t *zio = arg;
+
+ mutex_destroy(&zio->io_lock);
+ cv_destroy(&zio->io_cv);
+ list_destroy(&zio->io_parent_list);
+ list_destroy(&zio->io_child_list);
+}
+
void
zio_init(void)
{
@@ -108,8 +137,8 @@ zio_init(void)
#ifdef _KERNEL
data_alloc_arena = zio_alloc_arena;
#endif
- zio_cache = kmem_cache_create("zio_cache",
- sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, KMC_KMEM);
+ zio_cache = kmem_cache_create("zio_cache", sizeof (zio_t), 0,
+ zio_cons, zio_dest, NULL, NULL, NULL, KMC_KMEM);
zio_link_cache = kmem_cache_create("zio_link_cache",
sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, KMC_KMEM);
@@ -511,15 +540,6 @@ zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
ASSERT(vd || stage == ZIO_STAGE_OPEN);
zio = kmem_cache_alloc(zio_cache, KM_PUSHPAGE);
- bzero(zio, sizeof (zio_t));
-
- mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL);
- cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);
-
- list_create(&zio->io_parent_list, sizeof (zio_link_t),
- offsetof(zio_link_t, zl_parent_node));
- list_create(&zio->io_child_list, sizeof (zio_link_t),
- offsetof(zio_link_t, zl_child_node));
if (vd != NULL)
zio->io_child_type = ZIO_CHILD_VDEV;
@@ -541,21 +561,52 @@ zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
zio->io_logical = zio;
if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
pipeline |= ZIO_GANG_STAGES;
+ } else {
+ zio->io_logical = NULL;
+ zio->io_bp = NULL;
+ bzero(&zio->io_bp_copy, sizeof (blkptr_t));
+ bzero(&zio->io_bp_orig, sizeof (blkptr_t));
}
zio->io_spa = spa;
zio->io_txg = txg;
+ zio->io_ready = NULL;
zio->io_done = done;
zio->io_private = private;
+ zio->io_prev_space_delta = 0;
zio->io_type = type;
zio->io_priority = priority;
zio->io_vd = vd;
+ zio->io_vsd = NULL;
+ zio->io_vsd_ops = NULL;
zio->io_offset = offset;
+ zio->io_deadline = 0;
zio->io_orig_data = zio->io_data = data;
zio->io_orig_size = zio->io_size = size;
zio->io_orig_flags = zio->io_flags = flags;
zio->io_orig_stage = zio->io_stage = stage;
zio->io_orig_pipeline = zio->io_pipeline = pipeline;
+ bzero(&zio->io_prop, sizeof (zio_prop_t));
+ zio->io_cmd = 0;
+ zio->io_reexecute = 0;
+ zio->io_bp_override = NULL;
+ zio->io_walk_link = NULL;
+ zio->io_transform_stack = NULL;
+ zio->io_delay = 0;
+ zio->io_error = 0;
+ zio->io_child_count = 0;
+ zio->io_parent_count = 0;
+ zio->io_stall = NULL;
+ zio->io_gang_leader = NULL;
+ zio->io_gang_tree = NULL;
+ zio->io_executor = NULL;
+ zio->io_waiter = NULL;
+ zio->io_cksum_report = NULL;
+ zio->io_ena = 0;
+ bzero(zio->io_child_error, sizeof (int) * ZIO_CHILD_TYPES);
+ bzero(zio->io_children,
+ sizeof (uint64_t) * ZIO_CHILD_TYPES * ZIO_WAIT_TYPES);
+ bzero(&zio->io_bookmark, sizeof (zbookmark_t));
zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);
@@ -579,10 +630,6 @@ zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
static void
zio_destroy(zio_t *zio)
{
- list_destroy(&zio->io_parent_list);
- list_destroy(&zio->io_child_list);
- mutex_destroy(&zio->io_lock);
- cv_destroy(&zio->io_cv);
kmem_cache_free(zio_cache, zio);
}