author		Adam Leventhal <[email protected]>	2013-05-06 12:24:30 -0700
committer	Brian Behlendorf <[email protected]>	2013-05-06 14:05:37 -0700
commit		7ef5e54e2e28884a04dc800657967b891239e933 (patch)
tree		d7681a4dfc1ad3bfb40dd99767da2b89745d3935 /module/zfs/spa.c
parent		55d85d5a8c45c4559a4a0e675c37b0c3afb19c2f (diff)
Illumos #3581 spa_zio_taskq[ZIO_TYPE_FREE][ZIO_TASKQ_ISSUE]->tq_lock contention
3581 spa_zio_taskq[ZIO_TYPE_FREE][ZIO_TASKQ_ISSUE]->tq_lock is piping hot

Reviewed by: Matthew Ahrens <[email protected]>
Reviewed by: George Wilson <[email protected]>
Reviewed by: Christopher Siden <[email protected]>
Reviewed by: Gordon Ross <[email protected]>
Approved by: Richard Lowe <[email protected]>

References:
  illumos/illumos-gate@ec94d32
  https://illumos.org/issues/3581

Notes for Linux port:

Earlier commit 08d08eb reduced contention on this taskq lock by simply
reducing the number of z_fr_iss threads from 100 to one-per-CPU. We also
optimized the taskq implementation in zfsonlinux/spl@3c6ed54. These
changes significantly improved unlink performance to acceptable levels.

This patch further reduces the time spent spinning on this lock by
randomly dispatching the work items over multiple independent task
queues. The Illumos ZFS developers stated that this lock contention only
arose after "3329 spa_sync() spends 10-20% of its time in
spa_free_sync_cb()" was landed. It's not clear whether 3329 affects the
Linux port. I didn't see spa_free_sync_cb() show up in oprofile sessions
while unlinking large files, but I may just not have used the right test
case.

I tested unlinking 1 TB of data with and without the patch and didn't
observe a meaningful difference in elapsed time. However, oprofile showed
that the percentage of time spent in taskq_thread() was reduced from
about 16% to about 5%. Aside from a possible slight performance benefit,
this may be worth landing if only for the sake of maintaining consistency
with upstream.

Ported-by: Ned Bass <[email protected]>
Closes #1327
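For readers who want the shape of the change without the kernel context, below is a minimal user-space sketch of the idea spa_taskq_dispatch_ent() implements in the patch: keep several independent queues, each behind its own lock, and pick one pseudo-randomly from the low bits of a high-resolution clock so concurrent producers rarely pile up on the same lock. Everything here is illustrative; the toy_* names, the plain pthread mutexes, and the synchronous drain in main() are assumptions of the sketch, not the SPL/illumos taskq API used by spa.c.

/*
 * Illustrative only: a tiny user-space model of "many small taskqs"
 * with randomized dispatch.  All identifiers (toy_*) are invented for
 * this sketch and are not the taskq API used by spa.c.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

typedef struct toy_task {
	void (*func)(void *);
	void *arg;
	struct toy_task *next;
} toy_task_t;

typedef struct toy_taskq {
	pthread_mutex_t lock;	/* the lock whose contention is spread out */
	toy_task_t *head;
} toy_taskq_t;

typedef struct toy_taskqs {
	unsigned count;		/* analogous to stqs_count */
	toy_taskq_t *tqs;	/* analogous to stqs_taskq[] */
} toy_taskqs_t;

static uint64_t
toy_hrtime(void)
{
	/* stand-in for gethrtime(): monotonic time in nanoseconds */
	struct timespec ts;
	(void) clock_gettime(CLOCK_MONOTONIC, &ts);
	return ((uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec);
}

static void
toy_dispatch(toy_taskqs_t *s, void (*func)(void *), void *arg)
{
	/* pick one queue from the low clock bits, as the patch does */
	toy_taskq_t *q = &s->tqs[s->count == 1 ? 0 : toy_hrtime() % s->count];
	toy_task_t *t = malloc(sizeof (*t));

	t->func = func;
	t->arg = arg;
	pthread_mutex_lock(&q->lock);
	t->next = q->head;	/* push; ordering does not matter here */
	q->head = t;
	pthread_mutex_unlock(&q->lock);
}

static void
toy_work(void *arg)
{
	printf("task %ld ran\n", (long)(intptr_t)arg);
}

int
main(void)
{
	toy_taskqs_t s;
	unsigned i;

	s.count = 4;		/* e.g. the "4" in ZTI_P(4, 8) below */
	s.tqs = calloc(s.count, sizeof (toy_taskq_t));
	for (i = 0; i < s.count; i++)
		pthread_mutex_init(&s.tqs[i].lock, NULL);

	for (i = 0; i < 16; i++)
		toy_dispatch(&s, toy_work, (void *)(intptr_t)i);

	/* drain synchronously; a real taskq has worker threads per queue */
	for (i = 0; i < s.count; i++) {
		toy_task_t *t;
		while ((t = s.tqs[i].head) != NULL) {
			s.tqs[i].head = t->next;
			t->func(t->arg);
			free(t);
		}
		pthread_mutex_destroy(&s.tqs[i].lock);
	}
	free(s.tqs);
	return (0);
}

A toy like this builds with cc -pthread; the only point it demonstrates is the dispatch-time queue selection, which mirrors the gethrtime() % tqs->stqs_count line in the diff below.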
Diffstat (limited to 'module/zfs/spa.c')
-rw-r--r--	module/zfs/spa.c	203
1 file changed, 140 insertions(+), 63 deletions(-)
diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index 22fa7878c..e022c3258 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -78,23 +78,25 @@
#include "zfs_comutil.h"
typedef enum zti_modes {
- zti_mode_fixed, /* value is # of threads (min 1) */
- zti_mode_online_percent, /* value is % of online CPUs */
- zti_mode_batch, /* cpu-intensive; value is ignored */
- zti_mode_null, /* don't create a taskq */
- zti_nmodes
+ ZTI_MODE_FIXED, /* value is # of threads (min 1) */
+ ZTI_MODE_ONLINE_PERCENT, /* value is % of online CPUs */
+ ZTI_MODE_BATCH, /* cpu-intensive; value is ignored */
+ ZTI_MODE_NULL, /* don't create a taskq */
+ ZTI_NMODES
} zti_modes_t;
-#define ZTI_FIX(n) { zti_mode_fixed, (n) }
-#define ZTI_PCT(n) { zti_mode_online_percent, (n) }
-#define ZTI_BATCH { zti_mode_batch, 0 }
-#define ZTI_NULL { zti_mode_null, 0 }
+#define ZTI_P(n, q) { ZTI_MODE_FIXED, (n), (q) }
+#define ZTI_PCT(n) { ZTI_MODE_ONLINE_PERCENT, (n), 1 }
+#define ZTI_BATCH { ZTI_MODE_BATCH, 0, 1 }
+#define ZTI_NULL { ZTI_MODE_NULL, 0, 0 }
-#define ZTI_ONE ZTI_FIX(1)
+#define ZTI_N(n) ZTI_P(n, 1)
+#define ZTI_ONE ZTI_N(1)
typedef struct zio_taskq_info {
- enum zti_modes zti_mode;
+ zti_modes_t zti_mode;
uint_t zti_value;
+ uint_t zti_count;
} zio_taskq_info_t;
static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
@@ -102,17 +104,30 @@ static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
};
/*
- * Define the taskq threads for the following I/O types:
- * NULL, READ, WRITE, FREE, CLAIM, and IOCTL
+ * This table defines the taskq settings for each ZFS I/O type. When
+ * initializing a pool, we use this table to create an appropriately sized
+ * taskq. Some operations are low volume and therefore have a small, static
+ * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
+ * macros. Other operations process a large amount of data; the ZTI_BATCH
+ * macro causes us to create a taskq oriented for throughput. Some operations
+ * are so high frequency and short-lived that the taskq itself can become a
+ * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
+ * additional degree of parallelism specified by the number of threads per-
+ * taskq and the number of taskqs; when dispatching an event in this case, the
+ * particular taskq is chosen at random.
+ *
+ * The different taskq priorities are to handle the different contexts (issue
+ * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
+ * need to be handled with minimum delay.
*/
const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
/* ISSUE ISSUE_HIGH INTR INTR_HIGH */
- { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL },
- { ZTI_FIX(8), ZTI_NULL, ZTI_BATCH, ZTI_NULL },
- { ZTI_BATCH, ZTI_FIX(5), ZTI_FIX(16), ZTI_FIX(5) },
- { ZTI_FIX(8), ZTI_NULL, ZTI_ONE, ZTI_NULL },
- { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL },
- { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL },
+ { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* NULL */
+ { ZTI_N(8), ZTI_NULL, ZTI_BATCH, ZTI_NULL }, /* READ */
+ { ZTI_BATCH, ZTI_N(5), ZTI_N(16), ZTI_N(5) }, /* WRITE */
+ { ZTI_P(4, 8), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FREE */
+ { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* CLAIM */
+ { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* IOCTL */
};
static dsl_syncfunc_t spa_sync_version;
@@ -794,48 +809,121 @@ spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
offsetof(spa_error_entry_t, se_avl));
}
-static taskq_t *
-spa_taskq_create(spa_t *spa, const char *name, enum zti_modes mode,
- uint_t value)
+static void
+spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
- uint_t flags = TASKQ_PREPOPULATE;
+ const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
+ enum zti_modes mode = ztip->zti_mode;
+ uint_t value = ztip->zti_value;
+ uint_t count = ztip->zti_count;
+ spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
+ char name[32];
+ uint_t i, flags = 0;
boolean_t batch = B_FALSE;
- switch (mode) {
- case zti_mode_null:
- return (NULL); /* no taskq needed */
+ if (mode == ZTI_MODE_NULL) {
+ tqs->stqs_count = 0;
+ tqs->stqs_taskq = NULL;
+ return;
+ }
- case zti_mode_fixed:
- ASSERT3U(value, >=, 1);
- value = MAX(value, 1);
- break;
+ ASSERT3U(count, >, 0);
- case zti_mode_batch:
- batch = B_TRUE;
- flags |= TASKQ_THREADS_CPU_PCT;
- value = zio_taskq_batch_pct;
- break;
+ tqs->stqs_count = count;
+ tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);
- case zti_mode_online_percent:
- flags |= TASKQ_THREADS_CPU_PCT;
- break;
+ for (i = 0; i < count; i++) {
+ taskq_t *tq;
- default:
- panic("unrecognized mode for %s taskq (%u:%u) in "
- "spa_activate()",
- name, mode, value);
- break;
+ switch (mode) {
+ case ZTI_MODE_FIXED:
+ ASSERT3U(value, >=, 1);
+ value = MAX(value, 1);
+ break;
+
+ case ZTI_MODE_BATCH:
+ batch = B_TRUE;
+ flags |= TASKQ_THREADS_CPU_PCT;
+ value = zio_taskq_batch_pct;
+ break;
+
+ case ZTI_MODE_ONLINE_PERCENT:
+ flags |= TASKQ_THREADS_CPU_PCT;
+ break;
+
+ default:
+ panic("unrecognized mode for %s_%s taskq (%u:%u) in "
+ "spa_activate()",
+ zio_type_name[t], zio_taskq_types[q], mode, value);
+ break;
+ }
+
+ if (count > 1) {
+ (void) snprintf(name, sizeof (name), "%s_%s_%u",
+ zio_type_name[t], zio_taskq_types[q], i);
+ } else {
+ (void) snprintf(name, sizeof (name), "%s_%s",
+ zio_type_name[t], zio_taskq_types[q]);
+ }
+
+ if (zio_taskq_sysdc && spa->spa_proc != &p0) {
+ if (batch)
+ flags |= TASKQ_DC_BATCH;
+
+ tq = taskq_create_sysdc(name, value, 50, INT_MAX,
+ spa->spa_proc, zio_taskq_basedc, flags);
+ } else {
+ tq = taskq_create_proc(name, value, maxclsyspri, 50,
+ INT_MAX, spa->spa_proc, flags);
+ }
+
+ tqs->stqs_taskq[i] = tq;
}
+}
+
+static void
+spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
+{
+ spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
+ uint_t i;
- if (zio_taskq_sysdc && spa->spa_proc != &p0) {
- if (batch)
- flags |= TASKQ_DC_BATCH;
+ if (tqs->stqs_taskq == NULL) {
+ ASSERT3U(tqs->stqs_count, ==, 0);
+ return;
+ }
- return (taskq_create_sysdc(name, value, 50, INT_MAX,
- spa->spa_proc, zio_taskq_basedc, flags));
+ for (i = 0; i < tqs->stqs_count; i++) {
+ ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
+ taskq_destroy(tqs->stqs_taskq[i]);
}
- return (taskq_create_proc(name, value, maxclsyspri, 50, INT_MAX,
- spa->spa_proc, flags));
+
+ kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
+ tqs->stqs_taskq = NULL;
+}
+
+/*
+ * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
+ * Note that a type may have multiple discrete taskqs to avoid lock contention
+ * on the taskq itself. In that case we choose which taskq at random by using
+ * the low bits of gethrtime().
+ */
+void
+spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
+ task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
+{
+ spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
+ taskq_t *tq;
+
+ ASSERT3P(tqs->stqs_taskq, !=, NULL);
+ ASSERT3U(tqs->stqs_count, !=, 0);
+
+ if (tqs->stqs_count == 1) {
+ tq = tqs->stqs_taskq[0];
+ } else {
+ tq = tqs->stqs_taskq[gethrtime() % tqs->stqs_count];
+ }
+
+ taskq_dispatch_ent(tq, func, arg, flags, ent);
}
static void
@@ -845,16 +933,7 @@ spa_create_zio_taskqs(spa_t *spa)
for (t = 0; t < ZIO_TYPES; t++) {
for (q = 0; q < ZIO_TASKQ_TYPES; q++) {
- const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
- enum zti_modes mode = ztip->zti_mode;
- uint_t value = ztip->zti_value;
- char name[32];
-
- (void) snprintf(name, sizeof (name),
- "%s_%s", zio_type_name[t], zio_taskq_types[q]);
-
- spa->spa_zio_taskq[t][q] =
- spa_taskq_create(spa, name, mode, value);
+ spa_taskqs_init(spa, t, q);
}
}
}
@@ -1017,9 +1096,7 @@ spa_deactivate(spa_t *spa)
for (t = 0; t < ZIO_TYPES; t++) {
for (q = 0; q < ZIO_TASKQ_TYPES; q++) {
- if (spa->spa_zio_taskq[t][q] != NULL)
- taskq_destroy(spa->spa_zio_taskq[t][q]);
- spa->spa_zio_taskq[t][q] = NULL;
+ spa_taskqs_fini(spa, t, q);
}
}