author      Matthew Ahrens <[email protected]>        2016-05-12 21:16:36 -0700
committer   Brian Behlendorf <[email protected]>      2017-06-09 09:43:26 -0700
commit      dbeb8796996b4a342f7de2b3eb4ea5b86ac260f9
tree        63a722cfe2ae86634005064399b5672e3694eeb8
parent      1b7c1e5ce90ae27d9bb1f6f3616bf079c168005c
OpenZFS 8199 - multi-threaded dmu_object_alloc()
dmu_object_alloc() is single-threaded, so when multiple threads are
creating files in a single filesystem, they spend a lot of time waiting
for the os_obj_lock. To improve the performance of multi-threaded file
creation, dmu_object_alloc() must typically avoid taking any
filesystem-wide locks.
The solution is to keep a "next object to allocate" cursor for each CPU.
Each of these cursors is in a different block of the dnode object, so
that concurrent allocations hold dnodes in different dbufs. When a
thread's cursor reaches the end of its chunk of objects (by default 4
blocks' worth -- 128 dnodes), it is refilled from the per-objset cursor
(os_obj_next_chunk), which is then advanced by one chunk (128 objects).
Only when manipulating os_obj_next_chunk do we need to take the
os_obj_lock.
This decreases lock contention dramatically, because each thread only
needs to grab the os_obj_lock briefly, once per 128 allocations.
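
For illustration, here is a minimal user-space sketch of the chunked
per-CPU cursor scheme described above. The names (object_alloc(),
CHUNK, NCPUS, obj_next_chunk) are invented for the example; the real
code lives in dmu_object_alloc_dnsize(), uses kmutex_t and
atomic_swap_64(), and re-verifies each candidate dnode with
dnode_hold_impl(DNODE_MUST_BE_FREE) before using it.

/*
 * Illustrative user-space sketch of the per-CPU chunked allocator
 * (assumed names; not the kernel code). Compile with -pthread.
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdint.h>

#define CHUNK	128	/* 2^dmu_object_alloc_chunk_shift slots per grab */
#define NCPUS	8	/* the real code sizes this from boot_ncpus */

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t obj_next_chunk;			/* protected by obj_lock */
static _Atomic uint64_t obj_next_percpu[NCPUS];	/* per-CPU cursors */

static uint64_t
object_alloc(void)
{
	_Atomic uint64_t *cpuobj = &obj_next_percpu[sched_getcpu() % NCPUS];
	uint64_t object = atomic_load(cpuobj);

	if (object % CHUNK == 0) {
		/*
		 * This CPU's chunk is exhausted (or was never assigned):
		 * take the next whole chunk from the global cursor. This
		 * is the only point that takes the lock, i.e. once per
		 * CHUNK allocations per CPU.
		 */
		pthread_mutex_lock(&obj_lock);
		object = obj_next_chunk;
		obj_next_chunk += CHUNK;
		pthread_mutex_unlock(&obj_lock);
	}

	/*
	 * The real allocator additionally verifies that the candidate
	 * dnode is actually free and retries on collision; that step is
	 * omitted here.
	 */
	atomic_store(cpuobj, object + 1);
	return (object);
}

Because each cursor stays within its own chunk, and a chunk is at
least one dnode block, concurrent allocators dirty different dbufs of
the metadnode, which is what removes the contention.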
This results in a 70% improvement in multi-threaded object creation
(where each thread creates objects in its own directory), from
67,000/sec to 115,000/sec, on 8 CPUs.
Work sponsored by Intel Corp.
Authored by: Matthew Ahrens <[email protected]>
Reviewed-by: Ned Bass <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Ported-by: Matthew Ahrens <[email protected]>
Signed-off-by: Matthew Ahrens <[email protected]>
OpenZFS-issue: https://www.illumos.org/issues/8199
OpenZFS-commit: https://github.com/openzfs/openzfs/pull/374
Closes #4703
Closes #6117
-rw-r--r--   include/sys/dmu_objset.h |   6
-rw-r--r--   module/zfs/dmu_object.c  | 186
-rw-r--r--   module/zfs/dmu_objset.c  |   6
-rw-r--r--   module/zfs/zfs_znode.c   |   8
4 files changed, 135 insertions, 71 deletions
diff --git a/include/sys/dmu_objset.h b/include/sys/dmu_objset.h
index 636b0e249..a836e0372 100644
--- a/include/sys/dmu_objset.h
+++ b/include/sys/dmu_objset.h
@@ -120,7 +120,11 @@ struct objset {
 
 	/* Protected by os_obj_lock */
 	kmutex_t os_obj_lock;
-	uint64_t os_obj_next;
+	uint64_t os_obj_next_chunk;
+
+	/* Per-CPU next object to allocate, protected by atomic ops. */
+	uint64_t *os_obj_next_percpu;
+	int os_obj_next_percpu_len;
 
 	/* Protected by os_lock */
 	kmutex_t os_lock;
diff --git a/module/zfs/dmu_object.c b/module/zfs/dmu_object.c
index 8ca699ebf..cb861a196 100644
--- a/module/zfs/dmu_object.c
+++ b/module/zfs/dmu_object.c
@@ -32,6 +32,15 @@
 #include <sys/zfeature.h>
 #include <sys/dsl_dataset.h>
 
+/*
+ * Each of the concurrent object allocators will grab
+ * 2^dmu_object_alloc_chunk_shift dnode slots at a time. The default is to
+ * grab 128 slots, which is 4 blocks worth. This was experimentally
+ * determined to be the lowest value that eliminates the measurable effect
+ * of lock contention from this code path.
+ */
+int dmu_object_alloc_chunk_shift = 7;
+
 uint64_t
 dmu_object_alloc(objset_t *os, dmu_object_type_t ot, int blocksize,
     dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
@@ -50,6 +59,9 @@ dmu_object_alloc_dnsize(objset_t *os, dmu_object_type_t ot, int blocksize,
 	dnode_t *dn = NULL;
 	int dn_slots = dnodesize >> DNODE_SHIFT;
 	boolean_t restarted = B_FALSE;
+	uint64_t *cpuobj = &os->os_obj_next_percpu[CPU_SEQID %
+	    os->os_obj_next_percpu_len];
+	int dnodes_per_chunk = 1 << dmu_object_alloc_chunk_shift;
 
 	if (dn_slots == 0) {
 		dn_slots = DNODE_MIN_SLOTS;
@@ -58,54 +70,88 @@ dmu_object_alloc_dnsize(objset_t *os, dmu_object_type_t ot, int blocksize,
 		ASSERT3S(dn_slots, <=, DNODE_MAX_SLOTS);
 	}
 
-	mutex_enter(&os->os_obj_lock);
+	/*
+	 * The "chunk" of dnodes that is assigned to a CPU-specific
+	 * allocator needs to be at least one block's worth, to avoid
+	 * lock contention on the dbuf. It can be at most one L1 block's
+	 * worth, so that the "rescan after polishing off a L1's worth"
+	 * logic below will be sure to kick in.
+	 */
+	if (dnodes_per_chunk < DNODES_PER_BLOCK)
+		dnodes_per_chunk = DNODES_PER_BLOCK;
+	if (dnodes_per_chunk > L1_dnode_count)
+		dnodes_per_chunk = L1_dnode_count;
+
+	object = *cpuobj;
 	for (;;) {
-		object = os->os_obj_next;
 		/*
-		 * Each time we polish off a L1 bp worth of dnodes (2^12
-		 * objects), move to another L1 bp that's still
-		 * reasonably sparse (at most 1/4 full). Look from the
-		 * beginning at most once per txg. If we still can't
-		 * allocate from that L1 block, search for an empty L0
-		 * block, which will quickly skip to the end of the
-		 * metadnode if the no nearby L0 blocks are empty. This
-		 * fallback avoids a pathology where full dnode blocks
-		 * containing large dnodes appear sparse because they
-		 * have a low blk_fill, leading to many failed
-		 * allocation attempts. In the long term a better
-		 * mechanism to search for sparse metadnode regions,
-		 * such as spacemaps, could be implemented.
-		 *
-		 * os_scan_dnodes is set during txg sync if enough objects
-		 * have been freed since the previous rescan to justify
-		 * backfilling again.
-		 *
-		 * Note that dmu_traverse depends on the behavior that we use
-		 * multiple blocks of the dnode object before going back to
-		 * reuse objects. Any change to this algorithm should preserve
-		 * that property or find another solution to the issues
-		 * described in traverse_visitbp.
+		 * If we finished a chunk of dnodes, get a new one from
+		 * the global allocator.
 		 */
-		if (P2PHASE(object, L1_dnode_count) == 0) {
-			uint64_t offset;
-			uint64_t blkfill;
-			int minlvl;
-			int error;
-			if (os->os_rescan_dnodes) {
-				offset = 0;
-				os->os_rescan_dnodes = B_FALSE;
-			} else {
-				offset = object << DNODE_SHIFT;
+		if (P2PHASE(object, dnodes_per_chunk) == 0) {
+			mutex_enter(&os->os_obj_lock);
+			ASSERT0(P2PHASE(os->os_obj_next_chunk,
+			    dnodes_per_chunk));
+			object = os->os_obj_next_chunk;
+
+			/*
+			 * Each time we polish off a L1 bp worth of dnodes
+			 * (2^12 objects), move to another L1 bp that's
+			 * still reasonably sparse (at most 1/4 full). Look
+			 * from the beginning at most once per txg. If we
+			 * still can't allocate from that L1 block, search
+			 * for an empty L0 block, which will quickly skip
+			 * to the end of the metadnode if no nearby L0
+			 * blocks are empty. This fallback avoids a
+			 * pathology where full dnode blocks containing
+			 * large dnodes appear sparse because they have a
+			 * low blk_fill, leading to many failed allocation
+			 * attempts. In the long term a better mechanism to
+			 * search for sparse metadnode regions, such as
+			 * spacemaps, could be implemented.
+			 *
+			 * os_scan_dnodes is set during txg sync if enough
+			 * objects have been freed since the previous
+			 * rescan to justify backfilling again.
+			 *
+			 * Note that dmu_traverse depends on the behavior
+			 * that we use multiple blocks of the dnode object
+			 * before going back to reuse objects. Any change
+			 * to this algorithm should preserve that property
+			 * or find another solution to the issues described
+			 * in traverse_visitbp.
+			 */
+			if (P2PHASE(object, L1_dnode_count) == 0) {
+				uint64_t offset;
+				uint64_t blkfill;
+				int minlvl;
+				int error;
+				if (os->os_rescan_dnodes) {
+					offset = 0;
+					os->os_rescan_dnodes = B_FALSE;
+				} else {
+					offset = object << DNODE_SHIFT;
+				}
+				blkfill = restarted ? 1 : DNODES_PER_BLOCK >> 2;
+				minlvl = restarted ? 1 : 2;
+				restarted = B_TRUE;
+				error = dnode_next_offset(DMU_META_DNODE(os),
+				    DNODE_FIND_HOLE, &offset, minlvl,
+				    blkfill, 0);
+				if (error == 0) {
+					object = offset >> DNODE_SHIFT;
+				}
 			}
-			blkfill = restarted ? 1 : DNODES_PER_BLOCK >> 2;
-			minlvl = restarted ? 1 : 2;
-			restarted = B_TRUE;
-			error = dnode_next_offset(DMU_META_DNODE(os),
-			    DNODE_FIND_HOLE, &offset, minlvl, blkfill, 0);
-			if (error == 0)
-				object = offset >> DNODE_SHIFT;
+			/*
+			 * Note: if "restarted", we may find a L0 that
+			 * is not suitably aligned.
+			 */
+			os->os_obj_next_chunk =
+			    P2ALIGN(object, dnodes_per_chunk) +
+			    dnodes_per_chunk;
+			(void) atomic_swap_64(cpuobj, object);
+			mutex_exit(&os->os_obj_lock);
 		}
-		os->os_obj_next = object + dn_slots;
 
 		/*
 		 * XXX We should check for an i/o error here and return
@@ -113,28 +159,38 @@ dmu_object_alloc_dnsize(objset_t *os, dmu_object_type_t ot, int blocksize,
 		 * dmu_tx_assign(), but there is currently no mechanism
 		 * to do so.
 		 */
-		(void) dnode_hold_impl(os, object, DNODE_MUST_BE_FREE, dn_slots,
-		    FTAG, &dn);
-		if (dn)
-			break;
-
-		if (dmu_object_next(os, &object, B_TRUE, 0) == 0)
-			os->os_obj_next = object;
-		else
+		(void) dnode_hold_impl(os, object, DNODE_MUST_BE_FREE,
+		    dn_slots, FTAG, &dn);
+		if (dn != NULL) {
+			rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
 			/*
-			 * Skip to next known valid starting point for a dnode.
+			 * Another thread could have allocated it; check
+			 * again now that we have the struct lock.
 			 */
-			os->os_obj_next = P2ROUNDUP(object + 1,
-			    DNODES_PER_BLOCK);
-	}
-
-	dnode_allocate(dn, ot, blocksize, 0, bonustype, bonuslen, dn_slots, tx);
-	mutex_exit(&os->os_obj_lock);
-
-	dmu_tx_add_new_object(tx, dn);
-	dnode_rele(dn, FTAG);
+			if (dn->dn_type == DMU_OT_NONE) {
+				dnode_allocate(dn, ot, blocksize, 0,
+				    bonustype, bonuslen, dn_slots, tx);
+				rw_exit(&dn->dn_struct_rwlock);
+				dmu_tx_add_new_object(tx, dn);
+				dnode_rele(dn, FTAG);
+
+				(void) atomic_swap_64(cpuobj,
+				    object + dn_slots);
+				return (object);
+			}
+			rw_exit(&dn->dn_struct_rwlock);
+			dnode_rele(dn, FTAG);
+		}
 
-	return (object);
+		if (dmu_object_next(os, &object, B_TRUE, 0) != 0) {
+			/*
+			 * Skip to next known valid starting point for a
+			 * dnode.
+			 */
+			object = P2ROUNDUP(object + 1, DNODES_PER_BLOCK);
+		}
+		(void) atomic_swap_64(cpuobj, object);
+	}
 }
 
 int
@@ -341,4 +397,10 @@ EXPORT_SYMBOL(dmu_object_free);
 EXPORT_SYMBOL(dmu_object_next);
 EXPORT_SYMBOL(dmu_object_zapify);
 EXPORT_SYMBOL(dmu_object_free_zapified);
+
+/* BEGIN CSTYLED */
+module_param(dmu_object_alloc_chunk_shift, int, 0644);
+MODULE_PARM_DESC(dmu_object_alloc_chunk_shift,
+	"CPU-specific allocator grabs 2^N objects at once");
+/* END CSTYLED */
 #endif
diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c
index a50f8dcb1..9a7a6968d 100644
--- a/module/zfs/dmu_objset.c
+++ b/module/zfs/dmu_objset.c
@@ -547,6 +547,9 @@ dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
 	mutex_init(&os->os_userused_lock, NULL, MUTEX_DEFAULT, NULL);
 	mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
 	mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);
+	os->os_obj_next_percpu_len = boot_ncpus;
+	os->os_obj_next_percpu = kmem_zalloc(os->os_obj_next_percpu_len *
+	    sizeof (os->os_obj_next_percpu[0]), KM_SLEEP);
 
 	dnode_special_open(os, &os->os_phys->os_meta_dnode,
 	    DMU_META_DNODE_OBJECT, &os->os_meta_dnode);
@@ -842,6 +845,9 @@ dmu_objset_evict_done(objset_t *os)
 	rw_enter(&os_lock, RW_READER);
 	rw_exit(&os_lock);
 
+	kmem_free(os->os_obj_next_percpu,
+	    os->os_obj_next_percpu_len * sizeof (os->os_obj_next_percpu[0]));
+
 	mutex_destroy(&os->os_lock);
 	mutex_destroy(&os->os_userused_lock);
 	mutex_destroy(&os->os_obj_lock);
diff --git a/module/zfs/zfs_znode.c b/module/zfs/zfs_znode.c
index 522621668..1ec5618e0 100644
--- a/module/zfs/zfs_znode.c
+++ b/module/zfs/zfs_znode.c
@@ -1780,14 +1780,6 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
 	ASSERT(error == 0);
 
 	/*
-	 * Give dmu_object_alloc() a hint about where to start
-	 * allocating new objects. Otherwise, since the metadnode's
-	 * dnode_phys_t structure isn't initialized yet, dmu_object_next()
-	 * would fail and we'd have to skip to the next dnode block.
-	 */
-	os->os_obj_next = moid + 1;
-
-	/*
 	 * Set starting attributes.
 	 */
 	version = zfs_zpl_version_map(spa_version(dmu_objset_spa(os)));