-rw-r--r--  cmd/zdb/zdb.c | 393
-rw-r--r--  cmd/ztest/ztest.c | 33
-rw-r--r--  configure.ac | 1
-rw-r--r--  include/sys/Makefile.am | 3
-rw-r--r--  include/sys/dmu.h | 1
-rw-r--r--  include/sys/fs/zfs.h | 2
-rw-r--r--  include/sys/metaslab.h | 13
-rw-r--r--  include/sys/metaslab_impl.h | 28
-rw-r--r--  include/sys/range_tree.h | 8
-rw-r--r--  include/sys/spa.h | 5
-rw-r--r--  include/sys/spa_impl.h | 11
-rw-r--r--  include/sys/spa_log_spacemap.h | 79
-rw-r--r--  include/sys/space_map.h | 9
-rw-r--r--  include/sys/vdev_impl.h | 4
-rw-r--r--  include/sys/zfs_debug.h | 3
-rw-r--r--  include/zfeature_common.h | 1
-rw-r--r--  lib/libzpool/Makefile.am | 1
-rw-r--r--  man/man5/zfs-module-parameters.5 | 147
-rw-r--r--  man/man8/zdb.8 | 3
-rw-r--r--  module/zcommon/zfeature_common.c | 13
-rw-r--r--  module/zfs/Makefile.in | 1
-rw-r--r--  module/zfs/dmu_objset.c | 2
-rw-r--r--  module/zfs/dsl_pool.c | 4
-rw-r--r--  module/zfs/metaslab.c | 976
-rw-r--r--  module/zfs/range_tree.c | 84
-rw-r--r--  module/zfs/spa.c | 153
-rw-r--r--  module/zfs/spa_log_spacemap.c | 1308
-rw-r--r--  module/zfs/spa_misc.c | 32
-rw-r--r--  module/zfs/space_map.c | 10
-rw-r--r--  module/zfs/txg.c | 4
-rw-r--r--  module/zfs/vdev.c | 40
-rw-r--r--  module/zfs/vdev_indirect.c | 3
-rw-r--r--  module/zfs/vdev_removal.c | 40
-rw-r--r--  module/zfs/zio.c | 11
-rw-r--r--  tests/runfiles/linux.run | 6
-rw-r--r--  tests/zfs-tests/tests/functional/Makefile.am | 3
-rwxr-xr-x  tests/zfs-tests/tests/functional/arc/dbufstats_001_pos.ksh | 2
-rw-r--r--  tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get.cfg | 1
-rw-r--r--  tests/zfs-tests/tests/functional/log_spacemap/Makefile.am | 2
-rwxr-xr-x  tests/zfs-tests/tests/functional/log_spacemap/log_spacemap_import_logs.ksh | 81
-rwxr-xr-x  tests/zfs-tests/tests/functional/removal/removal_condense_export.ksh | 4
41 files changed, 3194 insertions, 331 deletions
diff --git a/cmd/zdb/zdb.c b/cmd/zdb/zdb.c
index e0ea07280..8ccbd5e8b 100644
--- a/cmd/zdb/zdb.c
+++ b/cmd/zdb/zdb.c
@@ -813,6 +813,12 @@ get_checkpoint_refcount(vdev_t *vd)
}
static int
+get_log_spacemap_refcount(spa_t *spa)
+{
+ return (avl_numnodes(&spa->spa_sm_logs_by_txg));
+}
+
+static int
verify_spacemap_refcounts(spa_t *spa)
{
uint64_t expected_refcount = 0;
@@ -826,6 +832,7 @@ verify_spacemap_refcounts(spa_t *spa)
actual_refcount += get_obsolete_refcount(spa->spa_root_vdev);
actual_refcount += get_prev_obsolete_spacemap_refcount(spa);
actual_refcount += get_checkpoint_refcount(spa->spa_root_vdev);
+ actual_refcount += get_log_spacemap_refcount(spa);
if (expected_refcount != actual_refcount) {
(void) printf("space map refcount mismatch: expected %lld != "
@@ -924,7 +931,7 @@ dump_spacemap(objset_t *os, space_map_t *sm)
alloc -= entry_run;
entry_id++;
}
- if ((uint64_t)alloc != space_map_allocated(sm)) {
+ if (alloc != space_map_allocated(sm)) {
(void) printf("space_map_object alloc (%lld) INCONSISTENT "
"with space map summary (%lld)\n",
(longlong_t)space_map_allocated(sm), (longlong_t)alloc);
@@ -990,23 +997,45 @@ dump_metaslab(metaslab_t *msp)
ASSERT(msp->ms_size == (1ULL << vd->vdev_ms_shift));
dump_spacemap(spa->spa_meta_objset, msp->ms_sm);
+
+ if (spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
+ (void) printf("\tFlush data:\n\tunflushed txg=%llu\n\n",
+ (u_longlong_t)metaslab_unflushed_txg(msp));
+ }
}
static void
print_vdev_metaslab_header(vdev_t *vd)
{
vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias;
- const char *bias_str;
+ const char *bias_str = "";
+ if (alloc_bias == VDEV_BIAS_LOG || vd->vdev_islog) {
+ bias_str = VDEV_ALLOC_BIAS_LOG;
+ } else if (alloc_bias == VDEV_BIAS_SPECIAL) {
+ bias_str = VDEV_ALLOC_BIAS_SPECIAL;
+ } else if (alloc_bias == VDEV_BIAS_DEDUP) {
+ bias_str = VDEV_ALLOC_BIAS_DEDUP;
+ }
+
+ uint64_t ms_flush_data_obj = 0;
+ if (vd->vdev_top_zap != 0) {
+ int error = zap_lookup(spa_meta_objset(vd->vdev_spa),
+ vd->vdev_top_zap, VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS,
+ sizeof (uint64_t), 1, &ms_flush_data_obj);
+ if (error != ENOENT) {
+ ASSERT0(error);
+ }
+ }
+
+ (void) printf("\tvdev %10llu %s",
+ (u_longlong_t)vd->vdev_id, bias_str);
- bias_str = (alloc_bias == VDEV_BIAS_LOG || vd->vdev_islog) ?
- VDEV_ALLOC_BIAS_LOG :
- (alloc_bias == VDEV_BIAS_SPECIAL) ? VDEV_ALLOC_BIAS_SPECIAL :
- (alloc_bias == VDEV_BIAS_DEDUP) ? VDEV_ALLOC_BIAS_DEDUP :
- vd->vdev_islog ? "log" : "";
+ if (ms_flush_data_obj != 0) {
+ (void) printf(" ms_unflushed_phys object %llu",
+ (u_longlong_t)ms_flush_data_obj);
+ }
- (void) printf("\tvdev %10llu %s\n"
- "\t%-10s%5llu %-19s %-15s %-12s\n",
- (u_longlong_t)vd->vdev_id, bias_str,
+ (void) printf("\n\t%-10s%5llu %-19s %-15s %-12s\n",
"metaslabs", (u_longlong_t)vd->vdev_ms_count,
"offset", "spacemap", "free");
(void) printf("\t%15s %19s %15s %12s\n",
@@ -1173,6 +1202,24 @@ dump_metaslabs(spa_t *spa)
}
static void
+dump_log_spacemaps(spa_t *spa)
+{
+ (void) printf("\nLog Space Maps in Pool:\n");
+ for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
+ sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
+ space_map_t *sm = NULL;
+ VERIFY0(space_map_open(&sm, spa_meta_objset(spa),
+ sls->sls_sm_obj, 0, UINT64_MAX, SPA_MINBLOCKSHIFT));
+
+ (void) printf("Log Spacemap object %llu txg %llu\n",
+ (u_longlong_t)sls->sls_sm_obj, (u_longlong_t)sls->sls_txg);
+ dump_spacemap(spa->spa_meta_objset, sm);
+ space_map_close(sm);
+ }
+ (void) printf("\n");
+}
+
+static void
dump_dde(const ddt_t *ddt, const ddt_entry_t *dde, uint64_t index)
{
const ddt_phys_t *ddp = dde->dde_phys;
@@ -3782,6 +3829,84 @@ static metaslab_ops_t zdb_metaslab_ops = {
NULL /* alloc */
};
+typedef int (*zdb_log_sm_cb_t)(spa_t *spa, space_map_entry_t *sme,
+ uint64_t txg, void *arg);
+
+typedef struct unflushed_iter_cb_arg {
+ spa_t *uic_spa;
+ uint64_t uic_txg;
+ void *uic_arg;
+ zdb_log_sm_cb_t uic_cb;
+} unflushed_iter_cb_arg_t;
+
+static int
+iterate_through_spacemap_logs_cb(space_map_entry_t *sme, void *arg)
+{
+ unflushed_iter_cb_arg_t *uic = arg;
+ return (uic->uic_cb(uic->uic_spa, sme, uic->uic_txg, uic->uic_arg));
+}
+
+static void
+iterate_through_spacemap_logs(spa_t *spa, zdb_log_sm_cb_t cb, void *arg)
+{
+ if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
+ return;
+
+ spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
+ for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
+ sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
+ space_map_t *sm = NULL;
+ VERIFY0(space_map_open(&sm, spa_meta_objset(spa),
+ sls->sls_sm_obj, 0, UINT64_MAX, SPA_MINBLOCKSHIFT));
+
+ unflushed_iter_cb_arg_t uic = {
+ .uic_spa = spa,
+ .uic_txg = sls->sls_txg,
+ .uic_arg = arg,
+ .uic_cb = cb
+ };
+
+ VERIFY0(space_map_iterate(sm, space_map_length(sm),
+ iterate_through_spacemap_logs_cb, &uic));
+ space_map_close(sm);
+ }
+ spa_config_exit(spa, SCL_CONFIG, FTAG);
+}
+
+/* ARGSUSED */
+static int
+load_unflushed_svr_segs_cb(spa_t *spa, space_map_entry_t *sme,
+ uint64_t txg, void *arg)
+{
+ spa_vdev_removal_t *svr = arg;
+
+ uint64_t offset = sme->sme_offset;
+ uint64_t size = sme->sme_run;
+
+ /* skip vdevs we don't care about */
+ if (sme->sme_vdev != svr->svr_vdev_id)
+ return (0);
+
+ vdev_t *vd = vdev_lookup_top(spa, sme->sme_vdev);
+ metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
+ ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
+
+ if (txg < metaslab_unflushed_txg(ms))
+ return (0);
+
+ vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
+ ASSERT(vim != NULL);
+ if (offset >= vdev_indirect_mapping_max_offset(vim))
+ return (0);
+
+ if (sme->sme_type == SM_ALLOC)
+ range_tree_add(svr->svr_allocd_segs, offset, size);
+ else
+ range_tree_remove(svr->svr_allocd_segs, offset, size);
+
+ return (0);
+}
+
/* ARGSUSED */
static void
claim_segment_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
@@ -3830,36 +3955,35 @@ zdb_claim_removing(spa_t *spa, zdb_cb_t *zcb)
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
+ ASSERT0(range_tree_space(svr->svr_allocd_segs));
+
+ range_tree_t *allocs = range_tree_create(NULL, NULL);
for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
metaslab_t *msp = vd->vdev_ms[msi];
if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim))
break;
- ASSERT0(range_tree_space(svr->svr_allocd_segs));
+ ASSERT0(range_tree_space(allocs));
+ if (msp->ms_sm != NULL)
+ VERIFY0(space_map_load(msp->ms_sm, allocs, SM_ALLOC));
+ range_tree_vacate(allocs, range_tree_add, svr->svr_allocd_segs);
+ }
+ range_tree_destroy(allocs);
- if (msp->ms_sm != NULL) {
- VERIFY0(space_map_load(msp->ms_sm,
- svr->svr_allocd_segs, SM_ALLOC));
+ iterate_through_spacemap_logs(spa, load_unflushed_svr_segs_cb, svr);
- /*
- * Clear everything past what has been synced unless
- * it's past the spacemap, because we have not allocated
- * mappings for it yet.
- */
- uint64_t vim_max_offset =
- vdev_indirect_mapping_max_offset(vim);
- uint64_t sm_end = msp->ms_sm->sm_start +
- msp->ms_sm->sm_size;
- if (sm_end > vim_max_offset)
- range_tree_clear(svr->svr_allocd_segs,
- vim_max_offset, sm_end - vim_max_offset);
- }
+ /*
+ * Clear everything past what has been synced,
+ * because we have not allocated mappings for
+ * it yet.
+ */
+ range_tree_clear(svr->svr_allocd_segs,
+ vdev_indirect_mapping_max_offset(vim),
+ vd->vdev_asize - vdev_indirect_mapping_max_offset(vim));
- zcb->zcb_removing_size +=
- range_tree_space(svr->svr_allocd_segs);
- range_tree_vacate(svr->svr_allocd_segs, claim_segment_cb, vd);
- }
+ zcb->zcb_removing_size += range_tree_space(svr->svr_allocd_segs);
+ range_tree_vacate(svr->svr_allocd_segs, claim_segment_cb, vd);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
@@ -4070,6 +4194,82 @@ zdb_leak_init_exclude_checkpoint(spa_t *spa, zdb_cb_t *zcb)
}
}
+static int
+count_unflushed_space_cb(spa_t *spa, space_map_entry_t *sme,
+ uint64_t txg, void *arg)
+{
+ int64_t *ualloc_space = arg;
+
+ uint64_t offset = sme->sme_offset;
+ uint64_t vdev_id = sme->sme_vdev;
+
+ vdev_t *vd = vdev_lookup_top(spa, vdev_id);
+ if (!vdev_is_concrete(vd))
+ return (0);
+
+ metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
+ ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
+
+ if (txg < metaslab_unflushed_txg(ms))
+ return (0);
+
+ if (sme->sme_type == SM_ALLOC)
+ *ualloc_space += sme->sme_run;
+ else
+ *ualloc_space -= sme->sme_run;
+
+ return (0);
+}
+
+static int64_t
+get_unflushed_alloc_space(spa_t *spa)
+{
+ if (dump_opt['L'])
+ return (0);
+
+ int64_t ualloc_space = 0;
+ iterate_through_spacemap_logs(spa, count_unflushed_space_cb,
+ &ualloc_space);
+ return (ualloc_space);
+}
+
+static int
+load_unflushed_cb(spa_t *spa, space_map_entry_t *sme, uint64_t txg, void *arg)
+{
+ maptype_t *uic_maptype = arg;
+
+ uint64_t offset = sme->sme_offset;
+ uint64_t size = sme->sme_run;
+ uint64_t vdev_id = sme->sme_vdev;
+
+ vdev_t *vd = vdev_lookup_top(spa, vdev_id);
+
+ /* skip indirect vdevs */
+ if (!vdev_is_concrete(vd))
+ return (0);
+
+ metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
+
+ ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
+ ASSERT(*uic_maptype == SM_ALLOC || *uic_maptype == SM_FREE);
+
+ if (txg < metaslab_unflushed_txg(ms))
+ return (0);
+
+ if (*uic_maptype == sme->sme_type)
+ range_tree_add(ms->ms_allocatable, offset, size);
+ else
+ range_tree_remove(ms->ms_allocatable, offset, size);
+
+ return (0);
+}
+
+static void
+load_unflushed_to_ms_allocatables(spa_t *spa, maptype_t maptype)
+{
+ iterate_through_spacemap_logs(spa, load_unflushed_cb, &maptype);
+}
+
static void
load_concrete_ms_allocatable_trees(spa_t *spa, maptype_t maptype)
{
@@ -4093,7 +4293,7 @@ load_concrete_ms_allocatable_trees(spa_t *spa, maptype_t maptype)
(longlong_t)vd->vdev_ms_count);
mutex_enter(&msp->ms_lock);
- metaslab_unload(msp);
+ range_tree_vacate(msp->ms_allocatable, NULL, NULL);
/*
* We don't want to spend the CPU manipulating the
@@ -4110,6 +4310,8 @@ load_concrete_ms_allocatable_trees(spa_t *spa, maptype_t maptype)
mutex_exit(&msp->ms_lock);
}
}
+
+ load_unflushed_to_ms_allocatables(spa, maptype);
}
/*
@@ -4124,7 +4326,7 @@ load_indirect_ms_allocatable_tree(vdev_t *vd, metaslab_t *msp,
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
mutex_enter(&msp->ms_lock);
- metaslab_unload(msp);
+ range_tree_vacate(msp->ms_allocatable, NULL, NULL);
/*
* We don't want to spend the CPU manipulating the
@@ -4383,7 +4585,6 @@ zdb_leak_fini(spa_t *spa, zdb_cb_t *zcb)
range_tree_vacate(msp->ms_allocatable,
zdb_leak, vd);
}
-
if (msp->ms_loaded) {
msp->ms_loaded = B_FALSE;
}
@@ -4520,7 +4721,8 @@ dump_block_stats(spa_t *spa)
total_alloc = norm_alloc +
metaslab_class_get_alloc(spa_log_class(spa)) +
metaslab_class_get_alloc(spa_special_class(spa)) +
- metaslab_class_get_alloc(spa_dedup_class(spa));
+ metaslab_class_get_alloc(spa_dedup_class(spa)) +
+ get_unflushed_alloc_space(spa);
total_found = tzb->zb_asize - zcb.zcb_dedup_asize +
zcb.zcb_removing_size + zcb.zcb_checkpoint_size;
@@ -5393,11 +5595,24 @@ mos_obj_refd_multiple(uint64_t obj)
}
static void
+mos_leak_vdev_top_zap(vdev_t *vd)
+{
+ uint64_t ms_flush_data_obj;
+ int error = zap_lookup(spa_meta_objset(vd->vdev_spa),
+ vd->vdev_top_zap, VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS,
+ sizeof (ms_flush_data_obj), 1, &ms_flush_data_obj);
+ if (error == ENOENT)
+ return;
+ ASSERT0(error);
+
+ mos_obj_refd(ms_flush_data_obj);
+}
+
+static void
mos_leak_vdev(vdev_t *vd)
{
mos_obj_refd(vd->vdev_dtl_object);
mos_obj_refd(vd->vdev_ms_array);
- mos_obj_refd(vd->vdev_top_zap);
mos_obj_refd(vd->vdev_indirect_config.vic_births_object);
mos_obj_refd(vd->vdev_indirect_config.vic_mapping_object);
mos_obj_refd(vd->vdev_leaf_zap);
@@ -5415,11 +5630,33 @@ mos_leak_vdev(vdev_t *vd)
mos_obj_refd(space_map_object(ms->ms_sm));
}
+ if (vd->vdev_top_zap != 0) {
+ mos_obj_refd(vd->vdev_top_zap);
+ mos_leak_vdev_top_zap(vd);
+ }
+
for (uint64_t c = 0; c < vd->vdev_children; c++) {
mos_leak_vdev(vd->vdev_child[c]);
}
}
+static void
+mos_leak_log_spacemaps(spa_t *spa)
+{
+ uint64_t spacemap_zap;
+ int error = zap_lookup(spa_meta_objset(spa),
+ DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_LOG_SPACEMAP_ZAP,
+ sizeof (spacemap_zap), 1, &spacemap_zap);
+ if (error == ENOENT)
+ return;
+ ASSERT0(error);
+
+ mos_obj_refd(spacemap_zap);
+ for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
+ sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls))
+ mos_obj_refd(sls->sls_sm_obj);
+}
+
static int
dump_mos_leaks(spa_t *spa)
{
@@ -5451,6 +5688,10 @@ dump_mos_leaks(spa_t *spa)
mos_obj_refd(spa->spa_l2cache.sav_object);
mos_obj_refd(spa->spa_spares.sav_object);
+ if (spa->spa_syncing_log_sm != NULL)
+ mos_obj_refd(spa->spa_syncing_log_sm->sm_object);
+ mos_leak_log_spacemaps(spa);
+
mos_obj_refd(spa->spa_condensing_indirect_phys.
scip_next_mapping_object);
mos_obj_refd(spa->spa_condensing_indirect_phys.
@@ -5528,6 +5769,79 @@ dump_mos_leaks(spa_t *spa)
return (rv);
}
+typedef struct log_sm_obsolete_stats_arg {
+ uint64_t lsos_current_txg;
+
+ uint64_t lsos_total_entries;
+ uint64_t lsos_valid_entries;
+
+ uint64_t lsos_sm_entries;
+ uint64_t lsos_valid_sm_entries;
+} log_sm_obsolete_stats_arg_t;
+
+static int
+log_spacemap_obsolete_stats_cb(spa_t *spa, space_map_entry_t *sme,
+ uint64_t txg, void *arg)
+{
+ log_sm_obsolete_stats_arg_t *lsos = arg;
+
+ uint64_t offset = sme->sme_offset;
+ uint64_t vdev_id = sme->sme_vdev;
+
+ if (lsos->lsos_current_txg == 0) {
+ /* this is the first log */
+ lsos->lsos_current_txg = txg;
+ } else if (lsos->lsos_current_txg < txg) {
+ /* we just changed log - print stats and reset */
+ (void) printf("%-8llu valid entries out of %-8llu - txg %llu\n",
+ (u_longlong_t)lsos->lsos_valid_sm_entries,
+ (u_longlong_t)lsos->lsos_sm_entries,
+ (u_longlong_t)lsos->lsos_current_txg);
+ lsos->lsos_valid_sm_entries = 0;
+ lsos->lsos_sm_entries = 0;
+ lsos->lsos_current_txg = txg;
+ }
+ ASSERT3U(lsos->lsos_current_txg, ==, txg);
+
+ lsos->lsos_sm_entries++;
+ lsos->lsos_total_entries++;
+
+ vdev_t *vd = vdev_lookup_top(spa, vdev_id);
+ if (!vdev_is_concrete(vd))
+ return (0);
+
+ metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
+ ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
+
+ if (txg < metaslab_unflushed_txg(ms))
+ return (0);
+ lsos->lsos_valid_sm_entries++;
+ lsos->lsos_valid_entries++;
+ return (0);
+}
+
+static void
+dump_log_spacemap_obsolete_stats(spa_t *spa)
+{
+ log_sm_obsolete_stats_arg_t lsos;
+ bzero(&lsos, sizeof (lsos));
+
+ (void) printf("Log Space Map Obsolete Entry Statistics:\n");
+
+ iterate_through_spacemap_logs(spa,
+ log_spacemap_obsolete_stats_cb, &lsos);
+
+ /* print stats for latest log */
+ (void) printf("%-8llu valid entries out of %-8llu - txg %llu\n",
+ (u_longlong_t)lsos.lsos_valid_sm_entries,
+ (u_longlong_t)lsos.lsos_sm_entries,
+ (u_longlong_t)lsos.lsos_current_txg);
+
+ (void) printf("%-8llu valid entries out of %-8llu - total\n\n",
+ (u_longlong_t)lsos.lsos_valid_entries,
+ (u_longlong_t)lsos.lsos_total_entries);
+}
+
static void
dump_zpool(spa_t *spa)
{
@@ -5557,6 +5871,10 @@ dump_zpool(spa_t *spa)
dump_metaslabs(spa);
if (dump_opt['M'])
dump_metaslab_groups(spa);
+ if (dump_opt['d'] > 2 || dump_opt['m']) {
+ dump_log_spacemaps(spa);
+ dump_log_spacemap_obsolete_stats(spa);
+ }
if (dump_opt['d'] || dump_opt['i']) {
spa_feature_t f;
@@ -5635,9 +5953,8 @@ dump_zpool(spa_t *spa)
}
}
- if (rc == 0) {
+ if (rc == 0)
rc = verify_device_removal_feature_counts(spa);
- }
}
if (rc == 0 && (dump_opt['b'] || dump_opt['c']))
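
The zdb changes above all follow the same pattern: define a zdb_log_sm_cb_t, hand it to iterate_through_spacemap_logs(), and discard entries that the metaslab has already flushed. A minimal, hypothetical sketch in that style (the callback name and the per-vdev counter array are invented for illustration and are not part of the change):

static int
count_entries_per_vdev_cb(spa_t *spa, space_map_entry_t *sme,
    uint64_t txg, void *arg)
{
	uint64_t *counts = arg;		/* indexed by top-level vdev id */

	vdev_t *vd = vdev_lookup_top(spa, sme->sme_vdev);
	if (!vdev_is_concrete(vd))
		return (0);

	metaslab_t *ms = vd->vdev_ms[sme->sme_offset >> vd->vdev_ms_shift];

	/* same staleness filter used by every callback in this change */
	if (txg < metaslab_unflushed_txg(ms))
		return (0);

	counts[sme->sme_vdev]++;
	return (0);
}

/* usage: iterate_through_spacemap_logs(spa, count_entries_per_vdev_cb, counts); */
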
diff --git a/cmd/ztest/ztest.c b/cmd/ztest/ztest.c
index f67d94fa8..3b1be5d40 100644
--- a/cmd/ztest/ztest.c
+++ b/cmd/ztest/ztest.c
@@ -2924,24 +2924,12 @@ vdev_lookup_by_path(vdev_t *vd, const char *path)
return (NULL);
}
-/*
- * Find the first available hole which can be used as a top-level.
- */
-int
-find_vdev_hole(spa_t *spa)
+static int
+spa_num_top_vdevs(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
- int c;
-
- ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) == SCL_VDEV);
-
- for (c = 0; c < rvd->vdev_children; c++) {
- vdev_t *cvd = rvd->vdev_child[c];
-
- if (cvd->vdev_ishole)
- break;
- }
- return (c);
+ ASSERT3U(spa_config_held(spa, SCL_VDEV, RW_READER), ==, SCL_VDEV);
+ return (rvd->vdev_children);
}
/*
@@ -2966,7 +2954,7 @@ ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id)
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
- ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves;
+ ztest_shared->zs_vdev_next_leaf = spa_num_top_vdevs(spa) * leaves;
/*
* If we have slogs then remove them 1/4 of the time.
@@ -3073,7 +3061,7 @@ ztest_vdev_class_add(ztest_ds_t *zd, uint64_t id)
leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
- ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves;
+ ztest_shared->zs_vdev_next_leaf = spa_num_top_vdevs(spa) * leaves;
spa_config_exit(spa, SCL_VDEV, FTAG);
nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0,
@@ -7329,6 +7317,15 @@ ztest_init(ztest_shared_t *zs)
for (i = 0; i < SPA_FEATURES; i++) {
char *buf;
+
+ /*
+ * 75% chance of using the log space map feature. We want ztest
+ * to exercise both the code paths that use the log space map
+ * feature and the ones that don't.
+ */
+ if (i == SPA_FEATURE_LOG_SPACEMAP && ztest_random(4) == 0)
+ continue;
+
VERIFY3S(-1, !=, asprintf(&buf, "feature@%s",
spa_feature_table[i].fi_uname));
VERIFY3U(0, ==, nvlist_add_uint64(props, buf, 0));
diff --git a/configure.ac b/configure.ac
index 49396efb8..f1d3ddc20 100644
--- a/configure.ac
+++ b/configure.ac
@@ -296,6 +296,7 @@ AC_CONFIG_FILES([
tests/zfs-tests/tests/functional/link_count/Makefile
tests/zfs-tests/tests/functional/libzfs/Makefile
tests/zfs-tests/tests/functional/limits/Makefile
+ tests/zfs-tests/tests/functional/log_spacemap/Makefile
tests/zfs-tests/tests/functional/migration/Makefile
tests/zfs-tests/tests/functional/mmap/Makefile
tests/zfs-tests/tests/functional/mmp/Makefile
diff --git a/include/sys/Makefile.am b/include/sys/Makefile.am
index af45eb3bc..c5d64f9fd 100644
--- a/include/sys/Makefile.am
+++ b/include/sys/Makefile.am
@@ -13,7 +13,6 @@ COMMON_H = \
$(top_srcdir)/include/sys/bptree.h \
$(top_srcdir)/include/sys/bqueue.h \
$(top_srcdir)/include/sys/cityhash.h \
- $(top_srcdir)/include/sys/spa_checkpoint.h \
$(top_srcdir)/include/sys/dataset_kstats.h \
$(top_srcdir)/include/sys/dbuf.h \
$(top_srcdir)/include/sys/ddt.h \
@@ -63,6 +62,8 @@ COMMON_H = \
$(top_srcdir)/include/sys/sha2.h \
$(top_srcdir)/include/sys/skein.h \
$(top_srcdir)/include/sys/spa_boot.h \
+ $(top_srcdir)/include/sys/spa_checkpoint.h \
+ $(top_srcdir)/include/sys/spa_log_spacemap.h \
$(top_srcdir)/include/sys/space_map.h \
$(top_srcdir)/include/sys/space_reftree.h \
$(top_srcdir)/include/sys/spa.h \
diff --git a/include/sys/dmu.h b/include/sys/dmu.h
index 3f7350554..65da78eb5 100644
--- a/include/sys/dmu.h
+++ b/include/sys/dmu.h
@@ -382,6 +382,7 @@ typedef struct dmu_buf {
#define DMU_POOL_OBSOLETE_BPOBJ "com.delphix:obsolete_bpobj"
#define DMU_POOL_CONDENSING_INDIRECT "com.delphix:condensing_indirect"
#define DMU_POOL_ZPOOL_CHECKPOINT "com.delphix:zpool_checkpoint"
+#define DMU_POOL_LOG_SPACEMAP_ZAP "com.delphix:log_spacemap_zap"
/*
* Allocate an object from this objset. The range of object numbers
diff --git a/include/sys/fs/zfs.h b/include/sys/fs/zfs.h
index a9dd8e466..2cd133b1f 100644
--- a/include/sys/fs/zfs.h
+++ b/include/sys/fs/zfs.h
@@ -770,6 +770,8 @@ typedef struct zpool_load_policy {
"com.delphix:obsolete_counts_are_precise"
#define VDEV_TOP_ZAP_POOL_CHECKPOINT_SM \
"com.delphix:pool_checkpoint_sm"
+#define VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS \
+ "com.delphix:ms_unflushed_phys_txgs"
#define VDEV_TOP_ZAP_ALLOCATION_BIAS \
"org.zfsonlinux:allocation_bias"
diff --git a/include/sys/metaslab.h b/include/sys/metaslab.h
index 330902529..973f15d75 100644
--- a/include/sys/metaslab.h
+++ b/include/sys/metaslab.h
@@ -49,9 +49,17 @@ int metaslab_init(metaslab_group_t *, uint64_t, uint64_t, uint64_t,
metaslab_t **);
void metaslab_fini(metaslab_t *);
+void metaslab_set_unflushed_txg(metaslab_t *, uint64_t, dmu_tx_t *);
+void metaslab_set_estimated_condensed_size(metaslab_t *, uint64_t, dmu_tx_t *);
+uint64_t metaslab_unflushed_txg(metaslab_t *);
+uint64_t metaslab_estimated_condensed_size(metaslab_t *);
+int metaslab_sort_by_flushed(const void *, const void *);
+uint64_t metaslab_unflushed_changes_memused(metaslab_t *);
+
int metaslab_load(metaslab_t *);
void metaslab_potentially_unload(metaslab_t *, uint64_t);
void metaslab_unload(metaslab_t *);
+boolean_t metaslab_flush(metaslab_t *, dmu_tx_t *);
uint64_t metaslab_allocated_space(metaslab_t *);
@@ -108,6 +116,9 @@ uint64_t metaslab_class_get_space(metaslab_class_t *);
uint64_t metaslab_class_get_dspace(metaslab_class_t *);
uint64_t metaslab_class_get_deferred(metaslab_class_t *);
+void metaslab_space_update(vdev_t *, metaslab_class_t *,
+ int64_t, int64_t, int64_t);
+
metaslab_group_t *metaslab_group_create(metaslab_class_t *, vdev_t *, int);
void metaslab_group_destroy(metaslab_group_t *);
void metaslab_group_activate(metaslab_group_t *);
@@ -124,6 +135,8 @@ void metaslab_recalculate_weight_and_sort(metaslab_t *);
void metaslab_disable(metaslab_t *);
void metaslab_enable(metaslab_t *, boolean_t);
+extern int metaslab_debug_load;
+
#ifdef __cplusplus
}
#endif
diff --git a/include/sys/metaslab_impl.h b/include/sys/metaslab_impl.h
index ca1104c14..29bc8cd5e 100644
--- a/include/sys/metaslab_impl.h
+++ b/include/sys/metaslab_impl.h
@@ -24,7 +24,7 @@
*/
/*
- * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
*/
#ifndef _SYS_METASLAB_IMPL_H
@@ -357,7 +357,7 @@ struct metaslab {
* write to metaslab data on-disk (i.e flushing entries to
* the metaslab's space map). It helps coordinate readers of
* the metaslab's space map [see spa_vdev_remove_thread()]
- * with writers [see metaslab_sync()].
+ * with writers [see metaslab_sync() or metaslab_flush()].
*
* Note that metaslab_load(), even though a reader, uses
* a completely different mechanism to deal with the reading
@@ -401,7 +401,6 @@ struct metaslab {
boolean_t ms_condensing; /* condensing? */
boolean_t ms_condense_wanted;
- uint64_t ms_condense_checked_txg;
/*
* The number of consumers which have disabled the metaslab.
@@ -414,6 +413,8 @@ struct metaslab {
*/
boolean_t ms_loaded;
boolean_t ms_loading;
+ kcondvar_t ms_flush_cv;
+ boolean_t ms_flushing;
/*
* The following histograms count entries that are in the
@@ -499,6 +500,22 @@ struct metaslab {
metaslab_group_t *ms_group; /* metaslab group */
avl_node_t ms_group_node; /* node in metaslab group tree */
txg_node_t ms_txg_node; /* per-txg dirty metaslab links */
+ avl_node_t ms_spa_txg_node; /* node in spa_metaslabs_by_txg */
+
+ /*
+ * Allocs and frees that are committed to the vdev log spacemap but
+ * not yet to this metaslab's spacemap.
+ */
+ range_tree_t *ms_unflushed_allocs;
+ range_tree_t *ms_unflushed_frees;
+
+ /*
+ * We have flushed entries up to but not including this TXG. In
+ * other words, all changes from this TXG and onward should not
+ * be in this metaslab's space map and must be read from the
+ * log space maps.
+ */
+ uint64_t ms_unflushed_txg;
/* updated every time we are done syncing the metaslab's space map */
uint64_t ms_synced_length;
@@ -506,6 +523,11 @@ struct metaslab {
boolean_t ms_new;
};
+typedef struct metaslab_unflushed_phys {
+ /* on-disk counterpart of ms_unflushed_txg */
+ uint64_t msp_unflushed_txg;
+} metaslab_unflushed_phys_t;
+
#ifdef __cplusplus
}
#endif
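
To make the role of the two new trees concrete: when a metaslab loads, the unflushed changes are replayed on top of whatever the on-disk space map provided, which is exactly what the metaslab_load_impl() hunk later in this diff does. A minimal sketch of just that replay step, with the locking and feature checks omitted (the helper name is invented for illustration):

/*
 * Sketch only: replay unflushed changes onto ms_allocatable after
 * the on-disk space map has been read (see metaslab_load_impl()).
 */
static void
replay_unflushed_changes(metaslab_t *msp)
{
	/* unflushed allocations are no longer free */
	range_tree_walk(msp->ms_unflushed_allocs,
	    range_tree_remove, msp->ms_allocatable);
	/* unflushed frees are free again */
	range_tree_walk(msp->ms_unflushed_frees,
	    range_tree_add, msp->ms_allocatable);
}
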
diff --git a/include/sys/range_tree.h b/include/sys/range_tree.h
index ae1a0c323..fce1df68d 100644
--- a/include/sys/range_tree.h
+++ b/include/sys/range_tree.h
@@ -24,7 +24,7 @@
*/
/*
- * Copyright (c) 2013, 2017 by Delphix. All rights reserved.
+ * Copyright (c) 2013, 2019 by Delphix. All rights reserved.
*/
#ifndef _SYS_RANGE_TREE_H
@@ -95,6 +95,7 @@ range_seg_t *range_tree_find(range_tree_t *rt, uint64_t start, uint64_t size);
void range_tree_resize_segment(range_tree_t *rt, range_seg_t *rs,
uint64_t newstart, uint64_t newsize);
uint64_t range_tree_space(range_tree_t *rt);
+uint64_t range_tree_numsegs(range_tree_t *rt);
boolean_t range_tree_is_empty(range_tree_t *rt);
void range_tree_swap(range_tree_t **rtsrc, range_tree_t **rtdst);
void range_tree_stat_verify(range_tree_t *rt);
@@ -112,6 +113,11 @@ void range_tree_vacate(range_tree_t *rt, range_tree_func_t *func, void *arg);
void range_tree_walk(range_tree_t *rt, range_tree_func_t *func, void *arg);
range_seg_t *range_tree_first(range_tree_t *rt);
+void range_tree_remove_xor_add_segment(uint64_t start, uint64_t end,
+ range_tree_t *removefrom, range_tree_t *addto);
+void range_tree_remove_xor_add(range_tree_t *rt, range_tree_t *removefrom,
+ range_tree_t *addto);
+
void rt_avl_create(range_tree_t *rt, void *arg);
void rt_avl_destroy(range_tree_t *rt, void *arg);
void rt_avl_add(range_tree_t *rt, range_seg_t *rs, void *arg);
diff --git a/include/sys/spa.h b/include/sys/spa.h
index a7e4d154f..50ca15be5 100644
--- a/include/sys/spa.h
+++ b/include/sys/spa.h
@@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
@@ -42,6 +42,7 @@
#include <sys/fs/zfs.h>
#include <sys/spa_checksum.h>
#include <sys/dmu.h>
+#include <sys/space_map.h>
#ifdef __cplusplus
extern "C" {
@@ -1075,6 +1076,7 @@ extern boolean_t spa_suspended(spa_t *spa);
extern uint64_t spa_bootfs(spa_t *spa);
extern uint64_t spa_delegation(spa_t *spa);
extern objset_t *spa_meta_objset(spa_t *spa);
+extern space_map_t *spa_syncing_log_sm(spa_t *spa);
extern uint64_t spa_deadman_synctime(spa_t *spa);
extern uint64_t spa_deadman_ziotime(spa_t *spa);
extern uint64_t spa_dirty_data(spa_t *spa);
@@ -1125,6 +1127,7 @@ extern boolean_t spa_trust_config(spa_t *spa);
extern uint64_t spa_missing_tvds_allowed(spa_t *spa);
extern void spa_set_missing_tvds(spa_t *spa, uint64_t missing);
extern boolean_t spa_top_vdevs_spacemap_addressable(spa_t *spa);
+extern uint64_t spa_total_metaslabs(spa_t *spa);
extern boolean_t spa_multihost(spa_t *spa);
extern unsigned long spa_get_hostid(void);
extern void spa_activate_allocation_classes(spa_t *, dmu_tx_t *);
diff --git a/include/sys/spa_impl.h b/include/sys/spa_impl.h
index d49b970c9..ff69286cc 100644
--- a/include/sys/spa_impl.h
+++ b/include/sys/spa_impl.h
@@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
@@ -34,6 +34,7 @@
#include <sys/spa.h>
#include <sys/spa_checkpoint.h>
+#include <sys/spa_log_spacemap.h>
#include <sys/vdev.h>
#include <sys/vdev_removal.h>
#include <sys/metaslab.h>
@@ -307,6 +308,14 @@ struct spa {
spa_checkpoint_info_t spa_checkpoint_info; /* checkpoint accounting */
zthr_t *spa_checkpoint_discard_zthr;
+ space_map_t *spa_syncing_log_sm; /* current log space map */
+ avl_tree_t spa_sm_logs_by_txg;
+ kmutex_t spa_flushed_ms_lock; /* for metaslabs_by_flushed */
+ avl_tree_t spa_metaslabs_by_flushed;
+ spa_unflushed_stats_t spa_unflushed_stats;
+ list_t spa_log_summary;
+ uint64_t spa_log_flushall_txg;
+
char *spa_root; /* alternate root directory */
uint64_t spa_ena; /* spa-wide ereport ENA */
int spa_last_open_failed; /* error if last open failed */
diff --git a/include/sys/spa_log_spacemap.h b/include/sys/spa_log_spacemap.h
new file mode 100644
index 000000000..b2ed77fac
--- /dev/null
+++ b/include/sys/spa_log_spacemap.h
@@ -0,0 +1,79 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2018, 2019 by Delphix. All rights reserved.
+ */
+
+#ifndef _SYS_SPA_LOG_SPACEMAP_H
+#define _SYS_SPA_LOG_SPACEMAP_H
+
+#include <sys/avl.h>
+
+typedef struct log_summary_entry {
+ uint64_t lse_start; /* start TXG */
+ uint64_t lse_mscount; /* # of metaslabs needed to be flushed */
+ uint64_t lse_blkcount; /* blocks held by this entry */
+ list_node_t lse_node;
+} log_summary_entry_t;
+
+typedef struct spa_unflushed_stats {
+ /* used for memory heuristic */
+ uint64_t sus_memused; /* current memory used for unflushed trees */
+
+ /* used for block heuristic */
+ uint64_t sus_blocklimit; /* max # of log blocks allowed */
+ uint64_t sus_nblocks; /* # of blocks in log space maps currently */
+} spa_unflushed_stats_t;
+
+typedef struct spa_log_sm {
+ uint64_t sls_sm_obj; /* space map object ID */
+ uint64_t sls_txg; /* txg logged on the space map */
+ uint64_t sls_nblocks; /* number of blocks in this log */
+ uint64_t sls_mscount; /* # of metaslabs flushed in the log's txg */
+ avl_node_t sls_node; /* node in spa_sm_logs_by_txg */
+} spa_log_sm_t;
+
+int spa_ld_log_spacemaps(spa_t *);
+
+void spa_generate_syncing_log_sm(spa_t *, dmu_tx_t *);
+void spa_flush_metaslabs(spa_t *, dmu_tx_t *);
+void spa_sync_close_syncing_log_sm(spa_t *);
+
+void spa_cleanup_old_sm_logs(spa_t *, dmu_tx_t *);
+
+uint64_t spa_log_sm_blocklimit(spa_t *);
+void spa_log_sm_set_blocklimit(spa_t *);
+uint64_t spa_log_sm_nblocks(spa_t *);
+uint64_t spa_log_sm_memused(spa_t *);
+
+void spa_log_sm_decrement_mscount(spa_t *, uint64_t);
+void spa_log_sm_increment_current_mscount(spa_t *);
+
+void spa_log_summary_add_flushed_metaslab(spa_t *);
+void spa_log_summary_decrement_mscount(spa_t *, uint64_t);
+void spa_log_summary_decrement_blkcount(spa_t *, uint64_t);
+
+boolean_t spa_flush_all_logs_requested(spa_t *);
+
+extern int zfs_keep_log_spacemaps_at_export;
+
+#endif /* _SYS_SPA_LOG_SPACEMAP_H */
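
The structures above hang together through one invariant that every consumer in this change relies on: an entry recorded in a log for txg T still applies to a metaslab only if that metaslab has not yet been flushed at or after T (compare the txg < metaslab_unflushed_txg(ms) checks in the zdb callbacks). A one-line helper expressing that check, purely for illustration and not declared in this header:

static inline boolean_t
log_entry_applies(metaslab_t *ms, uint64_t txg)
{
	/* the metaslab's last flush happened before this log's txg */
	return (txg >= metaslab_unflushed_txg(ms));
}
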
diff --git a/include/sys/space_map.h b/include/sys/space_map.h
index 7731a352f..81f56076a 100644
--- a/include/sys/space_map.h
+++ b/include/sys/space_map.h
@@ -24,7 +24,7 @@
*/
/*
- * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
*/
#ifndef _SYS_SPACE_MAP_H
@@ -72,6 +72,11 @@ typedef struct space_map_phys {
* bucket, smp_histogram[i], contains the number of free regions
* whose size is:
* 2^(i+sm_shift) <= size of free region in bytes < 2^(i+sm_shift+1)
+ *
+ * Note that, if the log space map feature is enabled, histograms of
+ * space maps that belong to metaslabs will take into account any
+ * unflushed changes for their metaslabs, even though the actual
+ * space map doesn't have entries for these changes.
*/
uint64_t smp_histogram[SPACE_MAP_HISTOGRAM_SIZE];
} space_map_phys_t;
@@ -209,6 +214,8 @@ void space_map_histogram_add(space_map_t *sm, range_tree_t *rt,
uint64_t space_map_object(space_map_t *sm);
int64_t space_map_allocated(space_map_t *sm);
uint64_t space_map_length(space_map_t *sm);
+uint64_t space_map_entries(space_map_t *sm, range_tree_t *rt);
+uint64_t space_map_nblocks(space_map_t *sm);
void space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
uint64_t vdev_id, dmu_tx_t *tx);
diff --git a/include/sys/vdev_impl.h b/include/sys/vdev_impl.h
index f6f7bbb4b..c179191e3 100644
--- a/include/sys/vdev_impl.h
+++ b/include/sys/vdev_impl.h
@@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
*/
@@ -535,7 +535,7 @@ extern void vdev_set_min_asize(vdev_t *vd);
/*
* Global variables
*/
-extern int vdev_standard_sm_blksz;
+extern int zfs_vdev_standard_sm_blksz;
/* zdb uses this tunable, so it must be declared here to make lint happy. */
extern int zfs_vdev_cache_size;
diff --git a/include/sys/zfs_debug.h b/include/sys/zfs_debug.h
index 7968a01cd..78d5efc8a 100644
--- a/include/sys/zfs_debug.h
+++ b/include/sys/zfs_debug.h
@@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
*/
#ifndef _SYS_ZFS_DEBUG_H
@@ -55,6 +55,7 @@ extern int zfs_dbgmsg_enable;
#define ZFS_DEBUG_SET_ERROR (1 << 9)
#define ZFS_DEBUG_INDIRECT_REMAP (1 << 10)
#define ZFS_DEBUG_TRIM (1 << 11)
+#define ZFS_DEBUG_LOG_SPACEMAP (1 << 12)
extern void __zfs_dbgmsg(char *buf);
extern void __dprintf(boolean_t dprint, const char *file, const char *func,
diff --git a/include/zfeature_common.h b/include/zfeature_common.h
index 62a2c0fee..4012b71d6 100644
--- a/include/zfeature_common.h
+++ b/include/zfeature_common.h
@@ -70,6 +70,7 @@ typedef enum spa_feature {
SPA_FEATURE_REDACTION_BOOKMARKS,
SPA_FEATURE_REDACTED_DATASETS,
SPA_FEATURE_BOOKMARK_WRITTEN,
+ SPA_FEATURE_LOG_SPACEMAP,
SPA_FEATURES
} spa_feature_t;
diff --git a/lib/libzpool/Makefile.am b/lib/libzpool/Makefile.am
index 2cf5cec65..072cd1a46 100644
--- a/lib/libzpool/Makefile.am
+++ b/lib/libzpool/Makefile.am
@@ -101,6 +101,7 @@ KERNEL_C = \
spa_config.c \
spa_errlog.c \
spa_history.c \
+ spa_log_spacemap.c \
spa_misc.c \
spa_stats.c \
space_map.c \
diff --git a/man/man5/zfs-module-parameters.5 b/man/man5/zfs-module-parameters.5
index 9462608f9..62245f6a0 100644
--- a/man/man5/zfs-module-parameters.5
+++ b/man/man5/zfs-module-parameters.5
@@ -271,6 +271,17 @@ Default value: \fB16,777,217\fR.
.sp
.ne 2
.na
+\fBzfs_keep_log_spacemaps_at_export\fR (int)
+.ad
+.RS 12n
+Prevent log spacemaps from being destroyed during pool exports and destroys.
+.sp
+Use \fB1\fR for yes and \fB0\fR for no (default).
+.RE
+
+.sp
+.ne 2
+.na
\fBzfs_metaslab_segment_weight_enabled\fR (int)
.ad
.RS 12n
@@ -373,6 +384,17 @@ Default value: \fB200\fR.
.sp
.ne 2
.na
+\fBzfs_vdev_default_ms_shift\fR (int)
+.ad
+.RS 12n
+Default limit for metaslab size.
+.sp
+Default value: \fB29\fR [meaning (1 << 29) = 512MB].
+.RE
+
+.sp
+.ne 2
+.na
\fBzfs_vdev_min_ms_count\fR (int)
.ad
.RS 12n
@@ -1232,6 +1254,93 @@ Default value: 20
.sp
.ne 2
.na
+\fBzfs_unflushed_max_mem_amt\fR (ulong)
+.ad
+.RS 12n
+Upper-bound limit for unflushed metadata changes to be held by the
+log spacemap in memory (in bytes).
+.sp
+Default value: \fB1,073,741,824\fR (1GB).
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_unflushed_max_mem_ppm\fR (ulong)
+.ad
+.RS 12n
+Percentage of the overall system memory that ZFS allows to be used
+for unflushed metadata changes by the log spacemap.
+(The value is expressed in parts per 1000000 for finer granularity.)
+.sp
+Default value: \fB1000\fR (which, divided by 1000000, gives a limit of
+\fB0.1\fR% of memory).
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_unflushed_log_block_max\fR (ulong)
+.ad
+.RS 12n
+Describes the maximum number of log spacemap blocks allowed for each pool.
+The default value of 262144 means that the space in all the log spacemaps
+can add up to no more than 262144 blocks (which means 32GB of logical
+space before compression and ditto blocks, assuming that blocksize is
+128k).
+.sp
+This tunable is important because it involves a trade-off between import
+time after an unclean export and the frequency of flushing metaslabs.
+The higher this number is, the more log blocks we allow when the pool is
+active, which means that we flush metaslabs less often and thus decrease
+the number of I/Os for spacemap updates per TXG.
+At the same time though, that means that in the event of an unclean export,
+there will be more log spacemap blocks for us to read, inducing overhead
+in the import time of the pool.
+The lower the number, the more often we flush, destroying log blocks
+sooner as they become obsolete faster, which leaves fewer blocks
+to be read during import time after a crash.
+.sp
+Each log spacemap block existing during pool import leads to approximately
+one extra logical I/O issued.
+This is the reason why this tunable is exposed in terms of blocks rather
+than space used.
+.sp
+Default value: \fB262144\fR (256K).
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_unflushed_log_block_min\fR (ulong)
+.ad
+.RS 12n
+If the number of metaslabs is small and our incoming rate is high, we
+could get into a situation where we are flushing all our metaslabs every
+TXG.
+Thus we always allow at least this many log blocks.
+.sp
+Default value: \fB1000\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_unflushed_log_block_pct\fR (ulong)
+.ad
+.RS 12n
+Tunable used to determine the number of blocks that can be used for
+the spacemap log, expressed as a percentage of the total number of
+metaslabs in the pool.
+.sp
+Default value: \fB400\fR (read as \fB400\fR% - meaning that the number
+of log spacemap blocks is capped at 4 times the number of
+metaslabs in the pool).
+.RE
+
+.sp
+.ne 2
+.na
\fBzfs_unlink_suspend_progress\fR (uint)
.ad
.RS 12n
@@ -1717,6 +1826,10 @@ _
_
2048 ZFS_DEBUG_TRIM
Verify TRIM ranges are always within the allocatable range tree.
+_
+4096 ZFS_DEBUG_LOG_SPACEMAP
+ Verify that the log summary is consistent with the spacemap log
+ and enable zfs_dbgmsgs for metaslab loading and flushing.
.TE
.sp
* Requires debug build.
@@ -1835,6 +1948,29 @@ Default value: \fB50\fR.
.sp
.ne 2
.na
+\fBzfs_max_log_walking\fR (ulong)
+.ad
+.RS 12n
+The number of past TXGs that the flushing algorithm of the log spacemap
+feature uses to estimate incoming log blocks.
+.sp
+Default value: \fB5\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_max_logsm_summary_length\fR (ulong)
+.ad
+.RS 12n
+Maximum number of rows allowed in the summary of the spacemap log.
+.sp
+Default value: \fB10\fR.
+.RE
+
+.sp
+.ne 2
+.na
\fBzfs_max_recordsize\fR (int)
.ad
.RS 12n
@@ -1865,6 +2001,17 @@ Default value: \fB0\fR.
.sp
.ne 2
.na
+\fBzfs_min_metaslabs_to_flush\fR (ulong)
+.ad
+.RS 12n
+Minimum number of metaslabs to flush per dirty TXG.
+.sp
+Default value: \fB1\fR.
+.RE
+
+.sp
+.ne 2
+.na
\fBzfs_metaslab_fragmentation_threshold\fR (int)
.ad
.RS 12n
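
Taken together, the three zfs_unflushed_log_block_* tunables above can be read as a clamped budget: the percentage-based value, bounded below by the min and above by the max. The sketch below only illustrates that reading (it is not the function this patch adds); with 1,000 metaslabs and the defaults it yields MIN(262144, MAX(1000, 1000 * 400 / 100)) = 4,000 blocks, i.e. about 500MB of 128K log blocks.

/*
 * Illustration only: one plausible way the three block tunables
 * combine into a per-pool log block budget.
 */
static uint64_t
example_log_block_budget(uint64_t total_metaslabs)
{
	uint64_t pct_based =
	    total_metaslabs * zfs_unflushed_log_block_pct / 100;

	return (MIN(MAX(pct_based, zfs_unflushed_log_block_min),
	    zfs_unflushed_log_block_max));
}
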
diff --git a/man/man8/zdb.8 b/man/man8/zdb.8
index 57403cba7..edbf28bf3 100644
--- a/man/man8/zdb.8
+++ b/man/man8/zdb.8
@@ -197,7 +197,8 @@ By default,
.Nm
verifies that all non-free blocks are referenced, which can be very expensive.
.It Fl m
-Display the offset, spacemap, and free space of each metaslab.
+Display the offset, spacemap, and free space of each metaslab, as well as
+all the log spacemaps and their obsolete entry statistics.
.It Fl mm
Also display information about the on-disk free space histogram associated with
each metaslab.
diff --git a/module/zcommon/zfeature_common.c b/module/zcommon/zfeature_common.c
index cb43a19a3..fa9d678a7 100644
--- a/module/zcommon/zfeature_common.c
+++ b/module/zcommon/zfeature_common.c
@@ -349,6 +349,19 @@ zpool_feature_init(void)
ZFEATURE_TYPE_BOOLEAN, NULL);
{
+ static const spa_feature_t log_spacemap_deps[] = {
+ SPA_FEATURE_SPACEMAP_V2,
+ SPA_FEATURE_NONE
+ };
+ zfeature_register(SPA_FEATURE_LOG_SPACEMAP,
+ "com.delphix:log_spacemap", "log_spacemap",
+ "Log metaslab changes on a single spacemap and "
+ "flush them periodically.",
+ ZFEATURE_FLAG_READONLY_COMPAT, ZFEATURE_TYPE_BOOLEAN,
+ log_spacemap_deps);
+ }
+
+ {
static const spa_feature_t large_blocks_deps[] = {
SPA_FEATURE_EXTENSIBLE_DATASET,
SPA_FEATURE_NONE
diff --git a/module/zfs/Makefile.in b/module/zfs/Makefile.in
index a9f1ebdc0..5adea9fb5 100644
--- a/module/zfs/Makefile.in
+++ b/module/zfs/Makefile.in
@@ -76,6 +76,7 @@ $(MODULE)-objs += spa_checkpoint.o
$(MODULE)-objs += spa_config.o
$(MODULE)-objs += spa_errlog.o
$(MODULE)-objs += spa_history.o
+$(MODULE)-objs += spa_log_spacemap.o
$(MODULE)-objs += spa_misc.o
$(MODULE)-objs += spa_stats.o
$(MODULE)-objs += space_map.o
diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c
index 8b55469a5..7a540bdfa 100644
--- a/module/zfs/dmu_objset.c
+++ b/module/zfs/dmu_objset.c
@@ -1483,7 +1483,7 @@ dmu_objset_sync_dnodes(multilist_sublist_t *list, dmu_tx_t *tx)
ASSERT(dn->dn_dbuf->db_data_pending);
/*
* Initialize dn_zio outside dnode_sync() because the
- * meta-dnode needs to set it ouside dnode_sync().
+ * meta-dnode needs to set it outside dnode_sync().
*/
dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
ASSERT(dn->dn_zio);
diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c
index 864376c1e..49e527912 100644
--- a/module/zfs/dsl_pool.c
+++ b/module/zfs/dsl_pool.c
@@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2016 Nexenta Systems, Inc. All rights reserved.
@@ -757,7 +757,7 @@ dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
dp->dp_mos_uncompressed_delta = 0;
}
- if (!multilist_is_empty(mos->os_dirty_dnodes[txg & TXG_MASK])) {
+ if (dmu_objset_is_dirty(mos, txg)) {
dsl_pool_sync_mos(dp, tx);
}
diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c
index 5da929b48..0b22aa875 100644
--- a/module/zfs/metaslab.c
+++ b/module/zfs/metaslab.c
@@ -56,12 +56,21 @@ unsigned long metaslab_aliquot = 512 << 10;
unsigned long metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;
/*
- * Since we can touch multiple metaslabs (and their respective space maps)
- * with each transaction group, we benefit from having a smaller space map
+ * In pools where the log space map feature is not enabled we touch
+ * multiple metaslabs (and their respective space maps) with each
+ * transaction group. Thus, we benefit from having a small space map
* block size since it allows us to issue more I/O operations scattered
- * around the disk.
+ * around the disk. So a sane default for the space map block size
+ * is 8~16K.
*/
-int zfs_metaslab_sm_blksz = (1 << 12);
+int zfs_metaslab_sm_blksz_no_log = (1 << 14);
+
+/*
+ * When the log space map feature is enabled, we accumulate a lot of
+ * changes per metaslab that are flushed once in a while so we benefit
+ * from a bigger block size like 128K for the metaslab space maps.
+ */
+int zfs_metaslab_sm_blksz_with_log = (1 << 17);
/*
* The in-core space map representation is more compact than its on-disk form.
@@ -270,6 +279,7 @@ static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);
static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
+static void metaslab_flush_update(metaslab_t *, dmu_tx_t *);
#ifdef _METASLAB_TRACING
kmem_cache_t *metaslab_alloc_trace_cache;
#endif
@@ -540,67 +550,6 @@ metaslab_compare(const void *x1, const void *x2)
return (AVL_CMP(m1->ms_start, m2->ms_start));
}
-uint64_t
-metaslab_allocated_space(metaslab_t *msp)
-{
- return (msp->ms_allocated_space);
-}
-
-/*
- * Verify that the space accounting on disk matches the in-core range_trees.
- */
-static void
-metaslab_verify_space(metaslab_t *msp, uint64_t txg)
-{
- spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
- uint64_t allocating = 0;
- uint64_t sm_free_space, msp_free_space;
-
- ASSERT(MUTEX_HELD(&msp->ms_lock));
- ASSERT(!msp->ms_condensing);
-
- if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
- return;
-
- /*
- * We can only verify the metaslab space when we're called
- * from syncing context with a loaded metaslab that has an
- * allocated space map. Calling this in non-syncing context
- * does not provide a consistent view of the metaslab since
- * we're performing allocations in the future.
- */
- if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
- !msp->ms_loaded)
- return;
-
- /*
- * Even though the smp_alloc field can get negative (e.g.
- * see vdev_checkpoint_sm), that should never be the case
- * when it come's to a metaslab's space map.
- */
- ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0);
-
- sm_free_space = msp->ms_size - metaslab_allocated_space(msp);
-
- /*
- * Account for future allocations since we would have
- * already deducted that space from the ms_allocatable.
- */
- for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
- allocating +=
- range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
- }
-
- ASSERT3U(msp->ms_deferspace, ==,
- range_tree_space(msp->ms_defer[0]) +
- range_tree_space(msp->ms_defer[1]));
-
- msp_free_space = range_tree_space(msp->ms_allocatable) + allocating +
- msp->ms_deferspace + range_tree_space(msp->ms_freed);
-
- VERIFY3U(sm_free_space, ==, msp_free_space);
-}
-
/*
* ==========================================================================
* Metaslab groups
@@ -689,6 +638,25 @@ metaslab_group_alloc_update(metaslab_group_t *mg)
mutex_exit(&mg->mg_lock);
}
+int
+metaslab_sort_by_flushed(const void *va, const void *vb)
+{
+ const metaslab_t *a = va;
+ const metaslab_t *b = vb;
+
+ int cmp = AVL_CMP(a->ms_unflushed_txg, b->ms_unflushed_txg);
+ if (likely(cmp))
+ return (cmp);
+
+ uint64_t a_vdev_id = a->ms_group->mg_vd->vdev_id;
+ uint64_t b_vdev_id = b->ms_group->mg_vd->vdev_id;
+ cmp = AVL_CMP(a_vdev_id, b_vdev_id);
+ if (cmp)
+ return (cmp);
+
+ return (AVL_CMP(a->ms_id, b->ms_id));
+}
+
metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
{
@@ -703,7 +671,7 @@ metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
mg->mg_secondaries = kmem_zalloc(allocators * sizeof (metaslab_t *),
KM_SLEEP);
avl_create(&mg->mg_metaslab_tree, metaslab_compare,
- sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
+ sizeof (metaslab_t), offsetof(metaslab_t, ms_group_node));
mg->mg_vd = vd;
mg->mg_class = mc;
mg->mg_activation_count = 0;
@@ -900,7 +868,6 @@ metaslab_group_histogram_verify(metaslab_group_t *mg)
for (int m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
- ASSERT(msp != NULL);
/* skip if not active or not a member */
if (msp->ms_sm == NULL || msp->ms_group != mg)
@@ -1454,6 +1421,101 @@ metaslab_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
* ==========================================================================
*/
+/*
+ * Wait for any in-progress metaslab loads to complete.
+ */
+void
+metaslab_load_wait(metaslab_t *msp)
+{
+ ASSERT(MUTEX_HELD(&msp->ms_lock));
+
+ while (msp->ms_loading) {
+ ASSERT(!msp->ms_loaded);
+ cv_wait(&msp->ms_load_cv, &msp->ms_lock);
+ }
+}
+
+/*
+ * Wait for any in-progress flushing to complete.
+ */
+void
+metaslab_flush_wait(metaslab_t *msp)
+{
+ ASSERT(MUTEX_HELD(&msp->ms_lock));
+
+ while (msp->ms_flushing)
+ cv_wait(&msp->ms_flush_cv, &msp->ms_lock);
+}
+
+uint64_t
+metaslab_allocated_space(metaslab_t *msp)
+{
+ return (msp->ms_allocated_space);
+}
+
+/*
+ * Verify that the space accounting on disk matches the in-core range_trees.
+ */
+static void
+metaslab_verify_space(metaslab_t *msp, uint64_t txg)
+{
+ spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
+ uint64_t allocating = 0;
+ uint64_t sm_free_space, msp_free_space;
+
+ ASSERT(MUTEX_HELD(&msp->ms_lock));
+ ASSERT(!msp->ms_condensing);
+
+ if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
+ return;
+
+ /*
+ * We can only verify the metaslab space when we're called
+ * from syncing context with a loaded metaslab that has an
+ * allocated space map. Calling this in non-syncing context
+ * does not provide a consistent view of the metaslab since
+ * we're performing allocations in the future.
+ */
+ if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
+ !msp->ms_loaded)
+ return;
+
+ /*
+ * Even though the smp_alloc field can get negative,
+ * when it comes to a metaslab's space map, that should
+ * never be the case.
+ */
+ ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0);
+
+ ASSERT3U(space_map_allocated(msp->ms_sm), >=,
+ range_tree_space(msp->ms_unflushed_frees));
+
+ ASSERT3U(metaslab_allocated_space(msp), ==,
+ space_map_allocated(msp->ms_sm) +
+ range_tree_space(msp->ms_unflushed_allocs) -
+ range_tree_space(msp->ms_unflushed_frees));
+
+ sm_free_space = msp->ms_size - metaslab_allocated_space(msp);
+
+ /*
+ * Account for future allocations since we would have
+ * already deducted that space from the ms_allocatable.
+ */
+ for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
+ allocating +=
+ range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
+ }
+
+ ASSERT3U(msp->ms_deferspace, ==,
+ range_tree_space(msp->ms_defer[0]) +
+ range_tree_space(msp->ms_defer[1]));
+
+ msp_free_space = range_tree_space(msp->ms_allocatable) + allocating +
+ msp->ms_deferspace + range_tree_space(msp->ms_freed);
+
+ VERIFY3U(sm_free_space, ==, msp_free_space);
+}
+
static void
metaslab_aux_histograms_clear(metaslab_t *msp)
{
@@ -1651,20 +1713,6 @@ metaslab_verify_weight_and_frag(metaslab_t *msp)
VERIFY3U(msp->ms_weight, ==, weight);
}
-/*
- * Wait for any in-progress metaslab loads to complete.
- */
-static void
-metaslab_load_wait(metaslab_t *msp)
-{
- ASSERT(MUTEX_HELD(&msp->ms_lock));
-
- while (msp->ms_loading) {
- ASSERT(!msp->ms_loaded);
- cv_wait(&msp->ms_load_cv, &msp->ms_lock);
- }
-}
-
static int
metaslab_load_impl(metaslab_t *msp)
{
@@ -1679,13 +1727,19 @@ metaslab_load_impl(metaslab_t *msp)
* are reading the space map. Therefore, metaslab_sync() and
* metaslab_sync_done() can run at the same time as we do.
*
- * metaslab_sync() can append to the space map while we are loading.
- * Therefore we load only entries that existed when we started the
- * load. Additionally, metaslab_sync_done() has to wait for the load
- * to complete because there are potential races like metaslab_load()
- * loading parts of the space map that are currently being appended
- * by metaslab_sync(). If we didn't, the ms_allocatable would have
- * entries that metaslab_sync_done() would try to re-add later.
+ * If we are using the log space maps, metaslab_sync() can't write to
+ * the metaslab's space map while we are loading as we only write to
+ * it when we are flushing the metaslab, and that can't happen while
+ * we are loading it.
+ *
+ * If we are not using log space maps though, metaslab_sync() can
+ * append to the space map while we are loading. Therefore we load
+ * only entries that existed when we started the load. Additionally,
+ * metaslab_sync_done() has to wait for the load to complete because
+ * there are potential races like metaslab_load() loading parts of the
+ * space map that are currently being appended by metaslab_sync(). If
+ * we didn't, the ms_allocatable would have entries that
+ * metaslab_sync_done() would try to re-add later.
*
* That's why before dropping the lock we remember the synced length
* of the metaslab and read up to that point of the space map,
@@ -1695,6 +1749,7 @@ metaslab_load_impl(metaslab_t *msp)
uint64_t length = msp->ms_synced_length;
mutex_exit(&msp->ms_lock);
+ hrtime_t load_start = gethrtime();
if (msp->ms_sm != NULL) {
error = space_map_load_length(msp->ms_sm, msp->ms_allocatable,
SM_FREE, length);
@@ -1706,18 +1761,37 @@ metaslab_load_impl(metaslab_t *msp)
*/
range_tree_add(msp->ms_allocatable,
msp->ms_start, msp->ms_size);
+
+ if (msp->ms_freed != NULL) {
+ /*
+ * If the ms_sm doesn't exist, this means that this
+ * metaslab hasn't gone through metaslab_sync() and
+ * thus has never been dirtied. So we shouldn't
+ * expect any unflushed allocs or frees from previous
+ * TXGs.
+ *
+ * Note: ms_freed and all the other trees except for
+ * the ms_allocatable, can be NULL at this point only
+ * if this is a new metaslab of a vdev that just got
+ * expanded.
+ */
+ ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
+ ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
+ }
}
/*
* We need to grab the ms_sync_lock to prevent metaslab_sync() from
- * changing the ms_sm and the metaslab's range trees while we are
- * about to use them and populate the ms_allocatable. The ms_lock
- * is insufficient for this because metaslab_sync() doesn't hold
- * the ms_lock while writing the ms_checkpointing tree to disk.
+ * changing the ms_sm (or log_sm) and the metaslab's range trees
+ * while we are about to use them and populate the ms_allocatable.
+ * The ms_lock is insufficient for this because metaslab_sync() doesn't
+ * hold the ms_lock while writing the ms_checkpointing tree to disk.
*/
mutex_enter(&msp->ms_sync_lock);
mutex_enter(&msp->ms_lock);
+
ASSERT(!msp->ms_condensing);
+ ASSERT(!msp->ms_flushing);
if (error != 0) {
mutex_exit(&msp->ms_sync_lock);
@@ -1728,10 +1802,60 @@ metaslab_load_impl(metaslab_t *msp)
msp->ms_loaded = B_TRUE;
/*
- * The ms_allocatable contains the segments that exist in the
- * ms_defer trees [see ms_synced_length]. Thus we need to remove
- * them from ms_allocatable as they will be added again in
+ * Apply all the unflushed changes to ms_allocatable right
+ * away so any manipulations we do below have a clear view
+ * of what is allocated and what is free.
+ */
+ range_tree_walk(msp->ms_unflushed_allocs,
+ range_tree_remove, msp->ms_allocatable);
+ range_tree_walk(msp->ms_unflushed_frees,
+ range_tree_add, msp->ms_allocatable);
+
+ msp->ms_loaded = B_TRUE;
+
+ ASSERT3P(msp->ms_group, !=, NULL);
+ spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
+ if (spa_syncing_log_sm(spa) != NULL) {
+ ASSERT(spa_feature_is_enabled(spa,
+ SPA_FEATURE_LOG_SPACEMAP));
+
+ /*
+ * If we use a log space map we add all the segments
+ * that are in ms_unflushed_frees so they are available
+ * for allocation.
+ *
+ * ms_allocatable needs to contain all free segments
+ * that are ready for allocations (thus not segments
+ * from ms_freeing, ms_freed, and the ms_defer trees).
+ * But if we grab the lock in this code path at a sync
+ * pass later that 1, then it also contains the
+ * segments of ms_freed (they were added to it earlier
+ * in this path through ms_unflushed_frees). So we
+ * need to remove all the segments that exist in
+ * ms_freed from ms_allocatable as they will be added
+ * later in metaslab_sync_done().
+ *
+ * When there's no log space map, the ms_allocatable
+ * correctly doesn't contain any segments that exist
+ * in ms_freed [see ms_synced_length].
+ */
+ range_tree_walk(msp->ms_freed,
+ range_tree_remove, msp->ms_allocatable);
+ }
+
+ /*
+ * If we are not using the log space map, ms_allocatable
+ * contains the segments that exist in the ms_defer trees
+ * [see ms_synced_length]. Thus we need to remove them
+ * from ms_allocatable as they will be added again in
* metaslab_sync_done().
+ *
+ * If we are using the log space map, ms_allocatable still
+ * contains the segments that exist in the ms_defer trees.
+ * This is not because it read them from the ms_sm, but
+ * because these segments are part of ms_unflushed_frees,
+ * which we added to ms_allocatable earlier in this code path.
*/
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
range_tree_walk(msp->ms_defer[t],
@@ -1756,10 +1880,26 @@ metaslab_load_impl(metaslab_t *msp)
ASSERT3U(weight, <=, msp->ms_weight);
msp->ms_max_size = metaslab_block_maxsize(msp);
- spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
+ hrtime_t load_end = gethrtime();
+ if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) {
+ zfs_dbgmsg("loading: txg %llu, spa %s, vdev_id %llu, "
+ "ms_id %llu, smp_length %llu, "
+ "unflushed_allocs %llu, unflushed_frees %llu, "
+ "freed %llu, defer %llu + %llu, "
+ "loading_time %lld ms",
+ spa_syncing_txg(spa), spa_name(spa),
+ msp->ms_group->mg_vd->vdev_id, msp->ms_id,
+ space_map_length(msp->ms_sm),
+ range_tree_space(msp->ms_unflushed_allocs),
+ range_tree_space(msp->ms_unflushed_frees),
+ range_tree_space(msp->ms_freed),
+ range_tree_space(msp->ms_defer[0]),
+ range_tree_space(msp->ms_defer[1]),
+ (longlong_t)((load_end - load_start) / 1000000));
+ }
+
metaslab_verify_space(msp, spa_syncing_txg(spa));
mutex_exit(&msp->ms_sync_lock);
-
return (0);
}
@@ -1778,8 +1918,32 @@ metaslab_load(metaslab_t *msp)
VERIFY(!msp->ms_loading);
ASSERT(!msp->ms_condensing);
+ /*
+ * We set the loading flag BEFORE potentially dropping the lock to
+ * wait for an ongoing flush (see ms_flushing below). This way other
+ * threads know that there is already a thread that is loading this
+ * metaslab.
+ */
msp->ms_loading = B_TRUE;
+
+ /*
+ * Wait for any in-progress flushing to finish as we drop the ms_lock
+ * both here (during space_map_load()) and in metaslab_flush() (when
+ * we flush our changes to the ms_sm).
+ */
+ if (msp->ms_flushing)
+ metaslab_flush_wait(msp);
+
+ /*
+ * In case we were waiting for the metaslab to be flushed (and
+ * thus temporarily dropped the ms_lock), ensure that no one
+ * else loaded the metaslab in the meantime.
+ */
+ ASSERT(!msp->ms_loaded);
+
int error = metaslab_load_impl(msp);
+
+ ASSERT(MUTEX_HELD(&msp->ms_lock));
msp->ms_loading = B_FALSE;
cv_broadcast(&msp->ms_load_cv);
@@ -1806,7 +1970,7 @@ metaslab_unload(metaslab_t *msp)
* have their weights calculated from the space map histograms, while
* loaded ones have it calculated from their in-core range tree
* [see metaslab_load()]. This way, the weight reflects the information
- * available in-core, whether it is loaded or not
+ * available in-core, whether it is loaded or not.
*
* If ms_group == NULL means that we came here from metaslab_fini(),
* at which point it doesn't make sense for us to do the recalculation
@@ -1816,7 +1980,7 @@ metaslab_unload(metaslab_t *msp)
metaslab_recalculate_weight_and_sort(msp);
}
-static void
+void
metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta,
int64_t defer_delta, int64_t space_delta)
{
@@ -1830,8 +1994,8 @@ metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta,
}
int
-metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
- metaslab_t **msp)
+metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object,
+ uint64_t txg, metaslab_t **msp)
{
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
@@ -1843,6 +2007,7 @@ metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
+ cv_init(&ms->ms_flush_cv, NULL, CV_DEFAULT, NULL);
ms->ms_id = id;
ms->ms_start = id << vd->vdev_ms_shift;
@@ -1905,17 +2070,6 @@ metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
metaslab_allocated_space(ms), 0, 0);
}
- /*
- * If metaslab_debug_load is set and we're initializing a metaslab
- * that has an allocated space map object then load the space map
- * so that we can verify frees.
- */
- if (metaslab_debug_load && ms->ms_sm != NULL) {
- mutex_enter(&ms->ms_lock);
- VERIFY0(metaslab_load(ms));
- mutex_exit(&ms->ms_lock);
- }
-
if (txg != 0) {
vdev_dirty(vd, 0, NULL, txg);
vdev_dirty(vd, VDD_METASLAB, ms, txg);
@@ -1926,11 +2080,42 @@ metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
return (0);
}
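+
+/*
+ * Remove the metaslab from the pool-wide log space map bookkeeping as it is
+ * being destroyed: take it out of spa_metaslabs_by_flushed and drop its
+ * reference from the per-log and log summary metaslab counts.
+ */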
+static void
+metaslab_fini_flush_data(metaslab_t *msp)
+{
+ spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
+
+ if (metaslab_unflushed_txg(msp) == 0) {
+ ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL),
+ ==, NULL);
+ return;
+ }
+ ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
+
+ mutex_enter(&spa->spa_flushed_ms_lock);
+ avl_remove(&spa->spa_metaslabs_by_flushed, msp);
+ mutex_exit(&spa->spa_flushed_ms_lock);
+
+ spa_log_sm_decrement_mscount(spa, metaslab_unflushed_txg(msp));
+ spa_log_summary_decrement_mscount(spa, metaslab_unflushed_txg(msp));
+}
+
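+/*
+ * The in-memory cost of a metaslab's unflushed changes is simply the number
+ * of segments in its two unflushed range trees times the size of a
+ * range_seg_t; this is the quantity tracked pool-wide in sus_memused.
+ */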
+uint64_t
+metaslab_unflushed_changes_memused(metaslab_t *ms)
+{
+ return ((range_tree_numsegs(ms->ms_unflushed_allocs) +
+ range_tree_numsegs(ms->ms_unflushed_frees)) *
+ sizeof (range_seg_t));
+}
+
void
metaslab_fini(metaslab_t *msp)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
+ spa_t *spa = vd->vdev_spa;
+
+ metaslab_fini_flush_data(msp);
metaslab_group_remove(mg, msp);
@@ -1940,13 +2125,22 @@ metaslab_fini(metaslab_t *msp)
-metaslab_allocated_space(msp), 0, -msp->ms_size);
space_map_close(msp->ms_sm);
+ msp->ms_sm = NULL;
metaslab_unload(msp);
-
range_tree_destroy(msp->ms_allocatable);
range_tree_destroy(msp->ms_freeing);
range_tree_destroy(msp->ms_freed);
+ ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
+ metaslab_unflushed_changes_memused(msp));
+ spa->spa_unflushed_stats.sus_memused -=
+ metaslab_unflushed_changes_memused(msp);
+ range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
+ range_tree_destroy(msp->ms_unflushed_allocs);
+ range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
+ range_tree_destroy(msp->ms_unflushed_frees);
+
for (int t = 0; t < TXG_SIZE; t++) {
range_tree_destroy(msp->ms_allocating[t]);
}
@@ -1966,6 +2160,7 @@ metaslab_fini(metaslab_t *msp)
mutex_exit(&msp->ms_lock);
cv_destroy(&msp->ms_load_cv);
+ cv_destroy(&msp->ms_flush_cv);
mutex_destroy(&msp->ms_lock);
mutex_destroy(&msp->ms_sync_lock);
ASSERT3U(msp->ms_allocator, ==, -1);
@@ -2207,9 +2402,9 @@ metaslab_weight_from_range_tree(metaslab_t *msp)
}
/*
- * Calculate the weight based on the on-disk histogram. This should only
- * be called after a sync pass has completely finished since the on-disk
- * information is updated in metaslab_sync().
+ * Calculate the weight based on the on-disk histogram. Should be applied
+ * only to unloaded metaslabs (i.e. no incoming allocations) in order to
+ * give results consistent with the on-disk state.
*/
static uint64_t
metaslab_weight_from_spacemap(metaslab_t *msp)
@@ -2283,7 +2478,6 @@ metaslab_segment_weight(metaslab_t *msp)
}
WEIGHT_SET_ACTIVE(weight, 0);
ASSERT(!WEIGHT_IS_SPACEBASED(weight));
-
return (weight);
}
@@ -2651,18 +2845,19 @@ metaslab_group_preload(metaslab_group_t *mg)
}
/*
- * Determine if the space map's on-disk footprint is past our tolerance
- * for inefficiency. We would like to use the following criteria to make
- * our decision:
+ * Determine if the space map's on-disk footprint is past our tolerance for
+ * inefficiency. We would like to use the following criteria to make our
+ * decision:
*
- * 1. The size of the space map object should not dramatically increase as a
- * result of writing out the free space range tree.
+ * 1. Do not condense if the size of the space map object would dramatically
+ * increase as a result of writing out the free space range tree.
*
- * 2. The minimal on-disk space map representation is zfs_condense_pct/100
- * times the size than the free space range tree representation
- * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB).
+ * 2. Condense if the on-disk space map representation is at least
+ * zfs_condense_pct/100 times the size of the optimal representation
+ * (i.e. zfs_condense_pct = 110 and in-core = 1MB, optimal = 1.1MB).
*
- * 3. The on-disk size of the space map should actually decrease.
+ * 3. Do not condense if the on-disk size of the space map does not actually
+ * decrease.
*
* Unfortunately, we cannot compute the on-disk size of the space map in this
* context because we cannot accurately compute the effects of compression, etc.
@@ -2676,27 +2871,11 @@ metaslab_should_condense(metaslab_t *msp)
space_map_t *sm = msp->ms_sm;
vdev_t *vd = msp->ms_group->mg_vd;
uint64_t vdev_blocksize = 1 << vd->vdev_ashift;
- uint64_t current_txg = spa_syncing_txg(vd->vdev_spa);
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loaded);
-
- /*
- * Allocations and frees in early passes are generally more space
- * efficient (in terms of blocks described in space map entries)
- * than the ones in later passes (e.g. we don't compress after
- * sync pass 5) and condensing a metaslab multiple times in a txg
- * could degrade performance.
- *
- * Thus we prefer condensing each metaslab at most once every txg at
- * the earliest sync pass possible. If a metaslab is eligible for
- * condensing again after being considered for condensing within the
- * same txg, it will hopefully be dirty in the next txg where it will
- * be condensed at an earlier pass.
- */
- if (msp->ms_condense_checked_txg == current_txg)
- return (B_FALSE);
- msp->ms_condense_checked_txg = current_txg;
+ ASSERT(sm != NULL);
+ ASSERT3U(spa_sync_pass(vd->vdev_spa), ==, 1);
/*
* We always condense metaslabs that are empty and metaslabs for
@@ -2706,97 +2885,343 @@ metaslab_should_condense(metaslab_t *msp)
msp->ms_condense_wanted)
return (B_TRUE);
- uint64_t object_size = space_map_length(msp->ms_sm);
+ uint64_t record_size = MAX(sm->sm_blksz, vdev_blocksize);
+ uint64_t object_size = space_map_length(sm);
uint64_t optimal_size = space_map_estimate_optimal_size(sm,
msp->ms_allocatable, SM_NO_VDEVID);
- dmu_object_info_t doi;
- dmu_object_info_from_db(sm->sm_dbuf, &doi);
- uint64_t record_size = MAX(doi.doi_data_block_size, vdev_blocksize);
-
return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
object_size > zfs_metaslab_condense_block_threshold * record_size);
}
/*
* Condense the on-disk space map representation to its minimized form.
- * The minimized form consists of a small number of allocations followed by
- * the entries of the free range tree.
+ * The minimized form consists of a small number of allocations followed
+ * by the entries of the free range tree (ms_allocatable). The condensed
+ * spacemap contains all the entries of previous TXGs (including those in
+ * the pool-wide log spacemaps; thus this is effectively a superset of
+ * metaslab_flush()), but this TXG's entries still need to be written.
*/
static void
-metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
+metaslab_condense(metaslab_t *msp, dmu_tx_t *tx)
{
range_tree_t *condense_tree;
space_map_t *sm = msp->ms_sm;
+ uint64_t txg = dmu_tx_get_txg(tx);
+ spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loaded);
+ ASSERT(msp->ms_sm != NULL);
+ /*
+ * In order to condense the space map, we need to change it so it
+ * only describes which segments are currently allocated and free.
+ *
+ * All the current free space resides in the ms_allocatable, all
+ * the ms_defer trees, and all the ms_allocating trees. We ignore
+ * ms_freed because it is empty because we're in sync pass 1. We
+ * ignore ms_freeing because these changes are not yet reflected
+ * in the spacemap (they will be written later this txg).
+ *
+ * So to truncate the space map to represent all the entries of
+ * previous TXGs we do the following:
+ *
+ * 1] We create a range tree (condense tree) that is 100% allocated.
+ * 2] We remove from it all segments found in the ms_defer trees
+ * as those segments are marked as free in the original space
+ * map. We do the same with the ms_allocating trees for the same
+ * reason. Removing these segments should be a relatively
+ * inexpensive operation since we expect these trees to have a
+ * small number of nodes.
+ * 3] We vacate any unflushed allocs as they should already exist
+ * in the condense tree. Then we vacate any unflushed frees as
+ * they should already be part of ms_allocatable.
+ * 4] At this point, we would ideally like to remove all segments
+ * in the ms_allocatable tree from the condense tree. This way
+ * we would write all the entries of the condense tree as the
+ * condensed space map, which would only contain allocated
+ * segments with everything else assumed to be freed.
+ *
+ * Doing so can be prohibitively expensive as ms_allocatable can
+ * be large, and therefore computationally expensive to subtract
+ * from the condense_tree. Instead we first sync out the
+ * condense_tree and then the ms_allocatable, in the condensed
+ * space map. While this is not optimal, it is typically close to
+ * optimal and more importantly much cheaper to compute.
+ *
+ * 5] Finally, as both of the unflushed trees were written to our
+ * new and condensed metaslab space map, we basically flushed
+ * all the unflushed changes to disk, thus we call
+ * metaslab_flush_update().
+ */
+ ASSERT3U(spa_sync_pass(spa), ==, 1);
+ ASSERT(range_tree_is_empty(msp->ms_freed)); /* since it is pass 1 */
zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, "
"spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
- msp->ms_group->mg_vd->vdev_spa->spa_name,
- space_map_length(msp->ms_sm),
+ spa->spa_name, space_map_length(msp->ms_sm),
avl_numnodes(&msp->ms_allocatable->rt_root),
msp->ms_condense_wanted ? "TRUE" : "FALSE");
msp->ms_condense_wanted = B_FALSE;
- /*
- * Create an range tree that is 100% allocated. We remove segments
- * that have been freed in this txg, any deferred frees that exist,
- * and any allocation in the future. Removing segments should be
- * a relatively inexpensive operation since we expect these trees to
- * have a small number of nodes.
- */
condense_tree = range_tree_create(NULL, NULL);
range_tree_add(condense_tree, msp->ms_start, msp->ms_size);
- range_tree_walk(msp->ms_freeing, range_tree_remove, condense_tree);
- range_tree_walk(msp->ms_freed, range_tree_remove, condense_tree);
-
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
range_tree_walk(msp->ms_defer[t],
range_tree_remove, condense_tree);
}
- for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
+ for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
range_tree_remove, condense_tree);
}
+ ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
+ metaslab_unflushed_changes_memused(msp));
+ spa->spa_unflushed_stats.sus_memused -=
+ metaslab_unflushed_changes_memused(msp);
+ range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
+ range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
+
/*
- * We're about to drop the metaslab's lock thus allowing
- * other consumers to change it's content. Set the
- * metaslab's ms_condensing flag to ensure that
- * allocations on this metaslab do not occur while we're
- * in the middle of committing it to disk. This is only critical
- * for ms_allocatable as all other range trees use per txg
+ * We're about to drop the metaslab's lock thus allowing other
+ * consumers to change its content. Set the metaslab's ms_condensing
+ * flag to ensure that allocations on this metaslab do not occur
+ * while we're in the middle of committing it to disk. This is only
+ * critical for ms_allocatable as all other range trees use per TXG
* views of their content.
*/
msp->ms_condensing = B_TRUE;
mutex_exit(&msp->ms_lock);
- space_map_truncate(sm, zfs_metaslab_sm_blksz, tx);
+ uint64_t object = space_map_object(msp->ms_sm);
+ space_map_truncate(sm,
+ spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
+ zfs_metaslab_sm_blksz_with_log : zfs_metaslab_sm_blksz_no_log, tx);
+
+ /*
+ * space_map_truncate() may have reallocated the spacemap object.
+ * If so, update the vdev_ms_array.
+ */
+ if (space_map_object(msp->ms_sm) != object) {
+ object = space_map_object(msp->ms_sm);
+ dmu_write(spa->spa_meta_objset,
+ msp->ms_group->mg_vd->vdev_ms_array, sizeof (uint64_t) *
+ msp->ms_id, sizeof (uint64_t), &object, tx);
+ }
/*
- * While we would ideally like to create a space map representation
- * that consists only of allocation records, doing so can be
- * prohibitively expensive because the in-core free tree can be
- * large, and therefore computationally expensive to subtract
- * from the condense_tree. Instead we sync out two trees, a cheap
- * allocation only tree followed by the in-core free tree. While not
- * optimal, this is typically close to optimal, and much cheaper to
- * compute.
+ * Note:
+ * When the log space map feature is enabled, each space map will
+ * always have ALLOCS followed by FREES for each sync pass. This is
+ * typically true even when the log space map feature is disabled,
+ * except in the case where a metaslab goes through metaslab_sync()
+ * and gets condensed. In that case the metaslab's space map will have
+ * ALLOCS followed by FREES (due to condensing) followed by ALLOCS
+ * followed by FREES (due to space_map_write() in metaslab_sync()) for
+ * sync pass 1.
*/
space_map_write(sm, condense_tree, SM_ALLOC, SM_NO_VDEVID, tx);
+ space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
+
range_tree_vacate(condense_tree, NULL, NULL);
range_tree_destroy(condense_tree);
-
- space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
mutex_enter(&msp->ms_lock);
+
msp->ms_condensing = B_FALSE;
+ metaslab_flush_update(msp, tx);
+}
+
+/*
+ * Called when the metaslab has been flushed (its own spacemap now reflects
+ * all the contents of the pool-wide spacemap log). Updates the metaslab's
+ * metadata and any related pool-wide log space map data (e.g. summary,
+ * obsolete logs, etc.) to reflect that.
+ */
+static void
+metaslab_flush_update(metaslab_t *msp, dmu_tx_t *tx)
+{
+ metaslab_group_t *mg = msp->ms_group;
+ spa_t *spa = mg->mg_vd->vdev_spa;
+
+ ASSERT(MUTEX_HELD(&msp->ms_lock));
+
+ ASSERT3U(spa_sync_pass(spa), ==, 1);
+ ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
+ ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
+
+ /*
+ * Just because a metaslab got flushed doesn't mean that
+ * it will pass through metaslab_sync_done(). Thus, make sure to
+ * update ms_synced_length here in case it doesn't.
+ */
+ msp->ms_synced_length = space_map_length(msp->ms_sm);
+
+ /*
+ * We may end up here from metaslab_condense() without the
+ * feature being active. In that case this is a no-op.
+ */
+ if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
+ return;
+
+ ASSERT(spa_syncing_log_sm(spa) != NULL);
+ ASSERT(msp->ms_sm != NULL);
+ ASSERT(metaslab_unflushed_txg(msp) != 0);
+ ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), ==, msp);
+
+ VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(spa));
+
+ /* update metaslab's position in our flushing tree */
+ uint64_t ms_prev_flushed_txg = metaslab_unflushed_txg(msp);
+ mutex_enter(&spa->spa_flushed_ms_lock);
+ avl_remove(&spa->spa_metaslabs_by_flushed, msp);
+ metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
+ avl_add(&spa->spa_metaslabs_by_flushed, msp);
+ mutex_exit(&spa->spa_flushed_ms_lock);
+
+ /* update metaslab counts of spa_log_sm_t nodes */
+ spa_log_sm_decrement_mscount(spa, ms_prev_flushed_txg);
+ spa_log_sm_increment_current_mscount(spa);
+
+ /* cleanup obsolete logs if any */
+ uint64_t log_blocks_before = spa_log_sm_nblocks(spa);
+ spa_cleanup_old_sm_logs(spa, tx);
+ uint64_t log_blocks_after = spa_log_sm_nblocks(spa);
+ VERIFY3U(log_blocks_after, <=, log_blocks_before);
+
+ /* update log space map summary */
+ uint64_t blocks_gone = log_blocks_before - log_blocks_after;
+ spa_log_summary_add_flushed_metaslab(spa);
+ spa_log_summary_decrement_mscount(spa, ms_prev_flushed_txg);
+ spa_log_summary_decrement_blkcount(spa, blocks_gone);
+}
+
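+/*
+ * Write the metaslab's unflushed allocs and frees out to its own space map,
+ * making the corresponding entries in the older log space maps obsolete. If
+ * the metaslab is eligible for condensing we condense it instead, which has
+ * the same effect. Returns B_FALSE only if the metaslab is currently loading
+ * and therefore cannot be flushed right now.
+ */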
+boolean_t
+metaslab_flush(metaslab_t *msp, dmu_tx_t *tx)
+{
+ spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
+
+ ASSERT(MUTEX_HELD(&msp->ms_lock));
+ ASSERT3U(spa_sync_pass(spa), ==, 1);
+ ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
+
+ ASSERT(msp->ms_sm != NULL);
+ ASSERT(metaslab_unflushed_txg(msp) != 0);
+ ASSERT(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL) != NULL);
+
+ /*
+ * There is nothing wrong with flushing the same metaslab twice, as
+ * this codepath should work in that case. However, the current
+ * flushing scheme makes sure to avoid this situation as we would be
+ * making all these calls without having anything meaningful to write
+ * to disk. We assert this behavior here.
+ */
+ ASSERT3U(metaslab_unflushed_txg(msp), <, dmu_tx_get_txg(tx));
+
+ /*
+ * We cannot flush while loading, because then we would
+ * not load the ms_unflushed_{allocs,frees}.
+ */
+ if (msp->ms_loading)
+ return (B_FALSE);
+
+ metaslab_verify_space(msp, dmu_tx_get_txg(tx));
+ metaslab_verify_weight_and_frag(msp);
+
+ /*
+ * Metaslab condensing is effectively flushing. Therefore if the
+ * metaslab can be condensed we can just condense it instead of
+ * flushing it.
+ *
+ * Note that metaslab_condense() does call metaslab_flush_update()
+ * so we can just return immediately after condensing. We also
+ * don't need to care about setting ms_flushing or broadcasting
+ * ms_flush_cv, even if we temporarily drop the ms_lock in
+ * metaslab_condense(), as the metaslab is already loaded.
+ */
+ if (msp->ms_loaded && metaslab_should_condense(msp)) {
+ metaslab_group_t *mg = msp->ms_group;
+
+ /*
+ * For all histogram operations below refer to the
+ * comments of metaslab_sync() where we follow a
+ * similar procedure.
+ */
+ metaslab_group_histogram_verify(mg);
+ metaslab_class_histogram_verify(mg->mg_class);
+ metaslab_group_histogram_remove(mg, msp);
+
+ metaslab_condense(msp, tx);
+
+ space_map_histogram_clear(msp->ms_sm);
+ space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
+ ASSERT(range_tree_is_empty(msp->ms_freed));
+ for (int t = 0; t < TXG_DEFER_SIZE; t++) {
+ space_map_histogram_add(msp->ms_sm,
+ msp->ms_defer[t], tx);
+ }
+ metaslab_aux_histograms_update(msp);
+
+ metaslab_group_histogram_add(mg, msp);
+ metaslab_group_histogram_verify(mg);
+ metaslab_class_histogram_verify(mg->mg_class);
+
+ metaslab_verify_space(msp, dmu_tx_get_txg(tx));
+
+ /*
+ * Since we recreated the histogram (and potentially
+ * the ms_sm too while condensing) ensure that the
+ * weight is updated too because we are not guaranteed
+ * that this metaslab is dirty and will go through
+ * metaslab_sync_done().
+ */
+ metaslab_recalculate_weight_and_sort(msp);
+ return (B_TRUE);
+ }
+
+ msp->ms_flushing = B_TRUE;
+ uint64_t sm_len_before = space_map_length(msp->ms_sm);
+
+ mutex_exit(&msp->ms_lock);
+ space_map_write(msp->ms_sm, msp->ms_unflushed_allocs, SM_ALLOC,
+ SM_NO_VDEVID, tx);
+ space_map_write(msp->ms_sm, msp->ms_unflushed_frees, SM_FREE,
+ SM_NO_VDEVID, tx);
+ mutex_enter(&msp->ms_lock);
+
+ uint64_t sm_len_after = space_map_length(msp->ms_sm);
+ if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) {
+ zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, "
+ "ms_id %llu, unflushed_allocs %llu, unflushed_frees %llu, "
+ "appended %llu bytes", dmu_tx_get_txg(tx), spa_name(spa),
+ msp->ms_group->mg_vd->vdev_id, msp->ms_id,
+ range_tree_space(msp->ms_unflushed_allocs),
+ range_tree_space(msp->ms_unflushed_frees),
+ (sm_len_after - sm_len_before));
+ }
+
+ ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
+ metaslab_unflushed_changes_memused(msp));
+ spa->spa_unflushed_stats.sus_memused -=
+ metaslab_unflushed_changes_memused(msp);
+ range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
+ range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
+
+ metaslab_verify_space(msp, dmu_tx_get_txg(tx));
+ metaslab_verify_weight_and_frag(msp);
+
+ metaslab_flush_update(msp, tx);
+
+ metaslab_verify_space(msp, dmu_tx_get_txg(tx));
+ metaslab_verify_weight_and_frag(msp);
+
+ msp->ms_flushing = B_FALSE;
+ cv_broadcast(&msp->ms_flush_cv);
+ return (B_TRUE);
}
/*
@@ -2811,7 +3236,6 @@ metaslab_sync(metaslab_t *msp, uint64_t txg)
objset_t *mos = spa_meta_objset(spa);
range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
dmu_tx_t *tx;
- uint64_t object = space_map_object(msp->ms_sm);
ASSERT(!vd->vdev_ishole);
@@ -2858,25 +3282,53 @@ metaslab_sync(metaslab_t *msp, uint64_t txg)
*/
tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
- if (msp->ms_sm == NULL) {
- uint64_t new_object;
+ /*
+ * Generate a log space map if one doesn't exist already.
+ */
+ spa_generate_syncing_log_sm(spa, tx);
- new_object = space_map_alloc(mos, zfs_metaslab_sm_blksz, tx);
+ if (msp->ms_sm == NULL) {
+ uint64_t new_object = space_map_alloc(mos,
+ spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
+ zfs_metaslab_sm_blksz_with_log :
+ zfs_metaslab_sm_blksz_no_log, tx);
VERIFY3U(new_object, !=, 0);
+ dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
+ msp->ms_id, sizeof (uint64_t), &new_object, tx);
+
VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
msp->ms_start, msp->ms_size, vd->vdev_ashift));
-
ASSERT(msp->ms_sm != NULL);
+
+ ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
+ ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
ASSERT0(metaslab_allocated_space(msp));
}
+ if (metaslab_unflushed_txg(msp) == 0 &&
+ spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
+ ASSERT(spa_syncing_log_sm(spa) != NULL);
+
+ metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
+ spa_log_sm_increment_current_mscount(spa);
+ spa_log_summary_add_flushed_metaslab(spa);
+
+ ASSERT(msp->ms_sm != NULL);
+ mutex_enter(&spa->spa_flushed_ms_lock);
+ avl_add(&spa->spa_metaslabs_by_flushed, msp);
+ mutex_exit(&spa->spa_flushed_ms_lock);
+
+ ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
+ ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
+ }
+
if (!range_tree_is_empty(msp->ms_checkpointing) &&
vd->vdev_checkpoint_sm == NULL) {
ASSERT(spa_has_checkpoint(spa));
uint64_t new_object = space_map_alloc(mos,
- vdev_standard_sm_blksz, tx);
+ zfs_vdev_standard_sm_blksz, tx);
VERIFY3U(new_object, !=, 0);
VERIFY0(space_map_open(&vd->vdev_checkpoint_sm,
@@ -2905,10 +3357,39 @@ metaslab_sync(metaslab_t *msp, uint64_t txg)
metaslab_class_histogram_verify(mg->mg_class);
metaslab_group_histogram_remove(mg, msp);
- if (msp->ms_loaded && metaslab_should_condense(msp)) {
- metaslab_condense(msp, txg, tx);
+ if (spa->spa_sync_pass == 1 && msp->ms_loaded &&
+ metaslab_should_condense(msp))
+ metaslab_condense(msp, tx);
+
+ /*
+ * We'll be going to disk to sync our space accounting, thus we
+ * drop the ms_lock during that time so allocations coming from
+ * open-context (ZIL) for future TXGs do not block.
+ */
+ mutex_exit(&msp->ms_lock);
+ space_map_t *log_sm = spa_syncing_log_sm(spa);
+ if (log_sm != NULL) {
+ ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
+
+ space_map_write(log_sm, alloctree, SM_ALLOC,
+ vd->vdev_id, tx);
+ space_map_write(log_sm, msp->ms_freeing, SM_FREE,
+ vd->vdev_id, tx);
+ mutex_enter(&msp->ms_lock);
+
+ ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
+ metaslab_unflushed_changes_memused(msp));
+ spa->spa_unflushed_stats.sus_memused -=
+ metaslab_unflushed_changes_memused(msp);
+ range_tree_remove_xor_add(alloctree,
+ msp->ms_unflushed_frees, msp->ms_unflushed_allocs);
+ range_tree_remove_xor_add(msp->ms_freeing,
+ msp->ms_unflushed_allocs, msp->ms_unflushed_frees);
+ spa->spa_unflushed_stats.sus_memused +=
+ metaslab_unflushed_changes_memused(msp);
} else {
- mutex_exit(&msp->ms_lock);
+ ASSERT(!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
+
space_map_write(msp->ms_sm, alloctree, SM_ALLOC,
SM_NO_VDEVID, tx);
space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE,
@@ -2928,7 +3409,8 @@ metaslab_sync(metaslab_t *msp, uint64_t txg)
/*
* Since we are doing writes to disk and the ms_checkpointing
* tree won't be changing during that time, we drop the
- * ms_lock while writing to the checkpoint space map.
+ * ms_lock while writing to the checkpoint space map, for the
+ * same reason mentioned above.
*/
mutex_exit(&msp->ms_lock);
space_map_write(vd->vdev_checkpoint_sm,
@@ -2996,6 +3478,10 @@ metaslab_sync(metaslab_t *msp, uint64_t txg)
* and instead will just swap the pointers for freeing and freed.
* We can safely do this since the freed_tree is guaranteed to be
* empty on the initial pass.
+ *
+ * Keep in mind that even if we are currently using a log spacemap
+ * we want current frees to end up in the ms_allocatable (but not
+ * get appended to the ms_sm) so their ranges can be reused as usual.
*/
if (spa_sync_pass(spa) == 1) {
range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
@@ -3015,11 +3501,15 @@ metaslab_sync(metaslab_t *msp, uint64_t txg)
mutex_exit(&msp->ms_lock);
- if (object != space_map_object(msp->ms_sm)) {
- object = space_map_object(msp->ms_sm);
- dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
- msp->ms_id, sizeof (uint64_t), &object, tx);
- }
+ /*
+ * Verify that the space map object ID has been recorded in the
+ * vdev_ms_array.
+ */
+ uint64_t object;
+ VERIFY0(dmu_read(mos, vd->vdev_ms_array,
+ msp->ms_id * sizeof (uint64_t), sizeof (uint64_t), &object, 0));
+ VERIFY3U(object, ==, space_map_object(msp->ms_sm));
+
mutex_exit(&msp->ms_sync_lock);
dmu_tx_commit(tx);
}
@@ -3084,14 +3574,18 @@ metaslab_sync_done(metaslab_t *msp, uint64_t txg)
msp->ms_freed = range_tree_create(NULL, NULL);
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
- ASSERT(msp->ms_defer[t] == NULL);
-
+ ASSERT3P(msp->ms_defer[t], ==, NULL);
msp->ms_defer[t] = range_tree_create(NULL, NULL);
}
ASSERT3P(msp->ms_checkpointing, ==, NULL);
msp->ms_checkpointing = range_tree_create(NULL, NULL);
+ ASSERT3P(msp->ms_unflushed_allocs, ==, NULL);
+ msp->ms_unflushed_allocs = range_tree_create(NULL, NULL);
+ ASSERT3P(msp->ms_unflushed_frees, ==, NULL);
+ msp->ms_unflushed_frees = range_tree_create(NULL, NULL);
+
metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size);
}
ASSERT0(range_tree_space(msp->ms_freeing));
@@ -3108,21 +3602,28 @@ metaslab_sync_done(metaslab_t *msp, uint64_t txg)
defer_delta = 0;
alloc_delta = msp->ms_allocated_this_txg -
range_tree_space(msp->ms_freed);
+
if (defer_allowed) {
defer_delta = range_tree_space(msp->ms_freed) -
range_tree_space(*defer_tree);
} else {
defer_delta -= range_tree_space(*defer_tree);
}
-
metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta,
defer_delta, 0);
- /*
- * If there's a metaslab_load() in progress, wait for it to complete
- * so that we have a consistent view of the in-core space map.
- */
- metaslab_load_wait(msp);
+ if (spa_syncing_log_sm(spa) == NULL) {
+ /*
+ * If there's a metaslab_load() in progress and we don't have
+ * a log space map, it means that we probably wrote to the
+ * metaslab's space map. If this is the case, we need to
+ * make sure that we wait for the load to complete so that we
+ * have a consistent view of the in-core side of the metaslab.
+ */
+ metaslab_load_wait(msp);
+ } else {
+ ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
+ }
/*
* When auto-trimming is enabled, free ranges which are added to
@@ -3451,6 +3952,7 @@ metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
range_tree_t *rt = msp->ms_allocatable;
metaslab_class_t *mc = msp->ms_group->mg_class;
+ ASSERT(MUTEX_HELD(&msp->ms_lock));
VERIFY(!msp->ms_condensing);
VERIFY0(msp->ms_disabled);
@@ -4864,12 +5366,23 @@ metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
offset, size);
}
- range_tree_verify_not_present(msp->ms_trim, offset, size);
+ /*
+ * Check all segments that currently exist in the freeing pipeline.
+ *
+ * It would intuitively make sense to also check the current allocating
+ * tree since metaslab_unalloc_dva() exists for extents that are
+ * allocated and freed in the same sync pass within the same txg.
+ * Unfortunately there are places (e.g. the ZIL) where we allocate a
+ * segment but then we free part of it within the same txg
+ * [see zil_sync()]. Thus, we don't call range_tree_verify_not_present()
+ * on the current allocating tree.
+ */
range_tree_verify_not_present(msp->ms_freeing, offset, size);
range_tree_verify_not_present(msp->ms_checkpointing, offset, size);
range_tree_verify_not_present(msp->ms_freed, offset, size);
for (int j = 0; j < TXG_DEFER_SIZE; j++)
range_tree_verify_not_present(msp->ms_defer[j], offset, size);
+ range_tree_verify_not_present(msp->ms_trim, offset, size);
mutex_exit(&msp->ms_lock);
}
@@ -4979,6 +5492,57 @@ metaslab_enable(metaslab_t *msp, boolean_t sync)
mutex_exit(&mg->mg_ms_disabled_lock);
}
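+
+/*
+ * Persist this metaslab's unflushed TXG in the per-vdev object referenced by
+ * the VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS entry of the vdev_top_zap,
+ * allocating that object and ZAP entry the first time they are needed.
+ */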
+static void
+metaslab_update_ondisk_flush_data(metaslab_t *ms, dmu_tx_t *tx)
+{
+ vdev_t *vd = ms->ms_group->mg_vd;
+ spa_t *spa = vd->vdev_spa;
+ objset_t *mos = spa_meta_objset(spa);
+
+ ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
+
+ metaslab_unflushed_phys_t entry = {
+ .msp_unflushed_txg = metaslab_unflushed_txg(ms),
+ };
+ uint64_t entry_size = sizeof (entry);
+ uint64_t entry_offset = ms->ms_id * entry_size;
+
+ uint64_t object = 0;
+ int err = zap_lookup(mos, vd->vdev_top_zap,
+ VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
+ &object);
+ if (err == ENOENT) {
+ object = dmu_object_alloc(mos, DMU_OTN_UINT64_METADATA,
+ SPA_OLD_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx);
+ VERIFY0(zap_add(mos, vd->vdev_top_zap,
+ VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
+ &object, tx));
+ } else {
+ VERIFY0(err);
+ }
+
+ dmu_write(spa_meta_objset(spa), object, entry_offset, entry_size,
+ &entry, tx);
+}
+
+void
+metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx)
+{
+ spa_t *spa = ms->ms_group->mg_vd->vdev_spa;
+
+ if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
+ return;
+
+ ms->ms_unflushed_txg = txg;
+ metaslab_update_ondisk_flush_data(ms, tx);
+}
+
+uint64_t
+metaslab_unflushed_txg(metaslab_t *ms)
+{
+ return (ms->ms_unflushed_txg);
+}
+
#if defined(_KERNEL)
/* BEGIN CSTYLED */
module_param(metaslab_aliquot, ulong, 0644);
diff --git a/module/zfs/range_tree.c b/module/zfs/range_tree.c
index 391533b3f..5919236d9 100644
--- a/module/zfs/range_tree.c
+++ b/module/zfs/range_tree.c
@@ -23,7 +23,7 @@
* Use is subject to license terms.
*/
/*
- * Copyright (c) 2013, 2017 by Delphix. All rights reserved.
+ * Copyright (c) 2013, 2019 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@@ -578,10 +578,10 @@ range_tree_vacate(range_tree_t *rt, range_tree_func_t *func, void *arg)
void
range_tree_walk(range_tree_t *rt, range_tree_func_t *func, void *arg)
{
- range_seg_t *rs;
-
- for (rs = avl_first(&rt->rt_root); rs; rs = AVL_NEXT(&rt->rt_root, rs))
+ for (range_seg_t *rs = avl_first(&rt->rt_root); rs;
+ rs = AVL_NEXT(&rt->rt_root, rs)) {
func(arg, rs->rs_start, rs->rs_end - rs->rs_start);
+ }
}
range_seg_t *
@@ -596,6 +596,12 @@ range_tree_space(range_tree_t *rt)
return (rt->rt_space);
}
+uint64_t
+range_tree_numsegs(range_tree_t *rt)
+{
+ return ((rt == NULL) ? 0 : avl_numnodes(&rt->rt_root));
+}
+
boolean_t
range_tree_is_empty(range_tree_t *rt)
{
@@ -667,3 +673,73 @@ range_tree_span(range_tree_t *rt)
{
return (range_tree_max(rt) - range_tree_min(rt));
}
+
+/*
+ * Remove from removefrom any portions of the segment [start, end) that
+ * overlap with it, and add the non-overlapping parts of the segment to addto.
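+ *
+ * For example, if removefrom contains the segment [10, 20) and this is
+ * called with start=5 and end=25, then [10, 20) is removed from removefrom
+ * and the leftovers [5, 10) and [20, 25) are added to addto.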
+ */
+void
+range_tree_remove_xor_add_segment(uint64_t start, uint64_t end,
+ range_tree_t *removefrom, range_tree_t *addto)
+{
+ avl_index_t where;
+ range_seg_t starting_rs = {
+ .rs_start = start,
+ .rs_end = start + 1
+ };
+
+ range_seg_t *curr = avl_find(&removefrom->rt_root,
+ &starting_rs, &where);
+
+ if (curr == NULL)
+ curr = avl_nearest(&removefrom->rt_root, where, AVL_AFTER);
+
+ range_seg_t *next;
+ for (; curr != NULL; curr = next) {
+ next = AVL_NEXT(&removefrom->rt_root, curr);
+
+ if (start == end)
+ return;
+ VERIFY3U(start, <, end);
+
+ /* there is no overlap */
+ if (end <= curr->rs_start) {
+ range_tree_add(addto, start, end - start);
+ return;
+ }
+
+ uint64_t overlap_start = MAX(curr->rs_start, start);
+ uint64_t overlap_end = MIN(curr->rs_end, end);
+ uint64_t overlap_size = overlap_end - overlap_start;
+ ASSERT3S(overlap_size, >, 0);
+ range_tree_remove(removefrom, overlap_start, overlap_size);
+
+ if (start < overlap_start)
+ range_tree_add(addto, start, overlap_start - start);
+
+ start = overlap_end;
+ }
+ VERIFY3P(curr, ==, NULL);
+
+ if (start != end) {
+ VERIFY3U(start, <, end);
+ range_tree_add(addto, start, end - start);
+ } else {
+ VERIFY3U(start, ==, end);
+ }
+}
+
+/*
+ * For each entry in rt, if it exists in removefrom, remove it
+ * from removefrom. Otherwise, add it to addto.
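+ *
+ * For example, metaslab_sync() uses this to fold this TXG's allocations
+ * and frees into the metaslab's unflushed range trees: a range allocated
+ * in this TXG that was freed in an earlier unflushed TXG simply cancels
+ * that pending free instead of being tracked as both an unflushed free
+ * and an unflushed alloc.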
+ */
+void
+range_tree_remove_xor_add(range_tree_t *rt, range_tree_t *removefrom,
+ range_tree_t *addto)
+{
+ for (range_seg_t *rs = avl_first(&rt->rt_root); rs;
+ rs = AVL_NEXT(&rt->rt_root, rs)) {
+ range_tree_remove_xor_add_segment(rs->rs_start, rs->rs_end,
+ removefrom, addto);
+ }
+}
diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index 23cf17a58..3ad8fc6e4 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -1420,20 +1420,89 @@ spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
return (0);
}
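+
+/*
+ * Decide whether it is worth flushing metaslabs while the pool is being
+ * unloaded: the log space map feature must be active, the pool must be
+ * writeable and actively syncing, its state must be EXPORTED (not
+ * destroyed), and the zfs_keep_log_spacemaps_at_export test tunable must
+ * not be set.
+ */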
+static boolean_t
+spa_should_flush_logs_on_unload(spa_t *spa)
+{
+ if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
+ return (B_FALSE);
+
+ if (!spa_writeable(spa))
+ return (B_FALSE);
+
+ if (!spa->spa_sync_on)
+ return (B_FALSE);
+
+ if (spa_state(spa) != POOL_STATE_EXPORTED)
+ return (B_FALSE);
+
+ if (zfs_keep_log_spacemaps_at_export)
+ return (B_FALSE);
+
+ return (B_TRUE);
+}
+
+/*
+ * Opens a transaction that sets the flag instructing spa_sync to
+ * attempt to flush all the metaslabs for that txg.
+ */
+static void
+spa_unload_log_sm_flush_all(spa_t *spa)
+{
+ dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
+ VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
+
+ ASSERT3U(spa->spa_log_flushall_txg, ==, 0);
+ spa->spa_log_flushall_txg = dmu_tx_get_txg(tx);
+
+ dmu_tx_commit(tx);
+ txg_wait_synced(spa_get_dsl(spa), spa->spa_log_flushall_txg);
+}
+
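+/*
+ * Tear down the in-memory log space map bookkeeping: empty the
+ * spa_sm_logs_by_txg AVL tree and the log summary list (by now no metaslab
+ * should still be counted against either) and reset the unflushed stats.
+ */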
+static void
+spa_unload_log_sm_metadata(spa_t *spa)
+{
+ void *cookie = NULL;
+ spa_log_sm_t *sls;
+ while ((sls = avl_destroy_nodes(&spa->spa_sm_logs_by_txg,
+ &cookie)) != NULL) {
+ VERIFY0(sls->sls_mscount);
+ kmem_free(sls, sizeof (spa_log_sm_t));
+ }
+
+ for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
+ e != NULL; e = list_head(&spa->spa_log_summary)) {
+ VERIFY0(e->lse_mscount);
+ list_remove(&spa->spa_log_summary, e);
+ kmem_free(e, sizeof (log_summary_entry_t));
+ }
+
+ spa->spa_unflushed_stats.sus_nblocks = 0;
+ spa->spa_unflushed_stats.sus_memused = 0;
+ spa->spa_unflushed_stats.sus_blocklimit = 0;
+}
+
/*
* Opposite of spa_load().
*/
static void
spa_unload(spa_t *spa)
{
- int i;
-
ASSERT(MUTEX_HELD(&spa_namespace_lock));
+ ASSERT(spa_state(spa) != POOL_STATE_UNINITIALIZED);
spa_import_progress_remove(spa_guid(spa));
spa_load_note(spa, "UNLOADING");
/*
+ * If the log space map feature is enabled and the pool is getting
+ * exported (but not destroyed), we want to spend some time flushing
+ * as many metaslabs as we can in an attempt to destroy log space
+ * maps and save import time.
+ */
+ if (spa_should_flush_logs_on_unload(spa))
+ spa_unload_log_sm_flush_all(spa);
+
+ /*
* Stop async tasks.
*/
spa_async_suspend(spa);
@@ -1454,16 +1523,15 @@ spa_unload(spa_t *spa)
}
/*
- * Even though vdev_free() also calls vdev_metaslab_fini, we need
- * to call it earlier, before we wait for async i/o to complete.
- * This ensures that there is no async metaslab prefetching, by
- * calling taskq_wait(mg_taskq).
+ * This ensures that there is no async metaslab prefetching
+ * while we attempt to unload the spa.
*/
if (spa->spa_root_vdev != NULL) {
- spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
- for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++)
- vdev_metaslab_fini(spa->spa_root_vdev->vdev_child[c]);
- spa_config_exit(spa, SCL_ALL, spa);
+ for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++) {
+ vdev_t *vc = spa->spa_root_vdev->vdev_child[c];
+ if (vc->vdev_mg != NULL)
+ taskq_wait(vc->vdev_mg->mg_taskq);
+ }
}
if (spa->spa_mmp.mmp_thread)
@@ -1517,13 +1585,14 @@ spa_unload(spa_t *spa)
}
ddt_unload(spa);
+ spa_unload_log_sm_metadata(spa);
/*
* Drop and purge level 2 cache
*/
spa_l2cache_drop(spa);
- for (i = 0; i < spa->spa_spares.sav_count; i++)
+ for (int i = 0; i < spa->spa_spares.sav_count; i++)
vdev_free(spa->spa_spares.sav_vdevs[i]);
if (spa->spa_spares.sav_vdevs) {
kmem_free(spa->spa_spares.sav_vdevs,
@@ -1536,7 +1605,7 @@ spa_unload(spa_t *spa)
}
spa->spa_spares.sav_count = 0;
- for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
+ for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
vdev_free(spa->spa_l2cache.sav_vdevs[i]);
}
@@ -3723,6 +3792,13 @@ spa_ld_load_vdev_metadata(spa_t *spa)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
+ error = spa_ld_log_spacemaps(spa);
+ if (error != 0) {
+ spa_load_failed(spa, "spa_ld_log_sm_data failed [error=%d]",
+ error);
+ return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
+ }
+
/*
* Propagate the leaf DTLs we just loaded all the way up the vdev tree.
*/
@@ -5864,7 +5940,7 @@ spa_reset(char *pool)
int
spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
{
- uint64_t txg, id;
+ uint64_t txg;
int error;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *vd, *tvd;
@@ -5939,19 +6015,9 @@ spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
}
for (int c = 0; c < vd->vdev_children; c++) {
-
- /*
- * Set the vdev id to the first hole, if one exists.
- */
- for (id = 0; id < rvd->vdev_children; id++) {
- if (rvd->vdev_child[id]->vdev_ishole) {
- vdev_free(rvd->vdev_child[id]);
- break;
- }
- }
tvd = vd->vdev_child[c];
vdev_remove_child(vd, tvd);
- tvd->vdev_id = id;
+ tvd->vdev_id = rvd->vdev_children;
vdev_add_child(rvd, tvd);
vdev_config_dirty(tvd);
}
@@ -7597,6 +7663,18 @@ spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
if (spa_sync_pass(spa) != 1)
return;
+ /*
+ * Note:
+ * If the log space map feature is active, we stop deferring
+ * frees to the next TXG and therefore running this function
+ * would be considered a no-op as spa_deferred_bpobj should
+ * not have any entries.
+ *
+ * That said we run this function anyway (instead of returning
+ * immediately) for the edge-case scenario where we just
+ * activated the log space map feature in this TXG but we have
+ * deferred frees from the previous TXG.
+ */
zio_t *zio = zio_root(spa, NULL, NULL, 0);
VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
spa_free_sync_cb, zio, tx), ==, 0);
@@ -8187,7 +8265,14 @@ spa_sync_iterate_to_convergence(spa_t *spa, dmu_tx_t *tx)
spa_errlog_sync(spa, txg);
dsl_pool_sync(dp, txg);
- if (pass < zfs_sync_pass_deferred_free) {
+ if (pass < zfs_sync_pass_deferred_free ||
+ spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
+ /*
+ * If the log space map feature is active we don't
+ * care about deferred frees and the deferred bpobj
+ * as the log space map should effectively have the
+ * same results (i.e. appending only to one object).
+ */
spa_sync_frees(spa, free_bpl, tx);
} else {
/*
@@ -8204,6 +8289,8 @@ spa_sync_iterate_to_convergence(spa_t *spa, dmu_tx_t *tx)
svr_sync(spa, tx);
spa_sync_upgrades(spa, tx);
+ spa_flush_metaslabs(spa, tx);
+
vdev_t *vd = NULL;
while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
!= NULL)
@@ -8453,6 +8540,7 @@ spa_sync(spa_t *spa, uint64_t txg)
while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
!= NULL)
vdev_sync_done(vd, txg);
+ spa_sync_close_syncing_log_sm(spa);
spa_update_dspace(spa);
@@ -8639,6 +8727,21 @@ spa_has_active_shared_spare(spa_t *spa)
return (B_FALSE);
}
+uint64_t
+spa_total_metaslabs(spa_t *spa)
+{
+ vdev_t *rvd = spa->spa_root_vdev;
+
+ uint64_t m = 0;
+ for (uint64_t c = 0; c < rvd->vdev_children; c++) {
+ vdev_t *vd = rvd->vdev_child[c];
+ if (!vdev_is_concrete(vd))
+ continue;
+ m += vd->vdev_ms_count;
+ }
+ return (m);
+}
+
sysevent_t *
spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
{
diff --git a/module/zfs/spa_log_spacemap.c b/module/zfs/spa_log_spacemap.c
new file mode 100644
index 000000000..5733b6c13
--- /dev/null
+++ b/module/zfs/spa_log_spacemap.c
@@ -0,0 +1,1308 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2018, 2019 by Delphix. All rights reserved.
+ */
+
+#include <sys/dmu_objset.h>
+#include <sys/metaslab.h>
+#include <sys/metaslab_impl.h>
+#include <sys/spa.h>
+#include <sys/spa_impl.h>
+#include <sys/spa_log_spacemap.h>
+#include <sys/vdev_impl.h>
+#include <sys/zap.h>
+
+/*
+ * Log Space Maps
+ *
+ * Log space maps are an optimization in ZFS metadata allocations for pools
+ * whose workloads are primarily random-writes. Random-write workloads are also
+ * typically random-free, meaning that they are freeing from locations scattered
+ * throughout the pool. This means that each TXG we will have to append some
+ * FREE records to almost every metaslab. With log space maps, we hold their
+ * changes in memory and log them altogether in one pool-wide space map on-disk
+ * for persistence. As more blocks are accumulated in the log space maps and
+ * more unflushed changes are accounted in memory, we flush a selected group
+ * of metaslabs every TXG to relieve memory pressure and potential overheads
+ * when loading the pool. Flushing a metaslab to disk relieves memory as we
+ * flush any unflushed changes from memory to disk (i.e. the metaslab's space
+ * map) and saves import time by making old log space maps obsolete and
+ * eventually destroying them. [A log space map is said to be obsolete when all
+ * its entries have made it to their corresponding metaslab space maps].
+ *
+ * == On disk data structures used ==
+ *
+ * - The pool has a new feature flag and a new entry in the MOS. The feature
+ * is activated when we create the first log space map and remains active
+ * for the lifetime of the pool. The new entry in the MOS Directory [refer
+ * to DMU_POOL_LOG_SPACEMAP_ZAP] is populated with a ZAP whose key-value
+ * pairs are of the form <key: txg, value: log space map object for that txg>.
+ * This entry is our on-disk reference of the log space maps that exist in
+ * the pool for each TXG and it is used during import to load all the
+ * metaslab unflushed changes in memory. To see how this structure is first
+ * created and later populated refer to spa_generate_syncing_log_sm(). To see
+ * how it is used during import time refer to spa_ld_log_sm_metadata().
+ *
+ * - Each vdev has a new entry in its vdev_top_zap (see field
+ * VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS) which holds the msp_unflushed_txg of
+ * each metaslab in this vdev. This field is the on-disk counterpart of the
+ * in-memory field ms_unflushed_txg which tells us from which TXG and onwards
+ * the metaslab hasn't had its changes flushed. During import, we use this
+ * to ignore any entries in the space map log that are for this metaslab but
+ * from a TXG before msp_unflushed_txg. At that point, we also populate its
+ * in-memory counterpart and from there both fields are updated every time
+ * we flush that metaslab.
+ *
+ * - A space map is created every TXG and, during that TXG, it is used to log
+ * all incoming changes (the log space map). When created, the log space map
+ * is referenced in memory by spa_syncing_log_sm and its object ID is inserted
+ * to the space map ZAP mentioned above. The log space map is closed at the
+ * end of the TXG and will be destroyed when it becomes fully obsolete. We
+ * know when a log space map has become obsolete by looking at the oldest
+ * (and smallest) ms_unflushed_txg in the pool. If the value of that is bigger
+ * than the log space map's TXG, then it means that no metaslab is missing
+ * changes from that log and we can therefore destroy it.
+ * [see spa_cleanup_old_sm_logs()].
+ *
+ * == Important in-memory structures ==
+ *
+ * - The per-spa field spa_metaslabs_by_flushed sorts all the metaslabs in
+ * the pool by their ms_unflushed_txg field. It is primarily used for three
+ * reasons. First of all, it is used during flushing where we try to flush
+ * metaslabs in-order from the oldest-flushed to the most recently flushed
+ * every TXG. Secondly, it helps us to look up the ms_unflushed_txg of the
+ * oldest flushed metaslab to distinguish which log space maps have become
+ * obsolete and which ones are still relevant. Finally it tells us which
+ * metaslabs have unflushed changes in a pool where this feature was just
+ * enabled, as we don't immediately add all of the pool's metaslabs but we
+ * add them over time as they go through metaslab_sync(). The reason that
+ * we do that is to ease these pools into the behavior of the flushing
+ * algorithm (described later on).
+ *
+ * - The per-spa field spa_sm_logs_by_txg can be thought as the in-memory
+ * counterpart of the space map ZAP mentioned above. It's an AVL tree whose
+ * nodes represent the log space maps in the pool. This in-memory
+ * representation of log space maps in the pool sorts the log space maps by
+ * the TXG that they were created (which is also the TXG of their unflushed
+ * changes). It also contains the following extra information for each
+ * space map:
+ * [1] The number of metaslabs that were last flushed on that TXG. This is
+ * important because if that counter is zero and this is the oldest
+ * log then it means that it is also obsolete.
+ * [2] The number of blocks of that space map. This field is used by the
+ * block heuristic of our flushing algorithm (described later on).
+ * It represents how many blocks of metadata changes ZFS had to write
+ * to disk for that TXG.
+ *
+ * - The per-spa field spa_log_summary is a list of entries that summarizes
+ * the metaslab and block counts of all the nodes of the spa_sm_logs_by_txg
+ * AVL tree mentioned above. The reason this exists is that our flushing
+ * algorithm (described later) tries to estimate how many metaslabs to flush
+ * in each TXG by iterating over all the log space maps and looking at their
+ * block counts. Summarizing that information means that we don't have to
+ * iterate through each space map, minimizing the runtime overhead of the
+ * flushing algorithm which would be induced in syncing context. In terms of
+ * implementation the log summary is used as a queue:
+ * * we modify or pop entries from its head when we flush metaslabs
+ * * we modify or append entries to its tail when we sync changes.
+ *
+ * - Each metaslab has two new range trees that hold its unflushed changes,
+ * ms_unflushed_allocs and ms_unflushed_frees. These are always disjoint.
+ *
+ * == Flushing algorithm ==
+ *
+ * The decision of how many metaslabs to flush on a given TXG is guided by
+ * two heuristics:
+ *
+ * [1] The memory heuristic -
+ * We keep track of the memory used by the unflushed trees from all the
+ * metaslabs [see sus_memused of spa_unflushed_stats] and we ensure that it
+ * stays below a certain threshold which is determined by an arbitrary hard
+ * limit and an arbitrary percentage of the system's memory [see
+ * spa_log_exceeds_memlimit()]. When the memory usage of the unflushed
+ * changes exceeds that threshold, we flush metaslabs, which
+ * empties their unflushed range trees, reducing the memory used.
+ *
+ * [2] The block heuristic -
+ * We try to keep the total number of blocks in the log space maps in check
+ * so the log doesn't grow indefinitely and we don't induce a lot of overhead
+ * when loading the pool. At the same time we don't want to flush a lot of
+ * metaslabs too often as this would defeat the purpose of the log space map.
+ * As a result we set a limit on the number of blocks that we think is
+ * acceptable for the log space maps to have, and we try not to cross it.
+ * [see sus_blocklimit from spa_unflushed_stats].
+ *
+ * In order to stay below the block limit every TXG we have to estimate how
+ * many metaslabs we need to flush based on the current rate of incoming blocks
+ * and our history of log space map blocks. The main idea here is to answer
+ * the question of how many metaslabs we need to flush in order to get rid
+ * of at least X log space map blocks. We can answer this question by
+ * iterating backwards from the oldest log space map to the newest one
+ * and looking at their metaslab and block counts. At this point the log
+ * summary mentioned above comes in handy as it reduces the amount of data
+ * that we have to iterate (even though it may reduce the precision of our
+ * estimates due to its aggregation of data). So with that in mind, we
+ * project the incoming rate of the current TXG into the future and attempt
+ * to approximate how many metaslabs we would need to flush from now on in
+ * order to avoid exceeding our block limit at different points in the
+ * future (granted that we would keep flushing the same number of metaslabs
+ * for every TXG). Then we take the maximum number from all these estimates
+ * to be on the safe side. For the exact implementation details of the
+ * algorithm refer to spa_estimate_metaslabs_to_flush().
+ */
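+
+/*
+ * As a simplified illustration of the block heuristic, suppose the log
+ * summary (oldest to newest) reads {10 metaslabs / 800 blocks,
+ * 20 metaslabs / 1200 blocks}, the block limit is 1500 blocks, and we
+ * estimate ~300 incoming blocks for this TXG. To end up at or below the
+ * limit we need to shed at least 2000 + 300 - 1500 = 800 blocks, which
+ * means flushing (at least) the 10 metaslabs still counted against the
+ * oldest summary entry so that its log blocks can be destroyed. The actual
+ * estimate also projects the incoming rate into future TXGs and keeps the
+ * maximum of those per-TXG answers [see spa_estimate_metaslabs_to_flush()].
+ */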
+
+/*
+ * This is used as the block size for the space maps used for the
+ * log space map feature. These space maps benefit from a bigger
+ * block size as we expect to be writing a lot of data to them at
+ * once.
+ */
+unsigned long zfs_log_sm_blksz = 1ULL << 17;
+
+/*
+ * Percentage of the overall system's memory that ZFS allows to be
+ * used for unflushed changes (e.g. the sum of size of all the nodes
+ * in the unflushed trees).
+ *
+ * Note that this value is calculated over 1000000 for finer granularity
+ * (thus the _ppm suffix; reads as "parts per million"). As an example,
+ * the default of 1000 allows 0.1% of memory to be used.
+ */
+unsigned long zfs_unflushed_max_mem_ppm = 1000;
+
+/*
+ * Specific hard-limit in memory that ZFS allows to be used for
+ * unflushed changes.
+ */
+unsigned long zfs_unflushed_max_mem_amt = 1ULL << 30;
+
+/*
+ * The following tunable determines the number of blocks that can be used for
+ * the log space maps. It is expressed as a percentage of the total number of
+ * metaslabs in the pool (i.e. the default of 400 means that the number of log
+ * blocks is capped at 4 times the number of metaslabs).
+ *
+ * This value exists to tune our flushing algorithm, with higher values
+ * flushing metaslabs less often (doing less I/Os) per TXG versus lower values
+ * flushing metaslabs more aggressively with the upside of saving overheads
+ * when loading the pool. Another factor in this tradeoff is that flushing
+ * less often can potentially lead to better utilization of the metaslab space
+ * map's block size as we accumulate more changes per flush.
+ *
+ * This tunable indirectly controls the flush rate (metaslabs flushed per
+ * txg), which is why expressing it as a percentage of the number of
+ * metaslabs in the pool makes sense here.
+ *
+ * As a rule of thumb we default this tunable to 400% based on the following:
+ *
+ * 1] Assuming a constant flush rate and a constant incoming rate of log blocks
+ * it is reasonable to expect that the amount of obsolete entries changes
+ * linearly from txg to txg (e.g. the oldest log should have the most
+ * obsolete entries, and the most recent one the least). With this we could
+ * say that, at any given time, about half of the entries in the whole space
+ * map log are obsolete. Thus for every two entries for a metaslab in the
+ * log space map, only one of them is valid and actually makes it to the
+ * metaslab's space map.
+ * [factor of 2]
+ * 2] Each entry in the log space map is guaranteed to be two words while
+ * entries in metaslab space maps are generally single-word.
+ * [an extra factor of 2 - 400% overall]
+ * 3] Even if [1] and [2] are slightly less than 2 each, we haven't taken into
+ * account any consolidation of segments from the log space map to the
+ * unflushed range trees nor their history (e.g. a segment being allocated,
+ * then freed, then allocated again means 3 log space map entries but 0
+ * metaslab space map entries). Depending on the workload, we've seen ~1.8
+ * non-obsolete log space map entries per metaslab entry, for a total of
+ *    ~600%. Since most of these estimates are workload-dependent though, we
+ *    default to 400%, which covers the worst case of [1] and [2] alone
+ *    (a factor of 4).
+ *
+ * That said, regardless of the number of metaslabs in the pool we need to
+ * provide upper and lower bounds for the log block limit.
+ * [see zfs_unflushed_log_block_{min,max}]
+ */
+unsigned long zfs_unflushed_log_block_pct = 400;
+
+/*
+ * If the number of metaslabs is small and our incoming rate is high, we could
+ * get into a situation where we are flushing all our metaslabs every TXG. Thus
+ * we always allow at least this many log blocks.
+ */
+unsigned long zfs_unflushed_log_block_min = 1000;
+
+/*
+ * If the log becomes too big, the import time of the pool can suffer. Thus
+ * we place a hard limit on the size of the log, expressed in blocks.
+ */
+unsigned long zfs_unflushed_log_block_max = (1ULL << 18);
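+
+/*
+ * For example (hypothetical pool sizes): with the defaults above, a pool
+ * with 200 metaslabs gets 200 * 400% = 800 blocks, which is then raised to
+ * zfs_unflushed_log_block_min (1000), while a pool with 100000 metaslabs
+ * gets 400000 blocks, which is capped at zfs_unflushed_log_block_max
+ * (262144). See spa_log_sm_set_blocklimit() below.
+ */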
+
+/*
+ * Max # of rows allowed for the log_summary. The tradeoff here is accuracy and
+ * stability of the flushing algorithm (longer summary) vs its runtime overhead
+ * (smaller summary is faster to traverse).
+ */
+unsigned long zfs_max_logsm_summary_length = 10;
+
+/*
+ * Tunable that sets the lower bound on the metaslabs to flush every TXG.
+ *
+ * Setting this to 0 has no effect since if the pool is idle we won't even be
+ * creating log space maps and therefore we won't be flushing. On the other
+ * hand if the pool has any incoming workload our block heuristic will start
+ * flushing metaslabs anyway.
+ *
+ * The point of this tunable is to be used in extreme cases where we really
+ * want to flush more metaslabs than our adaptable heuristic plans to flush.
+ */
+unsigned long zfs_min_metaslabs_to_flush = 1;
+
+/*
+ * Tunable that specifies how far in the past we want to look when trying to
+ * estimate the incoming log blocks for the current TXG.
+ *
+ * Setting this too high may not only increase runtime but also minimize the
+ * effect of the incoming rates from the most recent TXGs as we take the
+ * average over all the blocks that we walk
+ * [see spa_estimate_incoming_log_blocks].
+ */
+unsigned long zfs_max_log_walking = 5;
+
+/*
+ * This tunable exists solely for testing purposes. It ensures that the log
+ * spacemaps are not flushed and destroyed during export in order for the
+ * relevant log spacemap import code paths to be tested (effectively simulating
+ * a crash).
+ */
+int zfs_keep_log_spacemaps_at_export = 0;
+
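+/*
+ * Estimate the incoming rate of log blocks for the current TXG by averaging
+ * the block counts of up to zfs_max_log_walking of the most recent log
+ * space maps, skipping the log space map created in this TXG.
+ */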
+static uint64_t
+spa_estimate_incoming_log_blocks(spa_t *spa)
+{
+ ASSERT3U(spa_sync_pass(spa), ==, 1);
+ uint64_t steps = 0, sum = 0;
+ for (spa_log_sm_t *sls = avl_last(&spa->spa_sm_logs_by_txg);
+ sls != NULL && steps < zfs_max_log_walking;
+ sls = AVL_PREV(&spa->spa_sm_logs_by_txg, sls)) {
+ if (sls->sls_txg == spa_syncing_txg(spa)) {
+ /*
+ * skip the log created in this TXG as this would
+ * make our estimations inaccurate.
+ */
+ continue;
+ }
+ sum += sls->sls_nblocks;
+ steps++;
+ }
+ return ((steps > 0) ? DIV_ROUND_UP(sum, steps) : 0);
+}
+
+uint64_t
+spa_log_sm_blocklimit(spa_t *spa)
+{
+ return (spa->spa_unflushed_stats.sus_blocklimit);
+}
+
+void
+spa_log_sm_set_blocklimit(spa_t *spa)
+{
+ if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
+ ASSERT0(spa_log_sm_blocklimit(spa));
+ return;
+ }
+
+ uint64_t calculated_limit =
+ (spa_total_metaslabs(spa) * zfs_unflushed_log_block_pct) / 100;
+ spa->spa_unflushed_stats.sus_blocklimit = MIN(MAX(calculated_limit,
+ zfs_unflushed_log_block_min), zfs_unflushed_log_block_max);
+}
+
+uint64_t
+spa_log_sm_nblocks(spa_t *spa)
+{
+ return (spa->spa_unflushed_stats.sus_nblocks);
+}
+
+/*
+ * Ensure that the in-memory log space map structures and the summary
+ * have the same block and metaslab counts.
+ */
+static void
+spa_log_summary_verify_counts(spa_t *spa)
+{
+ ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
+
+ if ((zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) == 0)
+ return;
+
+ uint64_t ms_in_avl = avl_numnodes(&spa->spa_metaslabs_by_flushed);
+
+ uint64_t ms_in_summary = 0, blk_in_summary = 0;
+ for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
+ e; e = list_next(&spa->spa_log_summary, e)) {
+ ms_in_summary += e->lse_mscount;
+ blk_in_summary += e->lse_blkcount;
+ }
+
+ uint64_t ms_in_logs = 0, blk_in_logs = 0;
+ for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
+ sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
+ ms_in_logs += sls->sls_mscount;
+ blk_in_logs += sls->sls_nblocks;
+ }
+
+ VERIFY3U(ms_in_logs, ==, ms_in_summary);
+ VERIFY3U(ms_in_logs, ==, ms_in_avl);
+ VERIFY3U(blk_in_logs, ==, blk_in_summary);
+ VERIFY3U(blk_in_logs, ==, spa_log_sm_nblocks(spa));
+}
+
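+/*
+ * A summary entry is considered full once it accounts for at least
+ * blocklimit / zfs_max_logsm_summary_length blocks (e.g. a hypothetical
+ * block limit of 131072 with the default summary length of 10 gives
+ * roughly 13108 blocks per summary row).
+ */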
+static boolean_t
+summary_entry_is_full(spa_t *spa, log_summary_entry_t *e)
+{
+ uint64_t blocks_per_row = MAX(1,
+ DIV_ROUND_UP(spa_log_sm_blocklimit(spa),
+ zfs_max_logsm_summary_length));
+ return (blocks_per_row <= e->lse_blkcount);
+}
+
+/*
+ * Update the log summary information to reflect the fact that a metaslab
+ * was flushed or destroyed (e.g. due to device removal or pool export/destroy).
+ *
+ * We typically flush the oldest flushed metaslab so the first (and oldest)
+ * entry of the summary is updated. However if that metaslab is getting loaded
+ * we may flush the second oldest one which may be part of an entry later in
+ * the summary. Moreover, if we call into this function from metaslab_fini()
+ * the metaslabs probably won't be ordered by ms_unflushed_txg. Thus we ask
+ * for a txg as an argument so we can locate the appropriate summary entry for
+ * the metaslab.
+ */
+void
+spa_log_summary_decrement_mscount(spa_t *spa, uint64_t txg)
+{
+ /*
+ * We don't track summary data for read-only pools and this function
+ * can be called from metaslab_fini(). In that case return immediately.
+ */
+ if (!spa_writeable(spa))
+ return;
+
+ log_summary_entry_t *target = NULL;
+ for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
+ e != NULL; e = list_next(&spa->spa_log_summary, e)) {
+ if (e->lse_start > txg)
+ break;
+ target = e;
+ }
+
+ if (target == NULL || target->lse_mscount == 0) {
+ /*
+ * We didn't find a summary entry for this metaslab. We must be
+ * at the teardown of a spa_load() attempt that got an error
+ * while reading the log space maps.
+ */
+ VERIFY3S(spa_load_state(spa), ==, SPA_LOAD_ERROR);
+ return;
+ }
+
+ target->lse_mscount--;
+}
+
+/*
+ * Update the log summary information to reflect the fact that we destroyed
+ * old log space maps. Since we can only destroy the oldest log space maps,
+ * we decrement the block count of the oldest summary entry and potentially
+ * destroy it when that count hits 0.
+ *
+ * This function is called after a metaslab is flushed and typically that
+ * metaslab is the oldest flushed, which means that this function will
+ * typically decrement the block count of the first entry of the summary and
+ * potentially free it if the block count gets to zero (its metaslab count
+ * should be zero too at that point).
+ *
+ * There are certain scenarios though that don't work exactly like that so we
+ * need to account for them:
+ *
+ * Scenario [1]: It is possible that after we flushed the oldest flushed
+ * metaslab and we destroyed the oldest log space map, more recent logs had 0
+ * metaslabs pointing to them so we got rid of them too. This can happen due
+ * to metaslabs being destroyed through device removal, or because the oldest
+ * flushed metaslab was loading but we kept flushing more recently flushed
+ * metaslabs due to the memory pressure of unflushed changes. Because of that,
+ * we always iterate from the beginning of the summary, and if blocks_gone
+ * is at least as big as the block_count of the current entry we free that
+ * entry (we expect its metaslab count to be zero), decrement blocks_gone,
+ * and move on to the next entry, repeating this procedure until blocks_gone
+ * gets decremented to 0. Doing this also works for the typical case
+ * mentioned above.
+ *
+ * Scenario [2]: The oldest flushed metaslab isn't necessarily accounted by
+ * the first (and oldest) entry in the summary. If the first few entries of
+ * the summary were only accounting metaslabs from a device that was just
+ * removed, then the current oldest flushed metaslab could be accounted by an
+ * entry somewhere in the middle of the summary. Moreover flushing that
+ * metaslab will destroy all the log space maps older than its ms_unflushed_txg
+ * because they became obsolete after the removal. Thus, iterating as we did
+ * for scenario [1] works out for this case too.
+ *
+ * Scenario [3]: At times we decide to flush all the metaslabs in the pool
+ * in one TXG (either because we are exporting the pool or because our flushing
+ * heuristics decided to do so). When that happens all the log space maps get
+ * destroyed except the one created for the current TXG which doesn't have
+ * any log blocks yet. As log space maps get destroyed with every metaslab that
+ * we flush, entries in the summary are also destroyed. This brings a weird
+ * corner-case when we flush the last metaslab and the log space map of the
+ * current TXG is in the same summary entry as other log space maps that
+ * are older. When that happens we are eventually left with this one last
+ * summary entry whose blocks are gone (blocks_gone equals the entry's block
+ * count) but its metaslab count is non-zero (because it accounts for all
+ * metaslabs in the pool as they all got flushed). Under this scenario we can't
+ * free this last summary entry as it's referencing all the metaslabs in the
+ * pool and its block count will get incremented at the end of this sync (when
+ * we close the syncing log space map). Thus we just decrement its current
+ * block count and leave it alone. In the case that the pool gets exported,
+ * its metaslab count will be decremented over time as we call metaslab_fini()
+ * for all the metaslabs in the pool and the entry will be freed at
+ * spa_unload_log_sm_metadata().
+ */
+void
+spa_log_summary_decrement_blkcount(spa_t *spa, uint64_t blocks_gone)
+{
+ for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
+ e != NULL; e = list_head(&spa->spa_log_summary)) {
+ if (e->lse_blkcount > blocks_gone) {
+ /*
+ * Assert that we stopped at an entry that is not
+ * obsolete.
+ */
+ ASSERT(e->lse_mscount != 0);
+
+ e->lse_blkcount -= blocks_gone;
+ blocks_gone = 0;
+ break;
+ } else if (e->lse_mscount == 0) {
+ /* remove obsolete entry */
+ blocks_gone -= e->lse_blkcount;
+ list_remove(&spa->spa_log_summary, e);
+ kmem_free(e, sizeof (log_summary_entry_t));
+ } else {
+ /* Verify that this is scenario [3] mentioned above. */
+ VERIFY3U(blocks_gone, ==, e->lse_blkcount);
+
+ /*
+ * Assert that this is scenario [3] further by ensuring
+ * that this is the only entry in the summary.
+ */
+ VERIFY3P(e, ==, list_tail(&spa->spa_log_summary));
+ ASSERT3P(e, ==, list_head(&spa->spa_log_summary));
+
+ blocks_gone = e->lse_blkcount = 0;
+ break;
+ }
+ }
+
+ /*
+ * Ensure that there is no way we are trying to remove more blocks
+ * than the # of blocks in the summary.
+ */
+ ASSERT0(blocks_gone);
+}
+
+void
+spa_log_sm_decrement_mscount(spa_t *spa, uint64_t txg)
+{
+ spa_log_sm_t target = { .sls_txg = txg };
+ spa_log_sm_t *sls = avl_find(&spa->spa_sm_logs_by_txg,
+ &target, NULL);
+
+ if (sls == NULL) {
+ /*
+ * We must be at the teardown of a spa_load() attempt that
+ * got an error while reading the log space maps.
+ */
+ VERIFY3S(spa_load_state(spa), ==, SPA_LOAD_ERROR);
+ return;
+ }
+
+ ASSERT(sls->sls_mscount > 0);
+ sls->sls_mscount--;
+}
+
+void
+spa_log_sm_increment_current_mscount(spa_t *spa)
+{
+ spa_log_sm_t *last_sls = avl_last(&spa->spa_sm_logs_by_txg);
+ ASSERT3U(last_sls->sls_txg, ==, spa_syncing_txg(spa));
+ last_sls->sls_mscount++;
+}
+
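+/*
+ * Account the given metaslab and block counts in the most recent entry of
+ * the summary, opening a new entry when the summary is empty or its last
+ * entry is already full.
+ */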
+static void
+summary_add_data(spa_t *spa, uint64_t txg, uint64_t metaslabs_flushed,
+ uint64_t nblocks)
+{
+ log_summary_entry_t *e = list_tail(&spa->spa_log_summary);
+
+ if (e == NULL || summary_entry_is_full(spa, e)) {
+ e = kmem_zalloc(sizeof (log_summary_entry_t), KM_SLEEP);
+ e->lse_start = txg;
+ list_insert_tail(&spa->spa_log_summary, e);
+ }
+
+ ASSERT3U(e->lse_start, <=, txg);
+ e->lse_mscount += metaslabs_flushed;
+ e->lse_blkcount += nblocks;
+}
+
+static void
+spa_log_summary_add_incoming_blocks(spa_t *spa, uint64_t nblocks)
+{
+ summary_add_data(spa, spa_syncing_txg(spa), 0, nblocks);
+}
+
+void
+spa_log_summary_add_flushed_metaslab(spa_t *spa)
+{
+ summary_add_data(spa, spa_syncing_txg(spa), 1, 0);
+}
+
+/*
+ * This function attempts to estimate how many metaslabs we should
+ * flush to satisfy our block heuristic for the log spacemap
+ * for the upcoming TXGs.
+ *
+ * Specifically, it first tries to estimate the number of incoming
+ * blocks in this TXG. Then by projecting that incoming rate to
+ * future TXGs and using the log summary, it figures out how many
+ * flushes we would need to do for future TXGs individually to
+ * stay below our block limit and returns the maximum number of
+ * flushes from those estimates.
+ */
+static uint64_t
+spa_estimate_metaslabs_to_flush(spa_t *spa)
+{
+ ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
+ ASSERT3U(spa_sync_pass(spa), ==, 1);
+ ASSERT(spa_log_sm_blocklimit(spa) != 0);
+
+ /*
+ * This variable contains the incoming rate that will be projected
+ * and used for our flushing estimates in the future.
+ */
+ uint64_t incoming = spa_estimate_incoming_log_blocks(spa);
+
+ /*
+ * At any point in time this variable tells us how many
+ * TXGs in the future we are so we can make our estimations.
+ */
+ uint64_t txgs_in_future = 1;
+
+ /*
+ * This variable tells us how much room we have until we hit
+ * our limit. When it goes negative, it means that we've exceeded
+ * our limit and we need to flush.
+ *
+ * Note that since we start at the first TXG in the future (i.e.
+ * txgs_in_future starts from 1) we already decrement this
+ * variable by the incoming rate.
+ */
+ int64_t available_blocks =
+ spa_log_sm_blocklimit(spa) - spa_log_sm_nblocks(spa) - incoming;
+
+ /*
+ * This variable tells us the total number of flushes needed to
+ * keep the log size within the limit when we reach txgs_in_future.
+ */
+ uint64_t total_flushes = 0;
+
+ /* Holds the current maximum of our estimates so far. */
+ uint64_t max_flushes_pertxg =
+ MIN(avl_numnodes(&spa->spa_metaslabs_by_flushed),
+ zfs_min_metaslabs_to_flush);
+
+ /*
+ * For our estimations we only look as far in the future
+ * as the summary allows us.
+ */
+ for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
+ e; e = list_next(&spa->spa_log_summary, e)) {
+
+ /*
+ * If there is still room before we exceed our limit
+ * then keep skipping TXGs accumulating more blocks
+ * based on the incoming rate until we exceed it.
+ */
+ if (available_blocks >= 0) {
+ uint64_t skip_txgs = (available_blocks / incoming) + 1;
+ available_blocks -= (skip_txgs * incoming);
+ txgs_in_future += skip_txgs;
+ ASSERT3S(available_blocks, >=, -incoming);
+ }
+
+ /*
+ * At this point we're far enough into the future that
+ * the limit has just been exceeded, so we flush metaslabs
+ * based on the current entry in the summary, updating
+ * our available_blocks.
+ */
+ ASSERT3S(available_blocks, <, 0);
+ available_blocks += e->lse_blkcount;
+ total_flushes += e->lse_mscount;
+
+ /*
+ * Keep the running maximum of the total_flushes that
+ * we've done so far over the number of TXGs in the
+ * future that we are. The idea here is to estimate
+ * the average number of flushes that we should do
+ * every TXG so that when we are that many TXGs in the
+ * future we stay under the limit.
+ */
+ max_flushes_pertxg = MAX(max_flushes_pertxg,
+ DIV_ROUND_UP(total_flushes, txgs_in_future));
+ ASSERT3U(avl_numnodes(&spa->spa_metaslabs_by_flushed), >=,
+ max_flushes_pertxg);
+ }
+ return (max_flushes_pertxg);
+}
+
+uint64_t
+spa_log_sm_memused(spa_t *spa)
+{
+ return (spa->spa_unflushed_stats.sus_memused);
+}
+
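+/*
+ * Returns B_TRUE when the memory used by unflushed changes exceeds either
+ * the absolute limit (zfs_unflushed_max_mem_amt) or the fraction of
+ * physical memory given by zfs_unflushed_max_mem_ppm.
+ */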
+static boolean_t
+spa_log_exceeds_memlimit(spa_t *spa)
+{
+ if (spa_log_sm_memused(spa) > zfs_unflushed_max_mem_amt)
+ return (B_TRUE);
+
+ uint64_t system_mem_allowed = ((physmem * PAGESIZE) *
+ zfs_unflushed_max_mem_ppm) / 1000000;
+ if (spa_log_sm_memused(spa) > system_mem_allowed)
+ return (B_TRUE);
+
+ return (B_FALSE);
+}
+
+boolean_t
+spa_flush_all_logs_requested(spa_t *spa)
+{
+ return (spa->spa_log_flushall_txg != 0);
+}
+
+void
+spa_flush_metaslabs(spa_t *spa, dmu_tx_t *tx)
+{
+ uint64_t txg = dmu_tx_get_txg(tx);
+
+ if (spa_sync_pass(spa) != 1)
+ return;
+
+ if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
+ return;
+
+ /*
+ * If we don't have any metaslabs with unflushed changes
+ * return immediately.
+ */
+ if (avl_numnodes(&spa->spa_metaslabs_by_flushed) == 0)
+ return;
+
+ /*
+ * During SPA export we leave a few empty TXGs to go by [see
+ * spa_final_dirty_txg() to understand why]. For this specific
+ * case, it is important to not flush any metaslabs as that
+ * would dirty this TXG.
+ *
+ * That said, during one of these dirty TXGs that is less than
+ * or equal to spa_final_dirty_txg(), spa_unload() will request
+ * that we try to flush all the metaslabs for that TXG before
+ * exporting the pool, thus we ensure that we didn't get a
+ * request to flush everything before we attempt to return
+ * immediately.
+ */
+ if (spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
+ !dmu_objset_is_dirty(spa_meta_objset(spa), txg) &&
+ !spa_flush_all_logs_requested(spa))
+ return;
+
+ /*
+ * We need to generate a log space map before flushing because this
+ * will set up the in-memory data (i.e. node in spa_sm_logs_by_txg)
+ * for this TXG's flushed metaslab count (aka sls_mscount which is
+ * manipulated in many ways down the metaslab_flush() codepath).
+ *
+ * Note that this does not mean we may end up generating a log space
+ * map that we don't need. If we are flushing metaslabs, that means
+ * that we were going to write changes to disk anyway, so even if we
+ * were not flushing here, a log space map would have been created
+ * anyway in metaslab_sync().
+ */
+ spa_generate_syncing_log_sm(spa, tx);
+
+ /*
+ * This variable tells us how many metaslabs we want to flush based
+ * on the block-heuristic of our flushing algorithm (see block comment
+ * of log space map feature). We also decrement this as we flush
+ * metaslabs and attempt to destroy old log space maps.
+ */
+ uint64_t want_to_flush;
+ if (spa_flush_all_logs_requested(spa)) {
+ ASSERT3S(spa_state(spa), ==, POOL_STATE_EXPORTED);
+ want_to_flush = avl_numnodes(&spa->spa_metaslabs_by_flushed);
+ } else {
+ want_to_flush = spa_estimate_metaslabs_to_flush(spa);
+ }
+
+ ASSERT3U(avl_numnodes(&spa->spa_metaslabs_by_flushed), >=,
+ want_to_flush);
+
+ /* Used purely for verification purposes */
+ uint64_t visited = 0;
+
+ /*
+ * Ideally we would iterate through spa_metaslabs_by_flushed
+ * using only one variable (curr). We can't do that because
+ * metaslab_flush() mutates the position of curr in the AVL when
+ * it flushes that metaslab by moving it to the end of the tree.
+ * Thus we always keep track of the original next node of the
+ * current node (curr) in another variable (next).
+ */
+ metaslab_t *next = NULL;
+ for (metaslab_t *curr = avl_first(&spa->spa_metaslabs_by_flushed);
+ curr != NULL; curr = next) {
+ next = AVL_NEXT(&spa->spa_metaslabs_by_flushed, curr);
+
+ /*
+ * If this metaslab has been flushed this txg then we've done
+ * a full circle over the metaslabs.
+ */
+ if (metaslab_unflushed_txg(curr) == txg)
+ break;
+
+ /*
+ * If we are done flushing for the block heuristic and the
+ * unflushed changes don't exceed the memory limit just stop.
+ */
+ if (want_to_flush == 0 && !spa_log_exceeds_memlimit(spa))
+ break;
+
+ mutex_enter(&curr->ms_sync_lock);
+ mutex_enter(&curr->ms_lock);
+ boolean_t flushed = metaslab_flush(curr, tx);
+ mutex_exit(&curr->ms_lock);
+ mutex_exit(&curr->ms_sync_lock);
+
+ /*
+ * If we failed to flush a metaslab (because it was loading),
+ * then we are done with the block heuristic as it's not
+ * possible to destroy any log space maps once you've skipped
+ * a metaslab. In that case we just set our counter to 0 but
+ * we continue looping in case there is still memory pressure
+ * due to unflushed changes. Note that flushing a metaslab
+ * that is not the oldest flushed in the pool will never
+ * destroy any log space maps [see spa_cleanup_old_sm_logs()].
+ */
+ if (!flushed) {
+ want_to_flush = 0;
+ } else if (want_to_flush > 0) {
+ want_to_flush--;
+ }
+
+ visited++;
+ }
+ ASSERT3U(avl_numnodes(&spa->spa_metaslabs_by_flushed), >=, visited);
+}
+
+/*
+ * Close the log space map for this TXG and update the block counts
+ * for the log's in-memory structure and the summary.
+ */
+void
+spa_sync_close_syncing_log_sm(spa_t *spa)
+{
+ if (spa_syncing_log_sm(spa) == NULL)
+ return;
+ ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
+
+ spa_log_sm_t *sls = avl_last(&spa->spa_sm_logs_by_txg);
+ ASSERT3U(sls->sls_txg, ==, spa_syncing_txg(spa));
+
+ sls->sls_nblocks = space_map_nblocks(spa_syncing_log_sm(spa));
+ spa->spa_unflushed_stats.sus_nblocks += sls->sls_nblocks;
+
+ /*
+ * Note that we can't assert that sls_mscount is not 0,
+ * because there is the case where the first metaslab
+ * in spa_metaslabs_by_flushed is loading and we were
+ * not able to flush any metaslabs in the current TXG.
+ */
+ ASSERT(sls->sls_nblocks != 0);
+
+ spa_log_summary_add_incoming_blocks(spa, sls->sls_nblocks);
+ spa_log_summary_verify_counts(spa);
+
+ space_map_close(spa->spa_syncing_log_sm);
+ spa->spa_syncing_log_sm = NULL;
+
+ /*
+ * At this point we tried to flush as many metaslabs as we
+ * could as the pool is getting exported. Reset the "flush all"
+ * request so the last few TXGs before closing the pool can be empty
+ * (e.g. not dirty).
+ */
+ if (spa_flush_all_logs_requested(spa)) {
+ ASSERT3S(spa_state(spa), ==, POOL_STATE_EXPORTED);
+ spa->spa_log_flushall_txg = 0;
+ }
+}
+
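+/*
+ * Destroy the log space maps that are older than the unflushed txg of the
+ * least recently flushed metaslab: free their space map objects, remove
+ * their entries from the spacemap ZAP, and drop their in-memory state. At
+ * that point no metaslab references them anymore (their sls_mscount is 0).
+ */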
+void
+spa_cleanup_old_sm_logs(spa_t *spa, dmu_tx_t *tx)
+{
+ objset_t *mos = spa_meta_objset(spa);
+
+ uint64_t spacemap_zap;
+ int error = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
+ DMU_POOL_LOG_SPACEMAP_ZAP, sizeof (spacemap_zap), 1, &spacemap_zap);
+ if (error == ENOENT) {
+ ASSERT(avl_is_empty(&spa->spa_sm_logs_by_txg));
+ return;
+ }
+ VERIFY0(error);
+
+ metaslab_t *oldest = avl_first(&spa->spa_metaslabs_by_flushed);
+ uint64_t oldest_flushed_txg = metaslab_unflushed_txg(oldest);
+
+ /* Free all log space maps older than the oldest_flushed_txg. */
+ for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
+ sls && sls->sls_txg < oldest_flushed_txg;
+ sls = avl_first(&spa->spa_sm_logs_by_txg)) {
+ ASSERT0(sls->sls_mscount);
+ avl_remove(&spa->spa_sm_logs_by_txg, sls);
+ space_map_free_obj(mos, sls->sls_sm_obj, tx);
+ VERIFY0(zap_remove_int(mos, spacemap_zap, sls->sls_txg, tx));
+ spa->spa_unflushed_stats.sus_nblocks -= sls->sls_nblocks;
+ kmem_free(sls, sizeof (spa_log_sm_t));
+ }
+}
+
+static spa_log_sm_t *
+spa_log_sm_alloc(uint64_t sm_obj, uint64_t txg)
+{
+ spa_log_sm_t *sls = kmem_zalloc(sizeof (*sls), KM_SLEEP);
+ sls->sls_sm_obj = sm_obj;
+ sls->sls_txg = txg;
+ return (sls);
+}
+
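+/*
+ * Create the log space map of the current TXG if it doesn't exist yet: on
+ * first use also create the spacemap ZAP in the MOS and activate the
+ * feature, then allocate a new space map object, register it under this
+ * TXG in both the ZAP and spa_sm_logs_by_txg, and open it as the syncing
+ * log (spa_syncing_log_sm).
+ */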
+void
+spa_generate_syncing_log_sm(spa_t *spa, dmu_tx_t *tx)
+{
+ uint64_t txg = dmu_tx_get_txg(tx);
+ objset_t *mos = spa_meta_objset(spa);
+
+ if (spa_syncing_log_sm(spa) != NULL)
+ return;
+
+ if (!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP))
+ return;
+
+ uint64_t spacemap_zap;
+ int error = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
+ DMU_POOL_LOG_SPACEMAP_ZAP, sizeof (spacemap_zap), 1, &spacemap_zap);
+ if (error == ENOENT) {
+ ASSERT(avl_is_empty(&spa->spa_sm_logs_by_txg));
+
+ error = 0;
+ spacemap_zap = zap_create(mos,
+ DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
+ VERIFY0(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
+ DMU_POOL_LOG_SPACEMAP_ZAP, sizeof (spacemap_zap), 1,
+ &spacemap_zap, tx));
+ spa_feature_incr(spa, SPA_FEATURE_LOG_SPACEMAP, tx);
+ }
+ VERIFY0(error);
+
+ uint64_t sm_obj;
+ ASSERT3U(zap_lookup_int_key(mos, spacemap_zap, txg, &sm_obj),
+ ==, ENOENT);
+ sm_obj = space_map_alloc(mos, zfs_log_sm_blksz, tx);
+ VERIFY0(zap_add_int_key(mos, spacemap_zap, txg, sm_obj, tx));
+ avl_add(&spa->spa_sm_logs_by_txg, spa_log_sm_alloc(sm_obj, txg));
+
+ /*
+ * We pass UINT64_MAX as the space map's representation size
+ * and SPA_MINBLOCKSHIFT as the shift, to make the space map
+ * accept any sort of segment since there's no real advantage
+ * to being more restrictive (given that we're already going
+ * to be using 2-word entries).
+ */
+ VERIFY0(space_map_open(&spa->spa_syncing_log_sm, mos, sm_obj,
+ 0, UINT64_MAX, SPA_MINBLOCKSHIFT));
+
+ /*
+ * If the log space map feature was just enabled, the blocklimit
+ * has not yet been set.
+ */
+ if (spa_log_sm_blocklimit(spa) == 0)
+ spa_log_sm_set_blocklimit(spa);
+}
+
+/*
+ * Find all the log space maps stored in the space map ZAP and sort
+ * them by their TXG in spa_sm_logs_by_txg.
+ */
+static int
+spa_ld_log_sm_metadata(spa_t *spa)
+{
+ int error;
+ uint64_t spacemap_zap;
+
+ ASSERT(avl_is_empty(&spa->spa_sm_logs_by_txg));
+
+ error = zap_lookup(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
+ DMU_POOL_LOG_SPACEMAP_ZAP, sizeof (spacemap_zap), 1, &spacemap_zap);
+ if (error == ENOENT) {
+ /* the space map ZAP doesn't exist yet */
+ return (0);
+ } else if (error != 0) {
+ spa_load_failed(spa, "spa_ld_unflushed_txgs(): failed at "
+ "zap_lookup(DMU_POOL_DIRECTORY_OBJECT) [error %d]",
+ error);
+ return (error);
+ }
+
+ zap_cursor_t zc;
+ zap_attribute_t za;
+ for (zap_cursor_init(&zc, spa_meta_objset(spa), spacemap_zap);
+ zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) {
+ uint64_t log_txg = zfs_strtonum(za.za_name, NULL);
+ spa_log_sm_t *sls =
+ spa_log_sm_alloc(za.za_first_integer, log_txg);
+ avl_add(&spa->spa_sm_logs_by_txg, sls);
+ }
+ zap_cursor_fini(&zc);
+
+ for (metaslab_t *m = avl_first(&spa->spa_metaslabs_by_flushed);
+ m; m = AVL_NEXT(&spa->spa_metaslabs_by_flushed, m)) {
+ spa_log_sm_t target = { .sls_txg = metaslab_unflushed_txg(m) };
+ spa_log_sm_t *sls = avl_find(&spa->spa_sm_logs_by_txg,
+ &target, NULL);
+ sls->sls_mscount++;
+ }
+
+ return (0);
+}
+
+typedef struct spa_ld_log_sm_arg {
+ spa_t *slls_spa;
+ uint64_t slls_txg;
+} spa_ld_log_sm_arg_t;
+
+static int
+spa_ld_log_sm_cb(space_map_entry_t *sme, void *arg)
+{
+ uint64_t offset = sme->sme_offset;
+ uint64_t size = sme->sme_run;
+ uint32_t vdev_id = sme->sme_vdev;
+
+ spa_ld_log_sm_arg_t *slls = arg;
+ spa_t *spa = slls->slls_spa;
+
+ vdev_t *vd = vdev_lookup_top(spa, vdev_id);
+
+ /*
+ * If the vdev has been removed (i.e. it is indirect or a hole)
+ * skip this entry. The contents of this vdev have already moved
+ * elsewhere.
+ */
+ if (!vdev_is_concrete(vd))
+ return (0);
+
+ metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
+ ASSERT(!ms->ms_loaded);
+
+ /*
+ * If we have already flushed entries for this TXG to this
+ * metaslab's space map, then ignore it. Note that we flush
+ * before processing any allocations/frees for that TXG, so
+ * the metaslab's space map only has entries from *before*
+ * the unflushed TXG.
+ */
+ if (slls->slls_txg < metaslab_unflushed_txg(ms))
+ return (0);
+
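+ /*
+ * Replay this entry into the unflushed range trees: an ALLOC cancels any
+ * overlapping unflushed free and its remainder is recorded as an
+ * unflushed alloc, and vice versa for a FREE.
+ */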
+ switch (sme->sme_type) {
+ case SM_ALLOC:
+ range_tree_remove_xor_add_segment(offset, offset + size,
+ ms->ms_unflushed_frees, ms->ms_unflushed_allocs);
+ break;
+ case SM_FREE:
+ range_tree_remove_xor_add_segment(offset, offset + size,
+ ms->ms_unflushed_allocs, ms->ms_unflushed_frees);
+ break;
+ default:
+ panic("invalid maptype_t");
+ break;
+ }
+ return (0);
+}
+
+static int
+spa_ld_log_sm_data(spa_t *spa)
+{
+ int error = 0;
+
+ /*
+ * If we are not going to do any writes there is no need
+ * to read the log space maps.
+ */
+ if (!spa_writeable(spa))
+ return (0);
+
+ ASSERT0(spa->spa_unflushed_stats.sus_nblocks);
+ ASSERT0(spa->spa_unflushed_stats.sus_memused);
+
+ hrtime_t read_logs_starttime = gethrtime();
+ /* this is a no-op when we don't have space map logs */
+ for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
+ sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
+ space_map_t *sm = NULL;
+ error = space_map_open(&sm, spa_meta_objset(spa),
+ sls->sls_sm_obj, 0, UINT64_MAX, SPA_MINBLOCKSHIFT);
+ if (error != 0) {
+ spa_load_failed(spa, "spa_ld_log_sm_data(): failed at "
+ "space_map_open(obj=%llu) [error %d]",
+ (u_longlong_t)sls->sls_sm_obj, error);
+ goto out;
+ }
+
+ struct spa_ld_log_sm_arg vla = {
+ .slls_spa = spa,
+ .slls_txg = sls->sls_txg
+ };
+ error = space_map_iterate(sm, space_map_length(sm),
+ spa_ld_log_sm_cb, &vla);
+ if (error != 0) {
+ space_map_close(sm);
+ spa_load_failed(spa, "spa_ld_log_sm_data(): failed "
+ "at space_map_iterate(obj=%llu) [error %d]",
+ (u_longlong_t)sls->sls_sm_obj, error);
+ goto out;
+ }
+
+ ASSERT0(sls->sls_nblocks);
+ sls->sls_nblocks = space_map_nblocks(sm);
+ spa->spa_unflushed_stats.sus_nblocks += sls->sls_nblocks;
+ summary_add_data(spa, sls->sls_txg,
+ sls->sls_mscount, sls->sls_nblocks);
+
+ space_map_close(sm);
+ }
+ hrtime_t read_logs_endtime = gethrtime();
+ spa_load_note(spa,
+ "read %llu log space maps (%llu total blocks - blksz = %llu bytes) "
+ "in %lld ms", (u_longlong_t)avl_numnodes(&spa->spa_sm_logs_by_txg),
+ (u_longlong_t)spa_log_sm_nblocks(spa),
+ (u_longlong_t)zfs_log_sm_blksz,
+ (longlong_t)((read_logs_endtime - read_logs_starttime) / 1000000));
+
+out:
+ /*
+ * Now that the metaslabs contain their unflushed changes:
+ * [1] recalculate their actual allocated space
+ * [2] recalculate their weights
+ * [3] sum up the memory usage of their unflushed range trees
+ * [4] optionally load them, if debug_load is set
+ *
+ * Note that even in the case where we get here because of an
+ * error (e.g. error != 0), we still want to update the fields
+ * below in order to have a proper teardown in spa_unload().
+ */
+ for (metaslab_t *m = avl_first(&spa->spa_metaslabs_by_flushed);
+ m != NULL; m = AVL_NEXT(&spa->spa_metaslabs_by_flushed, m)) {
+ mutex_enter(&m->ms_lock);
+ m->ms_allocated_space = space_map_allocated(m->ms_sm) +
+ range_tree_space(m->ms_unflushed_allocs) -
+ range_tree_space(m->ms_unflushed_frees);
+
+ vdev_t *vd = m->ms_group->mg_vd;
+ metaslab_space_update(vd, m->ms_group->mg_class,
+ range_tree_space(m->ms_unflushed_allocs), 0, 0);
+ metaslab_space_update(vd, m->ms_group->mg_class,
+ -range_tree_space(m->ms_unflushed_frees), 0, 0);
+
+ ASSERT0(m->ms_weight & METASLAB_ACTIVE_MASK);
+ metaslab_recalculate_weight_and_sort(m);
+
+ spa->spa_unflushed_stats.sus_memused +=
+ metaslab_unflushed_changes_memused(m);
+
+ if (metaslab_debug_load && m->ms_sm != NULL) {
+ VERIFY0(metaslab_load(m));
+ }
+ mutex_exit(&m->ms_lock);
+ }
+
+ return (error);
+}
+
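+/*
+ * Read the per-metaslab unflushed txgs of a top-level vdev. They are stored
+ * as an array of metaslab_unflushed_phys_t records in an object referenced
+ * by the vdev's top-level ZAP (VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS), indexed
+ * by metaslab id. Metaslabs with a non-zero unflushed txg are inserted into
+ * spa_metaslabs_by_flushed.
+ */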
+static int
+spa_ld_unflushed_txgs(vdev_t *vd)
+{
+ spa_t *spa = vd->vdev_spa;
+ objset_t *mos = spa_meta_objset(spa);
+
+ if (vd->vdev_top_zap == 0)
+ return (0);
+
+ uint64_t object = 0;
+ int error = zap_lookup(mos, vd->vdev_top_zap,
+ VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS,
+ sizeof (uint64_t), 1, &object);
+ if (error == ENOENT)
+ return (0);
+ else if (error != 0) {
+ spa_load_failed(spa, "spa_ld_unflushed_txgs(): failed at "
+ "zap_lookup(vdev_top_zap=%llu) [error %d]",
+ (u_longlong_t)vd->vdev_top_zap, error);
+ return (error);
+ }
+
+ for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
+ metaslab_t *ms = vd->vdev_ms[m];
+ ASSERT(ms != NULL);
+
+ metaslab_unflushed_phys_t entry;
+ uint64_t entry_size = sizeof (entry);
+ uint64_t entry_offset = ms->ms_id * entry_size;
+
+ error = dmu_read(mos, object,
+ entry_offset, entry_size, &entry, 0);
+ if (error != 0) {
+ spa_load_failed(spa, "spa_ld_unflushed_txgs(): "
+ "failed at dmu_read(obj=%llu) [error %d]",
+ (u_longlong_t)object, error);
+ return (error);
+ }
+
+ ms->ms_unflushed_txg = entry.msp_unflushed_txg;
+ if (ms->ms_unflushed_txg != 0) {
+ mutex_enter(&spa->spa_flushed_ms_lock);
+ avl_add(&spa->spa_metaslabs_by_flushed, ms);
+ mutex_exit(&spa->spa_flushed_ms_lock);
+ }
+ }
+ return (0);
+}
+
+/*
+ * Read all the log space map entries into their respective
+ * metaslab unflushed trees and keep them sorted by TXG in the
+ * SPA's metadata. In addition, set up all the metadata for the
+ * memory and the block heuristics.
+ */
+int
+spa_ld_log_spacemaps(spa_t *spa)
+{
+ int error;
+
+ spa_log_sm_set_blocklimit(spa);
+
+ for (uint64_t c = 0; c < spa->spa_root_vdev->vdev_children; c++) {
+ vdev_t *vd = spa->spa_root_vdev->vdev_child[c];
+ error = spa_ld_unflushed_txgs(vd);
+ if (error != 0)
+ return (error);
+ }
+
+ error = spa_ld_log_sm_metadata(spa);
+ if (error != 0)
+ return (error);
+
+ /*
+ * Note: we don't actually expect anything to change at this point
+ * but we grab the config lock so we don't fail any assertions
+ * when using vdev_lookup_top().
+ */
+ spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
+ error = spa_ld_log_sm_data(spa);
+ spa_config_exit(spa, SCL_CONFIG, FTAG);
+
+ return (error);
+}
+
+#if defined(_KERNEL)
+/* BEGIN CSTYLED */
+module_param(zfs_unflushed_max_mem_amt, ulong, 0644);
+MODULE_PARM_DESC(zfs_unflushed_max_mem_amt,
+ "Specific hard-limit in memory that ZFS allows to be used for "
+ "unflushed changes");
+
+module_param(zfs_unflushed_max_mem_ppm, ulong, 0644);
+MODULE_PARM_DESC(zfs_unflushed_max_mem_ppm,
+ "Percentage of the overall system memory that ZFS allows to be "
+ "used for unflushed changes (value is calculated over 1000000 for "
+ "finer granularity");
+
+module_param(zfs_unflushed_log_block_max, ulong, 0644);
+MODULE_PARM_DESC(zfs_unflushed_log_block_max,
+ "Hard limit (upper-bound) in the size of the space map log "
+ "in terms of blocks.");
+
+module_param(zfs_unflushed_log_block_min, ulong, 0644);
+MODULE_PARM_DESC(zfs_unflushed_log_block_min,
+ "Lower-bound limit for the maximum amount of blocks allowed in "
+ "log spacemap (see zfs_unflushed_log_block_max)");
+
+module_param(zfs_unflushed_log_block_pct, ulong, 0644);
+MODULE_PARM_DESC(zfs_unflushed_log_block_pct,
+ "Tunable used to determine the number of blocks that can be "
+ "used for the spacemap log, expressed as a percentage of the "
+ " total number of metaslabs in the pool (e.g. 400 means the "
+ " number of log blocks is capped at 4 times the number of "
+ "metaslabs)");
+
+module_param(zfs_max_log_walking, ulong, 0644);
+MODULE_PARM_DESC(zfs_max_log_walking,
+ "The number of past TXGs that the flushing algorithm of the log "
+ "spacemap feature uses to estimate incoming log blocks");
+
+module_param(zfs_max_logsm_summary_length, ulong, 0644);
+MODULE_PARM_DESC(zfs_max_logsm_summary_length,
+ "Maximum number of rows allowed in the summary of "
+ "the spacemap log");
+
+module_param(zfs_min_metaslabs_to_flush, ulong, 0644);
+MODULE_PARM_DESC(zfs_min_metaslabs_to_flush,
+ "Minimum number of metaslabs to flush per dirty TXG");
+
+module_param(zfs_keep_log_spacemaps_at_export, int, 0644);
+MODULE_PARM_DESC(zfs_keep_log_spacemaps_at_export,
+ "Prevent the log spacemaps from being flushed and destroyed "
+ "during pool export/destroy");
+/* END CSTYLED */
+#endif
diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c
index a111a9e4e..3d6c0ee3e 100644
--- a/module/zfs/spa_misc.c
+++ b/module/zfs/spa_misc.c
@@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
@@ -64,7 +64,7 @@
/*
* SPA locking
*
- * There are four basic locks for managing spa_t structures:
+ * There are three basic locks for managing spa_t structures:
*
* spa_namespace_lock (global mutex)
*
@@ -613,6 +613,15 @@ spa_deadman(void *arg)
MSEC_TO_TICK(zfs_deadman_checktime_ms));
}
+int
+spa_log_sm_sort_by_txg(const void *va, const void *vb)
+{
+ const spa_log_sm_t *a = va;
+ const spa_log_sm_t *b = vb;
+
+ return (AVL_CMP(a->sls_txg, b->sls_txg));
+}
+
/*
* Create an uninitialized spa_t with the given name. Requires
* spa_namespace_lock. The caller must ensure that the spa_t doesn't already
@@ -640,6 +649,7 @@ spa_add(const char *name, nvlist_t *config, const char *altroot)
mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL);
+ mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
@@ -685,6 +695,12 @@ spa_add(const char *name, nvlist_t *config, const char *altroot)
avl_create(&spa->spa_alloc_trees[i], zio_bookmark_compare,
sizeof (zio_t), offsetof(zio_t, io_alloc_node));
}
+ avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed,
+ sizeof (metaslab_t), offsetof(metaslab_t, ms_spa_txg_node));
+ avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg,
+ sizeof (spa_log_sm_t), offsetof(spa_log_sm_t, sls_node));
+ list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t),
+ offsetof(log_summary_entry_t, lse_node));
/*
* Every pool starts with the default cachefile
@@ -748,7 +764,7 @@ spa_remove(spa_t *spa)
spa_config_dirent_t *dp;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
- ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
+ ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED);
ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);
nvlist_free(spa->spa_config_splitting);
@@ -775,6 +791,9 @@ spa_remove(spa_t *spa)
kmem_free(spa->spa_alloc_trees, spa->spa_alloc_count *
sizeof (avl_tree_t));
+ avl_destroy(&spa->spa_metaslabs_by_flushed);
+ avl_destroy(&spa->spa_sm_logs_by_txg);
+ list_destroy(&spa->spa_log_summary);
list_destroy(&spa->spa_config_list);
list_destroy(&spa->spa_leaf_list);
@@ -799,6 +818,7 @@ spa_remove(spa_t *spa)
cv_destroy(&spa->spa_scrub_io_cv);
cv_destroy(&spa->spa_suspend_cv);
+ mutex_destroy(&spa->spa_flushed_ms_lock);
mutex_destroy(&spa->spa_async_lock);
mutex_destroy(&spa->spa_errlist_lock);
mutex_destroy(&spa->spa_errlog_lock);
@@ -2570,6 +2590,12 @@ spa_missing_tvds_allowed(spa_t *spa)
return (spa->spa_missing_tvds_allowed);
}
+space_map_t *
+spa_syncing_log_sm(spa_t *spa)
+{
+ return (spa->spa_syncing_log_sm);
+}
+
void
spa_set_missing_tvds(spa_t *spa, uint64_t missing)
{
diff --git a/module/zfs/space_map.c b/module/zfs/space_map.c
index d9cd8767e..b9467b26b 100644
--- a/module/zfs/space_map.c
+++ b/module/zfs/space_map.c
@@ -23,7 +23,7 @@
* Use is subject to license terms.
*/
/*
- * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@@ -1067,3 +1067,11 @@ space_map_length(space_map_t *sm)
{
return (sm != NULL ? sm->sm_phys->smp_length : 0);
}
+
+uint64_t
+space_map_nblocks(space_map_t *sm)
+{
+ if (sm == NULL)
+ return (0);
+ return (DIV_ROUND_UP(space_map_length(sm), sm->sm_blksz));
+}
diff --git a/module/zfs/txg.c b/module/zfs/txg.c
index cd9cb742c..b7914e000 100644
--- a/module/zfs/txg.c
+++ b/module/zfs/txg.c
@@ -21,7 +21,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Portions Copyright 2011 Martin Matuska
- * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@@ -272,7 +272,7 @@ txg_sync_stop(dsl_pool_t *dp)
ASSERT3U(tx->tx_threads, ==, 2);
/*
- * We need to ensure that we've vacated the deferred space_maps.
+ * We need to ensure that we've vacated the deferred metaslab trees.
*/
txg_wait_synced(dp, tx->tx_open_txg + TXG_DEFER_SIZE);
diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c
index 81ef87e25..5644b9c5b 100644
--- a/module/zfs/vdev.c
+++ b/module/zfs/vdev.c
@@ -76,7 +76,7 @@ int vdev_validate_skip = B_FALSE;
* Since the DTL space map of a vdev is not expected to have a lot of
* entries, we default its block size to 4K.
*/
-int vdev_dtl_sm_blksz = (1 << 12);
+int zfs_vdev_dtl_sm_blksz = (1 << 12);
/*
* Rate limit slow IO (delay) events to this many per second.
@@ -99,7 +99,7 @@ int zfs_scan_ignore_errors = 0;
* the end of each transaction can benefit from a higher I/O bandwidth
* (e.g. vdev_obsolete_sm), thus we default their block size to 128K.
*/
-int vdev_standard_sm_blksz = (1 << 17);
+int zfs_vdev_standard_sm_blksz = (1 << 17);
/*
* Tunable parameter for debugging or performance analysis. Setting this
@@ -924,6 +924,7 @@ vdev_free(vdev_t *vd)
if (vd->vdev_mg != NULL) {
vdev_metaslab_fini(vd);
metaslab_group_destroy(vd->vdev_mg);
+ vd->vdev_mg = NULL;
}
ASSERT0(vd->vdev_stat.vs_space);
@@ -1353,6 +1354,13 @@ vdev_metaslab_init(vdev_t *vd, uint64_t txg)
if (txg == 0)
spa_config_exit(spa, SCL_ALLOC, FTAG);
+ /*
+ * Regardless whether this vdev was just added or it is being
+ * expanded, the metaslab count has changed. Recalculate the
+ * block limit.
+ */
+ spa_log_sm_set_blocklimit(spa);
+
return (0);
}
@@ -2867,7 +2875,7 @@ vdev_dtl_sync(vdev_t *vd, uint64_t txg)
if (vd->vdev_dtl_sm == NULL) {
uint64_t new_object;
- new_object = space_map_alloc(mos, vdev_dtl_sm_blksz, tx);
+ new_object = space_map_alloc(mos, zfs_vdev_dtl_sm_blksz, tx);
VERIFY3U(new_object, !=, 0);
VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object,
@@ -2881,7 +2889,7 @@ vdev_dtl_sync(vdev_t *vd, uint64_t txg)
range_tree_walk(rt, range_tree_add, rtsync);
mutex_exit(&vd->vdev_dtl_lock);
- space_map_truncate(vd->vdev_dtl_sm, vdev_dtl_sm_blksz, tx);
+ space_map_truncate(vd->vdev_dtl_sm, zfs_vdev_dtl_sm_blksz, tx);
space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, SM_NO_VDEVID, tx);
range_tree_vacate(rtsync, NULL, NULL);
@@ -3172,6 +3180,25 @@ vdev_validate_aux(vdev_t *vd)
return (0);
}
+static void
+vdev_destroy_ms_flush_data(vdev_t *vd, dmu_tx_t *tx)
+{
+ objset_t *mos = spa_meta_objset(vd->vdev_spa);
+
+ if (vd->vdev_top_zap == 0)
+ return;
+
+ uint64_t object = 0;
+ int err = zap_lookup(mos, vd->vdev_top_zap,
+ VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, &object);
+ if (err == ENOENT)
+ return;
+
+ VERIFY0(dmu_object_free(mos, object, tx));
+ VERIFY0(zap_remove(mos, vd->vdev_top_zap,
+ VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, tx));
+}
+
/*
* Free the objects used to store this vdev's spacemaps, and the array
* that points to them.
@@ -3199,6 +3226,7 @@ vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx)
kmem_free(smobj_array, array_bytes);
VERIFY0(dmu_object_free(mos, vd->vdev_ms_array, tx));
+ vdev_destroy_ms_flush_data(vd, tx);
vd->vdev_ms_array = 0;
}
@@ -4762,6 +4790,10 @@ module_param(zfs_vdev_default_ms_count, int, 0644);
MODULE_PARM_DESC(zfs_vdev_default_ms_count,
"Target number of metaslabs per top-level vdev");
+module_param(zfs_vdev_default_ms_shift, int, 0644);
+MODULE_PARM_DESC(zfs_vdev_default_ms_shift,
+ "Default limit for metaslab size");
+
module_param(zfs_vdev_min_ms_count, int, 0644);
MODULE_PARM_DESC(zfs_vdev_min_ms_count,
"Minimum number of metaslabs per top-level vdev");
diff --git a/module/zfs/vdev_indirect.c b/module/zfs/vdev_indirect.c
index 4539fa638..5827d3fac 100644
--- a/module/zfs/vdev_indirect.c
+++ b/module/zfs/vdev_indirect.c
@@ -16,6 +16,7 @@
/*
* Copyright (c) 2014, 2017 by Delphix. All rights reserved.
* Copyright (c) 2019, loli10K <[email protected]>. All rights reserved.
+ * Copyright (c) 2014, 2019 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@@ -825,7 +826,7 @@ vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx)
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object == 0) {
obsolete_sm_object = space_map_alloc(spa->spa_meta_objset,
- vdev_standard_sm_blksz, tx);
+ zfs_vdev_standard_sm_blksz, tx);
ASSERT(vd->vdev_top_zap != 0);
VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
diff --git a/module/zfs/vdev_removal.c b/module/zfs/vdev_removal.c
index 6f64edd8c..3d6df0c41 100644
--- a/module/zfs/vdev_removal.c
+++ b/module/zfs/vdev_removal.c
@@ -1203,6 +1203,7 @@ vdev_remove_complete(spa_t *spa)
vdev_metaslab_fini(vd);
metaslab_group_destroy(vd->vdev_mg);
vd->vdev_mg = NULL;
+ spa_log_sm_set_blocklimit(spa);
}
ASSERT0(vd->vdev_stat.vs_space);
ASSERT0(vd->vdev_stat.vs_dspace);
@@ -1461,6 +1462,10 @@ spa_vdev_remove_thread(void *arg)
VERIFY0(space_map_load(msp->ms_sm,
svr->svr_allocd_segs, SM_ALLOC));
+ range_tree_walk(msp->ms_unflushed_allocs,
+ range_tree_add, svr->svr_allocd_segs);
+ range_tree_walk(msp->ms_unflushed_frees,
+ range_tree_remove, svr->svr_allocd_segs);
range_tree_walk(msp->ms_freeing,
range_tree_remove, svr->svr_allocd_segs);
@@ -1685,6 +1690,11 @@ spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx)
mutex_enter(&svr->svr_lock);
VERIFY0(space_map_load(msp->ms_sm,
svr->svr_allocd_segs, SM_ALLOC));
+
+ range_tree_walk(msp->ms_unflushed_allocs,
+ range_tree_add, svr->svr_allocd_segs);
+ range_tree_walk(msp->ms_unflushed_frees,
+ range_tree_remove, svr->svr_allocd_segs);
range_tree_walk(msp->ms_freeing,
range_tree_remove, svr->svr_allocd_segs);
@@ -1813,19 +1823,14 @@ vdev_remove_make_hole_and_free(vdev_t *vd)
uint64_t id = vd->vdev_id;
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
- boolean_t last_vdev = (id == (rvd->vdev_children - 1));
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
vdev_free(vd);
- if (last_vdev) {
- vdev_compact_children(rvd);
- } else {
- vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
- vdev_add_child(rvd, vd);
- }
+ vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
+ vdev_add_child(rvd, vd);
vdev_config_dirty(rvd);
/*
@@ -1887,7 +1892,28 @@ spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
vdev_dirty_leaves(vd, VDD_DTL, *txg);
vdev_config_dirty(vd);
+ /*
+ * When the log space map feature is enabled we look at
+ * the vdev's top_zap to find the on-disk flush data of
+ * the metaslab we just flushed. Thus, while removing a
+ * log vdev we make sure to call vdev_metaslab_fini()
+ * first, which removes all metaslabs of this vdev from
+ * spa_metaslabs_by_flushed before vdev_remove_empty()
+ * destroys the top_zap of this log vdev.
+ *
+ * This avoids the scenario where we flush a metaslab
+ * from the log vdev being removed that doesn't have a
+ * top_zap and end up failing to lookup its on-disk flush
+ * data.
+ *
+ * We don't call metaslab_group_destroy() right away
+ * though (it will be called in vdev_free() later) as
+ * during metaslab_sync() of metaslabs from other vdevs
+ * we may touch the metaslab group of this vdev through
+ * metaslab_class_histogram_verify()
+ */
vdev_metaslab_fini(vd);
+ spa_log_sm_set_blocklimit(spa);
spa_vdev_config_exit(spa, NULL, *txg, 0, FTAG);
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index a1771df6f..b740afde6 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -1117,10 +1117,16 @@ zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
* deferred, and which will not need to do a read (i.e. not GANG or
* DEDUP), can be processed immediately. Otherwise, put them on the
* in-memory list for later processing.
+ *
+ * Note that we only defer frees after zfs_sync_pass_deferred_free
+ * when the log space map feature is disabled. [see relevant comment
+ * in spa_sync_iterate_to_convergence()]
*/
- if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp) ||
+ if (BP_IS_GANG(bp) ||
+ BP_GET_DEDUP(bp) ||
txg != spa->spa_syncing_txg ||
- spa_sync_pass(spa) >= zfs_sync_pass_deferred_free) {
+ (spa_sync_pass(spa) >= zfs_sync_pass_deferred_free &&
+ !spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))) {
bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
} else {
VERIFY0(zio_wait(zio_free_sync(NULL, spa, txg, bp, 0)));
@@ -1136,7 +1142,6 @@ zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
ASSERT(!BP_IS_HOLE(bp));
ASSERT(spa_syncing_txg(spa) == txg);
- ASSERT(spa_sync_pass(spa) < zfs_sync_pass_deferred_free);
if (BP_IS_EMBEDDED(bp))
return (zio_null(pio, spa, NULL, NULL, NULL, 0));
diff --git a/tests/runfiles/linux.run b/tests/runfiles/linux.run
index 43c7a748f..36ab5ef22 100644
--- a/tests/runfiles/linux.run
+++ b/tests/runfiles/linux.run
@@ -927,3 +927,9 @@ tags = ['functional', 'zvol', 'zvol_swap']
[tests/functional/libzfs]
tests = ['many_fds', 'libzfs_input']
tags = ['functional', 'libzfs']
+
+[tests/functional/log_spacemap]
+tests = ['log_spacemap_import_logs']
+pre =
+post =
+tags = ['functional', 'log_spacemap']
diff --git a/tests/zfs-tests/tests/functional/Makefile.am b/tests/zfs-tests/tests/functional/Makefile.am
index 2d23eb296..783fdfb8a 100644
--- a/tests/zfs-tests/tests/functional/Makefile.am
+++ b/tests/zfs-tests/tests/functional/Makefile.am
@@ -33,8 +33,8 @@ SUBDIRS = \
largest_pool \
libzfs \
limits \
- pyzfs \
link_count \
+ log_spacemap \
migration \
mmap \
mmp \
@@ -50,6 +50,7 @@ SUBDIRS = \
privilege \
procfs \
projectquota \
+ pyzfs \
quota \
raidz \
redacted_send \
diff --git a/tests/zfs-tests/tests/functional/arc/dbufstats_001_pos.ksh b/tests/zfs-tests/tests/functional/arc/dbufstats_001_pos.ksh
index 7ec9eaf4c..4884f11bb 100755
--- a/tests/zfs-tests/tests/functional/arc/dbufstats_001_pos.ksh
+++ b/tests/zfs-tests/tests/functional/arc/dbufstats_001_pos.ksh
@@ -58,7 +58,7 @@ function testdbufstat # stat_name dbufstat_filter
from_dbufstat=$(grep -w "$name" "$DBUFSTATS_FILE" | awk '{ print $3 }')
from_dbufs=$(dbufstat -bxn -i "$DBUFS_FILE" "$filter" | wc -l)
- within_tolerance $from_dbufstat $from_dbufs 9 \
+ within_tolerance $from_dbufstat $from_dbufs 15 \
|| log_fail "Stat $name exceeded tolerance"
}
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get.cfg b/tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get.cfg
index a9ca718cc..717ee9cb2 100644
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get.cfg
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get.cfg
@@ -79,6 +79,7 @@ typeset -a properties=(
"feature@redaction_bookmarks"
"feature@redacted_datasets"
"feature@bookmark_written"
+ "feature@log_spacemap"
)
# Additional properties added for Linux.
diff --git a/tests/zfs-tests/tests/functional/log_spacemap/Makefile.am b/tests/zfs-tests/tests/functional/log_spacemap/Makefile.am
new file mode 100644
index 000000000..a1e523426
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/log_spacemap/Makefile.am
@@ -0,0 +1,2 @@
+pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/log_spacemap
+dist_pkgdata_SCRIPTS = log_spacemap_import_logs.ksh
diff --git a/tests/zfs-tests/tests/functional/log_spacemap/log_spacemap_import_logs.ksh b/tests/zfs-tests/tests/functional/log_spacemap/log_spacemap_import_logs.ksh
new file mode 100755
index 000000000..702322a0c
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/log_spacemap/log_spacemap_import_logs.ksh
@@ -0,0 +1,81 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2019 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+#
+# DESCRIPTION:
+# Log spacemaps are generally destroyed at export in order to
+# not induce performance overheads at import time. As a result,
+# the log spacemap codepaths that read the logs at import time
+# are not tested outside of ztest and pools with DEBUG bits doing
+# many imports/exports while running the test suite.
+#
+# This test uses an internal tunable and forces ZFS to keep the
+# log spacemaps at export, and then re-imports the pool, thus
+# providing explicit testing of those codepaths. It also uses
+# another tunable to load all the metaslabs when the pool is
+# re-imported so more assertions and verifications will be hit.
+#
+# STRATEGY:
+# 1. Create pool.
+# 2. Do a couple of writes to generate some data for spacemap logs.
+# 3. Set tunable to keep logs after export.
+# 4. Export pool and verify that there are logs with zdb.
+# 5. Set tunable to load all metaslabs at import.
+# 6. Import pool.
+# 7. Reset tunables.
+#
+
+verify_runnable "global"
+
+function cleanup
+{
+ log_must set_tunable64 zfs_keep_log_spacemaps_at_export 0
+ log_must set_tunable64 metaslab_debug_load 0
+ if poolexists $LOGSM_POOL; then
+ log_must zpool destroy -f $LOGSM_POOL
+ fi
+}
+log_onexit cleanup
+
+LOGSM_POOL="logsm_import"
+TESTDISK="$(echo $DISKS | cut -d' ' -f1)"
+
+log_must zpool create -o cachefile=none -f $LOGSM_POOL $TESTDISK
+log_must zfs create $LOGSM_POOL/fs
+
+log_must dd if=/dev/urandom of=/$LOGSM_POOL/fs/00 bs=128k count=10
+log_must sync
+log_must dd if=/dev/urandom of=/$LOGSM_POOL/fs/00 bs=128k count=10
+log_must sync
+
+log_must set_tunable64 zfs_keep_log_spacemaps_at_export 1
+log_must zpool export $LOGSM_POOL
+
+LOGSM_COUNT=$(zdb -m -e $LOGSM_POOL | grep "Log Spacemap object" | wc -l)
+if (( LOGSM_COUNT == 0 )); then
+ log_fail "Pool does not have any log spacemaps after being exported"
+fi
+
+log_must set_tunable64 metaslab_debug_load 1
+log_must zpool import $LOGSM_POOL
+
+log_pass "Log spacemaps imported with no errors"
diff --git a/tests/zfs-tests/tests/functional/removal/removal_condense_export.ksh b/tests/zfs-tests/tests/functional/removal/removal_condense_export.ksh
index 655352f05..b5c2ed0ba 100755
--- a/tests/zfs-tests/tests/functional/removal/removal_condense_export.ksh
+++ b/tests/zfs-tests/tests/functional/removal/removal_condense_export.ksh
@@ -30,7 +30,7 @@ function reset
default_setup_noexit "$DISKS" "true"
log_onexit reset
-log_must set_tunable64 zfs_condense_indirect_commit_entry_delay_ms 1000
+log_must set_tunable64 zfs_condense_indirect_commit_entry_delay_ms 5000
log_must set_tunable64 zfs_condense_min_mapping_bytes 1
log_must zfs set recordsize=512 $TESTPOOL/$TESTFS
@@ -82,7 +82,7 @@ log_mustnot vdevs_in_pool $TESTPOOL $REMOVEDISK
log_must stride_dd -i /dev/urandom -o $TESTDIR/file -b 512 -c 20 -s 1024
sync_pool $TESTPOOL
-sleep 5
+sleep 4
sync_pool $TESTPOOL
log_must zpool export $TESTPOOL
zdb -e -p $REMOVEDISKPATH $TESTPOOL | grep 'Condensing indirect vdev' || \