Diffstat (limited to 'cmd/zdb/zdb.c')
-rw-r--r--  cmd/zdb/zdb.c  393
1 file changed, 355 insertions(+), 38 deletions(-)
diff --git a/cmd/zdb/zdb.c b/cmd/zdb/zdb.c
index e0ea07280..8ccbd5e8b 100644
--- a/cmd/zdb/zdb.c
+++ b/cmd/zdb/zdb.c
@@ -813,6 +813,12 @@ get_checkpoint_refcount(vdev_t *vd)
}
static int
+get_log_spacemap_refcount(spa_t *spa)
+{
+ return (avl_numnodes(&spa->spa_sm_logs_by_txg));
+}
+
+static int
verify_spacemap_refcounts(spa_t *spa)
{
uint64_t expected_refcount = 0;
@@ -826,6 +832,7 @@ verify_spacemap_refcounts(spa_t *spa)
actual_refcount += get_obsolete_refcount(spa->spa_root_vdev);
actual_refcount += get_prev_obsolete_spacemap_refcount(spa);
actual_refcount += get_checkpoint_refcount(spa->spa_root_vdev);
+ actual_refcount += get_log_spacemap_refcount(spa);
if (expected_refcount != actual_refcount) {
(void) printf("space map refcount mismatch: expected %lld != "
@@ -924,7 +931,7 @@ dump_spacemap(objset_t *os, space_map_t *sm)
alloc -= entry_run;
entry_id++;
}
- if ((uint64_t)alloc != space_map_allocated(sm)) {
+ if (alloc != space_map_allocated(sm)) {
(void) printf("space_map_object alloc (%lld) INCONSISTENT "
"with space map summary (%lld)\n",
(longlong_t)space_map_allocated(sm), (longlong_t)alloc);
@@ -990,23 +997,45 @@ dump_metaslab(metaslab_t *msp)
ASSERT(msp->ms_size == (1ULL << vd->vdev_ms_shift));
dump_spacemap(spa->spa_meta_objset, msp->ms_sm);
+
+ if (spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
+ (void) printf("\tFlush data:\n\tunflushed txg=%llu\n\n",
+ (u_longlong_t)metaslab_unflushed_txg(msp));
+ }
}
static void
print_vdev_metaslab_header(vdev_t *vd)
{
vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias;
- const char *bias_str;
+ const char *bias_str = "";
+ if (alloc_bias == VDEV_BIAS_LOG || vd->vdev_islog) {
+ bias_str = VDEV_ALLOC_BIAS_LOG;
+ } else if (alloc_bias == VDEV_BIAS_SPECIAL) {
+ bias_str = VDEV_ALLOC_BIAS_SPECIAL;
+ } else if (alloc_bias == VDEV_BIAS_DEDUP) {
+ bias_str = VDEV_ALLOC_BIAS_DEDUP;
+ }
+
+ uint64_t ms_flush_data_obj = 0;
+ if (vd->vdev_top_zap != 0) {
+ int error = zap_lookup(spa_meta_objset(vd->vdev_spa),
+ vd->vdev_top_zap, VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS,
+ sizeof (uint64_t), 1, &ms_flush_data_obj);
+ if (error != ENOENT) {
+ ASSERT0(error);
+ }
+ }
+
- bias_str = (alloc_bias == VDEV_BIAS_LOG || vd->vdev_islog) ?
- VDEV_ALLOC_BIAS_LOG :
- (alloc_bias == VDEV_BIAS_SPECIAL) ? VDEV_ALLOC_BIAS_SPECIAL :
- (alloc_bias == VDEV_BIAS_DEDUP) ? VDEV_ALLOC_BIAS_DEDUP :
- vd->vdev_islog ? "log" : "";
- (void) printf("\tvdev %10llu %s\n"
- "\t%-10s%5llu %-19s %-15s %-12s\n",
- (u_longlong_t)vd->vdev_id, bias_str,
+ (void) printf("\tvdev %10llu %s",
+ (u_longlong_t)vd->vdev_id, bias_str);
+
+ if (ms_flush_data_obj != 0) {
+ (void) printf(" ms_unflushed_phys object %llu",
+ (u_longlong_t)ms_flush_data_obj);
+ }
+
+ (void) printf("\n\t%-10s%5llu %-19s %-15s %-12s\n",
"metaslabs", (u_longlong_t)vd->vdev_ms_count,
"offset", "spacemap", "free");
(void) printf("\t%15s %19s %15s %12s\n",
@@ -1173,6 +1202,24 @@ dump_metaslabs(spa_t *spa)
}
static void
+dump_log_spacemaps(spa_t *spa)
+{
+ (void) printf("\nLog Space Maps in Pool:\n");
+ for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
+ sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
+ space_map_t *sm = NULL;
+ VERIFY0(space_map_open(&sm, spa_meta_objset(spa),
+ sls->sls_sm_obj, 0, UINT64_MAX, SPA_MINBLOCKSHIFT));
+
+ (void) printf("Log Spacemap object %llu txg %llu\n",
+ (u_longlong_t)sls->sls_sm_obj, (u_longlong_t)sls->sls_txg);
+ dump_spacemap(spa->spa_meta_objset, sm);
+ space_map_close(sm);
+ }
+ (void) printf("\n");
+}
+
+static void
dump_dde(const ddt_t *ddt, const ddt_entry_t *dde, uint64_t index)
{
const ddt_phys_t *ddp = dde->dde_phys;
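
[Review note] dump_log_spacemaps() in the hunk above walks spa_sm_logs_by_txg in txg order and prints one header per log followed by the usual dump_spacemap() entry listing. Purely illustrative output below, derived from the format strings; object and txg numbers are invented:

Log Space Maps in Pool:
Log Spacemap object 412 txg 10000
    ... dump_spacemap() entries ...
Log Spacemap object 437 txg 10001
    ... dump_spacemap() entries ...
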
@@ -3782,6 +3829,84 @@ static metaslab_ops_t zdb_metaslab_ops = {
NULL /* alloc */
};
+typedef int (*zdb_log_sm_cb_t)(spa_t *spa, space_map_entry_t *sme,
+ uint64_t txg, void *arg);
+
+typedef struct unflushed_iter_cb_arg {
+ spa_t *uic_spa;
+ uint64_t uic_txg;
+ void *uic_arg;
+ zdb_log_sm_cb_t uic_cb;
+} unflushed_iter_cb_arg_t;
+
+static int
+iterate_through_spacemap_logs_cb(space_map_entry_t *sme, void *arg)
+{
+ unflushed_iter_cb_arg_t *uic = arg;
+ return (uic->uic_cb(uic->uic_spa, sme, uic->uic_txg, uic->uic_arg));
+}
+
+static void
+iterate_through_spacemap_logs(spa_t *spa, zdb_log_sm_cb_t cb, void *arg)
+{
+ if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
+ return;
+
+ spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
+ for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
+ sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
+ space_map_t *sm = NULL;
+ VERIFY0(space_map_open(&sm, spa_meta_objset(spa),
+ sls->sls_sm_obj, 0, UINT64_MAX, SPA_MINBLOCKSHIFT));
+
+ unflushed_iter_cb_arg_t uic = {
+ .uic_spa = spa,
+ .uic_txg = sls->sls_txg,
+ .uic_arg = arg,
+ .uic_cb = cb
+ };
+
+ VERIFY0(space_map_iterate(sm, space_map_length(sm),
+ iterate_through_spacemap_logs_cb, &uic));
+ space_map_close(sm);
+ }
+ spa_config_exit(spa, SCL_CONFIG, FTAG);
+}
+
+/* ARGSUSED */
+static int
+load_unflushed_svr_segs_cb(spa_t *spa, space_map_entry_t *sme,
+ uint64_t txg, void *arg)
+{
+ spa_vdev_removal_t *svr = arg;
+
+ uint64_t offset = sme->sme_offset;
+ uint64_t size = sme->sme_run;
+
+ /* skip vdevs we don't care about */
+ if (sme->sme_vdev != svr->svr_vdev_id)
+ return (0);
+
+ vdev_t *vd = vdev_lookup_top(spa, sme->sme_vdev);
+ metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
+ ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
+
+ if (txg < metaslab_unflushed_txg(ms))
+ return (0);
+
+ vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
+ ASSERT(vim != NULL);
+ if (offset >= vdev_indirect_mapping_max_offset(vim))
+ return (0);
+
+ if (sme->sme_type == SM_ALLOC)
+ range_tree_add(svr->svr_allocd_segs, offset, size);
+ else
+ range_tree_remove(svr->svr_allocd_segs, offset, size);
+
+ return (0);
+}
+
/* ARGSUSED */
static void
claim_segment_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
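
[Review note] iterate_through_spacemap_logs() is the one traversal primitive the rest of this patch builds on; every later consumer (SVR segment loading, unflushed-space accounting, ms_allocatable reconstruction, obsolete-entry statistics) is just a zdb_log_sm_cb_t. A minimal hypothetical callback in the same zdb.c context (sketch only, not part of the patch):

/* ARGSUSED */
static int
count_alloc_entries_cb(spa_t *spa, space_map_entry_t *sme,
    uint64_t txg, void *arg)
{
	uint64_t *count = arg;

	/* tally only allocation entries across all logs */
	if (sme->sme_type == SM_ALLOC)
		(*count)++;
	return (0);	/* a nonzero return would stop the iteration */
}

/* usage: uint64_t n = 0; iterate_through_spacemap_logs(spa, count_alloc_entries_cb, &n); */
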
@@ -3830,36 +3955,35 @@ zdb_claim_removing(spa_t *spa, zdb_cb_t *zcb)
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
+ ASSERT0(range_tree_space(svr->svr_allocd_segs));
+
+ range_tree_t *allocs = range_tree_create(NULL, NULL);
for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
metaslab_t *msp = vd->vdev_ms[msi];
if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim))
break;
- ASSERT0(range_tree_space(svr->svr_allocd_segs));
- if (msp->ms_sm != NULL) {
- VERIFY0(space_map_load(msp->ms_sm,
- svr->svr_allocd_segs, SM_ALLOC));
- /*
- * Clear everything past what has been synced unless
- * it's past the spacemap, because we have not allocated
- * mappings for it yet.
- */
- uint64_t vim_max_offset =
- vdev_indirect_mapping_max_offset(vim);
- uint64_t sm_end = msp->ms_sm->sm_start +
- msp->ms_sm->sm_size;
- if (sm_end > vim_max_offset)
- range_tree_clear(svr->svr_allocd_segs,
- vim_max_offset, sm_end - vim_max_offset);
- }
- zcb->zcb_removing_size +=
- range_tree_space(svr->svr_allocd_segs);
- range_tree_vacate(svr->svr_allocd_segs, claim_segment_cb, vd);
- }
+ ASSERT0(range_tree_space(allocs));
+ if (msp->ms_sm != NULL)
+ VERIFY0(space_map_load(msp->ms_sm, allocs, SM_ALLOC));
+ range_tree_vacate(allocs, range_tree_add, svr->svr_allocd_segs);
+ }
+ range_tree_destroy(allocs);
+
+ iterate_through_spacemap_logs(spa, load_unflushed_svr_segs_cb, svr);
+
+ /*
+ * Clear everything past what has been synced,
+ * because we have not allocated mappings for
+ * it yet.
+ */
+ range_tree_clear(svr->svr_allocd_segs,
+ vdev_indirect_mapping_max_offset(vim),
+ vd->vdev_asize - vdev_indirect_mapping_max_offset(vim));
+
+ zcb->zcb_removing_size += range_tree_space(svr->svr_allocd_segs);
+ range_tree_vacate(svr->svr_allocd_segs, claim_segment_cb, vd);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
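
[Review note] The txg filter shared by these callbacks is what prevents double-counting. Worked example with hypothetical numbers: if metaslab_unflushed_txg(ms) is 100, the metaslab's own ms_sm already reflects every log entry written before txg 100, so a txg-90 entry is skipped by the guard

	if (txg < metaslab_unflushed_txg(ms))
		return (0);	/* already folded into the flushed ms_sm */

while entries with txg >= 100 are exactly the unflushed tail and are replayed on top of the loaded space map.
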
@@ -4070,6 +4194,82 @@ zdb_leak_init_exclude_checkpoint(spa_t *spa, zdb_cb_t *zcb)
}
}
+static int
+count_unflushed_space_cb(spa_t *spa, space_map_entry_t *sme,
+ uint64_t txg, void *arg)
+{
+ int64_t *ualloc_space = arg;
+
+ uint64_t offset = sme->sme_offset;
+ uint64_t vdev_id = sme->sme_vdev;
+
+ vdev_t *vd = vdev_lookup_top(spa, vdev_id);
+ if (!vdev_is_concrete(vd))
+ return (0);
+
+ metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
+ ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
+
+ if (txg < metaslab_unflushed_txg(ms))
+ return (0);
+
+ if (sme->sme_type == SM_ALLOC)
+ *ualloc_space += sme->sme_run;
+ else
+ *ualloc_space -= sme->sme_run;
+
+ return (0);
+}
+
+static int64_t
+get_unflushed_alloc_space(spa_t *spa)
+{
+ if (dump_opt['L'])
+ return (0);
+
+ int64_t ualloc_space = 0;
+ iterate_through_spacemap_logs(spa, count_unflushed_space_cb,
+ &ualloc_space);
+ return (ualloc_space);
+}
+
+static int
+load_unflushed_cb(spa_t *spa, space_map_entry_t *sme, uint64_t txg, void *arg)
+{
+ maptype_t *uic_maptype = arg;
+
+ uint64_t offset = sme->sme_offset;
+ uint64_t size = sme->sme_run;
+ uint64_t vdev_id = sme->sme_vdev;
+
+ vdev_t *vd = vdev_lookup_top(spa, vdev_id);
+
+ /* skip indirect vdevs */
+ if (!vdev_is_concrete(vd))
+ return (0);
+
+ metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
+
+ ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
+ ASSERT(*uic_maptype == SM_ALLOC || *uic_maptype == SM_FREE);
+
+ if (txg < metaslab_unflushed_txg(ms))
+ return (0);
+
+ if (*uic_maptype == sme->sme_type)
+ range_tree_add(ms->ms_allocatable, offset, size);
+ else
+ range_tree_remove(ms->ms_allocatable, offset, size);
+
+ return (0);
+}
+
+static void
+load_unflushed_to_ms_allocatables(spa_t *spa, maptype_t maptype)
+{
+ iterate_through_spacemap_logs(spa, load_unflushed_cb, &maptype);
+}
+
static void
load_concrete_ms_allocatable_trees(spa_t *spa, maptype_t maptype)
{
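
[Review note] count_unflushed_space_cb() accumulates a signed net: SM_ALLOC entries add sme_run, SM_FREE entries subtract it. With three hypothetical unflushed entries for one metaslab, ALLOC 16K + ALLOC 8K - FREE 4K, get_unflushed_alloc_space() returns a net of +20K, which dump_block_stats() later folds into total_alloc so the leak check agrees with the flushed on-disk accounting.
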
@@ -4093,7 +4293,7 @@ load_concrete_ms_allocatable_trees(spa_t *spa, maptype_t maptype)
(longlong_t)vd->vdev_ms_count);
mutex_enter(&msp->ms_lock);
- metaslab_unload(msp);
+ range_tree_vacate(msp->ms_allocatable, NULL, NULL);
/*
* We don't want to spend the CPU manipulating the
@@ -4110,6 +4310,8 @@ load_concrete_ms_allocatable_trees(spa_t *spa, maptype_t maptype)
mutex_exit(&msp->ms_lock);
}
}
+
+ load_unflushed_to_ms_allocatables(spa, maptype);
}
/*
@@ -4124,7 +4326,7 @@ load_indirect_ms_allocatable_tree(vdev_t *vd, metaslab_t *msp,
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
mutex_enter(&msp->ms_lock);
- metaslab_unload(msp);
+ range_tree_vacate(msp->ms_allocatable, NULL, NULL);
/*
* We don't want to spend the CPU manipulating the
@@ -4383,7 +4585,6 @@ zdb_leak_fini(spa_t *spa, zdb_cb_t *zcb)
range_tree_vacate(msp->ms_allocatable,
zdb_leak, vd);
}
-
if (msp->ms_loaded) {
msp->ms_loaded = B_FALSE;
}
@@ -4520,7 +4721,8 @@ dump_block_stats(spa_t *spa)
total_alloc = norm_alloc +
metaslab_class_get_alloc(spa_log_class(spa)) +
metaslab_class_get_alloc(spa_special_class(spa)) +
- metaslab_class_get_alloc(spa_dedup_class(spa));
+ metaslab_class_get_alloc(spa_dedup_class(spa)) +
+ get_unflushed_alloc_space(spa);
total_found = tzb->zb_asize - zcb.zcb_dedup_asize +
zcb.zcb_removing_size + zcb.zcb_checkpoint_size;
@@ -5393,11 +5595,24 @@ mos_obj_refd_multiple(uint64_t obj)
}
static void
+mos_leak_vdev_top_zap(vdev_t *vd)
+{
+ uint64_t ms_flush_data_obj;
+ int error = zap_lookup(spa_meta_objset(vd->vdev_spa),
+ vd->vdev_top_zap, VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS,
+ sizeof (ms_flush_data_obj), 1, &ms_flush_data_obj);
+ if (error == ENOENT)
+ return;
+ ASSERT0(error);
+
+ mos_obj_refd(ms_flush_data_obj);
+}
+
+static void
mos_leak_vdev(vdev_t *vd)
{
mos_obj_refd(vd->vdev_dtl_object);
mos_obj_refd(vd->vdev_ms_array);
- mos_obj_refd(vd->vdev_top_zap);
mos_obj_refd(vd->vdev_indirect_config.vic_births_object);
mos_obj_refd(vd->vdev_indirect_config.vic_mapping_object);
mos_obj_refd(vd->vdev_leaf_zap);
@@ -5415,11 +5630,33 @@ mos_leak_vdev(vdev_t *vd)
mos_obj_refd(space_map_object(ms->ms_sm));
}
+ if (vd->vdev_top_zap != 0) {
+ mos_obj_refd(vd->vdev_top_zap);
+ mos_leak_vdev_top_zap(vd);
+ }
+
for (uint64_t c = 0; c < vd->vdev_children; c++) {
mos_leak_vdev(vd->vdev_child[c]);
}
}
+static void
+mos_leak_log_spacemaps(spa_t *spa)
+{
+ uint64_t spacemap_zap;
+ int error = zap_lookup(spa_meta_objset(spa),
+ DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_LOG_SPACEMAP_ZAP,
+ sizeof (spacemap_zap), 1, &spacemap_zap);
+ if (error == ENOENT)
+ return;
+ ASSERT0(error);
+
+ mos_obj_refd(spacemap_zap);
+ for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
+ sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls))
+ mos_obj_refd(sls->sls_sm_obj);
+}
+
static int
dump_mos_leaks(spa_t *spa)
{
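
[Review note] mos_leak_vdev_top_zap() and mos_leak_log_spacemaps() share the same lookup idiom: ENOENT means the pool never created the entry (feature not active), and any other error must be success. A hypothetical helper that factors the pattern out (sketch only; the name is invented):

static boolean_t
mos_lookup_and_ref(objset_t *mos, uint64_t zapobj, const char *name)
{
	uint64_t obj;
	int error = zap_lookup(mos, zapobj, name, sizeof (obj), 1, &obj);

	if (error == ENOENT)
		return (B_FALSE);	/* entry never created; nothing to ref */
	ASSERT0(error);
	mos_obj_refd(obj);	/* mark the object as referenced, not leaked */
	return (B_TRUE);
}
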
@@ -5451,6 +5688,10 @@ dump_mos_leaks(spa_t *spa)
mos_obj_refd(spa->spa_l2cache.sav_object);
mos_obj_refd(spa->spa_spares.sav_object);
+ if (spa->spa_syncing_log_sm != NULL)
+ mos_obj_refd(spa->spa_syncing_log_sm->sm_object);
+ mos_leak_log_spacemaps(spa);
+
mos_obj_refd(spa->spa_condensing_indirect_phys.
scip_next_mapping_object);
mos_obj_refd(spa->spa_condensing_indirect_phys.
@@ -5528,6 +5769,79 @@ dump_mos_leaks(spa_t *spa)
return (rv);
}
+typedef struct log_sm_obsolete_stats_arg {
+ uint64_t lsos_current_txg;
+
+ uint64_t lsos_total_entries;
+ uint64_t lsos_valid_entries;
+
+ uint64_t lsos_sm_entries;
+ uint64_t lsos_valid_sm_entries;
+} log_sm_obsolete_stats_arg_t;
+
+static int
+log_spacemap_obsolete_stats_cb(spa_t *spa, space_map_entry_t *sme,
+ uint64_t txg, void *arg)
+{
+ log_sm_obsolete_stats_arg_t *lsos = arg;
+
+ uint64_t offset = sme->sme_offset;
+ uint64_t vdev_id = sme->sme_vdev;
+
+ if (lsos->lsos_current_txg == 0) {
+ /* this is the first log */
+ lsos->lsos_current_txg = txg;
+ } else if (lsos->lsos_current_txg < txg) {
+ /* we just changed log - print stats and reset */
+ (void) printf("%-8llu valid entries out of %-8llu - txg %llu\n",
+ (u_longlong_t)lsos->lsos_valid_sm_entries,
+ (u_longlong_t)lsos->lsos_sm_entries,
+ (u_longlong_t)lsos->lsos_current_txg);
+ lsos->lsos_valid_sm_entries = 0;
+ lsos->lsos_sm_entries = 0;
+ lsos->lsos_current_txg = txg;
+ }
+ ASSERT3U(lsos->lsos_current_txg, ==, txg);
+
+ lsos->lsos_sm_entries++;
+ lsos->lsos_total_entries++;
+
+ vdev_t *vd = vdev_lookup_top(spa, vdev_id);
+ if (!vdev_is_concrete(vd))
+ return (0);
+
+ metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
+ ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
+
+ if (txg < metaslab_unflushed_txg(ms))
+ return (0);
+ lsos->lsos_valid_sm_entries++;
+ lsos->lsos_valid_entries++;
+ return (0);
+}
+
+static void
+dump_log_spacemap_obsolete_stats(spa_t *spa)
+{
+ log_sm_obsolete_stats_arg_t lsos;
+ bzero(&lsos, sizeof (lsos));
+
+ (void) printf("Log Space Map Obsolete Entry Statistics:\n");
+
+ iterate_through_spacemap_logs(spa,
+ log_spacemap_obsolete_stats_cb, &lsos);
+
+ /* print stats for latest log */
+ (void) printf("%-8llu valid entries out of %-8llu - txg %llu\n",
+ (u_longlong_t)lsos.lsos_valid_sm_entries,
+ (u_longlong_t)lsos.lsos_sm_entries,
+ (u_longlong_t)lsos.lsos_current_txg);
+
+ (void) printf("%-8llu valid entries out of %-8llu - total\n\n",
+ (u_longlong_t)lsos.lsos_valid_entries,
+ (u_longlong_t)lsos.lsos_total_entries);
+}
+
static void
dump_zpool(spa_t *spa)
{
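
[Review note] Based on the format strings above, dump_log_spacemap_obsolete_stats() emits one line per log plus a total. Illustrative output with invented numbers:

Log Space Map Obsolete Entry Statistics:
480      valid entries out of 640      - txg 10000
512      valid entries out of 512      - txg 10001
992      valid entries out of 1152     - total
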
@@ -5557,6 +5871,10 @@ dump_zpool(spa_t *spa)
dump_metaslabs(spa);
if (dump_opt['M'])
dump_metaslab_groups(spa);
+ if (dump_opt['d'] > 2 || dump_opt['m']) {
+ dump_log_spacemaps(spa);
+ dump_log_spacemap_obsolete_stats(spa);
+ }
if (dump_opt['d'] || dump_opt['i']) {
spa_feature_t f;
@@ -5635,9 +5953,8 @@ dump_zpool(spa_t *spa)
}
}
- if (rc == 0) {
+ if (rc == 0)
rc = verify_device_removal_feature_counts(spa);
- }
}
if (rc == 0 && (dump_opt['b'] || dump_opt['c']))