author	Matthew Macy <[email protected]>	2020-07-25 20:07:44 -0700
committer	GitHub <[email protected]>	2020-07-25 20:07:44 -0700
commit	6d8da84106de1fc8480e1758cc88e81393b4c0c2 (patch)
tree	04d232d8f21a8c96978b7d1c329f7d1cc355248c /module/zfs
parent	f5b189f9379b092600293ac3e7a670bf2087d88c (diff)
Make use of ZFS_DEBUG consistent within kmod sources
Reviewed-by: Brian Behlendorf <[email protected]>
Reviewed-by: Ryan Moeller <[email protected]>
Signed-off-by: Matt Macy <[email protected]>
Closes #10623
Diffstat (limited to 'module/zfs')
-rw-r--r--	module/zfs/dbuf.c	4
-rw-r--r--	module/zfs/dnode_sync.c	4
-rw-r--r--	module/zfs/dsl_dir.c	2
-rw-r--r--	module/zfs/metaslab.c	2
-rw-r--r--	module/zfs/multilist.c	2
-rw-r--r--	module/zfs/rrwlock.c	4
-rw-r--r--	module/zfs/space_map.c	4
-rw-r--r--	module/zfs/zfs_ioctl.c	4
8 files changed, 13 insertions, 13 deletions
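
The change itself is mechanical: compile-time guards around debug-only verification code now key off ZFS_DEBUG, which the OpenZFS build system defines for debug builds, rather than the generic DEBUG macro, whose presence depends on how the surrounding kernel tree was configured. A minimal standalone sketch of the guard pattern follows; verify_invariants is a hypothetical helper, not code from the ZFS tree. Build with -DZFS_DEBUG to compile the checks in.

#include <assert.h>
#include <stdio.h>

#ifdef ZFS_DEBUG
static void
verify_invariants(int refcount)
{
	/* Extra verification compiled in only for debug builds. */
	assert(refcount >= 0);
}
#else
/* Production builds compile the check away entirely. */
#define	verify_invariants(rc)	((void)0)
#endif

int
main(void)
{
	verify_invariants(1);
#ifdef ZFS_DEBUG
	(void) printf("ZFS_DEBUG checks enabled\n");
#else
	(void) printf("ZFS_DEBUG checks compiled out\n");
#endif
	return (0);
}

Keying every such guard off a project-owned macro means a single configure switch controls all of the kmod's debug verification, independent of the host kernel's own DEBUG settings.
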
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index aa44cc31e..83b2c3721 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -1989,7 +1989,7 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
* objects may be dirtied in syncing context, but only if they
* were already pre-dirtied in open context.
*/
-#ifdef DEBUG
+#ifdef ZFS_DEBUG
if (dn->dn_objset->os_dsl_dataset != NULL) {
rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
RW_READER, FTAG);
@@ -2062,7 +2062,7 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
*/
os = dn->dn_objset;
VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
-#ifdef DEBUG
+#ifdef ZFS_DEBUG
if (dn->dn_objset->os_dsl_dataset != NULL)
rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
diff --git a/module/zfs/dnode_sync.c b/module/zfs/dnode_sync.c
index 4178d6f07..eafea3403 100644
--- a/module/zfs/dnode_sync.c
+++ b/module/zfs/dnode_sync.c
@@ -91,7 +91,7 @@ dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
if (child == NULL)
continue;
-#ifdef DEBUG
+#ifdef ZFS_DEBUG
DB_DNODE_ENTER(child);
ASSERT3P(DB_DNODE(child), ==, dn);
DB_DNODE_EXIT(child);
@@ -462,7 +462,7 @@ dnode_evict_dbufs(dnode_t *dn)
mutex_enter(&dn->dn_dbufs_mtx);
for (db = avl_first(&dn->dn_dbufs); db != NULL; db = db_next) {
-#ifdef DEBUG
+#ifdef ZFS_DEBUG
DB_DNODE_ENTER(db);
ASSERT3P(DB_DNODE(db), ==, dn);
DB_DNODE_EXIT(db);
diff --git a/module/zfs/dsl_dir.c b/module/zfs/dsl_dir.c
index da6103b4b..af369d1c7 100644
--- a/module/zfs/dsl_dir.c
+++ b/module/zfs/dsl_dir.c
@@ -1553,7 +1553,7 @@ dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
ASSERT(used > 0 ||
dsl_dir_phys(dd)->dd_used_breakdown[type] >= -used);
dsl_dir_phys(dd)->dd_used_breakdown[type] += used;
-#ifdef DEBUG
+#ifdef ZFS_DEBUG
{
dd_used_t t;
uint64_t u = 0;
diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c
index a935f33cb..1bc27824e 100644
--- a/module/zfs/metaslab.c
+++ b/module/zfs/metaslab.c
@@ -4401,7 +4401,7 @@ metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
*/
if (zal->zal_size == metaslab_trace_max_entries) {
metaslab_alloc_trace_t *mat_next;
-#ifdef DEBUG
+#ifdef ZFS_DEBUG
panic("too many entries in allocation list");
#endif
METASLABSTAT_BUMP(metaslabstat_trace_over_limit);
diff --git a/module/zfs/multilist.c b/module/zfs/multilist.c
index 27b17c8f3..a3adfd317 100644
--- a/module/zfs/multilist.c
+++ b/module/zfs/multilist.c
@@ -33,7 +33,7 @@ int zfs_multilist_num_sublists = 0;
* Given the object contained on the list, return a pointer to the
* object's multilist_node_t structure it contains.
*/
-#ifdef DEBUG
+#ifdef ZFS_DEBUG
static multilist_node_t *
multilist_d2l(multilist_t *ml, void *obj)
{
diff --git a/module/zfs/rrwlock.c b/module/zfs/rrwlock.c
index c6d358b34..a2b784837 100644
--- a/module/zfs/rrwlock.c
+++ b/module/zfs/rrwlock.c
@@ -164,7 +164,7 @@ static void
rrw_enter_read_impl(rrwlock_t *rrl, boolean_t prio, void *tag)
{
mutex_enter(&rrl->rr_lock);
-#if !defined(DEBUG) && defined(_KERNEL)
+#if !defined(ZFS_DEBUG) && defined(_KERNEL)
if (rrl->rr_writer == NULL && !rrl->rr_writer_wanted &&
!rrl->rr_track_all) {
rrl->rr_anon_rcount.rc_count++;
@@ -241,7 +241,7 @@ void
rrw_exit(rrwlock_t *rrl, void *tag)
{
mutex_enter(&rrl->rr_lock);
-#if !defined(DEBUG) && defined(_KERNEL)
+#if !defined(ZFS_DEBUG) && defined(_KERNEL)
if (!rrl->rr_writer && rrl->rr_linked_rcount.rc_count == 0) {
rrl->rr_anon_rcount.rc_count--;
if (rrl->rr_anon_rcount.rc_count == 0)
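
Note that the two rrwlock.c hunks above invert the sense of the guard: the untracked-reader fast path is compiled in only for non-debug kernel builds, while ZFS_DEBUG builds always fall through to the slow path so every reader reference stays individually tracked. A hedged sketch of that shape follows; the rwl_sketch_t type and its fields are hypothetical, illustrating the pattern rather than the real rrwlock_t.

/* Hypothetical lock sketch; not the real rrwlock_t. */
typedef struct {
	int	writer_held;
	long	anon_readers;		/* untracked fast-path holds */
	long	tracked_readers;	/* individually tracked holds */
} rwl_sketch_t;

static void
rwl_enter_read(rwl_sketch_t *l)
{
#if !defined(ZFS_DEBUG) && defined(_KERNEL)
	/* Production kernel builds take a cheap untracked reference. */
	if (!l->writer_held) {
		l->anon_readers++;
		return;
	}
#endif
	/* ZFS_DEBUG (and userland) builds always track the holder. */
	l->tracked_readers++;
}

int
main(void)
{
	rwl_sketch_t l = { 0, 0, 0 };

	rwl_enter_read(&l);
	return (0);
}
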
diff --git a/module/zfs/space_map.c b/module/zfs/space_map.c
index 25da0e63c..723fa0578 100644
--- a/module/zfs/space_map.c
+++ b/module/zfs/space_map.c
@@ -675,7 +675,7 @@ space_map_write_impl(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
space_map_write_intro_debug(sm, maptype, tx);
-#ifdef DEBUG
+#ifdef ZFS_DEBUG
/*
* We do this right after we write the intro debug entry
* because the estimate does not take it into account.
@@ -736,7 +736,7 @@ space_map_write_impl(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
dmu_buf_rele(db, FTAG);
-#ifdef DEBUG
+#ifdef ZFS_DEBUG
/*
* We expect our estimation to be based on the worst case
* scenario [see comment in space_map_estimate_optimal_size()].
diff --git a/module/zfs/zfs_ioctl.c b/module/zfs/zfs_ioctl.c
index 1d2ae6270..672eec9cc 100644
--- a/module/zfs/zfs_ioctl.c
+++ b/module/zfs/zfs_ioctl.c
@@ -4806,7 +4806,7 @@ zfs_allow_log_destroy(void *arg)
kmem_strfree(poolname);
}
-#ifdef DEBUG
+#ifdef ZFS_DEBUG
static boolean_t zfs_ioc_recv_inject_err;
#endif
@@ -5019,7 +5019,7 @@ zfs_ioc_recv_impl(char *tofs, char *tosnap, char *origin, nvlist_t *recvprops,
}
*read_bytes = off - noff;
-#ifdef DEBUG
+#ifdef ZFS_DEBUG
if (zfs_ioc_recv_inject_err) {
zfs_ioc_recv_inject_err = B_FALSE;
error = 1;