author     Tim Schumacher <[email protected]>    2018-10-01 19:42:05 +0200
committer  Brian Behlendorf <[email protected]>    2018-10-01 10:42:05 -0700
commit     424fd7c3e080255935646d2beaa2655c116cc37a
tree       16618fd0366f3c6431bfdb8b531a96764535ee54 /module/zfs/metaslab.c
parent     fc23d59fa09f3cd803438986ba70ffcb32b8a036
Prefix all refcount functions with zfs_
Recent changes in the Linux kernel made it necessary to prefix
the refcount_add() function with zfs_ due to a name collision.

To bring the other functions in line with that and to avoid future
collisions, prefix the other refcount functions as well.

Reviewed by: Matthew Ahrens <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Tim Schumacher <[email protected]>
Closes #7963
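For readers unfamiliar with the interface being renamed, the sketch below
illustrates how the zfs_refcount_* API is used throughout this diff: a
"tracked" refcount records a distinct tag per hold, which is what lets the
allocation throttle later verify (via zfs_refcount_not_held()) that a given
I/O no longer holds a queue-depth slot. This is a minimal illustrative
sketch, not code from the commit: the alloc_throttle_t wrapper and its
helper functions are hypothetical; only zfs_refcount_t and the
zfs_refcount_* calls are the real post-rename API (declared in ZFS's
refcount header, include/sys/refcount.h at the time of this commit).

    /*
     * Illustrative sketch only -- alloc_throttle_t and these helpers
     * are hypothetical; zfs_refcount_t and the zfs_refcount_* calls
     * are the real post-rename API from ZFS's refcount header.
     */
    #include <sys/refcount.h>

    typedef struct alloc_throttle {
            zfs_refcount_t  at_queue_depth; /* one hold per in-flight I/O */
    } alloc_throttle_t;

    static void
    alloc_throttle_init(alloc_throttle_t *at)
    {
            /* "Tracked" refcounts remember each holder's tag for debugging. */
            zfs_refcount_create_tracked(&at->at_queue_depth);
    }

    static void
    alloc_throttle_io_start(alloc_throttle_t *at, void *tag)
    {
            /* Take one hold per I/O, keyed by a caller-supplied tag. */
            (void) zfs_refcount_add(&at->at_queue_depth, tag);
    }

    static void
    alloc_throttle_io_done(alloc_throttle_t *at, void *tag)
    {
            /* The tag must match the one passed to zfs_refcount_add(). */
            (void) zfs_refcount_remove(&at->at_queue_depth, tag);
    }

    static void
    alloc_throttle_fini(alloc_throttle_t *at)
    {
            /* All holds must be released before the refcount is destroyed. */
            ASSERT0(zfs_refcount_count(&at->at_queue_depth));
            zfs_refcount_destroy(&at->at_queue_depth);
    }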
Diffstat (limited to 'module/zfs/metaslab.c')
-rw-r--r--  module/zfs/metaslab.c | 23
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c
index f657128d0..a117dc446 100644
--- a/module/zfs/metaslab.c
+++ b/module/zfs/metaslab.c
@@ -251,7 +251,7 @@ metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
         mc->mc_alloc_max_slots = kmem_zalloc(spa->spa_alloc_count *
             sizeof (uint64_t), KM_SLEEP);
         for (int i = 0; i < spa->spa_alloc_count; i++)
-                refcount_create_tracked(&mc->mc_alloc_slots[i]);
+                zfs_refcount_create_tracked(&mc->mc_alloc_slots[i]);

         return (mc);
 }
@@ -266,7 +266,7 @@ metaslab_class_destroy(metaslab_class_t *mc)
         ASSERT(mc->mc_dspace == 0);

         for (int i = 0; i < mc->mc_spa->spa_alloc_count; i++)
-                refcount_destroy(&mc->mc_alloc_slots[i]);
+                zfs_refcount_destroy(&mc->mc_alloc_slots[i]);
         kmem_free(mc->mc_alloc_slots, mc->mc_spa->spa_alloc_count *
             sizeof (zfs_refcount_t));
         kmem_free(mc->mc_alloc_max_slots, mc->mc_spa->spa_alloc_count *
@@ -653,7 +653,7 @@ metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
         mg->mg_cur_max_alloc_queue_depth = kmem_zalloc(allocators *
             sizeof (uint64_t), KM_SLEEP);
         for (int i = 0; i < allocators; i++) {
-                refcount_create_tracked(&mg->mg_alloc_queue_depth[i]);
+                zfs_refcount_create_tracked(&mg->mg_alloc_queue_depth[i]);
                 mg->mg_cur_max_alloc_queue_depth[i] = 0;
         }
@@ -683,7 +683,7 @@ metaslab_group_destroy(metaslab_group_t *mg)
         mutex_destroy(&mg->mg_lock);

         for (int i = 0; i < mg->mg_allocators; i++) {
-                refcount_destroy(&mg->mg_alloc_queue_depth[i]);
+                zfs_refcount_destroy(&mg->mg_alloc_queue_depth[i]);
                 mg->mg_cur_max_alloc_queue_depth[i] = 0;
         }
         kmem_free(mg->mg_alloc_queue_depth, mg->mg_allocators *
@@ -1048,7 +1048,8 @@ metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
                  */
                 qmax = qmax * (4 + d) / 4;

-                qdepth = refcount_count(&mg->mg_alloc_queue_depth[allocator]);
+                qdepth = zfs_refcount_count(
+                    &mg->mg_alloc_queue_depth[allocator]);

                 /*
                  * If this metaslab group is below its qmax or it's
@@ -1069,7 +1070,7 @@ metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
                 for (mgp = mg->mg_next; mgp != rotor; mgp = mgp->mg_next) {
                         qmax = mgp->mg_cur_max_alloc_queue_depth[allocator];
                         qmax = qmax * (4 + d) / 4;
-                        qdepth = refcount_count(
+                        qdepth = zfs_refcount_count(
                             &mgp->mg_alloc_queue_depth[allocator]);

                         /*
@@ -2936,7 +2937,7 @@ metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags,
         if (!mg->mg_class->mc_alloc_throttle_enabled)
                 return;

-        (void) refcount_remove(&mg->mg_alloc_queue_depth[allocator], tag);
+        (void) zfs_refcount_remove(&mg->mg_alloc_queue_depth[allocator], tag);
         if (io_complete)
                 metaslab_group_increment_qdepth(mg, allocator);
 }
@@ -2952,8 +2953,8 @@ metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag,
         for (int d = 0; d < ndvas; d++) {
                 uint64_t vdev = DVA_GET_VDEV(&dva[d]);
                 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
-                VERIFY(refcount_not_held(&mg->mg_alloc_queue_depth[allocator],
-                    tag));
+                VERIFY(zfs_refcount_not_held(
+                    &mg->mg_alloc_queue_depth[allocator], tag));
         }
 #endif
 }
@@ -3840,7 +3841,7 @@ metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,

         mutex_enter(&mc->mc_lock);
         uint64_t reserved_slots =
-            refcount_count(&mc->mc_alloc_slots[allocator]);
+            zfs_refcount_count(&mc->mc_alloc_slots[allocator]);
         if (reserved_slots < max)
                 available_slots = max - reserved_slots;
@@ -3870,7 +3871,7 @@ metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
         ASSERT(mc->mc_alloc_throttle_enabled);
         mutex_enter(&mc->mc_lock);
         for (int d = 0; d < slots; d++) {
-                (void) refcount_remove(&mc->mc_alloc_slots[allocator],
+                (void) zfs_refcount_remove(&mc->mc_alloc_slots[allocator],
                     zio);
         }
         mutex_exit(&mc->mc_lock);
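The last two hunks above touch the allocation-throttle reserve/unreserve
path, where each reserved slot is a refcount hold keyed by the zio pointer.
The following is a hedged sketch of that pattern, not the commit's code:
throttle_reserve and throttle_unreserve are hypothetical names, and the
real functions take a metaslab_class_t and allocator index rather than a
bare zfs_refcount_t; only the zfs_refcount_* calls are the real API.

    /*
     * Hypothetical sketch of the slot-reservation pattern: reserve
     * 'want' slots for a zio if capacity allows, drop them again on
     * completion. Names other than zfs_refcount_* are illustrative.
     */
    static boolean_t
    throttle_reserve(zfs_refcount_t *slots, int want, uint64_t max, void *zio)
    {
            uint64_t held = zfs_refcount_count(slots);

            if (held + want > max)
                    return (B_FALSE);       /* throttled: no capacity */
            for (int d = 0; d < want; d++)
                    (void) zfs_refcount_add(slots, zio);
            return (B_TRUE);
    }

    static void
    throttle_unreserve(zfs_refcount_t *slots, int want, void *zio)
    {
            /* Drop one hold per reserved slot, keyed by the same zio. */
            for (int d = 0; d < want; d++)
                    (void) zfs_refcount_remove(slots, zio);
    }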