author		Mateusz Guzik <[email protected]>	2020-11-02 20:51:12 +0100
committer	GitHub <[email protected]>		2020-11-02 11:51:12 -0800
commit		09eb36ce3d98e0eeaeec9c333ac818b2bc3f85bf (patch)
tree		967d479b3fe081cc9e6cdd2d6ffc4daa412fc7d0 /module
parent		8583540c6e04135626fe301ace8aa51212826965 (diff)
Introduce CPU_SEQID_UNSTABLE
Current CPU_SEQID users don't care about the CPU ID possibly changing,
but enclose it within kpreempt_disable/enable in order to fend off
warnings from Linux's CONFIG_DEBUG_PREEMPT. There is no need to do
this: the expected way to get the CPU ID while allowing for migration
is raw_smp_processor_id.

In order to make this future-proof, this patch keeps CPU_SEQID as is
and introduces CPU_SEQID_UNSTABLE instead, to make it clear that
consumers explicitly want this behavior.

Reviewed-by: Brian Behlendorf <[email protected]>
Reviewed-by: Ryan Moeller <[email protected]>
Reviewed-by: Matt Macy <[email protected]>
Signed-off-by: Mateusz Guzik <[email protected]>
Closes #11142
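
For reference, a minimal sketch of what the Linux-side definitions could
look like. The actual header change lies outside the 'module' tree shown
below, so the exact form here is an assumption based on the commit
message, not the patch itself:

    /*
     * Hypothetical sketch, not the patch's actual header change.
     * With CONFIG_DEBUG_PREEMPT, smp_processor_id() warns when called
     * from preemptible context; raw_smp_processor_id() does not, which
     * documents that the caller tolerates a stale ("unstable") CPU ID.
     */
    #define CPU_SEQID           smp_processor_id()
    #define CPU_SEQID_UNSTABLE  raw_smp_processor_id()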
Diffstat (limited to 'module')
-rw-r--r--  module/icp/core/kcf_sched.c  4
-rw-r--r--  module/zfs/aggsum.c          4
-rw-r--r--  module/zfs/dmu_object.c      4
-rw-r--r--  module/zfs/txg.c             4
-rw-r--r--  module/zfs/zio.c             4
5 files changed, 5 insertions, 15 deletions
diff --git a/module/icp/core/kcf_sched.c b/module/icp/core/kcf_sched.c
index 40d50553d..81fd15f8e 100644
--- a/module/icp/core/kcf_sched.c
+++ b/module/icp/core/kcf_sched.c
@@ -1308,9 +1308,7 @@ kcf_reqid_insert(kcf_areq_node_t *areq)
 	kcf_areq_node_t *headp;
 	kcf_reqid_table_t *rt;
 
-	kpreempt_disable();
-	rt = kcf_reqid_table[CPU_SEQID & REQID_TABLE_MASK];
-	kpreempt_enable();
+	rt = kcf_reqid_table[CPU_SEQID_UNSTABLE & REQID_TABLE_MASK];
 
 	mutex_enter(&rt->rt_lock);
diff --git a/module/zfs/aggsum.c b/module/zfs/aggsum.c
index a2fec2774..e38f4a66c 100644
--- a/module/zfs/aggsum.c
+++ b/module/zfs/aggsum.c
@@ -167,9 +167,7 @@ aggsum_add(aggsum_t *as, int64_t delta)
 	struct aggsum_bucket *asb;
 	int64_t borrow;
 
-	kpreempt_disable();
-	asb = &as->as_buckets[CPU_SEQID % as->as_numbuckets];
-	kpreempt_enable();
+	asb = &as->as_buckets[CPU_SEQID_UNSTABLE % as->as_numbuckets];
 
 	/* Try fast path if we already borrowed enough before. */
 	mutex_enter(&asb->asc_lock);
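
The aggsum hunk above is representative of the whole patch: the CPU ID
is only a load-spreading hint used to pick a per-CPU slot, and each slot
is protected by its own lock, so an ID that goes stale after being read
cannot corrupt anything. Below is a self-contained userspace sketch of
the same pattern, with glibc's sched_getcpu() standing in for
CPU_SEQID_UNSTABLE; all names are illustrative, not OpenZFS code:

    #define _GNU_SOURCE         /* for sched_getcpu() on glibc */
    #include <pthread.h>
    #include <sched.h>
    #include <stdint.h>

    #define NBUCKETS 16

    static struct bucket {
        pthread_mutex_t lock;
        int64_t count;
    } buckets[NBUCKETS] = {
        /* GNU range designator; initializes every bucket's mutex. */
        [0 ... NBUCKETS - 1] = { .lock = PTHREAD_MUTEX_INITIALIZER }
    };

    static void
    bucket_add(int64_t delta)
    {
        /*
         * Like raw_smp_processor_id(), the result of sched_getcpu()
         * may be stale by the time it is used: the thread can migrate
         * right after the call.  A stale ID only changes which bucket
         * absorbs the update; the per-bucket lock keeps the update
         * itself correct.
         */
        int cpu = sched_getcpu();
        struct bucket *b = &buckets[(cpu < 0 ? 0 : cpu) % NBUCKETS];

        pthread_mutex_lock(&b->lock);
        b->count += delta;
        pthread_mutex_unlock(&b->lock);
    }

Reading the aggregate total would take every bucket lock and sum the
counts; the point of the pattern is that correctness never depends on
the ID still matching the CPU the thread is running on.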
diff --git a/module/zfs/dmu_object.c b/module/zfs/dmu_object.c
index 453a2842c..12cdbd68b 100644
--- a/module/zfs/dmu_object.c
+++ b/module/zfs/dmu_object.c
@@ -58,10 +58,8 @@ dmu_object_alloc_impl(objset_t *os, dmu_object_type_t ot, int blocksize,
 	int dnodes_per_chunk = 1 << dmu_object_alloc_chunk_shift;
 	int error;
 
-	kpreempt_disable();
-	cpuobj = &os->os_obj_next_percpu[CPU_SEQID %
+	cpuobj = &os->os_obj_next_percpu[CPU_SEQID_UNSTABLE %
 	    os->os_obj_next_percpu_len];
-	kpreempt_enable();
 
 	if (dn_slots == 0) {
 		dn_slots = DNODE_MIN_SLOTS;
diff --git a/module/zfs/txg.c b/module/zfs/txg.c
index 65375b579..420244abb 100644
--- a/module/zfs/txg.c
+++ b/module/zfs/txg.c
@@ -305,9 +305,7 @@ txg_hold_open(dsl_pool_t *dp, txg_handle_t *th)
 	 * significance to the chosen tx_cpu. Because.. Why not use
 	 * the current cpu to index into the array?
 	 */
-	kpreempt_disable();
-	tc = &tx->tx_cpu[CPU_SEQID];
-	kpreempt_enable();
+	tc = &tx->tx_cpu[CPU_SEQID_UNSTABLE];
 
 	mutex_enter(&tc->tc_open_lock);
 	txg = tx->tx_open_txg;
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index 260e88b0b..55c2f1ea1 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -2246,9 +2246,7 @@ zio_nowait(zio_t *zio)
 		 * will ensure they complete prior to unloading the pool.
 		 */
 		spa_t *spa = zio->io_spa;
-		kpreempt_disable();
-		pio = spa->spa_async_zio_root[CPU_SEQID];
-		kpreempt_enable();
+		pio = spa->spa_async_zio_root[CPU_SEQID_UNSTABLE];
 		zio_add_child(pio, zio);
 	}