author    | Alexander Motin <[email protected]> | 2021-06-07 12:02:47 -0400
committer | GitHub <[email protected]>           | 2021-06-07 09:02:47 -0700
commit    | ea400129c376c958e32bd912ea29905107ebe0bb (patch)
tree      | 531f6dc237cd48a05889e90f6080626235375515 /lib/libspl
parent    | e5e76bd6432de9592c4b4319fa826ad39971abd7 (diff)
More aggsum optimizations
- Avoid atomic_add() when updating as_lower_bound/as_upper_bound.
The previous code was excessively strong on 64-bit systems while not
strong enough on 32-bit ones. Instead, introduce and use real
atomic_load() and atomic_store() operations, which are plain
assignments on 64-bit machines but use proper atomics on 32-bit ones
to avoid torn reads/writes (see the sketch below).
- Reduce the number of buckets on large systems. Extra buckets do not
improve add speed as much as they hurt reads. Unlike wmsum, reads
still matter for aggsum.
Reviewed-by: Brian Behlendorf <[email protected]>
Reviewed-by: Ryan Moeller <[email protected]>
Signed-off-by: Alexander Motin <[email protected]>
Sponsored-By: iXsystems, Inc.
Closes #12145
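
As a rough illustration of the first bullet, here is a minimal userland sketch of the bounds-update pattern, assuming (as the commit message implies) that the bounds are only written while a lock is held and that lockless readers merely need untorn 64-bit values. Apart from as_lower_bound/as_upper_bound, every name here is hypothetical; this is not the actual aggsum code.

```c
#include <stdint.h>
#include <pthread.h>
#include <atomic.h>	/* lib/libspl/include/atomic.h */

/* Illustrative struct; the real aggsum_t has buckets and more fields. */
typedef struct {
	pthread_mutex_t	as_lock;	/* stand-in for the kernel mutex */
	uint64_t	as_lower_bound;
	uint64_t	as_upper_bound;
} toy_aggsum_t;

/*
 * Writer path, called with as_lock held: writers are already
 * serialized, so no atomic read-modify-write (atomic_add_64()) is
 * needed.  atomic_load_64()/atomic_store_64() only have to keep a
 * concurrent lockless reader from seeing a torn 64-bit value; on
 * 64-bit machines they compile to plain assignments.
 */
static void
toy_adjust_bounds(toy_aggsum_t *as, int64_t lower_delta, int64_t upper_delta)
{
	atomic_store_64(&as->as_lower_bound,
	    atomic_load_64(&as->as_lower_bound) + lower_delta);
	atomic_store_64(&as->as_upper_bound,
	    atomic_load_64(&as->as_upper_bound) + upper_delta);
}

/* Lockless reader: a single atomic load avoids torn reads. */
static uint64_t
toy_upper_bound(toy_aggsum_t *as)
{
	return (atomic_load_64(&as->as_upper_bound));
}
```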
Diffstat (limited to 'lib/libspl')
-rw-r--r-- | lib/libspl/atomic.c         | 13
-rw-r--r-- | lib/libspl/include/atomic.h | 43
2 files changed, 56 insertions, 0 deletions
diff --git a/lib/libspl/atomic.c b/lib/libspl/atomic.c
index d1a71b777..4717d818c 100644
--- a/lib/libspl/atomic.c
+++ b/lib/libspl/atomic.c
@@ -313,6 +313,19 @@ atomic_swap_ptr(volatile void *target, void *bits)
 	return (__atomic_exchange_n((void **)target, bits, __ATOMIC_SEQ_CST));
 }
 
+#ifndef _LP64
+uint64_t
+atomic_load_64(volatile uint64_t *target)
+{
+	return (__atomic_load_n(target, __ATOMIC_RELAXED));
+}
+
+void
+atomic_store_64(volatile uint64_t *target, uint64_t bits)
+{
+	return (__atomic_store_n(target, bits, __ATOMIC_RELAXED));
+}
+#endif
 
 int
 atomic_set_long_excl(volatile ulong_t *target, uint_t value)
diff --git a/lib/libspl/include/atomic.h b/lib/libspl/include/atomic.h
index f8c257f96..8dd1d654a 100644
--- a/lib/libspl/include/atomic.h
+++ b/lib/libspl/include/atomic.h
@@ -246,6 +246,49 @@ extern uint64_t atomic_swap_64(volatile uint64_t *, uint64_t);
 #endif
 
 /*
+ * Atomically read variable.
+ */
+#define	atomic_load_char(p)	(*(volatile uchar_t *)(p))
+#define	atomic_load_short(p)	(*(volatile ushort_t *)(p))
+#define	atomic_load_int(p)	(*(volatile uint_t *)(p))
+#define	atomic_load_long(p)	(*(volatile ulong_t *)(p))
+#define	atomic_load_ptr(p)	(*(volatile __typeof(*p) *)(p))
+#define	atomic_load_8(p)	(*(volatile uint8_t *)(p))
+#define	atomic_load_16(p)	(*(volatile uint16_t *)(p))
+#define	atomic_load_32(p)	(*(volatile uint32_t *)(p))
+#ifdef _LP64
+#define	atomic_load_64(p)	(*(volatile uint64_t *)(p))
+#elif defined(_INT64_TYPE)
+extern uint64_t atomic_load_64(volatile uint64_t *);
+#endif
+
+/*
+ * Atomically write variable.
+ */
+#define	atomic_store_char(p, v)	\
+	(*(volatile uchar_t *)(p) = (uchar_t)(v))
+#define	atomic_store_short(p, v)	\
+	(*(volatile ushort_t *)(p) = (ushort_t)(v))
+#define	atomic_store_int(p, v)	\
+	(*(volatile uint_t *)(p) = (uint_t)(v))
+#define	atomic_store_long(p, v)	\
+	(*(volatile ulong_t *)(p) = (ulong_t)(v))
+#define	atomic_store_ptr(p, v)	\
+	(*(volatile __typeof(*p) *)(p) = (v))
+#define	atomic_store_8(p, v)	\
+	(*(volatile uint8_t *)(p) = (uint8_t)(v))
+#define	atomic_store_16(p, v)	\
+	(*(volatile uint16_t *)(p) = (uint16_t)(v))
+#define	atomic_store_32(p, v)	\
+	(*(volatile uint32_t *)(p) = (uint32_t)(v))
+#ifdef _LP64
+#define	atomic_store_64(p, v)	\
+	(*(volatile uint64_t *)(p) = (uint64_t)(v))
+#elif defined(_INT64_TYPE)
+extern void atomic_store_64(volatile uint64_t *, uint64_t);
+#endif
+
+/*
  * Perform an exclusive atomic bit set/clear on a target.
  * Returns 0 if bit was successfully set/cleared, or -1
  * if the bit was already set/cleared.
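
For illustration only (not part of the commit): on a 32-bit target a plain 64-bit assignment may be split into two 32-bit stores, so a lockless reader can observe half of an old value and half of a new one. The sketch below shows how the new libspl interfaces avoid that; counter, publish(), and observe() are hypothetical names.

```c
#include <stdint.h>
#include <atomic.h>	/* lib/libspl/include/atomic.h */

static volatile uint64_t counter;	/* shared between threads */

/* Writer: publish a new 64-bit value without risking a torn store. */
static void
publish(uint64_t v)
{
	/*
	 * On _LP64 targets this macro expands to a plain assignment;
	 * on 32-bit builds it calls the atomic_store_64() function,
	 * which uses __atomic_store_n() with __ATOMIC_RELAXED.
	 */
	atomic_store_64(&counter, v);
}

/* Reader: always sees either the old or the new value, never a mix. */
static uint64_t
observe(void)
{
	return (atomic_load_64(&counter));
}
```

Note that __ATOMIC_RELAXED only guarantees atomicity of the individual access, not ordering with respect to other memory operations; that is all the commit message asks of these operations, since their purpose is to avoid torn reads/writes.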