author    | Sven Gothel <[email protected]> | 2021-01-03 20:03:04 +0100
committer | Sven Gothel <[email protected]> | 2021-01-03 20:03:04 +0100
commit    | 01753664afcca9a0d27b0893a738822f39a51a20 (patch)
tree      | e4a2220fbe166168f66594f09903b427566bfdaa /include/jau/cow_darray.hpp
parent    | 2b588a08cbdf32581d08566ad85383db91da5f62 (diff)
cow_[darray|vector]: Use 'mutable' modifier for the always-mutably-used sync_atomic and mtx_write; don't use constexpr with non-literal variables, mark them with '__constexpr_non_literal_atomic__'
Introduces cpp_lang_macros.hpp, i.e. documents the reason for not using constexpr via the '__constexpr_non_literal_atomic__' annotation.
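A plausible shape for that annotation — an assumption for illustration only, not the actual contents of jau/cpp_lang_macros.hpp — is an empty macro whose sole purpose is to document at the declaration site why 'constexpr' is omitted:

```cpp
// Hypothetical sketch of the annotation macro (see jau/cpp_lang_macros.hpp for the real definition).
// std::atomic and std::recursive_mutex are not literal types, so member functions that construct
// a lock or critical-section object over them cannot be 'constexpr'; the macro expands to nothing
// and merely records that intent where 'constexpr' would otherwise appear.
#ifndef __constexpr_non_literal_atomic__
    #define __constexpr_non_literal_atomic__
#endif
```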
Using the 'mutable' keyword is more elegant than const_cast.
Also remove 'const' from the std::lock_guard instances, as they are not used as const objects (they unlock at destruction).
Code is clang++ 11 clean.
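As a minimal sketch of the pattern this commit describes — assuming a simplified RAII stand-in (SyncGuard) for jau::sc_atomic_critical and a plain std::vector storage, i.e. not jaulib's actual implementation — marking the synchronization members mutable lets const readers use them without const_cast:

```cpp
#include <atomic>
#include <memory>
#include <mutex>
#include <vector>

template<typename T>
class cow_store_sketch {
    private:
        std::shared_ptr<std::vector<T>> store_ref = std::make_shared<std::vector<T>>();
        mutable std::atomic<bool>       sync_atomic { false };  // 'mutable': toggled even inside const readers
        mutable std::recursive_mutex    mtx_write;               // 'mutable': locked even in const contexts

        // Simplified RAII stand-in for jau::sc_atomic_critical: it only flags the critical
        // section; the real helper also provides acquire/release synchronization.
        struct SyncGuard {
            std::atomic<bool>& sync;
            explicit SyncGuard(std::atomic<bool>& s) noexcept : sync(s) { sync.store(true, std::memory_order_release); }
            ~SyncGuard() noexcept { sync.store(false, std::memory_order_release); }
        };

    public:
        // const reader: thanks to 'mutable', no const_cast<cow_store_sketch*>(this) is needed.
        // std::atomic is not a literal type, hence no 'constexpr' on this function either.
        std::shared_ptr<std::vector<T>> get_snapshot() const noexcept {
            SyncGuard sync(sync_atomic);
            return store_ref;
        }

        // writer: the lock_guard is intentionally non-const; it unlocks in its destructor.
        void push_back(const T& v) {
            std::lock_guard<std::recursive_mutex> lock(mtx_write);
            auto new_store = std::make_shared<std::vector<T>>(*store_ref);  // copy-on-write
            new_store->push_back(v);
            SyncGuard sync(sync_atomic);
            store_ref = std::move(new_store);
        }
};
```

The non-const std::lock_guard mirrors the commit's point: the guard is never used as a const object, since it unlocks in its destructor.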
Diffstat (limited to 'include/jau/cow_darray.hpp')
-rw-r--r-- | include/jau/cow_darray.hpp | 110 |
1 file changed, 73 insertions(+), 37 deletions(-)
```diff
diff --git a/include/jau/cow_darray.hpp b/include/jau/cow_darray.hpp
index 07c44b0..d996ac7 100644
--- a/include/jau/cow_darray.hpp
+++ b/include/jau/cow_darray.hpp
@@ -35,6 +35,7 @@
 #include <condition_variable>
 #include <algorithm>
 
+#include <jau/cpp_lang_macros.hpp>
 #include <jau/debug.hpp>
 #include <jau/darray.hpp>
 #include <jau/basic_types.hpp>
@@ -145,8 +146,8 @@ namespace jau {
             static constexpr size_type DIFF_MAX = std::numeric_limits<difference_type>::max();
 
             storage_ref_t store_ref;
-            sc_atomic_bool sync_atomic;
-            std::recursive_mutex mtx_write;
+            mutable sc_atomic_bool sync_atomic;
+            mutable std::recursive_mutex mtx_write;
 
         public:
             // ctor w/o elements
@@ -187,11 +188,12 @@
              * Capacity and size will equal the given array, i.e. the result is a trimmed array.
              * @param x the given cow_darray, all elements will be copied into the new instance.
              */
-            constexpr cow_darray(const cow_darray& x)
+            __constexpr_non_literal_atomic__
+            cow_darray(const cow_darray& x)
             : sync_atomic(false) {
                 storage_ref_t x_store_ref;
                 {
-                    sc_atomic_critical sync_x( const_cast<cow_darray *>(&x)->sync_atomic );
+                    sc_atomic_critical sync_x( x.sync_atomic );
                     x_store_ref = x.store_ref;
                 }
                 store_ref = std::make_shared<storage_t>( *x_store_ref );
@@ -204,11 +206,12 @@
              * @param growth_factor custom growth factor
              * @param alloc custom allocator_type instance
              */
-            constexpr explicit cow_darray(const cow_darray& x, const float growth_factor, const allocator_type& alloc)
+            __constexpr_non_literal_atomic__
+            explicit cow_darray(const cow_darray& x, const float growth_factor, const allocator_type& alloc)
             : sync_atomic(false) {
                 storage_ref_t x_store_ref;
                 {
-                    sc_atomic_critical sync_x( const_cast<cow_darray *>(&x)->sync_atomic );
+                    sc_atomic_critical sync_x( x.sync_atomic );
                     x_store_ref = x.store_ref;
                 }
                 store_ref = std::make_shared<storage_t>( *x_store_ref, growth_factor, alloc );
@@ -225,11 +228,12 @@
              * @param growth_factor custom growth factor
              * @param alloc custom allocator_type instance
              */
-            constexpr explicit cow_darray(const cow_darray& x, const size_type _capacity, const float growth_factor, const allocator_type& alloc)
+            __constexpr_non_literal_atomic__
+            explicit cow_darray(const cow_darray& x, const size_type _capacity, const float growth_factor, const allocator_type& alloc)
             : sync_atomic(false) {
                 storage_ref_t x_store_ref;
                 {
-                    sc_atomic_critical sync_x( const_cast<cow_darray *>(&x)->sync_atomic );
+                    sc_atomic_critical sync_x( x.sync_atomic );
                     x_store_ref = x.store_ref;
                 }
                 store_ref = std::make_shared<storage_t>( *x_store_ref, _capacity, growth_factor, alloc );
@@ -325,8 +329,9 @@
              * @see jau::cow_darray::copy_store()
              * @see jau::cow_darray::set_store()
              */
+            __constexpr_non_literal_atomic__
             storage_ref_t copy_store() {
-                const std::lock_guard<std::recursive_mutex> lock(mtx_write);
+                std::lock_guard<std::recursive_mutex> lock(mtx_write);
                 return std::make_shared<storage_t>( *store_ref );
             }
 
@@ -342,7 +347,7 @@
              *   cow_darray<std::shared_ptr<Thing>> list;
              *   ...
              *   {
-             *       const std::lock_guard<std::recursive_mutex> lock(list.get_write_mutex());
+             *       std::lock_guard<std::recursive_mutex> lock(list.get_write_mutex());
              *       std::shared_ptr<std::vector<std::shared_ptr<Thing>>> snapshot = list.copy_store();
              *       ...
              *       some fancy mutation
@@ -357,8 +362,9 @@
              * @see jau::cow_darray::copy_store()
              * @see jau::cow_darray::set_store()
              */
-            constexpr void set_store(storage_ref_t && new_store_ref) noexcept {
-                const std::lock_guard<std::recursive_mutex> lock(mtx_write);
+            __constexpr_non_literal_atomic__
+            void set_store(storage_ref_t && new_store_ref) noexcept {
+                std::lock_guard<std::recursive_mutex> lock(mtx_write);
                 sc_atomic_critical sync(sync_atomic);
                 store_ref = std::move( new_store_ref );
             }
@@ -375,8 +381,9 @@
              * </p>
              * @see jau::for_each_cow
              */
-            constexpr storage_ref_t get_snapshot() const noexcept {
-                sc_atomic_critical sync( const_cast<cow_darray *>(this)->sync_atomic );
+            __constexpr_non_literal_atomic__
+            storage_ref_t get_snapshot() const noexcept {
+                sc_atomic_critical sync( sync_atomic );
                 return store_ref;
             }
 
@@ -411,17 +418,34 @@
             // read access
 
             const allocator_type& get_allocator_ref() const noexcept {
-                sc_atomic_critical sync( const_cast<cow_darray *>(this)->sync_atomic );
+                sc_atomic_critical sync( sync_atomic );
                 return store_ref->get_allocator_ref();
             }
 
             allocator_type get_allocator() const noexcept {
-                sc_atomic_critical sync( const_cast<cow_darray *>(this)->sync_atomic );
+                sc_atomic_critical sync( sync_atomic );
                 return store_ref->get_allocator();
             }
 
-            constexpr size_type capacity() const noexcept {
-                sc_atomic_critical sync( const_cast<cow_darray *>(this)->sync_atomic );
+            /**
+             * Returns the growth factor
+             */
+            __constexpr_non_literal_atomic__
+            float growth_factor() const noexcept {
+                sc_atomic_critical sync( sync_atomic );
+                return store_ref->growth_factor();
+            }
+
+            /**
+             * Like std::vector::empty().
+             * <p>
+             * This read operation is <i>lock-free</i>.
+             * </p>
+             * @return
+             */
+            __constexpr_non_literal_atomic__
+            size_type capacity() const noexcept {
+                sc_atomic_critical sync( sync_atomic );
                 return store_ref->capacity();
             }
 
@@ -431,8 +455,9 @@
              * This read operation is <i>lock-free</i>.
              * </p>
              */
-            constexpr bool empty() const noexcept {
-                sc_atomic_critical sync( const_cast<cow_darray *>(this)->sync_atomic );
+            __constexpr_non_literal_atomic__
+            bool empty() const noexcept {
+                sc_atomic_critical sync( sync_atomic );
                 return store_ref->empty();
             }
 
@@ -442,8 +467,9 @@
              * This read operation is <i>lock-free</i>.
              * </p>
              */
-            constexpr size_type size() const noexcept {
-                sc_atomic_critical sync( const_cast<cow_darray *>(this)->sync_atomic );
+            __constexpr_non_literal_atomic__
+            size_type size() const noexcept {
+                sc_atomic_critical sync( sync_atomic );
                 return store_ref->size();
             }
 
@@ -460,12 +486,12 @@
              * </p>
              */
             void reserve(size_type new_capacity) {
-                const std::lock_guard<std::recursive_mutex> lock(mtx_write);
                 storage_ref_t old_store_ref = store_ref;
                 if( new_capacity > old_store_ref->capacity() ) {
                     storage_ref_t new_store_ref = std::make_shared<storage_t>( *old_store_ref, new_capacity,
                                                                                 old_store_ref->growth_factor(),
                                                                                 old_store_ref->get_allocator_ref() );
+                    std::lock_guard<std::recursive_mutex> lock(mtx_write);
                     sc_atomic_critical sync( sync_atomic );
                     store_ref = std::move(new_store_ref);
                 }
@@ -478,7 +504,7 @@
              * </p>
              */
             cow_darray& operator=(const storage_t& x) {
-                const std::lock_guard<std::recursive_mutex> lock(mtx_write);
+                std::lock_guard<std::recursive_mutex> lock(mtx_write);
                 storage_ref_t new_store_ref = std::make_shared<storage_t>( x );
                 sc_atomic_critical sync(sync_atomic);
                 store_ref = std::move(new_store_ref);
@@ -492,7 +518,7 @@
              * </p>
              */
             cow_darray& operator=(storage_t&& x) {
-                const std::lock_guard<std::recursive_mutex> lock(mtx_write);
+                std::lock_guard<std::recursive_mutex> lock(mtx_write);
                 storage_ref_t new_store_ref = std::make_shared<storage_t>( std::move(x) );
                 sc_atomic_critical sync(sync_atomic);
                 store_ref = std::move(new_store_ref);
@@ -506,10 +532,10 @@
              * </p>
              */
             cow_darray& operator=(const cow_darray& x) {
-                const std::lock_guard<std::recursive_mutex> lock(mtx_write);
+                std::lock_guard<std::recursive_mutex> lock(mtx_write);
                 storage_ref_t x_store_ref;
                 {
-                    sc_atomic_critical sync_x( const_cast<cow_darray *>(&x)->sync_atomic );
+                    sc_atomic_critical sync_x( x.sync_atomic );
                     x_store_ref = x.store_ref;
                 }
                 storage_ref_t new_store_ref = std::make_shared<storage_t>( *x_store_ref );
@@ -546,8 +572,9 @@
              * This write operation uses a mutex lock and is blocking this instances' write operations.
              * </p>
              */
+            __constexpr_non_literal_atomic__
             void clear() noexcept {
-                const std::lock_guard<std::recursive_mutex> lock(mtx_write);
+                std::lock_guard<std::recursive_mutex> lock(mtx_write);
                 storage_ref_t new_store_ref = std::make_shared<storage_t>();
                 {
                     sc_atomic_critical sync(sync_atomic);
@@ -561,6 +588,7 @@
              * This write operation uses a mutex lock and is blocking both cow_darray instance's write operations.
              * </p>
              */
+            __constexpr_non_literal_atomic__
             void swap(cow_darray& x) noexcept {
                 std::unique_lock<std::recursive_mutex> lock(mtx_write, std::defer_lock);     // utilize std::lock(a, b), allowing mixed order waiting on either object
                 std::unique_lock<std::recursive_mutex> lock_x(x.mtx_write, std::defer_lock); // otherwise RAII-style relinquish via destructor
@@ -580,8 +608,8 @@
              * This write operation uses a mutex lock and is blocking this instances' write operations only.
              * </p>
              */
+            __constexpr_non_literal_atomic__
             void pop_back() noexcept {
-                const std::lock_guard<std::recursive_mutex> lock(mtx_write);
                 storage_ref_t old_store_ref = store_ref;
                 if( !old_store_ref->empty() ) {
                     storage_ref_t new_store_ref = std::make_shared<storage_t>( old_store_ref->capacity(),
@@ -589,6 +617,7 @@
                                                                                old_store_ref->cend()-1,
                                                                                old_store_ref->growth_factor(),
                                                                                old_store_ref->get_allocator_ref() );
+                    std::lock_guard<std::recursive_mutex> lock(mtx_write);
                     {
                         sc_atomic_critical sync(sync_atomic);
                         store_ref = std::move(new_store_ref);
@@ -603,10 +632,11 @@
              * </p>
              * @param x the value to be added at the tail.
              */
+            __constexpr_non_literal_atomic__
             void push_back(const value_type& x) {
-                const std::lock_guard<std::recursive_mutex> lock(mtx_write);
                 storage_ref_t old_store_ref = store_ref;
                 if( old_store_ref->capacity_reached() ) {
+                    std::lock_guard<std::recursive_mutex> lock(mtx_write);
                     // grow and swap all refs
                     storage_ref_t new_store_ref = std::make_shared<storage_t>( *old_store_ref, old_store_ref->grow_capacity(),
                                                                                old_store_ref->growth_factor(),
@@ -628,10 +658,11 @@
              * This write operation uses a mutex lock and is blocking this instances' write operations only.
              * </p>
              */
+            __constexpr_non_literal_atomic__
             void push_back(value_type&& x) {
-                const std::lock_guard<std::recursive_mutex> lock(mtx_write);
                 storage_ref_t old_store_ref = store_ref;
                 if( old_store_ref->capacity_reached() ) {
+                    std::lock_guard<std::recursive_mutex> lock(mtx_write);
                     // grow and swap all refs
                     storage_ref_t new_store_ref = std::make_shared<storage_t>( *old_store_ref, old_store_ref->grow_capacity(),
                                                                                old_store_ref->growth_factor(),
@@ -657,10 +688,11 @@
              * @param last last foreign input-iterator to range of value_type [first, last)
              */
             template< class InputIt >
-            constexpr void push_back( InputIt first, InputIt last ) {
-                const std::lock_guard<std::recursive_mutex> lock(mtx_write);
+            __constexpr_non_literal_atomic__
+            void push_back( InputIt first, InputIt last ) {
+                std::lock_guard<std::recursive_mutex> lock(mtx_write);
                 storage_ref_t old_store_ref = store_ref;
                 const size_type new_size_ = old_store_ref->size() + size_type(last - first);
 
                 if( new_size_ > old_store_ref->capacity() ) {
                     // grow and swap all refs
@@ -739,8 +771,9 @@
              * @param comparator the equal comparator to return true if both given elements are equal
              * @return true if the element has been uniquely added, otherwise false
              */
+            __constexpr_non_literal_atomic__
             bool push_back_unique(const value_type& x, equal_comparator comparator) {
-                const std::lock_guard<std::recursive_mutex> lock(mtx_write);
+                std::lock_guard<std::recursive_mutex> lock(mtx_write);
                 for(auto it = store_ref->begin(); it != store_ref->end(); ) {
                     if( comparator( *it, x ) ) {
                         return false; // already included
@@ -776,9 +809,10 @@
              * @param comparator the equal comparator to return true if both given elements are equal
              * @return number of erased elements
              */
+            __constexpr_non_literal_atomic__
             int erase_matching(const value_type& x, const bool all_matching, equal_comparator comparator) {
                 int count = 0;
-                const std::lock_guard<std::recursive_mutex> lock(mtx_write);
+                std::lock_guard<std::recursive_mutex> lock(mtx_write);
                 storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref );
                 for(auto it = new_store_ref->begin(); it != new_store_ref->end(); ) {
                     if( comparator( *it, x ) ) {
@@ -809,8 +843,9 @@
              * @param i the position within this store
              * @param x the value to be assigned to the object at the given position
              */
+            __constexpr_non_literal_atomic__
             void put(size_type i, const value_type& x) {
-                const std::lock_guard<std::recursive_mutex> lock(mtx_write);
+                std::lock_guard<std::recursive_mutex> lock(mtx_write);
                 storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref );
                 new_store_ref->at(i) = x;
                 {
@@ -830,8 +865,9 @@
              * @param i the position within this store
              * @param x the value to be assigned to the object at the given position
              */
+            __constexpr_non_literal_atomic__
             void put(size_type i, value_type&& x) {
-                const std::lock_guard<std::recursive_mutex> lock(mtx_write);
+                std::lock_guard<std::recursive_mutex> lock(mtx_write);
                 storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref );
                 new_store_ref->at(i) = std::move(x);
                 {
```
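For context, the batched-write usage pattern documented in the copy_store()/set_store() comment within the diff above can be sketched as a complete caller. This is a minimal sketch: Thing, the mutation itself, and the range-for over the snapshot (assuming jau::darray's usual begin()/end() iteration) are illustrative placeholders, not part of the commit.

```cpp
#include <jau/cow_darray.hpp>
#include <memory>
#include <mutex>

struct Thing { int value; };

void mutate_in_bulk(jau::cow_darray<std::shared_ptr<Thing>>& list) {
    // Hold the write mutex across copy + mutation + publication, so concurrent
    // writers are blocked while readers stay lock-free on the old storage.
    std::lock_guard<std::recursive_mutex> lock(list.get_write_mutex());
    auto snapshot = list.copy_store();        // deep copy of the current storage
    for(auto& t : *snapshot) {
        t->value += 1;                        // some fancy mutation
    }
    list.set_store(std::move(snapshot));      // atomically publish the new storage
}
```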