author    | Sven Gothel <[email protected]> | 2021-01-03 20:03:04 +0100
committer | Sven Gothel <[email protected]> | 2021-01-03 20:03:04 +0100
commit    | 01753664afcca9a0d27b0893a738822f39a51a20 (patch)
tree      | e4a2220fbe166168f66594f09903b427566bfdaa /include
parent    | 2b588a08cbdf32581d08566ad85383db91da5f62 (diff)
cow_[darray|vector]: Use the 'mutable' modifier for sync_atomic and mtx_write, which are always used mutably; don't use constexpr on functions using non-literal variables, mark them '__constexpr_non_literal_atomic__' instead
Introducing cpp_lang_macros.hpp, i.e. documenting the reason for not using constexpr via the '__constexpr_non_literal_atomic__' annotation.
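A minimal sketch of how such an annotation reads at a declaration site; the hit_counter class below is invented for illustration, only the (empty) macro itself, defined in the new header at the end of the diff, is part of this commit:

    #include <atomic>

    #include <jau/cpp_lang_macros.hpp>

    class hit_counter {  // hypothetical example type, not part of the commit
      private:
        mutable std::atomic<int> hits { 0 };  // non-literal member: not usable in constant evaluation

      public:
        __constexpr_non_literal_atomic__      // would be 'constexpr' if the standard allowed it; expands to nothing
        int count() const noexcept {
            return hits.fetch_add(1, std::memory_order_relaxed) + 1;
        }
    };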
Using the mutable keyword is more elegant than const_cast.
Also remove const from std::lock_guard, as the guard is not used as a const object (it unlocks at destruction).
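A reduced sketch of both points, assuming a std::atomic<bool> stand-in for jau's sc_atomic_bool; member names follow the diff, the class itself is trimmed for illustration:

    #include <atomic>
    #include <cstddef>
    #include <mutex>

    class cow_store {  // trimmed stand-in for cow_darray / cow_vector
      private:
        mutable std::atomic<bool>    sync_atomic { false };  // read helpers need it even on const objects
        mutable std::recursive_mutex mtx_write;              // write mutex, also reachable from const code

      public:
        std::size_t size() const noexcept {
            // Before: sc_atomic_critical sync( const_cast<cow_store*>(this)->sync_atomic );
            // After:  'mutable' lets the const reader use the member directly.
            bool busy = sync_atomic.load();   // placeholder for the RAII sc_atomic_critical guard
            (void)busy;
            return 0;                         // would return the current snapshot's size
        }

        void clear() noexcept {
            // A 'const std::lock_guard' compiles as well, but the guard is never semantically
            // const: it locks in its constructor and unlocks (mutates the mutex) in its destructor.
            std::lock_guard<std::recursive_mutex> lock(mtx_write);
            // ... swap in a fresh, empty storage under the lock ...
        }
    };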
Code is clang++ 11 clean.
Diffstat (limited to 'include')
-rw-r--r-- | include/jau/cow_darray.hpp      | 110
-rw-r--r-- | include/jau/cow_vector.hpp      |  73
-rw-r--r-- | include/jau/cpp_lang_macros.hpp |  56
3 files changed, 174 insertions, 65 deletions
diff --git a/include/jau/cow_darray.hpp b/include/jau/cow_darray.hpp index 07c44b0..d996ac7 100644 --- a/include/jau/cow_darray.hpp +++ b/include/jau/cow_darray.hpp @@ -35,6 +35,7 @@ #include <condition_variable> #include <algorithm> +#include <jau/cpp_lang_macros.hpp> #include <jau/debug.hpp> #include <jau/darray.hpp> #include <jau/basic_types.hpp> @@ -145,8 +146,8 @@ namespace jau { static constexpr size_type DIFF_MAX = std::numeric_limits<difference_type>::max(); storage_ref_t store_ref; - sc_atomic_bool sync_atomic; - std::recursive_mutex mtx_write; + mutable sc_atomic_bool sync_atomic; + mutable std::recursive_mutex mtx_write; public: // ctor w/o elements @@ -187,11 +188,12 @@ namespace jau { * Capacity and size will equal the given array, i.e. the result is a trimmed array. * @param x the given cow_darray, all elements will be copied into the new instance. */ - constexpr cow_darray(const cow_darray& x) + __constexpr_non_literal_atomic__ + cow_darray(const cow_darray& x) : sync_atomic(false) { storage_ref_t x_store_ref; { - sc_atomic_critical sync_x( const_cast<cow_darray *>(&x)->sync_atomic ); + sc_atomic_critical sync_x( x.sync_atomic ); x_store_ref = x.store_ref; } store_ref = std::make_shared<storage_t>( *x_store_ref ); @@ -204,11 +206,12 @@ namespace jau { * @param growth_factor custom growth factor * @param alloc custom allocator_type instance */ - constexpr explicit cow_darray(const cow_darray& x, const float growth_factor, const allocator_type& alloc) + __constexpr_non_literal_atomic__ + explicit cow_darray(const cow_darray& x, const float growth_factor, const allocator_type& alloc) : sync_atomic(false) { storage_ref_t x_store_ref; { - sc_atomic_critical sync_x( const_cast<cow_darray *>(&x)->sync_atomic ); + sc_atomic_critical sync_x( x.sync_atomic ); x_store_ref = x.store_ref; } store_ref = std::make_shared<storage_t>( *x_store_ref, growth_factor, alloc ); @@ -225,11 +228,12 @@ namespace jau { * @param growth_factor custom growth factor * @param alloc custom allocator_type instance */ - constexpr explicit cow_darray(const cow_darray& x, const size_type _capacity, const float growth_factor, const allocator_type& alloc) + __constexpr_non_literal_atomic__ + explicit cow_darray(const cow_darray& x, const size_type _capacity, const float growth_factor, const allocator_type& alloc) : sync_atomic(false) { storage_ref_t x_store_ref; { - sc_atomic_critical sync_x( const_cast<cow_darray *>(&x)->sync_atomic ); + sc_atomic_critical sync_x( x.sync_atomic ); x_store_ref = x.store_ref; } store_ref = std::make_shared<storage_t>( *x_store_ref, _capacity, growth_factor, alloc ); @@ -325,8 +329,9 @@ namespace jau { * @see jau::cow_darray::copy_store() * @see jau::cow_darray::set_store() */ + __constexpr_non_literal_atomic__ storage_ref_t copy_store() { - const std::lock_guard<std::recursive_mutex> lock(mtx_write); + std::lock_guard<std::recursive_mutex> lock(mtx_write); return std::make_shared<storage_t>( *store_ref ); } @@ -342,7 +347,7 @@ namespace jau { * cow_darray<std::shared_ptr<Thing>> list; * ... * { - * const std::lock_guard<std::recursive_mutex> lock(list.get_write_mutex()); + * std::lock_guard<std::recursive_mutex> lock(list.get_write_mutex()); * std::shared_ptr<std::vector<std::shared_ptr<Thing>>> snapshot = list.copy_store(); * ... 
* some fancy mutation @@ -357,8 +362,9 @@ namespace jau { * @see jau::cow_darray::copy_store() * @see jau::cow_darray::set_store() */ - constexpr void set_store(storage_ref_t && new_store_ref) noexcept { - const std::lock_guard<std::recursive_mutex> lock(mtx_write); + __constexpr_non_literal_atomic__ + void set_store(storage_ref_t && new_store_ref) noexcept { + std::lock_guard<std::recursive_mutex> lock(mtx_write); sc_atomic_critical sync(sync_atomic); store_ref = std::move( new_store_ref ); } @@ -375,8 +381,9 @@ namespace jau { * </p> * @see jau::for_each_cow */ - constexpr storage_ref_t get_snapshot() const noexcept { - sc_atomic_critical sync( const_cast<cow_darray *>(this)->sync_atomic ); + __constexpr_non_literal_atomic__ + storage_ref_t get_snapshot() const noexcept { + sc_atomic_critical sync( sync_atomic ); return store_ref; } @@ -411,17 +418,34 @@ namespace jau { // read access const allocator_type& get_allocator_ref() const noexcept { - sc_atomic_critical sync( const_cast<cow_darray *>(this)->sync_atomic ); + sc_atomic_critical sync( sync_atomic ); return store_ref->get_allocator_ref(); } allocator_type get_allocator() const noexcept { - sc_atomic_critical sync( const_cast<cow_darray *>(this)->sync_atomic ); + sc_atomic_critical sync( sync_atomic ); return store_ref->get_allocator(); } - constexpr size_type capacity() const noexcept { - sc_atomic_critical sync( const_cast<cow_darray *>(this)->sync_atomic ); + /** + * Returns the growth factor + */ + __constexpr_non_literal_atomic__ + float growth_factor() const noexcept { + sc_atomic_critical sync( sync_atomic ); + return store_ref->growth_factor(); + } + + /** + * Like std::vector::empty(). + * <p> + * This read operation is <i>lock-free</i>. + * </p> + * @return + */ + __constexpr_non_literal_atomic__ + size_type capacity() const noexcept { + sc_atomic_critical sync( sync_atomic ); return store_ref->capacity(); } @@ -431,8 +455,9 @@ namespace jau { * This read operation is <i>lock-free</i>. * </p> */ - constexpr bool empty() const noexcept { - sc_atomic_critical sync( const_cast<cow_darray *>(this)->sync_atomic ); + __constexpr_non_literal_atomic__ + bool empty() const noexcept { + sc_atomic_critical sync( sync_atomic ); return store_ref->empty(); } @@ -442,8 +467,9 @@ namespace jau { * This read operation is <i>lock-free</i>. 
* </p> */ - constexpr size_type size() const noexcept { - sc_atomic_critical sync( const_cast<cow_darray *>(this)->sync_atomic ); + __constexpr_non_literal_atomic__ + size_type size() const noexcept { + sc_atomic_critical sync( sync_atomic ); return store_ref->size(); } @@ -460,12 +486,12 @@ namespace jau { * </p> */ void reserve(size_type new_capacity) { - const std::lock_guard<std::recursive_mutex> lock(mtx_write); storage_ref_t old_store_ref = store_ref; if( new_capacity > old_store_ref->capacity() ) { storage_ref_t new_store_ref = std::make_shared<storage_t>( *old_store_ref, new_capacity, old_store_ref->growth_factor(), old_store_ref->get_allocator_ref() ); + std::lock_guard<std::recursive_mutex> lock(mtx_write); sc_atomic_critical sync( sync_atomic ); store_ref = std::move(new_store_ref); } @@ -478,7 +504,7 @@ namespace jau { * </p> */ cow_darray& operator=(const storage_t& x) { - const std::lock_guard<std::recursive_mutex> lock(mtx_write); + std::lock_guard<std::recursive_mutex> lock(mtx_write); storage_ref_t new_store_ref = std::make_shared<storage_t>( x ); sc_atomic_critical sync(sync_atomic); store_ref = std::move(new_store_ref); @@ -492,7 +518,7 @@ namespace jau { * </p> */ cow_darray& operator=(storage_t&& x) { - const std::lock_guard<std::recursive_mutex> lock(mtx_write); + std::lock_guard<std::recursive_mutex> lock(mtx_write); storage_ref_t new_store_ref = std::make_shared<storage_t>( std::move(x) ); sc_atomic_critical sync(sync_atomic); store_ref = std::move(new_store_ref); @@ -506,10 +532,10 @@ namespace jau { * </p> */ cow_darray& operator=(const cow_darray& x) { - const std::lock_guard<std::recursive_mutex> lock(mtx_write); + std::lock_guard<std::recursive_mutex> lock(mtx_write); storage_ref_t x_store_ref; { - sc_atomic_critical sync_x( const_cast<cow_darray *>(&x)->sync_atomic ); + sc_atomic_critical sync_x( x.sync_atomic ); x_store_ref = x.store_ref; } storage_ref_t new_store_ref = std::make_shared<storage_t>( *x_store_ref ); @@ -546,8 +572,9 @@ namespace jau { * This write operation uses a mutex lock and is blocking this instances' write operations. * </p> */ + __constexpr_non_literal_atomic__ void clear() noexcept { - const std::lock_guard<std::recursive_mutex> lock(mtx_write); + std::lock_guard<std::recursive_mutex> lock(mtx_write); storage_ref_t new_store_ref = std::make_shared<storage_t>(); { sc_atomic_critical sync(sync_atomic); @@ -561,6 +588,7 @@ namespace jau { * This write operation uses a mutex lock and is blocking both cow_darray instance's write operations. * </p> */ + __constexpr_non_literal_atomic__ void swap(cow_darray& x) noexcept { std::unique_lock<std::recursive_mutex> lock(mtx_write, std::defer_lock); // utilize std::lock(a, b), allowing mixed order waiting on either object std::unique_lock<std::recursive_mutex> lock_x(x.mtx_write, std::defer_lock); // otherwise RAII-style relinquish via destructor @@ -580,8 +608,8 @@ namespace jau { * This write operation uses a mutex lock and is blocking this instances' write operations only. 
* </p> */ + __constexpr_non_literal_atomic__ void pop_back() noexcept { - const std::lock_guard<std::recursive_mutex> lock(mtx_write); storage_ref_t old_store_ref = store_ref; if( !old_store_ref->empty() ) { storage_ref_t new_store_ref = std::make_shared<storage_t>( old_store_ref->capacity(), @@ -589,6 +617,7 @@ namespace jau { old_store_ref->cend()-1, old_store_ref->growth_factor(), old_store_ref->get_allocator_ref() ); + std::lock_guard<std::recursive_mutex> lock(mtx_write); { sc_atomic_critical sync(sync_atomic); store_ref = std::move(new_store_ref); @@ -603,10 +632,11 @@ namespace jau { * </p> * @param x the value to be added at the tail. */ + __constexpr_non_literal_atomic__ void push_back(const value_type& x) { - const std::lock_guard<std::recursive_mutex> lock(mtx_write); storage_ref_t old_store_ref = store_ref; if( old_store_ref->capacity_reached() ) { + std::lock_guard<std::recursive_mutex> lock(mtx_write); // grow and swap all refs storage_ref_t new_store_ref = std::make_shared<storage_t>( *old_store_ref, old_store_ref->grow_capacity(), old_store_ref->growth_factor(), @@ -628,10 +658,11 @@ namespace jau { * This write operation uses a mutex lock and is blocking this instances' write operations only. * </p> */ + __constexpr_non_literal_atomic__ void push_back(value_type&& x) { - const std::lock_guard<std::recursive_mutex> lock(mtx_write); storage_ref_t old_store_ref = store_ref; if( old_store_ref->capacity_reached() ) { + std::lock_guard<std::recursive_mutex> lock(mtx_write); // grow and swap all refs storage_ref_t new_store_ref = std::make_shared<storage_t>( *old_store_ref, old_store_ref->grow_capacity(), old_store_ref->growth_factor(), @@ -657,10 +688,11 @@ namespace jau { * @param last last foreign input-iterator to range of value_type [first, last) */ template< class InputIt > - constexpr void push_back( InputIt first, InputIt last ) { - const std::lock_guard<std::recursive_mutex> lock(mtx_write); storage_ref_t old_store_ref = store_ref; const size_type new_size_ = old_store_ref->size() + size_type(last - first); + __constexpr_non_literal_atomic__ + void push_back( InputIt first, InputIt last ) { + std::lock_guard<std::recursive_mutex> lock(mtx_write); if( new_size_ > old_store_ref->capacity() ) { // grow and swap all refs @@ -739,8 +771,9 @@ namespace jau { * @param comparator the equal comparator to return true if both given elements are equal * @return true if the element has been uniquely added, otherwise false */ + __constexpr_non_literal_atomic__ bool push_back_unique(const value_type& x, equal_comparator comparator) { - const std::lock_guard<std::recursive_mutex> lock(mtx_write); + std::lock_guard<std::recursive_mutex> lock(mtx_write); for(auto it = store_ref->begin(); it != store_ref->end(); ) { if( comparator( *it, x ) ) { return false; // already included @@ -776,9 +809,10 @@ namespace jau { * @param comparator the equal comparator to return true if both given elements are equal * @return number of erased elements */ + __constexpr_non_literal_atomic__ int erase_matching(const value_type& x, const bool all_matching, equal_comparator comparator) { int count = 0; - const std::lock_guard<std::recursive_mutex> lock(mtx_write); + std::lock_guard<std::recursive_mutex> lock(mtx_write); storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref ); for(auto it = new_store_ref->begin(); it != new_store_ref->end(); ) { if( comparator( *it, x ) ) { @@ -809,8 +843,9 @@ namespace jau { * @param i the position within this store * @param x the value to be assigned to the 
object at the given position */ + __constexpr_non_literal_atomic__ void put(size_type i, const value_type& x) { - const std::lock_guard<std::recursive_mutex> lock(mtx_write); + std::lock_guard<std::recursive_mutex> lock(mtx_write); storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref ); new_store_ref->at(i) = x; { @@ -830,8 +865,9 @@ namespace jau { * @param i the position within this store * @param x the value to be assigned to the object at the given position */ + __constexpr_non_literal_atomic__ void put(size_type i, value_type&& x) { - const std::lock_guard<std::recursive_mutex> lock(mtx_write); + std::lock_guard<std::recursive_mutex> lock(mtx_write); storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref ); new_store_ref->at(i) = std::move(x); { diff --git a/include/jau/cow_vector.hpp b/include/jau/cow_vector.hpp index 35e7ac7..69143f8 100644 --- a/include/jau/cow_vector.hpp +++ b/include/jau/cow_vector.hpp @@ -37,6 +37,7 @@ #include <vector> #include <algorithm> +#include <jau/cpp_lang_macros.hpp> #include <jau/debug.hpp> #include <jau/basic_types.hpp> #include <jau/ordered_atomic.hpp> @@ -134,8 +135,8 @@ namespace jau { static constexpr size_type DIFF_MAX = std::numeric_limits<difference_type>::max(); storage_ref_t store_ref; - sc_atomic_bool sync_atomic; - std::recursive_mutex mtx_write; + mutable sc_atomic_bool sync_atomic; + mutable std::recursive_mutex mtx_write; public: // ctor @@ -152,11 +153,12 @@ namespace jau { constexpr cow_vector(size_type n, const value_type& value, const allocator_type& a = allocator_type()) : store_ref( std::make_shared<storage_t>(n, value, a) ), sync_atomic(false) { } - constexpr cow_vector(const cow_vector& x) + __constexpr_non_literal_atomic__ + cow_vector(const cow_vector& x) : sync_atomic(false) { storage_ref_t x_store_ref; { - sc_atomic_critical sync_x( const_cast<cow_vector *>(&x)->sync_atomic ); + sc_atomic_critical sync_x( x.sync_atomic ); x_store_ref = x.store_ref; } store_ref = std::make_shared<storage_t>( *x_store_ref, x_store_ref->get_allocator() ); @@ -212,8 +214,9 @@ namespace jau { * @see jau::cow_vector::copy_store() * @see jau::cow_vector::set_store() */ + __constexpr_non_literal_atomic__ storage_ref_t copy_store() { - const std::lock_guard<std::recursive_mutex> lock(mtx_write); + std::lock_guard<std::recursive_mutex> lock(mtx_write); return std::make_shared<storage_t>( *store_ref, store_ref->get_allocator() ); } @@ -229,7 +232,7 @@ namespace jau { * cow_vector<std::shared_ptr<Thing>> list; * ... * { - * const std::lock_guard<std::recursive_mutex> lock(list.get_write_mutex()); + * std::lock_guard<std::recursive_mutex> lock(list.get_write_mutex()); * std::shared_ptr<std::vector<std::shared_ptr<Thing>>> snapshot = list.copy_store(); * ... 
* some fancy mutation @@ -244,8 +247,9 @@ namespace jau { * @see jau::cow_vector::copy_store() * @see jau::cow_vector::set_store() */ - constexpr void set_store(storage_ref_t && new_store_ref) noexcept { - const std::lock_guard<std::recursive_mutex> lock(mtx_write); + __constexpr_non_literal_atomic__ + void set_store(storage_ref_t && new_store_ref) noexcept { + std::lock_guard<std::recursive_mutex> lock(mtx_write); sc_atomic_critical sync(sync_atomic); store_ref = std::move( new_store_ref ); } @@ -262,8 +266,9 @@ namespace jau { * </p> * @see jau::for_each_cow */ - constexpr storage_ref_t get_snapshot() const noexcept { - sc_atomic_critical sync( const_cast<cow_vector *>(this)->sync_atomic ); + __constexpr_non_literal_atomic__ + storage_ref_t get_snapshot() const noexcept { + sc_atomic_critical sync( sync_atomic ); return store_ref; } @@ -298,12 +303,13 @@ namespace jau { // read access allocator_type get_allocator() const noexcept { - sc_atomic_critical sync( const_cast<cow_vector *>(this)->sync_atomic ); + sc_atomic_critical sync( sync_atomic ); return store_ref->get_allocator(); } - constexpr size_type capacity() const noexcept { - sc_atomic_critical sync( const_cast<cow_vector *>(this)->sync_atomic ); + __constexpr_non_literal_atomic__ + size_type capacity() const noexcept { + sc_atomic_critical sync( sync_atomic ); return store_ref->capacity(); } @@ -313,8 +319,9 @@ namespace jau { * This read operation is <i>lock-free</i>. * </p> */ - constexpr bool empty() const noexcept { - sc_atomic_critical sync( const_cast<cow_vector *>(this)->sync_atomic ); + __constexpr_non_literal_atomic__ + bool empty() const noexcept { + sc_atomic_critical sync( sync_atomic ); return store_ref->empty(); } @@ -324,8 +331,9 @@ namespace jau { * This read operation is <i>lock-free</i>. * </p> */ - constexpr size_type size() const noexcept { - sc_atomic_critical sync( const_cast<cow_vector *>(this)->sync_atomic ); + __constexpr_non_literal_atomic__ + size_type size() const noexcept { + sc_atomic_critical sync( sync_atomic ); return store_ref->size(); } @@ -388,7 +396,7 @@ namespace jau { // write access void reserve(size_type new_capacity) { - const std::lock_guard<std::recursive_mutex> lock(mtx_write); + std::lock_guard<std::recursive_mutex> lock(mtx_write); storage_ref_t old_store_ref = store_ref; if( new_capacity > old_store_ref->capacity() ) { storage_ref_t new_store_ref = std::make_shared<storage_t>( *old_store_ref, old_store_ref->get_allocator() ); @@ -405,10 +413,10 @@ namespace jau { * </p> */ cow_vector& operator=(const cow_vector& x) { - const std::lock_guard<std::recursive_mutex> lock(mtx_write); + std::lock_guard<std::recursive_mutex> lock(mtx_write); storage_ref_t x_store_ref; { - sc_atomic_critical sync_x( const_cast<cow_vector *>(&x)->sync_atomic ); + sc_atomic_critical sync_x( x.sync_atomic ); x_store_ref = x.store_ref; } storage_ref_t new_store_ref = std::make_shared<storage_t>( *x_store_ref, x_store_ref->get_allocator() ); @@ -445,8 +453,9 @@ namespace jau { * This write operation uses a mutex lock and is blocking this instances' write operations. * </p> */ + __constexpr_non_literal_atomic__ void clear() noexcept { - const std::lock_guard<std::recursive_mutex> lock(mtx_write); + std::lock_guard<std::recursive_mutex> lock(mtx_write); storage_ref_t new_store_ref = std::make_shared<storage_t>(); { sc_atomic_critical sync(sync_atomic); @@ -460,6 +469,7 @@ namespace jau { * This write operation uses a mutex lock and is blocking both cow_vector instance's write operations. 
* </p> */ + __constexpr_non_literal_atomic__ void swap(cow_vector& x) noexcept { std::unique_lock<std::recursive_mutex> lock(mtx_write, std::defer_lock); // utilize std::lock(a, b), allowing mixed order waiting on either object std::unique_lock<std::recursive_mutex> lock_x(x.mtx_write, std::defer_lock); // otherwise RAII-style relinquish via destructor @@ -479,8 +489,9 @@ namespace jau { * This write operation uses a mutex lock and is blocking this instances' write operations only. * </p> */ + __constexpr_non_literal_atomic__ void pop_back() noexcept { - const std::lock_guard<std::recursive_mutex> lock(mtx_write); + std::lock_guard<std::recursive_mutex> lock(mtx_write); storage_ref_t old_store_ref = store_ref; if( 0 < old_store_ref->size() ) { storage_ref_t new_store_ref = std::make_shared<storage_t>( *old_store_ref, old_store_ref->get_allocator() ); @@ -499,8 +510,9 @@ namespace jau { * </p> * @param x the value to be added at the tail. */ + __constexpr_non_literal_atomic__ void push_back(const value_type& x) { - const std::lock_guard<std::recursive_mutex> lock(mtx_write); + std::lock_guard<std::recursive_mutex> lock(mtx_write); storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, store_ref->get_allocator() ); new_store_ref->push_back(x); { @@ -515,8 +527,9 @@ namespace jau { * This write operation uses a mutex lock and is blocking this instances' write operations only. * </p> */ + __constexpr_non_literal_atomic__ void push_back(value_type&& x) { - const std::lock_guard<std::recursive_mutex> lock(mtx_write); + std::lock_guard<std::recursive_mutex> lock(mtx_write); storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, store_ref->get_allocator() ); new_store_ref->push_back( std::move(x) ); { @@ -557,8 +570,9 @@ namespace jau { * @param comparator the equal comparator to return true if both given elements are equal * @return true if the element has been uniquely added, otherwise false */ + __constexpr_non_literal_atomic__ bool push_back_unique(const value_type& x, equal_comparator comparator) { - const std::lock_guard<std::recursive_mutex> lock(mtx_write); + std::lock_guard<std::recursive_mutex> lock(mtx_write); for(auto it = store_ref->begin(); it != store_ref->end(); ) { if( comparator( *it, x ) ) { return false; // already included @@ -599,9 +613,10 @@ namespace jau { * @param comparator the equal comparator to return true if both given elements are equal * @return number of erased elements */ + __constexpr_non_literal_atomic__ int erase_matching(const value_type& x, const bool all_matching, equal_comparator comparator) { int count = 0; - const std::lock_guard<std::recursive_mutex> lock(mtx_write); + std::lock_guard<std::recursive_mutex> lock(mtx_write); storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, store_ref->get_allocator() ); for(auto it = new_store_ref->begin(); it != new_store_ref->end(); ) { if( comparator( *it, x ) ) { @@ -632,8 +647,9 @@ namespace jau { * @param i the position within this store * @param x the value to be assigned to the object at the given position */ + __constexpr_non_literal_atomic__ void put(size_type i, const value_type& x) { - const std::lock_guard<std::recursive_mutex> lock(mtx_write); + std::lock_guard<std::recursive_mutex> lock(mtx_write); storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, store_ref->get_allocator() ); new_store_ref->at(i) = x; { @@ -653,8 +669,9 @@ namespace jau { * @param i the position within this store * @param x the value to be assigned to the object at 
the given position
*/
+ __constexpr_non_literal_atomic__
void put(size_type i, value_type&& x) {
- const std::lock_guard<std::recursive_mutex> lock(mtx_write);
+ std::lock_guard<std::recursive_mutex> lock(mtx_write);
storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, store_ref->get_allocator() );
new_store_ref->at(i) = std::move(x);
{
diff --git a/include/jau/cpp_lang_macros.hpp b/include/jau/cpp_lang_macros.hpp
new file mode 100644
index 0000000..2f090b2
--- /dev/null
+++ b/include/jau/cpp_lang_macros.hpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2020 Gothel Software e.K.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef CPP_LANG_MACROS_HPP_
+#define CPP_LANG_MACROS_HPP_
+
+#if __cplusplus > 201703L
+    #define __constexpr_cxx20_ constexpr
+    #warning CXX 20 detected but not evaluated yet
+#else
+    #define __constexpr_cxx20_
+#endif
+
+    /**
+     * Used when attempting to define a function 'constexpr',
+     * but uses non-literal variables, such as std::lock_guard etc.
+     * As these can't be evaluated at compile time, the standard does
+     * not allow using 'constexpr' here.
+     * <p>
+     * Empty until standard defines otherwise.
+     * </p>
+     */
+    #define __constexpr_non_literal_var__
+
+    /**
+     * See __constexpr_non_literal_var__
+     */
+    #define __constexpr_non_literal_atomic__
+
+    /**
+     * See __constexpr_non_literal_var__
+     */
+    #define __constexpr_non_literal_mutex__
+
+#endif /* CPP_LANG_MACROS_HPP_ */
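Taken together, the headers above yield the usage pattern already hinted at in the copy_store()/set_store() documentation: lock-free snapshot reads plus mutex-guarded bulk mutation. A condensed sketch (Thing and example() are illustrative; the cow_darray member functions are the ones shown in the diff):

    #include <memory>
    #include <mutex>

    #include <jau/cow_darray.hpp>

    struct Thing { int value; };   // illustrative element type

    void example(jau::cow_darray<std::shared_ptr<Thing>>& list) {
        // Lock-free read path: take an immutable snapshot and iterate it freely.
        auto snapshot = list.get_snapshot();
        for( const auto& t : *snapshot ) { (void)t; /* read-only access */ }

        // Single-element write path: the container handles locking and growth internally.
        list.push_back( std::make_shared<Thing>( Thing{ 42 } ) );

        // Bulk mutation, as documented at copy_store()/set_store():
        {
            std::lock_guard<std::recursive_mutex> lock( list.get_write_mutex() );
            auto new_store = list.copy_store();      // deep copy of the current storage
            // ... some fancy mutation on *new_store ...
            list.set_store( std::move(new_store) );  // publish the new storage to readers
        }
    }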