aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorSven Gothel <[email protected]>2021-02-07 03:16:54 +0100
committerSven Gothel <[email protected]>2021-02-07 03:16:54 +0100
commit112528c73f09b262bd53f817f9d9ff9343af0ee9 (patch)
treefa45743017193d5e8f145fe148e647d7faf7eada /include
parent0beca1db7651b710a4514656fa2ac936d03b735b (diff)
Refine constexpr_* macros for functions: Add '_func' hinting function and use alternative constexpr function qualifier 'inline'
Diffstat (limited to 'include')
-rw-r--r--include/jau/cow_darray.hpp48
-rw-r--r--include/jau/cow_iterator.hpp12
-rw-r--r--include/jau/cow_vector.hpp36
-rw-r--r--include/jau/cpp_lang_macros.hpp20
-rw-r--r--include/jau/darray.hpp4
5 files changed, 66 insertions, 54 deletions
diff --git a/include/jau/cow_darray.hpp b/include/jau/cow_darray.hpp
index 46918e8..b59c5d5 100644
--- a/include/jau/cow_darray.hpp
+++ b/include/jau/cow_darray.hpp
@@ -301,7 +301,7 @@ namespace jau {
* Capacity and size will equal the given array, i.e. the result is a trimmed array.
* @param x the given cow_darray, all elements will be copied into the new instance.
*/
- constexpr_atomic
+ constexpr_func_atomic
cow_darray(const cow_darray& x)
: sync_atomic(false) {
storage_ref_t x_store_ref;
@@ -321,7 +321,7 @@ namespace jau {
* @param growth_factor custom growth factor
* @param alloc custom allocator_type instance
*/
- constexpr_atomic
+ constexpr_func_atomic
explicit cow_darray(const cow_darray& x, const float growth_factor, const allocator_type& alloc)
: sync_atomic(false) {
storage_ref_t x_store_ref;
@@ -345,7 +345,7 @@ namespace jau {
* @param growth_factor custom growth factor
* @param alloc custom allocator_type instance
*/
- constexpr_atomic
+ constexpr_func_atomic
explicit cow_darray(const cow_darray& x, const size_type _capacity, const float growth_factor, const allocator_type& alloc)
: sync_atomic(false) {
storage_ref_t x_store_ref;
@@ -364,7 +364,7 @@ namespace jau {
* This write operation uses a mutex lock and is blocking this instances' write operations only.
* </p>
*/
- constexpr_atomic
+ constexpr_func_atomic
cow_darray& operator=(const cow_darray& x) {
std::lock_guard<std::recursive_mutex> lock(mtx_write);
storage_ref_t x_store_ref;
@@ -384,7 +384,7 @@ namespace jau {
// move_ctor on cow_darray elements
- constexpr_atomic
+ constexpr_func_atomic
cow_darray(cow_darray && x) noexcept {
// Strategy-1: Acquire lock, blocking
// - If somebody else holds the lock, we wait.
@@ -409,7 +409,7 @@ namespace jau {
* This write operation uses a mutex lock and is blocking both cow_vector instance's write operations.
* </p>
*/
- constexpr_atomic
+ constexpr_func_atomic
cow_darray& operator=(cow_darray&& x) noexcept {
// Strategy-2: Acquire locks of both, blocking
// - If somebody else holds the lock, we wait.
@@ -546,7 +546,7 @@ namespace jau {
* @see jau::cow_darray::copy_store()
* @see jau::cow_darray::set_store()
*/
- constexpr_atomic
+ constexpr_func_atomic
storage_ref_t copy_store() {
std::lock_guard<std::recursive_mutex> lock(mtx_write);
DARRAY_PRINTF("copy_store: %s\n", get_info().c_str());
@@ -583,7 +583,7 @@ namespace jau {
* @see jau::cow_rw_iterator
* @see jau::cow_rw_iterator::write_back()
*/
- constexpr_atomic
+ constexpr_func_atomic
void set_store(storage_ref_t && new_store_ref) noexcept {
std::lock_guard<std::recursive_mutex> lock(mtx_write);
sc_atomic_critical sync(sync_atomic);
@@ -606,7 +606,7 @@ namespace jau {
* This read operation is <i>lock-free</i>.
* </p>
*/
- constexpr_atomic
+ constexpr_func_atomic
storage_ref_t snapshot() const noexcept {
sc_atomic_critical sync( sync_atomic );
return store_ref;
@@ -675,7 +675,7 @@ namespace jau {
/**
* Returns the growth factor
*/
- constexpr_atomic
+ constexpr_func_atomic
float growth_factor() const noexcept {
sc_atomic_critical sync( sync_atomic );
return store_ref->growth_factor();
@@ -688,7 +688,7 @@ namespace jau {
* </p>
* @return
*/
- constexpr_atomic
+ constexpr_func_atomic
size_type capacity() const noexcept {
sc_atomic_critical sync( sync_atomic );
return store_ref->capacity();
@@ -700,7 +700,7 @@ namespace jau {
* This read operation is <i>lock-free</i>.
* </p>
*/
- constexpr_atomic
+ constexpr_func_atomic
bool empty() const noexcept {
sc_atomic_critical sync( sync_atomic );
return store_ref->empty();
@@ -712,7 +712,7 @@ namespace jau {
* This read operation is <i>lock-free</i>.
* </p>
*/
- constexpr_atomic
+ constexpr_func_atomic
size_type size() const noexcept {
sc_atomic_critical sync( sync_atomic );
return store_ref->size();
@@ -747,7 +747,7 @@ namespace jau {
* This write operation uses a mutex lock and is blocking this instances' write operations.
* </p>
*/
- constexpr_atomic
+ constexpr_func_atomic
void clear() noexcept {
std::lock_guard<std::recursive_mutex> lock(mtx_write);
storage_ref_t new_store_ref = std::make_shared<storage_t>();
@@ -763,7 +763,7 @@ namespace jau {
* This write operation uses a mutex lock and is blocking both cow_darray instance's write operations.
* </p>
*/
- constexpr_atomic
+ constexpr_func_atomic
void swap(cow_darray& x) noexcept {
std::unique_lock<std::recursive_mutex> lock(mtx_write, std::defer_lock); // utilize std::lock(a, b), allowing mixed order waiting on either object
std::unique_lock<std::recursive_mutex> lock_x(x.mtx_write, std::defer_lock); // otherwise RAII-style relinquish via destructor
@@ -783,7 +783,7 @@ namespace jau {
* This write operation uses a mutex lock and is blocking this instances' write operations only.
* </p>
*/
- constexpr_atomic
+ constexpr_func_atomic
void pop_back() noexcept {
std::lock_guard<std::recursive_mutex> lock(mtx_write);
if( !store_ref->empty() ) {
@@ -806,7 +806,7 @@ namespace jau {
* </p>
* @param x the value to be added at the tail.
*/
- constexpr_atomic
+ constexpr_func_atomic
void push_back(const value_type& x) {
std::lock_guard<std::recursive_mutex> lock(mtx_write);
if( store_ref->capacity_reached() ) {
@@ -831,7 +831,7 @@ namespace jau {
* This write operation uses a mutex lock and is blocking this instances' write operations only.
* </p>
*/
- constexpr_atomic
+ constexpr_func_atomic
void push_back(value_type&& x) {
std::lock_guard<std::recursive_mutex> lock(mtx_write);
if( store_ref->capacity_reached() ) {
@@ -861,7 +861,7 @@ namespace jau {
* @param args arguments to forward to the constructor of the element
*/
template<typename... Args>
- constexpr_atomic
+ constexpr_func_atomic
reference emplace_back(Args&&... args) {
std::lock_guard<std::recursive_mutex> lock(mtx_write);
if( store_ref->capacity_reached() ) {
@@ -891,7 +891,7 @@ namespace jau {
* @param last last foreign input-iterator to range of value_type [first, last)
*/
template< class InputIt >
- constexpr_atomic
+ constexpr_func_atomic
void push_back( InputIt first, InputIt last ) {
std::lock_guard<std::recursive_mutex> lock(mtx_write);
const size_type new_size_ = store_ref->size() + size_type(last - first);
@@ -944,7 +944,7 @@ namespace jau {
* @param comparator the equal comparator to return true if both given elements are equal
* @return true if the element has been uniquely added, otherwise false
*/
- constexpr_atomic
+ constexpr_func_atomic
bool push_back_unique(const value_type& x, equal_comparator comparator) {
std::lock_guard<std::recursive_mutex> lock(mtx_write);
for(auto it = store_ref->begin(); it != store_ref->end(); ) {
@@ -982,7 +982,7 @@ namespace jau {
* @param comparator the equal comparator to return true if both given elements are equal
* @return number of erased elements
*/
- constexpr_atomic
+ constexpr_func_atomic
int erase_matching(const value_type& x, const bool all_matching, equal_comparator comparator) {
int count = 0;
@@ -1004,7 +1004,7 @@ namespace jau {
return count;
}
- constexpr_cxx20 std::string toString() const noexcept {
+ constexpr_func_cxx20 std::string toString() const noexcept {
std::string res("{ " + std::to_string( size() ) + ": ");
int i=0;
jau::for_each_const(*this, [&res, &i](const value_type & e) {
@@ -1015,7 +1015,7 @@ namespace jau {
return res;
}
- constexpr_cxx20 std::string get_info() const noexcept {
+ constexpr_func_cxx20 std::string get_info() const noexcept {
return ("cow_darray[this "+jau::aptrHexString(this)+
", "+store_ref->get_info()+
"]");
diff --git a/include/jau/cow_iterator.hpp b/include/jau/cow_iterator.hpp
index 0be2455..4133c1d 100644
--- a/include/jau/cow_iterator.hpp
+++ b/include/jau/cow_iterator.hpp
@@ -456,15 +456,15 @@ namespace jau {
constexpr difference_type operator-(const cow_rw_iterator& rhs) const noexcept
{ return iterator_ - rhs.iterator_; }
- constexpr_cxx20 std::string toString() const noexcept {
+ constexpr_func_cxx20 std::string toString() const noexcept {
return jau::to_string(iterator_);
}
#if 0
- constexpr_cxx20 operator std::string() const noexcept {
+ constexpr_func_cxx20 operator std::string() const noexcept {
return toString();
}
#endif
- constexpr_cxx20 std::string get_info() const noexcept {
+ constexpr_func_cxx20 std::string get_info() const noexcept {
return "cow_rw_iterator[this "+jau::aptrHexString(this)+", CoW "+jau::aptrHexString(&cow_parent_)+
", store "+jau::aptrHexString(&store_ref_)+
", "+jau::to_string(iterator_)+"]";
@@ -922,15 +922,15 @@ namespace jau {
constexpr difference_type distance(const cow_rw_iterator<storage_t, storage_ref_t, cow_container_t>& rhs) const noexcept
{ return iterator_ - rhs.iterator_; }
- constexpr_cxx20 std::string toString() const noexcept {
+ constexpr_func_cxx20 std::string toString() const noexcept {
return jau::to_string(iterator_);
}
#if 0
- constexpr_cxx20 operator std::string() const noexcept {
+ constexpr_func_cxx20 operator std::string() const noexcept {
return toString();
}
#endif
- constexpr_cxx20 std::string get_info() const noexcept {
+ constexpr_func_cxx20 std::string get_info() const noexcept {
return "cow_ro_iterator[this "+jau::aptrHexString(this)+
", store "+jau::aptrHexString(&store_ref_)+
", "+jau::to_string(iterator_)+"]";
diff --git a/include/jau/cow_vector.hpp b/include/jau/cow_vector.hpp
index b4262aa..6539e91 100644
--- a/include/jau/cow_vector.hpp
+++ b/include/jau/cow_vector.hpp
@@ -159,7 +159,7 @@ namespace jau {
constexpr explicit cow_vector(const storage_t& x)
: store_ref( std::make_shared<storage_t>(x, x->get_allocator()) ), sync_atomic(false) { }
- constexpr_atomic
+ constexpr_func_atomic
cow_vector(const cow_vector& x)
: sync_atomic(false) {
storage_ref_t x_store_ref;
@@ -191,7 +191,7 @@ namespace jau {
return *this;
}
- constexpr_atomic
+ constexpr_func_atomic
cow_vector(cow_vector && x) noexcept {
// Strategy-1: Acquire lock, blocking
// - If somebody else holds the lock, we wait.
@@ -214,7 +214,7 @@ namespace jau {
* This write operation uses a mutex lock and is blocking both cow_vector instance's write operations.
* </p>
*/
- constexpr_atomic
+ constexpr_func_atomic
cow_vector& operator=(cow_vector&& x) {
// Strategy-2: Acquire locks of both, blocking
// - If somebody else holds the lock, we wait.
@@ -298,7 +298,7 @@ namespace jau {
* @see jau::cow_vector::copy_store()
* @see jau::cow_vector::set_store()
*/
- constexpr_atomic
+ constexpr_func_atomic
storage_ref_t copy_store() {
std::lock_guard<std::recursive_mutex> lock(mtx_write);
return std::make_shared<storage_t>( *store_ref, store_ref->get_allocator() );
@@ -331,7 +331,7 @@ namespace jau {
* @see jau::cow_vector::copy_store()
* @see jau::cow_vector::set_store()
*/
- constexpr_atomic
+ constexpr_func_atomic
void set_store(storage_ref_t && new_store_ref) noexcept {
std::lock_guard<std::recursive_mutex> lock(mtx_write);
sc_atomic_critical sync(sync_atomic);
@@ -350,7 +350,7 @@ namespace jau {
* </p>
* @see jau::for_each_cow
*/
- constexpr_atomic
+ constexpr_func_atomic
storage_ref_t snapshot() const noexcept {
sc_atomic_critical sync( sync_atomic );
return store_ref;
@@ -383,7 +383,7 @@ namespace jau {
return store_ref->get_allocator();
}
- constexpr_atomic
+ constexpr_func_atomic
size_type capacity() const noexcept {
sc_atomic_critical sync( sync_atomic );
return store_ref->capacity();
@@ -395,7 +395,7 @@ namespace jau {
* This read operation is <i>lock-free</i>.
* </p>
*/
- constexpr_atomic
+ constexpr_func_atomic
bool empty() const noexcept {
sc_atomic_critical sync( sync_atomic );
return store_ref->empty();
@@ -407,7 +407,7 @@ namespace jau {
* This read operation is <i>lock-free</i>.
* </p>
*/
- constexpr_atomic
+ constexpr_func_atomic
size_type size() const noexcept {
sc_atomic_critical sync( sync_atomic );
return store_ref->size();
@@ -432,7 +432,7 @@ namespace jau {
* This write operation uses a mutex lock and is blocking this instances' write operations.
* </p>
*/
- constexpr_atomic
+ constexpr_func_atomic
void clear() noexcept {
std::lock_guard<std::recursive_mutex> lock(mtx_write);
storage_ref_t new_store_ref = std::make_shared<storage_t>();
@@ -448,7 +448,7 @@ namespace jau {
* This write operation uses a mutex lock and is blocking both cow_vector instance's write operations.
* </p>
*/
- constexpr_atomic
+ constexpr_func_atomic
void swap(cow_vector& x) noexcept {
std::unique_lock<std::recursive_mutex> lock(mtx_write, std::defer_lock); // utilize std::lock(a, b), allowing mixed order waiting on either object
std::unique_lock<std::recursive_mutex> lock_x(x.mtx_write, std::defer_lock); // otherwise RAII-style relinquish via destructor
@@ -468,7 +468,7 @@ namespace jau {
* This write operation uses a mutex lock and is blocking this instances' write operations only.
* </p>
*/
- constexpr_atomic
+ constexpr_func_atomic
void pop_back() noexcept {
std::lock_guard<std::recursive_mutex> lock(mtx_write);
storage_ref_t old_store_ref = store_ref;
@@ -489,7 +489,7 @@ namespace jau {
* </p>
* @param x the value to be added at the tail.
*/
- constexpr_atomic
+ constexpr_func_atomic
void push_back(const value_type& x) {
std::lock_guard<std::recursive_mutex> lock(mtx_write);
storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, store_ref->get_allocator() );
@@ -506,7 +506,7 @@ namespace jau {
* This write operation uses a mutex lock and is blocking this instances' write operations only.
* </p>
*/
- constexpr_atomic
+ constexpr_func_atomic
void push_back(value_type&& x) {
std::lock_guard<std::recursive_mutex> lock(mtx_write);
storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, store_ref->get_allocator() );
@@ -528,7 +528,7 @@ namespace jau {
* @param args arguments to forward to the constructor of the element
*/
template<typename... Args>
- constexpr_atomic
+ constexpr_func_atomic
reference emplace_back(Args&&... args) {
std::lock_guard<std::recursive_mutex> lock(mtx_write);
storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, store_ref->get_allocator() );
@@ -572,7 +572,7 @@ namespace jau {
* @param comparator the equal comparator to return true if both given elements are equal
* @return true if the element has been uniquely added, otherwise false
*/
- constexpr_atomic
+ constexpr_func_atomic
bool push_back_unique(const value_type& x, equal_comparator comparator) {
std::lock_guard<std::recursive_mutex> lock(mtx_write);
for(auto it = store_ref->begin(); it != store_ref->end(); ) {
@@ -610,7 +610,7 @@ namespace jau {
* @param comparator the equal comparator to return true if both given elements are equal
* @return number of erased elements
*/
- constexpr_atomic
+ constexpr_func_atomic
int erase_matching(const value_type& x, const bool all_matching, equal_comparator comparator) {
int count = 0;
std::lock_guard<std::recursive_mutex> lock(mtx_write);
@@ -633,7 +633,7 @@ namespace jau {
return count;
}
- constexpr_cxx20 std::string toString() const noexcept {
+ constexpr_func_cxx20 std::string toString() const noexcept {
std::string res("{ " + std::to_string( size() ) + ": ");
int i=0;
jau::for_each_const(*this, [&res, &i](const value_type & e) {
diff --git a/include/jau/cpp_lang_macros.hpp b/include/jau/cpp_lang_macros.hpp
index a5400ee..7d45922 100644
--- a/include/jau/cpp_lang_macros.hpp
+++ b/include/jau/cpp_lang_macros.hpp
@@ -27,17 +27,25 @@
/**
* <code>constexpr</code> enabled for C++20.
+ * <p>
+ * The alternative qualifier used is `inline`,
+ * as it is implied for `constexpr` used for functions.
+ * </p>
*/
#if __cplusplus > 201703L
- #define constexpr_cxx20 constexpr
+ #define constexpr_func_cxx20 constexpr
#else
- #define constexpr_cxx20
+ #define constexpr_func_cxx20 inline
#endif
/**
* Used when designed to declare a function <code>constexpr</code>,
* but prohibited by its specific implementation.
* <p>
+ * The alternative qualifier used is `inline`,
+ * as it is implied for `constexpr` used for functions.
+ * </p>
+ * <p>
* Here it uses non-literal variables, such as std::lock_guard etc.
* As these can't be evaluated at compile time, the standard does
* not allow using <code>constexpr</code> here.
@@ -46,19 +54,23 @@
* Empty until standard defines otherwise.
* </p>
*/
- #define constexpr_non_literal_var
+ #define constexpr_func_non_literal_var inline
/**
* Used when designed to declare a function <code>constexpr</code>,
* but prohibited by its specific implementation.
* <p>
+ * The alternative qualifier used is `inline`,
+ * as it is implied for `constexpr` used for functions.
+ * </p>
+ * <p>
* Here it uses thread-safety related measures like atomic storage
* or mutex locks, which are non-literal variables and hence
* prohibit the use of <code>constexpr</code>.
* </p>
* @see constexpr_non_literal_var
*/
- #define constexpr_atomic
+ #define constexpr_func_atomic inline
/**
* Set define if RTTI is enabled during compilation,
diff --git a/include/jau/darray.hpp b/include/jau/darray.hpp
index c0d993a..dd1e057 100644
--- a/include/jau/darray.hpp
+++ b/include/jau/darray.hpp
@@ -1122,7 +1122,7 @@ namespace jau {
return count;
}
- constexpr_cxx20 std::string toString() const noexcept {
+ constexpr_func_cxx20 std::string toString() const noexcept {
std::string res("{ " + std::to_string( size() ) + ": ");
int i=0;
jau::for_each_const(*this, [&res, &i](const value_type & e) {
@@ -1133,7 +1133,7 @@ namespace jau {
return res;
}
- constexpr_cxx20 std::string get_info() const noexcept {
+ constexpr_func_cxx20 std::string get_info() const noexcept {
difference_type cap_ = (storage_end_-begin_);
difference_type size_ = (end_-begin_);
std::string res("darray[this "+jau::aptrHexString(this)+