From 1723d63c3b9a26fadfc8c6e414f3815b30b237f1 Mon Sep 17 00:00:00 2001
From: Jack Lloyd
Date: Sat, 29 Aug 2015 12:04:35 -0400
Subject: Remove alloc module; move secmem.h to base and locking_allocator to utils

---
 src/lib/alloc/info.txt                             |   3 -
 src/lib/alloc/locking_allocator/info.txt           |  10 -
 .../alloc/locking_allocator/locking_allocator.cpp  | 304 ---------------------
 .../alloc/locking_allocator/locking_allocator.h    |  44 ---
 src/lib/alloc/secmem.h                             | 205 --------------
 src/lib/base/info.txt                              |   2 +-
 src/lib/base/secmem.h                              | 205 ++++++++++++++
 src/lib/entropy/proc_walk/info.txt                 |   4 -
 src/lib/hash/keccak/info.txt                       |   4 -
 src/lib/math/bigint/info.txt                       |   1 -
 src/lib/pk_pad/info.txt                            |   1 -
 src/lib/pubkey/ecc_key/info.txt                    |   1 -
 src/lib/pubkey/ecdh/info.txt                       |   1 -
 src/lib/pubkey/gost_3410/info.txt                  |   1 -
 src/lib/pubkey/info.txt                            |   1 -
 src/lib/utils/datastor/info.txt                    |   3 -
 src/lib/utils/locking_allocator/info.txt           |  10 +
 .../utils/locking_allocator/locking_allocator.cpp  | 304 +++++++++++++++++++++
 .../utils/locking_allocator/locking_allocator.h    |  44 +++
 19 files changed, 564 insertions(+), 584 deletions(-)
 delete mode 100644 src/lib/alloc/info.txt
 delete mode 100644 src/lib/alloc/locking_allocator/info.txt
 delete mode 100644 src/lib/alloc/locking_allocator/locking_allocator.cpp
 delete mode 100644 src/lib/alloc/locking_allocator/locking_allocator.h
 delete mode 100644 src/lib/alloc/secmem.h
 create mode 100644 src/lib/base/secmem.h
 create mode 100644 src/lib/utils/locking_allocator/info.txt
 create mode 100644 src/lib/utils/locking_allocator/locking_allocator.cpp
 create mode 100644 src/lib/utils/locking_allocator/locking_allocator.h

diff --git a/src/lib/alloc/info.txt b/src/lib/alloc/info.txt
deleted file mode 100644
index 0ab7fa768..000000000
--- a/src/lib/alloc/info.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-<header:public>
-secmem.h
-</header:public>
diff --git a/src/lib/alloc/locking_allocator/info.txt b/src/lib/alloc/locking_allocator/info.txt
deleted file mode 100644
index d3b5e86f8..000000000
--- a/src/lib/alloc/locking_allocator/info.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-define LOCKING_ALLOCATOR 20131128
-
-<os>
-android
-linux
-freebsd
-netbsd
-openbsd
-solaris
-</os>
diff --git a/src/lib/alloc/locking_allocator/locking_allocator.cpp b/src/lib/alloc/locking_allocator/locking_allocator.cpp
deleted file mode 100644
index c145cfd7f..000000000
--- a/src/lib/alloc/locking_allocator/locking_allocator.cpp
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
-* Mlock Allocator
-* (C) 2012,2014 Jack Lloyd
-*
-* Botan is released under the Simplified BSD License (see license.txt)
-*/
-
-#include <botan/locking_allocator.h>
-#include <botan/mem_ops.h>
-#include <algorithm>
-#include <cstdlib>
-#include <string>
-
-#include <sys/mman.h>
-#include <sys/resource.h>
-
-namespace Botan {
-
-namespace {
-
-size_t reset_mlock_limit(size_t max_req)
-   {
-#if defined(RLIMIT_MEMLOCK)
-   struct rlimit limits;
-
-   ::getrlimit(RLIMIT_MEMLOCK, &limits);
-
-   if(limits.rlim_cur < limits.rlim_max)
-      {
-      limits.rlim_cur = limits.rlim_max;
-      ::setrlimit(RLIMIT_MEMLOCK, &limits);
-      ::getrlimit(RLIMIT_MEMLOCK, &limits);
-      }
-
-   return std::min<size_t>(limits.rlim_cur, max_req);
-#endif
-
-   return 0;
-   }
-
-size_t mlock_limit()
-   {
-   /*
-   * Linux defaults to only 64 KiB of mlockable memory per process
-   * (too small) but BSDs offer a small fraction of total RAM (more
-   * than we need). Bound the total mlock size to 512 KiB which is
-   * enough to run the entire test suite without spilling to non-mlock
-   * memory (and thus presumably also enough for many useful
-   * programs), but small enough that we should not cause problems
-   * even if many processes are mlocking on the same machine.
-   */
-   size_t mlock_requested = 512;
-
-   /*
-   * Allow override via env variable
-   */
-   if(const char* env = ::getenv("BOTAN_MLOCK_POOL_SIZE"))
-      {
-      try
-         {
-         const size_t user_req = std::stoul(env, nullptr);
-         mlock_requested = std::min(user_req, mlock_requested);
-         }
-      catch(std::exception&) { /* ignore it */ }
-      }
-
-   return reset_mlock_limit(mlock_requested*1024);
-   }
-
-bool ptr_in_pool(const void* pool_ptr, size_t poolsize,
-                 const void* buf_ptr, size_t bufsize)
-   {
-   const uintptr_t pool = reinterpret_cast<uintptr_t>(pool_ptr);
-   const uintptr_t buf = reinterpret_cast<uintptr_t>(buf_ptr);
-
-   if(buf < pool || buf >= pool + poolsize)
-      return false;
-
-   BOTAN_ASSERT(buf + bufsize <= pool + poolsize,
-                "Pointer does not partially overlap pool");
-
-   return true;
-   }
-
-size_t padding_for_alignment(size_t offset, size_t desired_alignment)
-   {
-   size_t mod = offset % desired_alignment;
-   if(mod == 0)
-      return 0; // already right on
-   return desired_alignment - mod;
-   }
-
-}
-
-void* mlock_allocator::allocate(size_t num_elems, size_t elem_size)
-   {
-   if(!m_pool)
-      return nullptr;
-
-   const size_t n = num_elems * elem_size;
-   const size_t alignment = 16;
-
-   if(n / elem_size != num_elems)
-      return nullptr; // overflow!
-
-   if(n > m_poolsize)
-      return nullptr;
-   if(n < BOTAN_MLOCK_ALLOCATOR_MIN_ALLOCATION || n > BOTAN_MLOCK_ALLOCATOR_MAX_ALLOCATION)
-      return nullptr;
-
-   std::lock_guard<std::mutex> lock(m_mutex);
-
-   auto best_fit = m_freelist.end();
-
-   for(auto i = m_freelist.begin(); i != m_freelist.end(); ++i)
-      {
-      // If we have a perfect fit, use it immediately
-      if(i->second == n && (i->first % alignment) == 0)
-         {
-         const size_t offset = i->first;
-         m_freelist.erase(i);
-         clear_mem(m_pool + offset, n);
-
-         BOTAN_ASSERT((reinterpret_cast<uintptr_t>(m_pool) + offset) % alignment == 0,
-                      "Returning correctly aligned pointer");
-
-         return m_pool + offset;
-         }
-
-      if((i->second >= (n + padding_for_alignment(i->first, alignment)) &&
-          ((best_fit == m_freelist.end()) || (best_fit->second > i->second))))
-         {
-         best_fit = i;
-         }
-      }
-
-   if(best_fit != m_freelist.end())
-      {
-      const size_t offset = best_fit->first;
-
-      const size_t alignment_padding = padding_for_alignment(offset, alignment);
-
-      best_fit->first += n + alignment_padding;
-      best_fit->second -= n + alignment_padding;
-
-      // Need to realign, split the block
-      if(alignment_padding)
-         {
-         /*
-         If we used the entire block except for small piece used for
-         alignment at the beginning, so just update the entry already
-         in place (as it is in the correct location), rather than
-         deleting the empty range and inserting the new one in the
-         same location.
-         */
-         if(best_fit->second == 0)
-            {
-            best_fit->first = offset;
-            best_fit->second = alignment_padding;
-            }
-         else
-            m_freelist.insert(best_fit, std::make_pair(offset, alignment_padding));
-         }
-
-      clear_mem(m_pool + offset + alignment_padding, n);
-
-      BOTAN_ASSERT((reinterpret_cast<uintptr_t>(m_pool) + offset + alignment_padding) % alignment == 0,
-                   "Returning correctly aligned pointer");
-
-      return m_pool + offset + alignment_padding;
-      }
-
-   return nullptr;
-   }
-
-bool mlock_allocator::deallocate(void* p, size_t num_elems, size_t elem_size)
-   {
-   if(!m_pool)
-      return false;
-
-   /*
-   We do not have to zero the memory here, as
-   secure_allocator::deallocate does that for all arguments before
-   invoking the deallocator (us or delete[])
-   */
-
-   size_t n = num_elems * elem_size;
-
-   /*
-   We return nullptr in allocate if there was an overflow, so we
-   should never ever see an overflow in a deallocation.
-   */
-   BOTAN_ASSERT(n / elem_size == num_elems,
-                "No overflow in deallocation");
-
-   if(!ptr_in_pool(m_pool, m_poolsize, p, n))
-      return false;
-
-   std::lock_guard<std::mutex> lock(m_mutex);
-
-   const size_t start = static_cast<byte*>(p) - m_pool;
-
-   auto comp = [](std::pair<size_t, size_t> x, std::pair<size_t, size_t> y){ return x.first < y.first; };
-
-   auto i = std::lower_bound(m_freelist.begin(), m_freelist.end(),
-                             std::make_pair(start, 0), comp);
-
-   // try to merge with later block
-   if(i != m_freelist.end() && start + n == i->first)
-      {
-      i->first = start;
-      i->second += n;
-      n = 0;
-      }
-
-   // try to merge with previous block
-   if(i != m_freelist.begin())
-      {
-      auto prev = std::prev(i);
-
-      if(prev->first + prev->second == start)
-         {
-         if(n)
-            {
-            prev->second += n;
-            n = 0;
-            }
-         else
-            {
-            // merge adjoining
-            prev->second += i->second;
-            m_freelist.erase(i);
-            }
-         }
-      }
-
-   if(n != 0) // no merge possible?
-      m_freelist.insert(i, std::make_pair(start, n));
-
-   return true;
-   }
-
-mlock_allocator::mlock_allocator() :
-   m_poolsize(mlock_limit()),
-   m_pool(nullptr)
-   {
-#if !defined(MAP_NOCORE)
-   #define MAP_NOCORE 0
-#endif
-
-#if !defined(MAP_ANONYMOUS)
-   #define MAP_ANONYMOUS MAP_ANON
-#endif
-
-   if(m_poolsize)
-      {
-      m_pool = static_cast<byte*>(
-         ::mmap(
-            nullptr, m_poolsize,
-            PROT_READ | PROT_WRITE,
-            MAP_ANONYMOUS | MAP_SHARED | MAP_NOCORE,
-            -1, 0));
-
-      if(m_pool == static_cast<byte*>(MAP_FAILED))
-         {
-         m_pool = nullptr;
-         throw std::runtime_error("Failed to mmap locking_allocator pool");
-         }
-
-      clear_mem(m_pool, m_poolsize);
-
-      if(::mlock(m_pool, m_poolsize) != 0)
-         {
-         ::munmap(m_pool, m_poolsize);
-         m_pool = nullptr;
-         throw std::runtime_error("Could not mlock " + std::to_string(m_poolsize) + " bytes");
-         }
-
-#if defined(MADV_DONTDUMP)
-      ::madvise(m_pool, m_poolsize, MADV_DONTDUMP);
-#endif
-
-      m_freelist.push_back(std::make_pair(0, m_poolsize));
-      }
-   }
-
-mlock_allocator::~mlock_allocator()
-   {
-   if(m_pool)
-      {
-      clear_mem(m_pool, m_poolsize);
-      ::munlock(m_pool, m_poolsize);
-      ::munmap(m_pool, m_poolsize);
-      m_pool = nullptr;
-      }
-   }
-
-mlock_allocator& mlock_allocator::instance()
-   {
-   static mlock_allocator mlock;
-   return mlock;
-   }
-
-}
diff --git a/src/lib/alloc/locking_allocator/locking_allocator.h b/src/lib/alloc/locking_allocator/locking_allocator.h
deleted file mode 100644
index 2aca2dfa9..000000000
--- a/src/lib/alloc/locking_allocator/locking_allocator.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
-* Mlock Allocator
-* (C) 2012 Jack Lloyd
-*
-* Botan is released under the Simplified BSD License (see license.txt)
-*/
-
-#ifndef BOTAN_MLOCK_ALLOCATOR_H__
-#define BOTAN_MLOCK_ALLOCATOR_H__
-
-#include <botan/types.h>
-#include <mutex>
-#include <vector>
-
-namespace Botan {
-
-class BOTAN_DLL mlock_allocator
-   {
-   public:
-      static mlock_allocator& instance();
-
-      void* allocate(size_t num_elems, size_t elem_size);
-
-      bool deallocate(void* p, size_t num_elems, size_t elem_size);
-
-      mlock_allocator(const mlock_allocator&) = delete;
-
-      mlock_allocator& operator=(const mlock_allocator&) = delete;
-
-   private:
-      mlock_allocator();
-
-      ~mlock_allocator();
-
-      const size_t m_poolsize;
-
-      std::mutex m_mutex;
-      std::vector<std::pair<size_t, size_t>> m_freelist;
-      byte* m_pool;
-   };
-
-}
-
-#endif
diff --git a/src/lib/alloc/secmem.h b/src/lib/alloc/secmem.h
deleted file mode 100644
index 63d4e5296..000000000
--- a/src/lib/alloc/secmem.h
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
-* Secure Memory Buffers
-* (C) 1999-2007,2012 Jack Lloyd
-*
-* Botan is released under the Simplified BSD License (see license.txt)
-*/
-
-#ifndef BOTAN_SECURE_MEMORY_BUFFERS_H__
-#define BOTAN_SECURE_MEMORY_BUFFERS_H__
-
-#include <botan/types.h>
-#include <botan/mem_ops.h>
-#include <algorithm>
-#include <deque>
-
-#if defined(BOTAN_HAS_LOCKING_ALLOCATOR)
-  #include <botan/locking_allocator.h>
-#endif
-
-namespace Botan {
-
-template<typename T>
-class secure_allocator
-   {
-   public:
-      typedef T value_type;
-
-      typedef T* pointer;
-      typedef const T* const_pointer;
-
-      typedef T& reference;
-      typedef const T& const_reference;
-
-      typedef std::size_t size_type;
-      typedef std::ptrdiff_t difference_type;
-
-      secure_allocator() BOTAN_NOEXCEPT {}
-
-      template<typename U>
-      secure_allocator(const secure_allocator<U>&) BOTAN_NOEXCEPT {}
-
-      ~secure_allocator() BOTAN_NOEXCEPT {}
-
-      pointer address(reference x) const BOTAN_NOEXCEPT
-         { return std::addressof(x); }
-
-      const_pointer address(const_reference x) const BOTAN_NOEXCEPT
-         { return std::addressof(x); }
-
-      pointer allocate(size_type n, const void* = 0)
-         {
-#if defined(BOTAN_HAS_LOCKING_ALLOCATOR)
-         if(pointer p = static_cast<pointer>(mlock_allocator::instance().allocate(n, sizeof(T))))
-            return p;
-#endif
-
-         pointer p = new T[n];
-         clear_mem(p, n);
-         return p;
-         }
-
-      void deallocate(pointer p, size_type n)
-         {
-         zero_mem(p, n);
-
-#if defined(BOTAN_HAS_LOCKING_ALLOCATOR)
-         if(mlock_allocator::instance().deallocate(p, n, sizeof(T)))
-            return;
-#endif
-
-         delete [] p;
-         }
-
-      size_type max_size() const BOTAN_NOEXCEPT
-         {
-         return static_cast<size_type>(-1) / sizeof(T);
-         }
-
-      template<typename U, typename... Args>
-      void construct(U* p, Args&&... args)
-         {
-         ::new(static_cast<void*>(p)) U(std::forward<Args>(args)...);
-         }
-
-      template<typename U> void destroy(U* p) { p->~U(); }
-   };
-
-template<typename T, typename U> inline bool
-operator==(const secure_allocator<T>&, const secure_allocator<U>&)
-   { return true; }
-
-template<typename T, typename U> inline bool
-operator!=(const secure_allocator<T>&, const secure_allocator<U>&)
-   { return false; }
-
-template<typename T> using secure_vector = std::vector<T, secure_allocator<T>>;
-template<typename T> using secure_deque = std::deque<T, secure_allocator<T>>;
-
-template<typename T>
-std::vector<T> unlock(const secure_vector<T>& in)
-   {
-   std::vector<T> out(in.size());
-   copy_mem(out.data(), in.data(), in.size());
-   return out;
-   }
-
-template<typename T, typename Alloc>
-size_t buffer_insert(std::vector<T, Alloc>& buf,
-                     size_t buf_offset,
-                     const T input[],
-                     size_t input_length)
-   {
-   const size_t to_copy = std::min(input_length, buf.size() - buf_offset);
-   if (to_copy > 0)
-      {
-      copy_mem(&buf[buf_offset], input, to_copy);
-      }
-   return to_copy;
-   }
-
-template<typename T, typename Alloc, typename Alloc2>
-size_t buffer_insert(std::vector<T, Alloc>& buf,
-                     size_t buf_offset,
-                     const std::vector<T, Alloc2>& input)
-   {
-   const size_t to_copy = std::min(input.size(), buf.size() - buf_offset);
-   if (to_copy > 0)
-      {
-      copy_mem(&buf[buf_offset], input.data(), to_copy);
-      }
-   return to_copy;
-   }
-
-template<typename T, typename Alloc, typename Alloc2>
-std::vector<T, Alloc>&
-operator+=(std::vector<T, Alloc>& out,
-           const std::vector<T, Alloc2>& in)
-   {
-   const size_t copy_offset = out.size();
-   out.resize(out.size() + in.size());
-   if (in.size() > 0)
-      {
-      copy_mem(&out[copy_offset], in.data(), in.size());
-      }
-   return out;
-   }
-
-template<typename T, typename Alloc>
-std::vector<T, Alloc>& operator+=(std::vector<T, Alloc>& out, T in)
-   {
-   out.push_back(in);
-   return out;
-   }
-
-template<typename T, typename Alloc, typename L>
-std::vector<T, Alloc>& operator+=(std::vector<T, Alloc>& out,
-                                  const std::pair<const T*, L>& in)
-   {
-   const size_t copy_offset = out.size();
-   out.resize(out.size() + in.second);
-   if (in.second > 0)
-      {
-      copy_mem(&out[copy_offset], in.first, in.second);
-      }
-   return out;
-   }
-
-template<typename T, typename Alloc, typename L>
-std::vector<T, Alloc>& operator+=(std::vector<T, Alloc>& out,
-                                  const std::pair<T*, L>& in)
-   {
-   const size_t copy_offset = out.size();
-   out.resize(out.size() + in.second);
-   if (in.second > 0)
-      {
-      copy_mem(&out[copy_offset], in.first, in.second);
-      }
-   return out;
-   }
-
-/**
-* Zeroise the values; length remains unchanged
-* @param vec the vector to zeroise
-*/
-template<typename T, typename Alloc>
-void zeroise(std::vector<T, Alloc>& vec)
-   {
-   clear_mem(vec.data(), vec.size());
-   }
-
-/**
-* Zeroise the values then free the memory
-* @param vec the vector to zeroise and free
-*/
-template<typename T, typename Alloc>
-void zap(std::vector<T, Alloc>& vec)
-   {
-   zeroise(vec);
-   vec.clear();
-   vec.shrink_to_fit();
-   }
-
-}
-
-#endif
diff --git a/src/lib/base/info.txt b/src/lib/base/info.txt
index e09351596..19eee6608 100644
--- a/src/lib/base/info.txt
+++ b/src/lib/base/info.txt
@@ -4,6 +4,7 @@ buf_comp.h
 init.h
 key_spec.h
 lookup.h
+secmem.h
 scan_name.h
 sym_algo.h
 symkey.h
@@ -17,7 +18,6 @@ algo_registry.h
 define TRANSFORM 20131209
 
 <requires>
-alloc
 block
 hash
 hex
diff --git a/src/lib/base/secmem.h b/src/lib/base/secmem.h
new file mode 100644
index 000000000..63d4e5296
--- /dev/null
+++ b/src/lib/base/secmem.h
@@ -0,0 +1,205 @@
+/*
+* Secure Memory Buffers
+* (C) 1999-2007,2012 Jack Lloyd
+*
+* Botan is released under the Simplified BSD License (see license.txt)
+*/
+
+#ifndef BOTAN_SECURE_MEMORY_BUFFERS_H__
+#define BOTAN_SECURE_MEMORY_BUFFERS_H__
+
+#include <botan/types.h>
+#include <botan/mem_ops.h>
+#include <algorithm>
+#include <deque>
+
+#if defined(BOTAN_HAS_LOCKING_ALLOCATOR)
+  #include <botan/locking_allocator.h>
+#endif
+
+namespace Botan {
+
+template<typename T>
+class secure_allocator
+   {
+   public:
+      typedef T value_type;
+
+      typedef T* pointer;
+      typedef const T* const_pointer;
+
+      typedef T& reference;
+      typedef const T& const_reference;
+
+      typedef std::size_t size_type;
+      typedef std::ptrdiff_t difference_type;
+
+      secure_allocator() BOTAN_NOEXCEPT {}
+
+      template<typename U>
+      secure_allocator(const secure_allocator<U>&) BOTAN_NOEXCEPT {}
+
+      ~secure_allocator() BOTAN_NOEXCEPT {}
+
+      pointer address(reference x) const BOTAN_NOEXCEPT
+         { return std::addressof(x); }
+
+      const_pointer address(const_reference x) const BOTAN_NOEXCEPT
+         { return std::addressof(x); }
+
+      pointer allocate(size_type n, const void* = 0)
+         {
+#if defined(BOTAN_HAS_LOCKING_ALLOCATOR)
+         if(pointer p = static_cast<pointer>(mlock_allocator::instance().allocate(n, sizeof(T))))
+            return p;
+#endif
+
+         pointer p = new T[n];
+         clear_mem(p, n);
+         return p;
+         }
+
+      void deallocate(pointer p, size_type n)
+         {
+         zero_mem(p, n);
+
+#if defined(BOTAN_HAS_LOCKING_ALLOCATOR)
+         if(mlock_allocator::instance().deallocate(p, n, sizeof(T)))
+            return;
+#endif
+
+         delete [] p;
+         }
+
+      size_type max_size() const BOTAN_NOEXCEPT
+         {
+         return static_cast<size_type>(-1) / sizeof(T);
+         }
+
+      template<typename U, typename... Args>
+      void construct(U* p, Args&&... args)
+         {
+         ::new(static_cast<void*>(p)) U(std::forward<Args>(args)...);
+         }
+
+      template<typename U> void destroy(U* p) { p->~U(); }
+   };
+
+template<typename T, typename U> inline bool
+operator==(const secure_allocator<T>&, const secure_allocator<U>&)
+   { return true; }
+
+template<typename T, typename U> inline bool
+operator!=(const secure_allocator<T>&, const secure_allocator<U>&)
+   { return false; }
+
+template<typename T> using secure_vector = std::vector<T, secure_allocator<T>>;
+template<typename T> using secure_deque = std::deque<T, secure_allocator<T>>;
+
+template<typename T>
+std::vector<T> unlock(const secure_vector<T>& in)
+   {
+   std::vector<T> out(in.size());
+   copy_mem(out.data(), in.data(), in.size());
+   return out;
+   }
+
+template<typename T, typename Alloc>
+size_t buffer_insert(std::vector<T, Alloc>& buf,
+                     size_t buf_offset,
+                     const T input[],
+                     size_t input_length)
+   {
+   const size_t to_copy = std::min(input_length, buf.size() - buf_offset);
+   if (to_copy > 0)
+      {
+      copy_mem(&buf[buf_offset], input, to_copy);
+      }
+   return to_copy;
+   }
+
+template<typename T, typename Alloc, typename Alloc2>
+size_t buffer_insert(std::vector<T, Alloc>& buf,
+                     size_t buf_offset,
+                     const std::vector<T, Alloc2>& input)
+   {
+   const size_t to_copy = std::min(input.size(), buf.size() - buf_offset);
+   if (to_copy > 0)
+      {
+      copy_mem(&buf[buf_offset], input.data(), to_copy);
+      }
+   return to_copy;
+   }
+
+template<typename T, typename Alloc, typename Alloc2>
+std::vector<T, Alloc>&
+operator+=(std::vector<T, Alloc>& out,
+           const std::vector<T, Alloc2>& in)
+   {
+   const size_t copy_offset = out.size();
+   out.resize(out.size() + in.size());
+   if (in.size() > 0)
+      {
+      copy_mem(&out[copy_offset], in.data(), in.size());
+      }
+   return out;
+   }
+
+template<typename T, typename Alloc>
+std::vector<T, Alloc>& operator+=(std::vector<T, Alloc>& out, T in)
+   {
+   out.push_back(in);
+   return out;
+   }
+
+template<typename T, typename Alloc, typename L>
+std::vector<T, Alloc>& operator+=(std::vector<T, Alloc>& out,
+                                  const std::pair<const T*, L>& in)
+   {
+   const size_t copy_offset = out.size();
+   out.resize(out.size() + in.second);
+   if (in.second > 0)
+      {
+      copy_mem(&out[copy_offset], in.first, in.second);
+      }
+   return out;
+   }
+
+template<typename T, typename Alloc, typename L>
+std::vector<T, Alloc>& operator+=(std::vector<T, Alloc>& out,
+                                  const std::pair<T*, L>& in)
+   {
+   const size_t copy_offset = out.size();
+   out.resize(out.size() + in.second);
+   if (in.second > 0)
+      {
+      copy_mem(&out[copy_offset], in.first, in.second);
+      }
+   return out;
+   }
+
+/**
+* Zeroise the values; length remains unchanged
+* @param vec the vector to zeroise
+*/
+template<typename T, typename Alloc>
+void zeroise(std::vector<T, Alloc>& vec)
+   {
+   clear_mem(vec.data(), vec.size());
+   }
+
+/**
+* Zeroise the values then free the memory
+* @param vec the vector to zeroise and free
+*/
+template<typename T, typename Alloc>
+void zap(std::vector<T, Alloc>& vec)
+   {
+   zeroise(vec);
+   vec.clear();
+   vec.shrink_to_fit();
+   }
+
+}
+
+#endif
diff --git a/src/lib/entropy/proc_walk/info.txt b/src/lib/entropy/proc_walk/info.txt
index c713d3b8e..8c3947dc6 100644
--- a/src/lib/entropy/proc_walk/info.txt
+++ b/src/lib/entropy/proc_walk/info.txt
@@ -24,7 +24,3 @@ openbsd
 qnx
 solaris
 </os>
-
-<requires>
-alloc
-</requires>
diff --git a/src/lib/hash/keccak/info.txt b/src/lib/hash/keccak/info.txt
index ecdfba19c..6fcd286a3 100644
--- a/src/lib/hash/keccak/info.txt
+++ b/src/lib/hash/keccak/info.txt
@@ -1,5 +1 @@
 define KECCAK 20131128
-
-<requires>
-alloc
-</requires>
diff --git a/src/lib/math/bigint/info.txt b/src/lib/math/bigint/info.txt
index b5dabb7bc..53edcb1f1 100644
--- a/src/lib/math/bigint/info.txt
+++ b/src/lib/math/bigint/info.txt
@@ -18,7 +18,6 @@ divide.cpp
 </source>
 
 <requires>
-alloc
 mp
 hex
 rng
diff --git a/src/lib/pk_pad/info.txt b/src/lib/pk_pad/info.txt
index d77e1defd..cc3a3fb3b 100644
--- a/src/lib/pk_pad/info.txt
+++ b/src/lib/pk_pad/info.txt
@@ -3,7 +3,6 @@ define PK_PADDING 20131128
 load_on auto
 
 <requires>
-alloc
 rng
 </requires>
diff --git a/src/lib/pubkey/ecc_key/info.txt b/src/lib/pubkey/ecc_key/info.txt
index 6d6d5f0e9..fc4d4c91c 100644
--- a/src/lib/pubkey/ecc_key/info.txt
+++ b/src/lib/pubkey/ecc_key/info.txt
@@ -1,7 +1,6 @@
 define ECC_PUBLIC_KEY_CRYPTO 20131128
 
 <requires>
-alloc
 asn1
 bigint
 ec_gfp
diff --git a/src/lib/pubkey/ecdh/info.txt b/src/lib/pubkey/ecdh/info.txt
index 32d944728..cfff0b304 100644
--- a/src/lib/pubkey/ecdh/info.txt
+++ b/src/lib/pubkey/ecdh/info.txt
@@ -1,7 +1,6 @@
 define ECDH 20131128
 
 <requires>
-alloc
 asn1
 ec_group
 ecc_key
diff --git a/src/lib/pubkey/gost_3410/info.txt b/src/lib/pubkey/gost_3410/info.txt
index 611449ebc..eb2255ad2 100644
--- a/src/lib/pubkey/gost_3410/info.txt
+++ b/src/lib/pubkey/gost_3410/info.txt
@@ -3,7 +3,6 @@ define GOST_34_10_2001 20131128
 load_on auto
 
 <requires>
-alloc
 asn1
 ec_group
 ecc_key
diff --git a/src/lib/pubkey/info.txt b/src/lib/pubkey/info.txt
index ff28f2689..77ae820c7 100644
--- a/src/lib/pubkey/info.txt
+++ b/src/lib/pubkey/info.txt
@@ -28,7 +28,6 @@ pk_ops_impl.h
 </header:internal>
 
 <requires>
-alloc
 asn1
 bigint
 kdf
diff --git a/src/lib/utils/datastor/info.txt b/src/lib/utils/datastor/info.txt
index b91fe5082..e69de29bb 100644
--- a/src/lib/utils/datastor/info.txt
+++ b/src/lib/utils/datastor/info.txt
@@ -1,3 +0,0 @@
-<requires>
-alloc
-</requires>
diff --git a/src/lib/utils/locking_allocator/info.txt b/src/lib/utils/locking_allocator/info.txt
new file mode 100644
index 000000000..d3b5e86f8
--- /dev/null
+++ b/src/lib/utils/locking_allocator/info.txt
@@ -0,0 +1,10 @@
+define LOCKING_ALLOCATOR 20131128
+
+<os>
+android
+linux
+freebsd
+netbsd
+openbsd
+solaris
+</os>
diff --git a/src/lib/utils/locking_allocator/locking_allocator.cpp b/src/lib/utils/locking_allocator/locking_allocator.cpp
new file mode 100644
index 000000000..c145cfd7f
--- /dev/null
+++ b/src/lib/utils/locking_allocator/locking_allocator.cpp
@@ -0,0 +1,304 @@
+/*
+* Mlock Allocator
+* (C) 2012,2014 Jack Lloyd
+*
+* Botan is released under the Simplified BSD License (see license.txt)
+*/
+
+#include <botan/locking_allocator.h>
+#include <botan/mem_ops.h>
+#include <algorithm>
+#include <cstdlib>
+#include <string>
+
+#include <sys/mman.h>
+#include <sys/resource.h>
+
+namespace Botan {
+
+namespace {
+
+size_t reset_mlock_limit(size_t max_req)
+   {
+#if defined(RLIMIT_MEMLOCK)
+   struct rlimit limits;
+
+   ::getrlimit(RLIMIT_MEMLOCK, &limits);
+
+   if(limits.rlim_cur < limits.rlim_max)
+      {
+      limits.rlim_cur = limits.rlim_max;
+      ::setrlimit(RLIMIT_MEMLOCK, &limits);
+      ::getrlimit(RLIMIT_MEMLOCK, &limits);
+      }
+
+   return std::min<size_t>(limits.rlim_cur, max_req);
+#endif
+
+   return 0;
+   }
+
+size_t mlock_limit()
+   {
+   /*
+   * Linux defaults to only 64 KiB of mlockable memory per process
+   * (too small) but BSDs offer a small fraction of total RAM (more
+   * than we need). Bound the total mlock size to 512 KiB which is
+   * enough to run the entire test suite without spilling to non-mlock
+   * memory (and thus presumably also enough for many useful
+   * programs), but small enough that we should not cause problems
+   * even if many processes are mlocking on the same machine.
+   */
+   size_t mlock_requested = 512;
+
+   /*
+   * Allow override via env variable
+   */
+   if(const char* env = ::getenv("BOTAN_MLOCK_POOL_SIZE"))
+      {
+      try
+         {
+         const size_t user_req = std::stoul(env, nullptr);
+         mlock_requested = std::min(user_req, mlock_requested);
+         }
+      catch(std::exception&) { /* ignore it */ }
+      }
+
+   return reset_mlock_limit(mlock_requested*1024);
+   }
+
+bool ptr_in_pool(const void* pool_ptr, size_t poolsize,
+                 const void* buf_ptr, size_t bufsize)
+   {
+   const uintptr_t pool = reinterpret_cast<uintptr_t>(pool_ptr);
+   const uintptr_t buf = reinterpret_cast<uintptr_t>(buf_ptr);
+
+   if(buf < pool || buf >= pool + poolsize)
+      return false;
+
+   BOTAN_ASSERT(buf + bufsize <= pool + poolsize,
+                "Pointer does not partially overlap pool");
+
+   return true;
+   }
+
+size_t padding_for_alignment(size_t offset, size_t desired_alignment)
+   {
+   size_t mod = offset % desired_alignment;
+   if(mod == 0)
+      return 0; // already right on
+   return desired_alignment - mod;
+   }
+
+}
+
+void* mlock_allocator::allocate(size_t num_elems, size_t elem_size)
+   {
+   if(!m_pool)
+      return nullptr;
+
+   const size_t n = num_elems * elem_size;
+   const size_t alignment = 16;
+
+   if(n / elem_size != num_elems)
+      return nullptr; // overflow!
+
+   if(n > m_poolsize)
+      return nullptr;
+   if(n < BOTAN_MLOCK_ALLOCATOR_MIN_ALLOCATION || n > BOTAN_MLOCK_ALLOCATOR_MAX_ALLOCATION)
+      return nullptr;
+
+   std::lock_guard<std::mutex> lock(m_mutex);
+
+   auto best_fit = m_freelist.end();
+
+   for(auto i = m_freelist.begin(); i != m_freelist.end(); ++i)
+      {
+      // If we have a perfect fit, use it immediately
+      if(i->second == n && (i->first % alignment) == 0)
+         {
+         const size_t offset = i->first;
+         m_freelist.erase(i);
+         clear_mem(m_pool + offset, n);
+
+         BOTAN_ASSERT((reinterpret_cast<uintptr_t>(m_pool) + offset) % alignment == 0,
+                      "Returning correctly aligned pointer");
+
+         return m_pool + offset;
+         }
+
+      if((i->second >= (n + padding_for_alignment(i->first, alignment)) &&
+          ((best_fit == m_freelist.end()) || (best_fit->second > i->second))))
+         {
+         best_fit = i;
+         }
+      }
+
+   if(best_fit != m_freelist.end())
+      {
+      const size_t offset = best_fit->first;
+
+      const size_t alignment_padding = padding_for_alignment(offset, alignment);
+
+      best_fit->first += n + alignment_padding;
+      best_fit->second -= n + alignment_padding;
+
+      // Need to realign, split the block
+      if(alignment_padding)
+         {
+         /*
+         If we used the entire block except for small piece used for
+         alignment at the beginning, so just update the entry already
+         in place (as it is in the correct location), rather than
+         deleting the empty range and inserting the new one in the
+         same location.
+         */
+         if(best_fit->second == 0)
+            {
+            best_fit->first = offset;
+            best_fit->second = alignment_padding;
+            }
+         else
+            m_freelist.insert(best_fit, std::make_pair(offset, alignment_padding));
+         }
+
+      clear_mem(m_pool + offset + alignment_padding, n);
+
+      BOTAN_ASSERT((reinterpret_cast<uintptr_t>(m_pool) + offset + alignment_padding) % alignment == 0,
+                   "Returning correctly aligned pointer");
+
+      return m_pool + offset + alignment_padding;
+      }
+
+   return nullptr;
+   }
+
+bool mlock_allocator::deallocate(void* p, size_t num_elems, size_t elem_size)
+   {
+   if(!m_pool)
+      return false;
+
+   /*
+   We do not have to zero the memory here, as
+   secure_allocator::deallocate does that for all arguments before
+   invoking the deallocator (us or delete[])
+   */
+
+   size_t n = num_elems * elem_size;
+
+   /*
+   We return nullptr in allocate if there was an overflow, so we
+   should never ever see an overflow in a deallocation.
+   */
+   BOTAN_ASSERT(n / elem_size == num_elems,
+                "No overflow in deallocation");
+
+   if(!ptr_in_pool(m_pool, m_poolsize, p, n))
+      return false;
+
+   std::lock_guard<std::mutex> lock(m_mutex);
+
+   const size_t start = static_cast<byte*>(p) - m_pool;
+
+   auto comp = [](std::pair<size_t, size_t> x, std::pair<size_t, size_t> y){ return x.first < y.first; };
+
+   auto i = std::lower_bound(m_freelist.begin(), m_freelist.end(),
+                             std::make_pair(start, 0), comp);
+
+   // try to merge with later block
+   if(i != m_freelist.end() && start + n == i->first)
+      {
+      i->first = start;
+      i->second += n;
+      n = 0;
+      }
+
+   // try to merge with previous block
+   if(i != m_freelist.begin())
+      {
+      auto prev = std::prev(i);
+
+      if(prev->first + prev->second == start)
+         {
+         if(n)
+            {
+            prev->second += n;
+            n = 0;
+            }
+         else
+            {
+            // merge adjoining
+            prev->second += i->second;
+            m_freelist.erase(i);
+            }
+         }
+      }
+
+   if(n != 0) // no merge possible?
+      m_freelist.insert(i, std::make_pair(start, n));
+
+   return true;
+   }
+
+mlock_allocator::mlock_allocator() :
+   m_poolsize(mlock_limit()),
+   m_pool(nullptr)
+   {
+#if !defined(MAP_NOCORE)
+   #define MAP_NOCORE 0
+#endif
+
+#if !defined(MAP_ANONYMOUS)
+   #define MAP_ANONYMOUS MAP_ANON
+#endif
+
+   if(m_poolsize)
+      {
+      m_pool = static_cast<byte*>(
+         ::mmap(
+            nullptr, m_poolsize,
+            PROT_READ | PROT_WRITE,
+            MAP_ANONYMOUS | MAP_SHARED | MAP_NOCORE,
+            -1, 0));
+
+      if(m_pool == static_cast<byte*>(MAP_FAILED))
+         {
+         m_pool = nullptr;
+         throw std::runtime_error("Failed to mmap locking_allocator pool");
+         }
+
+      clear_mem(m_pool, m_poolsize);
+
+      if(::mlock(m_pool, m_poolsize) != 0)
+         {
+         ::munmap(m_pool, m_poolsize);
+         m_pool = nullptr;
+         throw std::runtime_error("Could not mlock " + std::to_string(m_poolsize) + " bytes");
+         }
+
+#if defined(MADV_DONTDUMP)
+      ::madvise(m_pool, m_poolsize, MADV_DONTDUMP);
+#endif
+
+      m_freelist.push_back(std::make_pair(0, m_poolsize));
+      }
+   }
+
+mlock_allocator::~mlock_allocator()
+   {
+   if(m_pool)
+      {
+      clear_mem(m_pool, m_poolsize);
+      ::munlock(m_pool, m_poolsize);
+      ::munmap(m_pool, m_poolsize);
+      m_pool = nullptr;
+      }
+   }
+
+mlock_allocator& mlock_allocator::instance()
+   {
+   static mlock_allocator mlock;
+   return mlock;
+   }
+
+}
diff --git a/src/lib/utils/locking_allocator/locking_allocator.h b/src/lib/utils/locking_allocator/locking_allocator.h
new file mode 100644
index 000000000..2aca2dfa9
--- /dev/null
+++ b/src/lib/utils/locking_allocator/locking_allocator.h
@@ -0,0 +1,44 @@
+/*
+* Mlock Allocator
+* (C) 2012 Jack Lloyd
+*
+* Botan is released under the Simplified BSD License (see license.txt)
+*/
+
+#ifndef BOTAN_MLOCK_ALLOCATOR_H__
+#define BOTAN_MLOCK_ALLOCATOR_H__
+
+#include <botan/types.h>
+#include <mutex>
+#include <vector>
+
+namespace Botan {
+
+class BOTAN_DLL mlock_allocator
+   {
+   public:
+      static mlock_allocator& instance();
+
+      void* allocate(size_t num_elems, size_t elem_size);
+
+      bool deallocate(void* p, size_t num_elems, size_t elem_size);
+
+      mlock_allocator(const mlock_allocator&) = delete;
+
+      mlock_allocator& operator=(const mlock_allocator&) = delete;
+
+   private:
+      mlock_allocator();
+
+      ~mlock_allocator();
+
+      const size_t m_poolsize;
+
+      std::mutex m_mutex;
+      std::vector<std::pair<size_t, size_t>> m_freelist;
+      byte* m_pool;
+   };
+
+}
+
+#endif
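
A minimal usage sketch of the API relocated by this patch. The names secure_vector, byte, unlock, zeroise and zap all come from the secmem.h shown above; the surrounding "key material" program is purely illustrative and not part of the commit.

// Illustrative only: typical use of the secmem.h helpers moved in this patch.
#include <botan/secmem.h>
#include <vector>
#include <cstdio>

int main()
   {
   // Storage comes from secure_allocator: from the mlock'd pool when the
   // locking_allocator module is built in, otherwise from new[] (zeroed).
   Botan::secure_vector<Botan::byte> key(32);

   for(size_t i = 0; i != key.size(); ++i)
      key[i] = static_cast<Botan::byte>(i);

   // unlock() copies the contents into a plain std::vector once the data
   // no longer needs to live in locked memory.
   std::vector<Botan::byte> copy = Botan::unlock(key);

   Botan::zeroise(key); // overwrite the contents, keep the length
   Botan::zap(key);     // overwrite, then release the storage

   std::printf("copied %zu bytes out of locked memory\n", copy.size());
   return 0;
   }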
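The pool allocator above carves requests out of free blocks only after padding them up to a 16-byte boundary. The sketch below restates that padding rule as a standalone function with a small worked example; it mirrors padding_for_alignment() from locking_allocator.cpp but is a demonstration, not library code.

// Illustrative only: the alignment-padding rule used by the mlock pool.
#include <cstddef>
#include <cstdio>

// How many bytes must be skipped so that (offset + padding) % alignment == 0.
static size_t padding_for_alignment(size_t offset, size_t alignment)
   {
   const size_t mod = offset % alignment;
   return (mod == 0) ? 0 : alignment - mod;
   }

int main()
   {
   // A free block starting at offset 24 needs 8 bytes of padding for a
   // 16-byte alignment, so the allocation carved from it begins at 32.
   const size_t offset = 24, alignment = 16;
   const size_t pad = padding_for_alignment(offset, alignment);
   std::printf("offset %zu -> pad %zu -> aligned start %zu\n",
               offset, pad, offset + pad);
   return 0;
   }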