author     Jack Lloyd <[email protected]>   2018-03-09 07:16:52 -0500
committer  Jack Lloyd <[email protected]>   2018-03-09 07:16:52 -0500
commit     4b8cd92d78e7c0cdeca111d528db7d19918c34b3 (patch)
tree       8aa0668eca591591039f47c9557c3a595bbd2aad /src/lib/utils/locking_allocator
parent     869909163d08144e69c7f2b11e8053d68a07d390 (diff)
Split out the memory pool logic
Make a clear separation between the OS-specific code that obtains the pool, the singleton mlock allocator, and the general allocator logic.
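As a reading aid, here is a minimal sketch of the shape the allocator takes after this split. It only mirrors the hunks in this diff (member names and call sites are taken from them); the OS helpers and Memory_Pool are reduced to declarations, since their definitions live in os_utils and the new mem_pool module, not in this commit.

// Sketch only: declarations mirroring the structure this commit introduces.
#include <cstddef>
#include <cstdint>
#include <memory>

// 1) OS-specific code: obtains and releases the mlock'ed region
//    (signatures inferred from the call sites in the diff below)
namespace OS {
size_t get_memory_locking_limit();
void* allocate_locked_pages(size_t n);
void free_locked_pages(void* p, size_t n);
size_t system_page_size();
}

// 2) General allocator logic: the free-list and alignment handling
//    previously inlined here now live in the new mem_pool module
class Memory_Pool;

// 3) Singleton glue: owns the locked pages and delegates to the pool
class mlock_allocator final
   {
   public:
      void* allocate(size_t num_elems, size_t elem_size);
      bool deallocate(void* p, size_t num_elems, size_t elem_size) noexcept;

   private:
      std::unique_ptr<Memory_Pool> m_pool;
      uint8_t* m_locked_pages = nullptr;
      size_t m_locked_pages_size = 0;
   };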
Diffstat (limited to 'src/lib/utils/locking_allocator')
-rw-r--r--  src/lib/utils/locking_allocator/info.txt              |   4
-rw-r--r--  src/lib/utils/locking_allocator/locking_allocator.cpp | 171
-rw-r--r--  src/lib/utils/locking_allocator/locking_allocator.h   |  11
3 files changed, 25 insertions(+), 161 deletions(-)
diff --git a/src/lib/utils/locking_allocator/info.txt b/src/lib/utils/locking_allocator/info.txt
index 7dc3c059d..5d7a2ba83 100644
--- a/src/lib/utils/locking_allocator/info.txt
+++ b/src/lib/utils/locking_allocator/info.txt
@@ -6,3 +6,7 @@ LOCKING_ALLOCATOR -> 20131128
posix1
virtual_lock
</os_features>
+
+<requires>
+mem_pool
+</requires>
diff --git a/src/lib/utils/locking_allocator/locking_allocator.cpp b/src/lib/utils/locking_allocator/locking_allocator.cpp
index e457a203c..ba301adfb 100644
--- a/src/lib/utils/locking_allocator/locking_allocator.cpp
+++ b/src/lib/utils/locking_allocator/locking_allocator.cpp
@@ -7,114 +7,20 @@
#include <botan/locking_allocator.h>
#include <botan/internal/os_utils.h>
-#include <botan/mem_ops.h>
-#include <algorithm>
-#include <cstdlib>
-#include <string>
-#include <botan/mutex.h>
+#include <botan/internal/mem_pool.h>
namespace Botan {
-namespace {
-
-inline bool ptr_in_pool(const void* pool_ptr, size_t poolsize,
- const void* buf_ptr, size_t bufsize)
- {
- const uintptr_t pool = reinterpret_cast<uintptr_t>(pool_ptr);
- const uintptr_t buf = reinterpret_cast<uintptr_t>(buf_ptr);
- return (buf >= pool) && (buf + bufsize <= pool + poolsize);
- }
-
-inline size_t padding_for_alignment(size_t offset, size_t desired_alignment)
- {
- size_t mod = offset % desired_alignment;
- if(mod == 0)
- return 0; // already right on
- return desired_alignment - mod;
- }
-
-}
-
void* mlock_allocator::allocate(size_t num_elems, size_t elem_size)
{
if(!m_pool)
return nullptr;
const size_t n = num_elems * elem_size;
- const size_t alignment = 16;
-
if(n / elem_size != num_elems)
return nullptr; // overflow!
- if(n > m_poolsize)
- return nullptr;
- if(n < BOTAN_MLOCK_ALLOCATOR_MIN_ALLOCATION || n > BOTAN_MLOCK_ALLOCATOR_MAX_ALLOCATION)
- return nullptr;
-
- lock_guard_type<mutex_type> lock(m_mutex);
-
- auto best_fit = m_freelist.end();
-
- for(auto i = m_freelist.begin(); i != m_freelist.end(); ++i)
- {
- // If we have a perfect fit, use it immediately
- if(i->second == n && (i->first % alignment) == 0)
- {
- const size_t offset = i->first;
- m_freelist.erase(i);
- clear_mem(m_pool + offset, n);
-
- BOTAN_ASSERT((reinterpret_cast<uintptr_t>(m_pool) + offset) % alignment == 0,
- "Returning correctly aligned pointer");
-
- return m_pool + offset;
- }
-
-
- if(((best_fit == m_freelist.end()) || (best_fit->second > i->second)) &&
- (i->second >= (n + padding_for_alignment(i->first, alignment))))
- {
- best_fit = i;
- }
- }
-
- if(best_fit != m_freelist.end())
- {
- const size_t offset = best_fit->first;
-
- const size_t alignment_padding = padding_for_alignment(offset, alignment);
-
- best_fit->first += n + alignment_padding;
- best_fit->second -= n + alignment_padding;
-
- // Need to realign, split the block
- if(alignment_padding)
- {
- /*
- If we used the entire block except for small piece used for
- alignment at the beginning, so just update the entry already
- in place (as it is in the correct location), rather than
- deleting the empty range and inserting the new one in the
- same location.
- */
- if(best_fit->second == 0)
- {
- best_fit->first = offset;
- best_fit->second = alignment_padding;
- }
- else
- m_freelist.insert(best_fit, std::make_pair(offset, alignment_padding));
- }
-
- clear_mem(m_pool + offset + alignment_padding, n);
-
- BOTAN_ASSERT((reinterpret_cast<uintptr_t>(m_pool) + offset + alignment_padding) % alignment == 0,
- "Returning correctly aligned pointer");
-
- return m_pool + offset + alignment_padding;
- }
-
- return nullptr;
+ return m_pool->allocate(n);
}
bool mlock_allocator::deallocate(void* p, size_t num_elems, size_t elem_size) BOTAN_NOEXCEPT
@@ -131,73 +37,26 @@ bool mlock_allocator::deallocate(void* p, size_t num_elems, size_t elem_size) BO
if(n / elem_size != num_elems)
return false;
- if(!ptr_in_pool(m_pool, m_poolsize, p, n))
- return false;
-
- std::memset(p, 0, n);
-
- lock_guard_type<mutex_type> lock(m_mutex);
-
- const size_t start = static_cast<uint8_t*>(p) - m_pool;
-
- auto comp = [](std::pair<size_t, size_t> x, std::pair<size_t, size_t> y){ return x.first < y.first; };
-
- auto i = std::lower_bound(m_freelist.begin(), m_freelist.end(),
- std::make_pair(start, 0), comp);
-
- // try to merge with later block
- if(i != m_freelist.end() && start + n == i->first)
- {
- i->first = start;
- i->second += n;
- n = 0;
- }
-
- // try to merge with previous block
- if(i != m_freelist.begin())
- {
- auto prev = std::prev(i);
-
- if(prev->first + prev->second == start)
- {
- if(n)
- {
- prev->second += n;
- n = 0;
- }
- else
- {
- // merge adjoining
- prev->second += i->second;
- m_freelist.erase(i);
- }
- }
- }
-
- if(n != 0) // no merge possible?
- m_freelist.insert(i, std::make_pair(start, n));
-
- return true;
+ return m_pool->deallocate(p, n);
}
mlock_allocator::mlock_allocator()
{
const size_t mem_to_lock = OS::get_memory_locking_limit();
- /*
- TODO: split into multiple single page allocations to
- help ASLR and guard pages to help reduce the damage of
- a wild reads or write by the application.
- */
-
if(mem_to_lock)
{
- m_pool = static_cast<uint8_t*>(OS::allocate_locked_pages(mem_to_lock));
+ m_locked_pages = static_cast<uint8_t*>(OS::allocate_locked_pages(mem_to_lock));
- if(m_pool != nullptr)
+ if(m_locked_pages)
{
- m_poolsize = mem_to_lock;
- m_freelist.push_back(std::make_pair(0, m_poolsize));
+ m_locked_pages_size = mem_to_lock;
+ m_pool.reset(new Memory_Pool(m_locked_pages,
+ m_locked_pages_size,
+ OS::system_page_size(),
+ BOTAN_MLOCK_ALLOCATOR_MIN_ALLOCATION,
+ BOTAN_MLOCK_ALLOCATOR_MAX_ALLOCATION,
+ 4));
}
}
}
@@ -206,9 +65,9 @@ mlock_allocator::~mlock_allocator()
{
if(m_pool)
{
- secure_scrub_memory(m_pool, m_poolsize);
- OS::free_locked_pages(m_pool, m_poolsize);
- m_pool = nullptr;
+ m_pool.reset();
+ // OS::free_locked_pages scrubs the memory before free
+ OS::free_locked_pages(m_locked_pages, m_locked_pages_size);
}
}
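One detail both allocate() and deallocate() keep after the refactor is the integer-overflow guard on num_elems * elem_size. A standalone sketch of why the division check catches a wrapped product follows; the example values are illustrative, not from the commit.

// Demonstration of the overflow guard retained above: if the product wraps
// around size_t, dividing it back by elem_size no longer yields num_elems.
#include <cstddef>
#include <cstdio>
#include <limits>

// Mirrors the check in allocate()/deallocate(); elem_size is assumed nonzero,
// as it is for the element types the allocator is used with.
static bool size_request_ok(size_t num_elems, size_t elem_size)
   {
   const size_t n = num_elems * elem_size; // wraps modulo SIZE_MAX+1 on overflow
   return (n / elem_size) == num_elems;
   }

int main()
   {
   std::printf("%d\n", size_request_ok(1024, 16)); // 1: 16384 bytes, no wrap
   std::printf("%d\n", size_request_ok(std::numeric_limits<size_t>::max() / 8, 32)); // 0: product wraps
   return 0;
   }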
diff --git a/src/lib/utils/locking_allocator/locking_allocator.h b/src/lib/utils/locking_allocator/locking_allocator.h
index e9299c120..1a140f130 100644
--- a/src/lib/utils/locking_allocator/locking_allocator.h
+++ b/src/lib/utils/locking_allocator/locking_allocator.h
@@ -10,10 +10,12 @@
#include <botan/types.h>
#include <vector>
-#include <botan/mutex.h>
+#include <memory>
namespace Botan {
+class Memory_Pool;
+
class BOTAN_PUBLIC_API(2,0) mlock_allocator final
{
public:
@@ -32,10 +34,9 @@ class BOTAN_PUBLIC_API(2,0) mlock_allocator final
~mlock_allocator();
- mutex_type m_mutex;
- std::vector<std::pair<size_t, size_t>> m_freelist;
- uint8_t* m_pool = nullptr;
- size_t m_poolsize = 0;
+ std::unique_ptr<Memory_Pool> m_pool;
+ uint8_t* m_locked_pages = nullptr;
+ size_t m_locked_pages_size = 0;
};
}
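For reference, this is the Memory_Pool interface as it can be inferred from the call sites in this diff. The parameter names and the meaning of the final constructor argument (the literal 4 passed above, presumably alignment-related) are assumptions; the new mem_pool module itself is not part of this commit.

// Inferred from the call sites in locking_allocator.cpp above; this is not
// the actual header of the new mem_pool module.
#include <cstddef>
#include <cstdint>

class Memory_Pool final
   {
   public:
      // Arguments as passed by mlock_allocator's constructor: the backing
      // buffer, its size, the system page size, the minimum and maximum
      // allocation sizes served from the pool, and a final integer whose
      // exact meaning is not visible in this diff.
      Memory_Pool(uint8_t* pool, size_t pool_size, size_t page_size,
                  size_t min_alloc, size_t max_alloc, uint8_t align_param);

      void* allocate(size_t n);           // nullptr if the request cannot be served
      bool deallocate(void* p, size_t n); // false if p does not belong to the pool
   };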