-rw-r--r--  src/lib/utils/locking_allocator/info.txt                4
-rw-r--r--  src/lib/utils/locking_allocator/locking_allocator.cpp 171
-rw-r--r--  src/lib/utils/locking_allocator/locking_allocator.h    11
-rw-r--r--  src/lib/utils/mem_pool/info.txt                         7
-rw-r--r--  src/lib/utils/mem_pool/mem_pool.cpp                   190
-rw-r--r--  src/lib/utils/mem_pool/mem_pool.h                      60
6 files changed, 282 insertions, 161 deletions
diff --git a/src/lib/utils/locking_allocator/info.txt b/src/lib/utils/locking_allocator/info.txt
index 7dc3c059d..5d7a2ba83 100644
--- a/src/lib/utils/locking_allocator/info.txt
+++ b/src/lib/utils/locking_allocator/info.txt
@@ -6,3 +6,7 @@ LOCKING_ALLOCATOR -> 20131128
posix1
virtual_lock
</os_features>
+
+<requires>
+mem_pool
+</requires>
diff --git a/src/lib/utils/locking_allocator/locking_allocator.cpp b/src/lib/utils/locking_allocator/locking_allocator.cpp
index e457a203c..ba301adfb 100644
--- a/src/lib/utils/locking_allocator/locking_allocator.cpp
+++ b/src/lib/utils/locking_allocator/locking_allocator.cpp
@@ -7,114 +7,20 @@
#include <botan/locking_allocator.h>
#include <botan/internal/os_utils.h>
-#include <botan/mem_ops.h>
-#include <algorithm>
-#include <cstdlib>
-#include <string>
-#include <botan/mutex.h>
+#include <botan/internal/mem_pool.h>

namespace Botan {

-namespace {
-
-inline bool ptr_in_pool(const void* pool_ptr, size_t poolsize,
-                        const void* buf_ptr, size_t bufsize)
-   {
-   const uintptr_t pool = reinterpret_cast<uintptr_t>(pool_ptr);
-   const uintptr_t buf = reinterpret_cast<uintptr_t>(buf_ptr);
-   return (buf >= pool) && (buf + bufsize <= pool + poolsize);
-   }
-
-inline size_t padding_for_alignment(size_t offset, size_t desired_alignment)
-   {
-   size_t mod = offset % desired_alignment;
-   if(mod == 0)
-      return 0; // already right on
-   return desired_alignment - mod;
-   }
-
-}
-
void* mlock_allocator::allocate(size_t num_elems, size_t elem_size)
   {
   if(!m_pool)
      return nullptr;

   const size_t n = num_elems * elem_size;
-   const size_t alignment = 16;
-
   if(n / elem_size != num_elems)
      return nullptr; // overflow!

-   if(n > m_poolsize)
-      return nullptr;
-   if(n < BOTAN_MLOCK_ALLOCATOR_MIN_ALLOCATION || n > BOTAN_MLOCK_ALLOCATOR_MAX_ALLOCATION)
-      return nullptr;
-
-   lock_guard_type<mutex_type> lock(m_mutex);
-
-   auto best_fit = m_freelist.end();
-
-   for(auto i = m_freelist.begin(); i != m_freelist.end(); ++i)
-      {
-      // If we have a perfect fit, use it immediately
-      if(i->second == n && (i->first % alignment) == 0)
-         {
-         const size_t offset = i->first;
-         m_freelist.erase(i);
-         clear_mem(m_pool + offset, n);
-
-         BOTAN_ASSERT((reinterpret_cast<uintptr_t>(m_pool) + offset) % alignment == 0,
-                      "Returning correctly aligned pointer");
-
-         return m_pool + offset;
-         }
-
-
-      if(((best_fit == m_freelist.end()) || (best_fit->second > i->second)) &&
-         (i->second >= (n + padding_for_alignment(i->first, alignment))))
-         {
-         best_fit = i;
-         }
-      }
-
-   if(best_fit != m_freelist.end())
-      {
-      const size_t offset = best_fit->first;
-
-      const size_t alignment_padding = padding_for_alignment(offset, alignment);
-
-      best_fit->first += n + alignment_padding;
-      best_fit->second -= n + alignment_padding;
-
-      // Need to realign, split the block
-      if(alignment_padding)
-         {
-         /*
-         If we used the entire block except for small piece used for
-         alignment at the beginning, so just update the entry already
-         in place (as it is in the correct location), rather than
-         deleting the empty range and inserting the new one in the
-         same location.
-         */
-         if(best_fit->second == 0)
-            {
-            best_fit->first = offset;
-            best_fit->second = alignment_padding;
-            }
-         else
-            m_freelist.insert(best_fit, std::make_pair(offset, alignment_padding));
-         }
-
-      clear_mem(m_pool + offset + alignment_padding, n);
-
-      BOTAN_ASSERT((reinterpret_cast<uintptr_t>(m_pool) + offset + alignment_padding) % alignment == 0,
-                   "Returning correctly aligned pointer");
-
-      return m_pool + offset + alignment_padding;
-      }
-
-   return nullptr;
+   return m_pool->allocate(n);
   }

bool mlock_allocator::deallocate(void* p, size_t num_elems, size_t elem_size) BOTAN_NOEXCEPT
@@ -131,73 +37,26 @@ bool mlock_allocator::deallocate(void* p, size_t num_elems, size_t elem_size) BO
   if(n / elem_size != num_elems)
      return false;

-   if(!ptr_in_pool(m_pool, m_poolsize, p, n))
-      return false;
-
-   std::memset(p, 0, n);
-
-   lock_guard_type<mutex_type> lock(m_mutex);
-
-   const size_t start = static_cast<uint8_t*>(p) - m_pool;
-
-   auto comp = [](std::pair<size_t, size_t> x, std::pair<size_t, size_t> y){ return x.first < y.first; };
-
-   auto i = std::lower_bound(m_freelist.begin(), m_freelist.end(),
-                             std::make_pair(start, 0), comp);
-
-   // try to merge with later block
-   if(i != m_freelist.end() && start + n == i->first)
-      {
-      i->first = start;
-      i->second += n;
-      n = 0;
-      }
-
-   // try to merge with previous block
-   if(i != m_freelist.begin())
-      {
-      auto prev = std::prev(i);
-
-      if(prev->first + prev->second == start)
-         {
-         if(n)
-            {
-            prev->second += n;
-            n = 0;
-            }
-         else
-            {
-            // merge adjoining
-            prev->second += i->second;
-            m_freelist.erase(i);
-            }
-         }
-      }
-
-   if(n != 0) // no merge possible?
-      m_freelist.insert(i, std::make_pair(start, n));
-
-   return true;
+   return m_pool->deallocate(p, n);
   }

mlock_allocator::mlock_allocator()
   {
   const size_t mem_to_lock = OS::get_memory_locking_limit();

-   /*
-   TODO: split into multiple single page allocations to
-   help ASLR and guard pages to help reduce the damage of
-   a wild reads or write by the application.
-   */
-
   if(mem_to_lock)
      {
-      m_pool = static_cast<uint8_t*>(OS::allocate_locked_pages(mem_to_lock));
+      m_locked_pages = static_cast<uint8_t*>(OS::allocate_locked_pages(mem_to_lock));

-      if(m_pool != nullptr)
+      if(m_locked_pages)
         {
-         m_poolsize = mem_to_lock;
-         m_freelist.push_back(std::make_pair(0, m_poolsize));
+         m_locked_pages_size = mem_to_lock;
+         m_pool.reset(new Memory_Pool(m_locked_pages,
+                                      m_locked_pages_size,
+                                      OS::system_page_size(),
+                                      BOTAN_MLOCK_ALLOCATOR_MIN_ALLOCATION,
+                                      BOTAN_MLOCK_ALLOCATOR_MAX_ALLOCATION,
+                                      4));
         }
      }
   }
@@ -206,9 +65,9 @@ mlock_allocator::~mlock_allocator()
   {
   if(m_pool)
      {
-      secure_scrub_memory(m_pool, m_poolsize);
-      OS::free_locked_pages(m_pool, m_poolsize);
-      m_pool = nullptr;
+      m_pool.reset();
+      // OS::free_locked_pages scrubs the memory before free
+      OS::free_locked_pages(m_locked_pages, m_locked_pages_size);
      }
   }
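
A note on the overflow guard retained in both allocate() and deallocate() above: unsigned multiplication wraps modulo 2^N, and a product that wrapped can no longer be divided by elem_size to recover num_elems. A minimal standalone illustration of this check (not part of the patch, plain C++):

#include <cassert>
#include <cstddef>
#include <cstdint>

int main()
   {
   const size_t num_elems = (SIZE_MAX / 2) + 1;
   const size_t elem_size = 2;
   const size_t n = num_elems * elem_size; // wraps around to 0
   assert(n / elem_size != num_elems);     // the guard detects the wrap
   return 0;
   }
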
diff --git a/src/lib/utils/locking_allocator/locking_allocator.h b/src/lib/utils/locking_allocator/locking_allocator.h
index e9299c120..1a140f130 100644
--- a/src/lib/utils/locking_allocator/locking_allocator.h
+++ b/src/lib/utils/locking_allocator/locking_allocator.h
@@ -10,10 +10,12 @@
#include <botan/types.h>
#include <vector>
-#include <botan/mutex.h>
+#include <memory>

namespace Botan {

+class Memory_Pool;
+
class BOTAN_PUBLIC_API(2,0) mlock_allocator final
   {
   public:
@@ -32,10 +34,9 @@ class BOTAN_PUBLIC_API(2,0) mlock_allocator final
      ~mlock_allocator();

-      mutex_type m_mutex;
-      std::vector<std::pair<size_t, size_t>> m_freelist;
-      uint8_t* m_pool = nullptr;
-      size_t m_poolsize = 0;
+      std::unique_ptr<Memory_Pool> m_pool;
+      uint8_t* m_locked_pages = nullptr;
+      size_t m_locked_pages_size = 0;
   };

}
diff --git a/src/lib/utils/mem_pool/info.txt b/src/lib/utils/mem_pool/info.txt
new file mode 100644
index 000000000..a8a5a53e1
--- /dev/null
+++ b/src/lib/utils/mem_pool/info.txt
@@ -0,0 +1,7 @@
+<defines>
+MEM_POOL -> 20180309
+</defines>
+
+<header:internal>
+mem_pool.h
+</header:internal>
diff --git a/src/lib/utils/mem_pool/mem_pool.cpp b/src/lib/utils/mem_pool/mem_pool.cpp
new file mode 100644
index 000000000..d25343383
--- /dev/null
+++ b/src/lib/utils/mem_pool/mem_pool.cpp
@@ -0,0 +1,190 @@
+/*
+* (C) 2018 Jack Lloyd
+*
+* Botan is released under the Simplified BSD License (see license.txt)
+*/
+
+#include <botan/internal/mem_pool.h>
+#include <botan/mem_ops.h>
+#include <botan/exceptn.h>
+#include <algorithm>
+#include <cstdlib>
+#include <string>
+
+namespace Botan {
+
+namespace {
+
+inline bool ptr_in_pool(const void* pool_ptr, size_t poolsize,
+                        const void* buf_ptr, size_t bufsize)
+   {
+   const uintptr_t pool = reinterpret_cast<uintptr_t>(pool_ptr);
+   const uintptr_t buf = reinterpret_cast<uintptr_t>(buf_ptr);
+   return (buf >= pool) && (buf + bufsize <= pool + poolsize);
+   }
+
+inline size_t padding_for_alignment(size_t n, size_t alignment)
+   {
+   const size_t mod = n % alignment;
+   if(mod == 0)
+      return 0;
+   return alignment - mod;
+   }
+
+}
+
+Memory_Pool::Memory_Pool(uint8_t* pool,
+                         size_t pool_size,
+                         size_t page_size,
+                         size_t min_alloc,
+                         size_t max_alloc,
+                         uint8_t align_bit) :
+   m_page_size(page_size),
+   m_min_alloc(min_alloc),
+   m_max_alloc(max_alloc),
+   m_align_bit(align_bit)
+   {
+   if(pool == nullptr)
+      throw Invalid_Argument("Memory_Pool pool was null");
+
+   if(m_min_alloc > m_max_alloc)
+      throw Invalid_Argument("Memory_Pool min_alloc > max_alloc");
+
+   if(m_align_bit > 6)
+      throw Invalid_Argument("Memory_Pool invalid align_bit");
+
+   // This is basically just to verify that the range is valid
+   clear_mem(pool, pool_size);
+
+   m_pool = pool;
+   m_pool_size = pool_size;
+   m_freelist.push_back(std::make_pair(0, m_pool_size));
+   }
+
+void* Memory_Pool::allocate(size_t req)
+   {
+   const size_t alignment = (1 << m_align_bit);
+
+   if(req > m_pool_size)
+      return nullptr;
+   if(req < m_min_alloc || req > m_max_alloc)
+      return nullptr;
+
+   lock_guard_type<mutex_type> lock(m_mutex);
+
+   auto best_fit = m_freelist.end();
+
+   for(auto i = m_freelist.begin(); i != m_freelist.end(); ++i)
+      {
+      // If we have a perfect fit, use it immediately
+      if(i->second == req && (i->first % alignment) == 0)
+         {
+         const size_t offset = i->first;
+         m_freelist.erase(i);
+         clear_mem(m_pool + offset, req);
+
+         BOTAN_ASSERT((reinterpret_cast<uintptr_t>(m_pool) + offset) % alignment == 0,
+                      "Returning correctly aligned pointer");
+
+         return m_pool + offset;
+         }
+
+
+      if(((best_fit == m_freelist.end()) || (best_fit->second > i->second)) &&
+         (i->second >= (req + padding_for_alignment(i->first, alignment))))
+         {
+         best_fit = i;
+         }
+      }
+
+   if(best_fit != m_freelist.end())
+      {
+      const size_t offset = best_fit->first;
+
+      const size_t alignment_padding = padding_for_alignment(offset, alignment);
+
+      best_fit->first += req + alignment_padding;
+      best_fit->second -= req + alignment_padding;
+
+      // Need to realign, split the block
+      if(alignment_padding)
+         {
+         /*
+         If we used the entire block except for a small piece at the
+         beginning needed for alignment, just update the entry already
+         in place (as it is in the correct location), rather than
+         deleting the empty range and inserting a new one in the
+         same location.
+         */
+         if(best_fit->second == 0)
+            {
+            best_fit->first = offset;
+            best_fit->second = alignment_padding;
+            }
+         else
+            m_freelist.insert(best_fit, std::make_pair(offset, alignment_padding));
+         }
+
+      clear_mem(m_pool + offset + alignment_padding, req);
+
+      BOTAN_ASSERT((reinterpret_cast<uintptr_t>(m_pool) + offset + alignment_padding) % alignment == 0,
+                   "Returning correctly aligned pointer");
+
+      return m_pool + offset + alignment_padding;
+      }
+
+   return nullptr;
+   }
+
+bool Memory_Pool::deallocate(void* p, size_t n) BOTAN_NOEXCEPT
+   {
+   if(!ptr_in_pool(m_pool, m_pool_size, p, n))
+      return false;
+
+   std::memset(p, 0, n);
+
+   lock_guard_type<mutex_type> lock(m_mutex);
+
+   const size_t start = static_cast<uint8_t*>(p) - m_pool;
+
+   auto comp = [](std::pair<size_t, size_t> x, std::pair<size_t, size_t> y){ return x.first < y.first; };
+
+   auto i = std::lower_bound(m_freelist.begin(), m_freelist.end(),
+                             std::make_pair(start, 0), comp);
+
+   // try to merge with later block
+   if(i != m_freelist.end() && start + n == i->first)
+      {
+      i->first = start;
+      i->second += n;
+      n = 0;
+      }
+
+   // try to merge with previous block
+   if(i != m_freelist.begin())
+      {
+      auto prev = std::prev(i);
+
+      if(prev->first + prev->second == start)
+         {
+         if(n)
+            {
+            prev->second += n;
+            n = 0;
+            }
+         else
+            {
+            // merge adjoining
+            prev->second += i->second;
+            m_freelist.erase(i);
+            }
+         }
+      }
+
+   if(n != 0) // no merge possible?
+      m_freelist.insert(i, std::make_pair(start, n));
+
+   return true;
+   }
+
+}
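
To make the alignment handling in Memory_Pool::allocate() concrete, the following standalone sketch (not part of the patch; padding_for_alignment is copied from the anonymous namespace above) traces the best-fit split of a free block (offset 24, length 64) for a 48-byte request at 16-byte alignment, i.e. align_bit = 4:

#include <cassert>
#include <cstddef>

// Copy of the helper above: bytes to skip so that an offset
// becomes a multiple of the requested alignment
static size_t padding_for_alignment(size_t n, size_t alignment)
   {
   const size_t mod = n % alignment;
   return (mod == 0) ? 0 : alignment - mod;
   }

int main()
   {
   const size_t offset = 24, len = 64, req = 48;

   const size_t pad = padding_for_alignment(offset, 16);
   assert(pad == 8);                 // 16 - (24 % 16)
   assert(len >= req + pad);         // block is large enough for the request
   assert((offset + pad) % 16 == 0); // caller receives aligned offset 32

   // After the split, the unused tail stays in the freelist as (80, 8)
   // and the 8 padding bytes are reinserted as their own block (24, 8)
   assert(offset + pad + req == 80);
   assert(len - (req + pad) == 8);
   return 0;
   }
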
diff --git a/src/lib/utils/mem_pool/mem_pool.h b/src/lib/utils/mem_pool/mem_pool.h
new file mode 100644
index 000000000..418963864
--- /dev/null
+++ b/src/lib/utils/mem_pool/mem_pool.h
@@ -0,0 +1,60 @@
+/*
+* (C) 2018 Jack Lloyd
+*
+* Botan is released under the Simplified BSD License (see license.txt)
+*/
+
+#ifndef BOTAN_MEM_POOL_H_
+#define BOTAN_MEM_POOL_H_
+
+#include <botan/types.h>
+#include <botan/mutex.h>
+#include <vector>
+
+namespace Botan {
+
+class Memory_Pool final
+   {
+   public:
+      /**
+      * Initialize a memory pool. The memory is not owned by *this;
+      * it must be freed by the caller.
+      * @param pool the pool
+      * @param pool_size size of pool
+      * @param page_size some nominal page size (does not need to match
+      *        the system page size)
+      * @param min_allocation return null for allocations smaller than this
+      * @param max_allocation return null for allocations larger than this
+      * @param align_bit align all returned memory to (1<<align_bit) bytes
+      */
+      Memory_Pool(uint8_t* pool,
+                  size_t pool_size,
+                  size_t page_size,
+                  size_t min_allocation,
+                  size_t max_allocation,
+                  uint8_t align_bit);
+
+      void* allocate(size_t size);
+
+      bool deallocate(void* p, size_t size) BOTAN_NOEXCEPT;
+
+      Memory_Pool(const Memory_Pool&) = delete;
+
+      Memory_Pool& operator=(const Memory_Pool&) = delete;
+
+   private:
+      const size_t m_page_size = 0;
+      const size_t m_min_alloc = 0;
+      const size_t m_max_alloc = 0;
+      const uint8_t m_align_bit = 0;
+
+      mutex_type m_mutex;
+
+      std::vector<std::pair<size_t, size_t>> m_freelist;
+      uint8_t* m_pool = nullptr;
+      size_t m_pool_size = 0;
+   };
+
+}
+
+#endif
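
Taken together, the new class can be exercised roughly as follows. This is a hypothetical usage sketch, not part of the patch: the pool is backed by ordinary heap memory rather than the mlock'd pages mlock_allocator uses, the buffer size and allocation bounds are invented for illustration, and since mem_pool.h is an internal header this only builds inside the library or its test suite:

#include <botan/internal/mem_pool.h>
#include <cassert>
#include <cstdint>
#include <vector>

int main()
   {
   // 64 KiB of plain heap memory stands in for OS::allocate_locked_pages()
   std::vector<uint8_t> backing(64 * 1024);

   // Nominal 4096-byte pages, allocations limited to [16, 4096] bytes,
   // align_bit 4 so returned pointers are 16-byte aligned within the pool
   Botan::Memory_Pool pool(backing.data(), backing.size(),
                           4096, 16, 4096, 4);

   void* p = pool.allocate(128);
   assert(p != nullptr);

   // Requests outside the [min_allocation, max_allocation] bounds
   // return null rather than throwing
   assert(pool.allocate(8) == nullptr);

   // deallocate() reports whether the pointer belonged to the pool
   int stack_var = 0;
   assert(pool.deallocate(&stack_var, sizeof(stack_var)) == false);
   assert(pool.deallocate(p, 128) == true);

   // The pool never owns the backing memory; the vector releases it here
   return 0;
   }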