author    Jack Lloyd <[email protected]>  2018-12-31 17:52:04 -0500
committer Jack Lloyd <[email protected]>  2019-01-05 08:50:40 -0500
commit    1d14dff64b4848c8df60c5498ae810d862205f3d (patch)
tree      08f143e983214b1fb75eeda1b468580789b44a37 /src/lib/utils
parent    b40d4c0e9c134a3daf9d77ce6f8a7b1271feb5ca (diff)
Split up allocations into pages
Diffstat (limited to 'src/lib/utils')
-rw-r--r--  src/lib/utils/locking_allocator/locking_allocator.cpp |  12
-rw-r--r--  src/lib/utils/locking_allocator/locking_allocator.h   |   3
-rw-r--r--  src/lib/utils/mem_pool/mem_pool.cpp                   | 158
-rw-r--r--  src/lib/utils/mem_pool/mem_pool.h                     |  14
-rw-r--r--  src/lib/utils/os_utils.cpp                            | 135
-rw-r--r--  src/lib/utils/os_utils.h                              |  35
6 files changed, 188 insertions, 169 deletions
diff --git a/src/lib/utils/locking_allocator/locking_allocator.cpp b/src/lib/utils/locking_allocator/locking_allocator.cpp
index 401f00848..1c10c362b 100644
--- a/src/lib/utils/locking_allocator/locking_allocator.cpp
+++ b/src/lib/utils/locking_allocator/locking_allocator.cpp
@@ -47,15 +47,11 @@ mlock_allocator::mlock_allocator()
if(mem_to_lock > 0 && mem_to_lock % page_size == 0)
{
- m_locked_pages = static_cast<uint8_t*>(OS::allocate_locked_pages(mem_to_lock));
+ m_locked_pages = OS::allocate_locked_pages(mem_to_lock / page_size);
- if(m_locked_pages)
+ if(m_locked_pages.size() > 0)
{
- m_locked_pages_size = mem_to_lock;
-
- m_pool.reset(new Memory_Pool(m_locked_pages,
- m_locked_pages_size / page_size,
- page_size));
+ m_pool.reset(new Memory_Pool(m_locked_pages, page_size));
}
}
}
@@ -66,7 +62,7 @@ mlock_allocator::~mlock_allocator()
{
m_pool.reset();
// OS::free_locked_pages scrubs the memory before free
- OS::free_locked_pages(m_locked_pages, m_locked_pages_size);
+ OS::free_locked_pages(m_locked_pages);
}
}
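For concreteness, the constructor arithmetic above works out as follows. This is a sketch with assumed values: 4096 bytes is the typical x86-64 page size, and 512 KiB matches the usual BOTAN_MLOCK_ALLOCATOR_MAX_LOCKED_KB default.

   // Hypothetical values, for illustration only
   const size_t page_size   = 4096;          // OS::system_page_size() on typical x86-64
   const size_t mem_to_lock = 512 * 1024;    // get_memory_locking_limit() result

   // mem_to_lock % page_size == 0 holds, so the allocator requests
   // 512*1024 / 4096 = 128 individually mlocked pages:
   std::vector<void*> pages = OS::allocate_locked_pages(mem_to_lock / page_size);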
diff --git a/src/lib/utils/locking_allocator/locking_allocator.h b/src/lib/utils/locking_allocator/locking_allocator.h
index 7325e79ac..21baa8ec2 100644
--- a/src/lib/utils/locking_allocator/locking_allocator.h
+++ b/src/lib/utils/locking_allocator/locking_allocator.h
@@ -35,8 +35,7 @@ class BOTAN_PUBLIC_API(2,0) mlock_allocator final
~mlock_allocator();
std::unique_ptr<Memory_Pool> m_pool;
- uint8_t* m_locked_pages = nullptr;
- size_t m_locked_pages_size = 0;
+ std::vector<void*> m_locked_pages;
};
}
diff --git a/src/lib/utils/mem_pool/mem_pool.cpp b/src/lib/utils/mem_pool/mem_pool.cpp
index d89ed6631..c9708a7c1 100644
--- a/src/lib/utils/mem_pool/mem_pool.cpp
+++ b/src/lib/utils/mem_pool/mem_pool.cpp
@@ -5,6 +5,7 @@
*/
#include <botan/internal/mem_pool.h>
+#include <botan/internal/os_utils.h>
#include <botan/mem_ops.h>
namespace Botan {
@@ -14,18 +15,16 @@ namespace Botan {
*
* This allocator is not useful for general purpose but works well within the
* context of allocating cryptographic keys. It makes several assumptions which
-* don't work for a malloc but simplify and speed up the implementation:
+* don't work for implementing malloc but simplify and speed up the implementation:
*
-* - There is a single fixed block of memory, which cannot be expanded. This is
-* the block that was allocated, mlocked and passed to the Memory_Pool
-* constructor. It is assumed to be page-aligned.
+* - There is a fixed set of pages, which cannot be expanded later. These are
+* pages which were allocated, mlocked and passed to the Memory_Pool constructor.
*
* - The allocator is allowed to return null anytime it feels like not servicing
* a request, in which case the request will be sent to calloc instead. In
-* particular values which are too small or too large are given to calloc.
+* particular, requests which are too small or too large are rejected.
*
-* - Most allocations are powers of 2, the remainder are usually a multiple of 4
-* or 8.
+* - Most allocations are powers of 2; the remainder are usually a multiple of 8
*
* - Free requests include the size of the allocation, so there is no need to
* track this within the pool.
@@ -33,10 +32,10 @@ namespace Botan {
* - Alignment is important to the caller. For this allocator, any allocation of
* size N is aligned evenly at N bytes.
*
-* The block of memory is split up into pages. Initially each page is in the free
-* page list. Each page is used for just one size of allocation, with requests
-* bucketed into a small number of common sizes. If the allocation would be too
-* big, too small, or with too much slack, it is rejected by the pool.
+* Initially each page is in the free page list. Each page is used for just one
+* size of allocation, with requests bucketed into a small number of common
+* sizes. If the allocation would be too big, too small, or with too much slack,
+* it is rejected by the pool.
*
* The free list is maintained by a bitmap, one per page/Bucket. Since each
* Bucket only maintains objects of a single size, each bit set or clear
@@ -53,9 +52,12 @@ namespace Botan {
* A deallocation also walks the list of buckets for the size and asks each
* Bucket in turn if it recognizes the pointer. When a Bucket becomes empty as a
* result of a deallocation, it is recycled back into the free pool. When this
-* happens, the Buckets pointer goes to the end of the free list. This will delay
-* slightly the reuse of this page, which may offer some slight help wrt use
-* after free issues.
+* happens, the Bucket's page goes to the end of the free list. All pages on the
+* free list are marked in the MMU as noaccess, so anything touching them will
+* immediately crash. They are only marked R/W once placed into a new bucket.
+* Making the free list FIFO maximizes the time between the last free of a
+* bucket and its page becoming writable again, improving the odds that a
+* use-after-free is caught as a crash.
*
* It may be worthwhile to optimize deallocation by storing the Buckets in order
* (by pointer value) which would allow binary search to find the owning bucket.
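The per-page bitmap described in the comment above can be sketched in isolation. This is a simplified model with hypothetical names, not Botan's actual bitmap type: one bit per fixed-size slot, set meaning in use.

   #include <cstddef>
   #include <cstdint>
   #include <vector>

   // One bitmap per page/Bucket; bit i tracks slot i of a single item size.
   class SlotBitmap
      {
      public:
         explicit SlotBitmap(size_t slots) :
            m_bits((slots + 63) / 64, 0), m_slots(slots) {}

         // Find a clear bit, set it, and return its index; m_slots means full.
         size_t alloc()
            {
            for(size_t w = 0; w != m_bits.size(); ++w)
               {
               if(m_bits[w] == UINT64_MAX)
                  continue; // this word is fully allocated
               size_t b = 0;
               while((m_bits[w] >> b) & 1)
                  ++b;
               const size_t slot = w * 64 + b;
               if(slot >= m_slots)
                  return m_slots; // only tail bits past the last real slot remain
               m_bits[w] |= (uint64_t(1) << b);
               return slot;
               }
            return m_slots;
            }

         void free(size_t slot)
            { m_bits[slot / 64] &= ~(uint64_t(1) << (slot % 64)); }

         bool empty() const
            {
            for(uint64_t w : m_bits)
               if(w != 0)
                  return false;
            return true;
            }

      private:
         std::vector<uint64_t> m_bits;
         size_t m_slots;
      };

A Bucket pairs one such bitmap with its page: alloc() maps the returned slot index to ptr + slot * item_size, and an empty() check is what lets deallocate recycle the whole page back to the free list.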
@@ -66,7 +68,7 @@ namespace {
size_t choose_bucket(size_t n)
{
const size_t MINIMUM_ALLOCATION = 16;
- const size_t MAXIMUM_ALLOCATION = 512;
+ const size_t MAXIMUM_ALLOCATION = 256;
const size_t MAXIMUM_SLACK = 31;
if(n < MINIMUM_ALLOCATION || n > MAXIMUM_ALLOCATION)
@@ -74,7 +76,7 @@ size_t choose_bucket(size_t n)
// Need to tune these
const size_t buckets[] = {
- 16, 24, 32, 48, 64, 80, 96, 112, 128, 160, 192, 256, 320, 384, 448, 512, 0
+ 16, 24, 32, 48, 64, 80, 96, 112, 128, 160, 192, 256, 0,
};
for(size_t i = 0; buckets[i]; ++i)
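Read together with MAXIMUM_SLACK above, the table means a request is served by the first bucket that fits without wasting more than 31 bytes. A self-contained sketch of that selection (the exact slack rule in the real function may differ slightly):

   #include <cstddef>

   size_t choose_bucket_sketch(size_t n)
      {
      const size_t MINIMUM_ALLOCATION = 16;
      const size_t MAXIMUM_ALLOCATION = 256;
      const size_t MAXIMUM_SLACK = 31;

      const size_t buckets[] = {
         16, 24, 32, 48, 64, 80, 96, 112, 128, 160, 192, 256, 0,
      };

      if(n < MINIMUM_ALLOCATION || n > MAXIMUM_ALLOCATION)
         return 0;

      for(size_t i = 0; buckets[i]; ++i)
         {
         if(n <= buckets[i] && buckets[i] - n <= MAXIMUM_SLACK)
            return buckets[i];
         }

      return 0;
      }

So choose_bucket_sketch(40) returns 48 (8 bytes of slack), while choose_bucket_sketch(193) returns 0: the only candidate, 256, would waste 63 bytes, so the request falls through to calloc.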
@@ -264,6 +266,12 @@ bool Bucket::free(void* p)
if(!in_this_bucket(p))
return false;
+ /*
+ Also zero any trailing bytes, which should not have been written to,
+ in case the caller wrote past the end of its allocation.
+ */
+ std::memset(p, 0, m_item_size);
+
const size_t offset = (reinterpret_cast<uintptr_t>(p) - reinterpret_cast<uintptr_t>(m_range)) / m_item_size;
m_bitmap.free(offset);
@@ -272,25 +280,23 @@ bool Bucket::free(void* p)
return true;
}
-Memory_Pool::Memory_Pool(uint8_t* pool, size_t num_pages, size_t page_size) :
+Memory_Pool::Memory_Pool(const std::vector<void*>& pages, size_t page_size) :
m_page_size(page_size)
{
- BOTAN_ARG_CHECK(pool != nullptr, "Memory_Pool pool was null");
-
- // This is basically just to verify that the range is valid
- clear_mem(pool, num_pages * page_size);
-
- m_pool = pool;
- m_pool_size = num_pages * page_size;
-
- for(size_t i = 0; i != num_pages; ++i)
+ for(size_t i = 0; i != pages.size(); ++i)
{
- m_free_pages.push_back(pool + page_size*i);
+ clear_bytes(pages[i], m_page_size);
+ OS::page_prohibit_access(pages[i]);
+ m_free_pages.push_back(static_cast<uint8_t*>(pages[i]));
}
}
Memory_Pool::~Memory_Pool()
{
+ for(size_t i = 0; i != m_free_pages.size(); ++i)
+ {
+ OS::page_allow_access(m_free_pages[i]);
+ }
}
void* Memory_Pool::allocate(size_t n)
@@ -300,30 +306,37 @@ void* Memory_Pool::allocate(size_t n)
const size_t n_bucket = choose_bucket(n);
- if(n_bucket == 0)
- return nullptr;
-
- lock_guard_type<mutex_type> lock(m_mutex);
+ if(n_bucket > 0)
+ {
+ lock_guard_type<mutex_type> lock(m_mutex);
- std::deque<Bucket>& buckets = m_buckets_for[n_bucket];
+ std::deque<Bucket>& buckets = m_buckets_for[n_bucket];
- for(auto& bucket : buckets)
- {
- if(uint8_t* p = bucket.alloc())
- return p;
+ /*
+ It would be optimal to pick the bucket with the most usage,
+ since a bucket with, say, a single item allocated from it has a
+ high chance of being freed later, at which point the whole page
+ can be recycled.
+ */
+ for(auto& bucket : buckets)
+ {
+ if(uint8_t* p = bucket.alloc())
+ return p;
- // If the bucket is full, maybe move it to the end of the list?
- // Otoh bucket search should be very fast
- }
+ // If the bucket is full, maybe move it to the end of the list?
+ // Otoh bucket search should be very fast
+ }
- if(m_free_pages.size() > 0)
- {
- uint8_t* ptr = m_free_pages[0];
- m_free_pages.pop_front();
- buckets.push_front(Bucket(ptr, m_page_size, n_bucket));
- void* p = buckets[0].alloc();
- BOTAN_ASSERT_NOMSG(p != nullptr);
- return p;
+ if(m_free_pages.size() > 0)
+ {
+ uint8_t* ptr = m_free_pages[0];
+ m_free_pages.pop_front();
+ OS::page_allow_access(ptr);
+ buckets.push_front(Bucket(ptr, m_page_size, n_bucket));
+ void* p = buckets[0].alloc();
+ BOTAN_ASSERT_NOMSG(p != nullptr);
+ return p;
+ }
}
// out of room
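The page_allow_access call in the hunk above is the other half of the guard scheme: pages sitting on the free list are PROT_NONE and only become writable when handed to a new Bucket. The effect can be demonstrated standalone (a Linux/BSD-only sketch, not Botan code):

   #include <sys/mman.h>
   #include <unistd.h>
   #include <cassert>
   #include <cstddef>

   int main()
      {
      const size_t page = static_cast<size_t>(::sysconf(_SC_PAGESIZE));

      void* p = ::mmap(nullptr, page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(p != MAP_FAILED);

      // Page goes onto the free list: revoke all access
      ::mprotect(p, page, PROT_NONE);
      //*static_cast<char*>(p) = 1; // uncommenting this crashes with SIGSEGV

      // Page is pulled into a new Bucket: restore R/W
      ::mprotect(p, page, PROT_READ | PROT_WRITE);
      *static_cast<char*>(p) = 1;   // fine again

      ::munmap(p, page);
      return 0;
      }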
@@ -332,19 +345,10 @@ void* Memory_Pool::allocate(size_t n)
bool Memory_Pool::deallocate(void* p, size_t len) noexcept
{
- if(!ptr_in_pool(m_pool, m_pool_size, p, len))
- return false;
-
const size_t n_bucket = choose_bucket(len);
if(n_bucket != 0)
{
- /*
- Zero also any trailing bytes, which should not have been written to,
- but maybe the user was bad and wrote past the end.
- */
- std::memset(p, 0, n_bucket);
-
lock_guard_type<mutex_type> lock(m_mutex);
std::deque<Bucket>& buckets = m_buckets_for[n_bucket];
@@ -352,43 +356,21 @@ bool Memory_Pool::deallocate(void* p, size_t len) noexcept
for(size_t i = 0; i != buckets.size(); ++i)
{
Bucket& bucket = buckets[i];
- if(bucket.free(p) == false)
- continue;
-
- if(bucket.empty())
+ if(bucket.free(p))
{
- m_free_pages.push_back(bucket.ptr());
-
- if(i != buckets.size() - 1)
- std::swap(buckets.back(), buckets[i]);
- buckets.pop_back();
+ if(bucket.empty())
+ {
+ m_free_pages.push_back(bucket.ptr());
+
+ if(i != buckets.size() - 1)
+ std::swap(buckets.back(), buckets[i]);
+ buckets.pop_back();
+ }
+ return true;
}
-
- return true;
}
}
- /*
- * If we reach this point, something bad has occurred. We know the pointer
- * passed in is inside the range of the pool, but no bucket recognized it,
- * either because n_bucket was zero or no Bucket::free call returned true. Our
- * options (since this function is noexcept) are to either ignore it and
- * return false, ignore it and return true, or to crash.
- *
- * Returning false means the pointer will be considered a standard heap
- * pointer and passed on to free, which will almost certainly cause a heap
- * corruption.
- *
- * There is some robustness argument for just memseting the pointer and
- * returning true. In this case it will be assumed to be freed. But, since
- * this pointer *is* within the range of the pool, but no bucket claimed it,
- * that seems to indicate some existing allocator corruption.
- *
- * Crashing is bad, heap corruption is worse. So we crash, in this case by
- * calling BOTAN_ASSERT and letting the exception handling mechanism
- * terminate the process.
- */
- BOTAN_ASSERT(false, "Pointer from pool, but no bucket recognized it");
return false;
}
diff --git a/src/lib/utils/mem_pool/mem_pool.h b/src/lib/utils/mem_pool/mem_pool.h
index 8f497310c..e10ec3bb8 100644
--- a/src/lib/utils/mem_pool/mem_pool.h
+++ b/src/lib/utils/mem_pool/mem_pool.h
@@ -9,6 +9,7 @@
#include <botan/types.h>
#include <botan/mutex.h>
+#include <vector>
#include <deque>
#include <map>
@@ -22,13 +23,11 @@ class BOTAN_TEST_API Memory_Pool final
/**
* Initialize a memory pool. The memory is not owned by *this,
* it must be freed by the caller.
- * @param pool the pool
- * @param page_count size of pool in page_size chunks
- * @param page_size some nominal page size (does not need to match
- * the system page size)
+ * @param pages a list of pages to allocate from
+ * @param page_size the system page size, each page should
+ * point to exactly this much memory.
*/
- Memory_Pool(uint8_t* pool,
- size_t page_count,
+ Memory_Pool(const std::vector<void*>& pages,
size_t page_size);
~Memory_Pool();
@@ -50,9 +49,6 @@ class BOTAN_TEST_API Memory_Pool final
std::deque<uint8_t*> m_free_pages;
std::map<size_t, std::deque<Bucket>> m_buckets_for;
-
- uint8_t* m_pool = nullptr;
- size_t m_pool_size = 0;
};
}
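Taken together with the locking_allocator changes earlier in this diff, the new constructor is used roughly like this. This is a sketch of a hypothetical caller (Memory_Pool is an internal, BOTAN_TEST_API type), mirroring what mlock_allocator now does:

   #include <botan/internal/mem_pool.h>
   #include <botan/internal/os_utils.h>

   void pool_example()
      {
      using namespace Botan;

      const size_t page_size = OS::system_page_size();

      // May return fewer than 32 pages, or none at all
      std::vector<void*> pages = OS::allocate_locked_pages(32);

      if(pages.empty())
         return;

      {
      Memory_Pool pool(pages, page_size);

      void* p = pool.allocate(64);   // served from the 64-byte bucket
      if(p)
         pool.deallocate(p, 64);     // the caller passes the size back
      }
      // The pool must be destroyed first: its destructor re-enables access
      // to free-list pages so they can be scrubbed and released here.
      OS::free_locked_pages(pages);
      }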
diff --git a/src/lib/utils/os_utils.cpp b/src/lib/utils/os_utils.cpp
index e5267cad6..172fc27be 100644
--- a/src/lib/utils/os_utils.cpp
+++ b/src/lib/utils/os_utils.cpp
@@ -11,6 +11,7 @@
#include <botan/exceptn.h>
#include <botan/mem_ops.h>
+#include <algorithm>
#include <chrono>
#include <cstdlib>
@@ -306,18 +307,11 @@ size_t OS::get_memory_locking_limit()
// But the information in the book seems to be inaccurate/outdated
// I've tested this on Windows 8.1 x64, Windows 10 x64 and Windows 7 x86
// On all three OS the value is 11 instead of 8
- size_t overhead = OS::system_page_size() * 11ULL;
+ const size_t overhead = OS::system_page_size() * 11;
if(working_min > overhead)
{
- size_t lockable_bytes = working_min - overhead;
- if(lockable_bytes < (BOTAN_MLOCK_ALLOCATOR_MAX_LOCKED_KB * 1024ULL))
- {
- return lockable_bytes;
- }
- else
- {
- return BOTAN_MLOCK_ALLOCATOR_MAX_LOCKED_KB * 1024ULL;
- }
+ const size_t lockable_bytes = working_min - overhead;
+ return std::min<size_t>(lockable_bytes, BOTAN_MLOCK_ALLOCATOR_MAX_LOCKED_KB * 1024);
}
#endif
@@ -333,73 +327,106 @@ const char* OS::read_env_variable(const std::string& name)
return std::getenv(name.c_str());
}
-void* OS::allocate_locked_pages(size_t length)
+std::vector<void*> OS::allocate_locked_pages(size_t count)
{
-#if defined(BOTAN_TARGET_OS_HAS_POSIX1) && defined(BOTAN_TARGET_OS_HAS_POSIX_MLOCK)
+ std::vector<void*> result;
+ result.reserve(count);
const size_t page_size = OS::system_page_size();
- if(length % page_size != 0)
- return nullptr;
+ for(size_t i = 0; i != count; ++i)
+ {
+ void* ptr = nullptr;
- void* ptr = nullptr;
- int rc = ::posix_memalign(&ptr, page_size, length);
+#if defined(BOTAN_TARGET_OS_HAS_POSIX1) && defined(BOTAN_TARGET_OS_HAS_POSIX_MLOCK)
+ int rc = ::posix_memalign(&ptr, page_size, 2*page_size);
- if(rc != 0 || ptr == nullptr)
- return nullptr;
+ if(rc != 0 || ptr == nullptr)
+ continue;
+
+ // failed to lock
+ if(::mlock(ptr, page_size) != 0)
+ {
+ std::free(ptr);
+ continue;
+ }
#if defined(MADV_DONTDUMP)
- ::madvise(ptr, length, MADV_DONTDUMP);
+ // we ignore errors here, as DONTDUMP is just a bonus
+ ::madvise(ptr, page_size, MADV_DONTDUMP);
#endif
- if(::mlock(ptr, length) != 0)
- {
- std::free(ptr);
- return nullptr; // failed to lock
- }
+#elif defined(BOTAN_TARGET_OS_HAS_VIRTUAL_LOCK)
+ ptr = ::VirtualAlloc(nullptr, 2*page_size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
- ::memset(ptr, 0, length);
+ if(ptr == nullptr)
+ continue;
- return ptr;
-#elif defined(BOTAN_TARGET_OS_HAS_VIRTUAL_LOCK)
- LPVOID ptr = ::VirtualAlloc(nullptr, length, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
- if(!ptr)
- {
- return nullptr;
- }
+ if(::VirtualLock(ptr, page_size) == 0)
+ {
+ ::VirtualFree(ptr, 0, MEM_RELEASE);
+ continue;
+ }
+#endif
- if(::VirtualLock(ptr, length) == 0)
- {
- ::VirtualFree(ptr, 0, MEM_RELEASE);
- return nullptr; // failed to lock
+ if(ptr != nullptr)
+ {
+ // Make guard page following the data page
+ page_prohibit_access(static_cast<uint8_t*>(ptr) + page_size);
+
+ std::memset(ptr, 0, page_size);
+ result.push_back(ptr);
+ }
}
- return ptr;
-#else
- BOTAN_UNUSED(length);
- return nullptr; /* not implemented */
+ return result;
+ }
+
+void OS::page_allow_access(void* page)
+ {
+ const size_t page_size = OS::system_page_size();
+#if defined(BOTAN_TARGET_OS_HAS_POSIX1)
+ ::mprotect(page, page_size, PROT_READ | PROT_WRITE);
+#elif defined(BOTAN_TARGET_OS_HAS_VIRTUAL_LOCK)
+ DWORD old_perms = 0;
+ ::VirtualProtect(page, page_size, PAGE_READWRITE, &old_perms);
+ BOTAN_UNUSED(old_perms);
+#endif
+ }
+
+void OS::page_prohibit_access(void* page)
+ {
+ const size_t page_size = OS::system_page_size();
+#if defined(BOTAN_TARGET_OS_HAS_POSIX1)
+ ::mprotect(page, page_size, PROT_NONE);
+#elif defined(BOTAN_TARGET_OS_HAS_VIRTUAL_LOCK)
+ DWORD old_perms = 0;
+ ::VirtualProtect(page, page_size, PAGE_NOACCESS, &old_perms);
+ BOTAN_UNUSED(old_perms);
#endif
}
-void OS::free_locked_pages(void* ptr, size_t length)
+void OS::free_locked_pages(const std::vector<void*>& pages)
{
- if(ptr == nullptr || length == 0)
- return;
+ const size_t page_size = OS::system_page_size();
-#if defined(BOTAN_TARGET_OS_HAS_POSIX1) && defined(BOTAN_TARGET_OS_HAS_POSIX_MLOCK)
- secure_scrub_memory(ptr, length);
- ::munlock(ptr, length);
- std::free(ptr);
+ for(size_t i = 0; i != pages.size(); ++i)
+ {
+ void* ptr = pages[i];
-#elif defined(BOTAN_TARGET_OS_HAS_VIRTUAL_LOCK)
- secure_scrub_memory(ptr, length);
- ::VirtualUnlock(ptr, length);
- ::VirtualFree(ptr, 0, MEM_RELEASE);
+ secure_scrub_memory(ptr, page_size);
-#else
- // Invalid argument because no way this pointer was allocated by us
- throw Invalid_Argument("Invalid ptr to free_locked_pages");
+ // ptr points to the data page, guard page follows
+ page_allow_access(static_cast<uint8_t*>(ptr) + page_size);
+
+#if defined(BOTAN_TARGET_OS_HAS_POSIX1) && defined(BOTAN_TARGET_OS_HAS_POSIX_MLOCK)
+ ::munlock(ptr, page_size);
+ std::free(ptr);
+#elif defined(BOTAN_TARGET_OS_HAS_VIRTUAL_LOCK)
+ ::VirtualUnlock(ptr, page_size);
+ ::VirtualFree(ptr, 0, MEM_RELEASE);
#endif
+ }
}
#if defined(BOTAN_TARGET_OS_HAS_POSIX1) && !defined(BOTAN_TARGET_OS_IS_EMSCRIPTEN)
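Stripped of the loop and the Windows branch, the POSIX allocation pattern above is: reserve two aligned pages, mlock only the first, and revoke access to the second so overruns fault. A standalone sketch, with error paths trimmed and the madvise bonus omitted:

   #include <sys/mman.h>
   #include <cstdlib>
   #include <cstring>
   #include <unistd.h>

   // Returns one zeroed, locked data page; the page after it is a guard page.
   void* locked_page_with_guard(size_t page_size)
      {
      void* ptr = nullptr;
      if(::posix_memalign(&ptr, page_size, 2 * page_size) != 0 || ptr == nullptr)
         return nullptr;

      if(::mlock(ptr, page_size) != 0)
         {
         std::free(ptr);
         return nullptr;
         }

      // A write running off the end of the data page hits PROT_NONE and faults
      ::mprotect(static_cast<char*>(ptr) + page_size, page_size, PROT_NONE);

      std::memset(ptr, 0, page_size);
      return ptr;
      }

Note the release path must undo this: free_locked_pages above calls page_allow_access on the guard page before std::free, since the heap may legitimately reuse that second page.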
diff --git a/src/lib/utils/os_utils.h b/src/lib/utils/os_utils.h
index 1ead2f9d1..6ec64b2fd 100644
--- a/src/lib/utils/os_utils.h
+++ b/src/lib/utils/os_utils.h
@@ -11,6 +11,7 @@
#include <botan/types.h>
#include <functional>
#include <string>
+#include <vector>
namespace Botan {
@@ -89,19 +90,37 @@ size_t system_page_size();
const char* read_env_variable(const std::string& var_name);
/**
-* Request so many bytes of page-aligned RAM locked into memory using
-* mlock, VirtualLock, or similar. Returns null on failure. The memory
-* returned is zeroed. Free it with free_locked_pages.
-* @param length requested allocation in bytes
+* Request @count pages of RAM which are locked into memory using mlock,
+* VirtualLock, or some similar OS specific API. Free it with free_locked_pages.
+*
+* Returns an empty list on failure. This function is allowed to return fewer
+* than @count pages.
+*
+* The contents of the allocated pages are undefined.
+*
+* Each page is followed by a guard page which is marked noaccess,
+* such that accessing it will cause a crash. This turns out-of-bounds
+* reads/writes into crash events.
+*
+* @param count requested number of locked pages
*/
-void* allocate_locked_pages(size_t length);
+std::vector<void*> allocate_locked_pages(size_t count);
/**
* Free memory allocated by allocate_locked_pages
-* @param ptr a pointer returned by allocate_locked_pages
-* @param length length passed to allocate_locked_pages
+* @param pages a list of pages returned by allocate_locked_pages
+*/
+void free_locked_pages(const std::vector<void*>& pages);
+
+/**
+* Set the MMU to prohibit access to this page
+*/
+void page_prohibit_access(void* page);
+
+/**
+* Set the MMU to allow R/W access to this page
*/
-void free_locked_pages(void* ptr, size_t length);
+void page_allow_access(void* page);
/**
* Run a probe instruction to test for support for a CPU instruction.