Diffstat (limited to 'src/lib/alloc')
-rw-r--r--  src/lib/alloc/info.txt                                   3
-rw-r--r--  src/lib/alloc/locking_allocator/info.txt                 9
-rw-r--r--  src/lib/alloc/locking_allocator/locking_allocator.cpp  262
-rw-r--r--  src/lib/alloc/locking_allocator/locking_allocator.h     44
-rw-r--r--  src/lib/alloc/secmem.h                                  185
5 files changed, 503 insertions, 0 deletions
diff --git a/src/lib/alloc/info.txt b/src/lib/alloc/info.txt
new file mode 100644
index 000000000..0ab7fa768
--- /dev/null
+++ b/src/lib/alloc/info.txt
@@ -0,0 +1,3 @@
+<header:public>
+secmem.h
+</header:public>
diff --git a/src/lib/alloc/locking_allocator/info.txt b/src/lib/alloc/locking_allocator/info.txt
new file mode 100644
index 000000000..09b59406c
--- /dev/null
+++ b/src/lib/alloc/locking_allocator/info.txt
@@ -0,0 +1,9 @@
+define LOCKING_ALLOCATOR 20131128
+
+<os>
+linux
+freebsd
+netbsd
+openbsd
+solaris
+</os>
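
(The `define LOCKING_ALLOCATOR 20131128` line above is what makes the build emit a BOTAN_HAS_LOCKING_ALLOCATOR macro into build.h, which is exactly the macro secmem.h tests for further down in this patch. A minimal sketch of how code outside the module can check for it, assuming a generated build.h:)

#include <botan/build.h>

#if defined(BOTAN_HAS_LOCKING_ALLOCATOR)
   // the mlock-backed secure pool is available on this target
#endif
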
diff --git a/src/lib/alloc/locking_allocator/locking_allocator.cpp b/src/lib/alloc/locking_allocator/locking_allocator.cpp
new file mode 100644
index 000000000..13effbb09
--- /dev/null
+++ b/src/lib/alloc/locking_allocator/locking_allocator.cpp
@@ -0,0 +1,262 @@
+/*
+* Mlock Allocator
+* (C) 2012 Jack Lloyd
+*
+* Distributed under the terms of the Botan license
+*/
+
+#include <botan/locking_allocator.h>
+#include <botan/mem_ops.h>
+#include <algorithm>
+#include <sys/mman.h>
+#include <sys/resource.h>
+
+namespace Botan {
+
+namespace {
+
+size_t mlock_limit()
+ {
+ /*
+ * Linux defaults to only 64 KiB of mlockable memory per process
+ * (too small), while the BSDs offer a small fraction of total RAM
+ * (more than we need). Bound the total mlock size to 512 KiB,
+ * which is enough to run the entire test suite without spilling
+ * to non-mlocked memory (and thus presumably also enough for many
+ * useful programs), but small enough to not cause problems even
+ * if many processes are mlocking on the same machine.
+ */
+ const size_t MLOCK_UPPER_BOUND = 512*1024;
+
+ struct rlimit limits;
+ ::getrlimit(RLIMIT_MEMLOCK, &limits);
+
+ if(limits.rlim_cur < limits.rlim_max)
+ {
+ limits.rlim_cur = limits.rlim_max;
+ ::setrlimit(RLIMIT_MEMLOCK, &limits);
+ ::getrlimit(RLIMIT_MEMLOCK, &limits);
+ }
+
+ return std::min<size_t>(limits.rlim_cur, MLOCK_UPPER_BOUND);
+ }
+
+bool ptr_in_pool(const void* pool_ptr, size_t poolsize,
+ const void* buf_ptr, size_t bufsize)
+ {
+ const uintptr_t pool = reinterpret_cast<uintptr_t>(pool_ptr);
+ const uintptr_t buf = reinterpret_cast<uintptr_t>(buf_ptr);
+
+ if(buf < pool || buf >= pool + poolsize)
+ return false;
+
+ BOTAN_ASSERT(buf + bufsize <= pool + poolsize,
+ "Pointer does not partially overlap pool");
+
+ return true;
+ }
+
+size_t padding_for_alignment(size_t offset, size_t desired_alignment)
+ {
+ size_t mod = offset % desired_alignment;
+ if(mod == 0)
+ return 0; // already aligned
+ return desired_alignment - mod;
+ }
+
+} // anonymous namespace
+
+void* mlock_allocator::allocate(size_t num_elems, size_t elem_size)
+ {
+ if(!m_pool)
+ return nullptr;
+
+ const size_t n = num_elems * elem_size;
+ const size_t alignment = elem_size;
+
+ if(n / elem_size != num_elems)
+ return nullptr; // overflow!
+
+ if(n > m_poolsize || n > BOTAN_MLOCK_ALLOCATOR_MAX_ALLOCATION)
+ return nullptr;
+
+ std::lock_guard<std::mutex> lock(m_mutex);
+
+ auto best_fit = m_freelist.end();
+
+ for(auto i = m_freelist.begin(); i != m_freelist.end(); ++i)
+ {
+ // If we have a perfect fit, use it immediately
+ if(i->second == n && (i->first % alignment) == 0)
+ {
+ const size_t offset = i->first;
+ m_freelist.erase(i);
+ clear_mem(m_pool + offset, n);
+
+ BOTAN_ASSERT((reinterpret_cast<uintptr_t>(m_pool) + offset) % alignment == 0,
+ "Returning correctly aligned pointer");
+
+ return m_pool + offset;
+ }
+
+ if((i->second >= (n + padding_for_alignment(i->first, alignment)) &&
+ ((best_fit == m_freelist.end()) || (best_fit->second > i->second))))
+ {
+ best_fit = i;
+ }
+ }
+
+ if(best_fit != m_freelist.end())
+ {
+ const size_t offset = best_fit->first;
+
+ const size_t alignment_padding = padding_for_alignment(offset, alignment);
+
+ best_fit->first += n + alignment_padding;
+ best_fit->second -= n + alignment_padding;
+
+ // Need to realign, split the block
+ if(alignment_padding)
+ {
+ /*
+ If we used the entire block except for a small piece at the
+ beginning needed for alignment, just update the (now empty)
+ entry in place, as it is already in the correct location,
+ rather than erasing it and inserting a new range at the
+ same spot.
+ */
+ if(best_fit->second == 0)
+ {
+ best_fit->first = offset;
+ best_fit->second = alignment_padding;
+ }
+ else
+ m_freelist.insert(best_fit, std::make_pair(offset, alignment_padding));
+ }
+
+ clear_mem(m_pool + offset + alignment_padding, n);
+
+ BOTAN_ASSERT((reinterpret_cast<uintptr_t>(m_pool) + offset + alignment_padding) % alignment == 0,
+ "Returning correctly aligned pointer");
+
+ return m_pool + offset + alignment_padding;
+ }
+
+ return nullptr;
+ }
+
+bool mlock_allocator::deallocate(void* p, size_t num_elems, size_t elem_size)
+ {
+ if(!m_pool)
+ return false;
+
+ size_t n = num_elems * elem_size;
+
+ /*
+ allocate() returns nullptr on overflow, so we should never see
+ an overflow in a deallocation.
+ */
+ BOTAN_ASSERT(n / elem_size == num_elems,
+ "No overflow in deallocation");
+
+ if(!ptr_in_pool(m_pool, m_poolsize, p, n))
+ return false;
+
+ std::lock_guard<std::mutex> lock(m_mutex);
+
+ const size_t start = static_cast<byte*>(p) - m_pool;
+
+ auto comp = [](const std::pair<size_t, size_t>& x,
+ const std::pair<size_t, size_t>& y) { return x.first < y.first; };
+ auto i = std::lower_bound(m_freelist.begin(), m_freelist.end(),
+ std::make_pair(start, size_t(0)), comp);
+
+ // try to merge with later block
+ if(i != m_freelist.end() && start + n == i->first)
+ {
+ i->first = start;
+ i->second += n;
+ n = 0;
+ }
+
+ // try to merge with previous block
+ if(i != m_freelist.begin())
+ {
+ auto prev = std::prev(i);
+
+ if(prev->first + prev->second == start)
+ {
+ if(n)
+ {
+ prev->second += n;
+ n = 0;
+ }
+ else
+ {
+ // merge adjoining
+ prev->second += i->second;
+ m_freelist.erase(i);
+ }
+ }
+ }
+
+ if(n != 0) // no merge possible?
+ m_freelist.insert(i, std::make_pair(start, n));
+
+ return true;
+ }
+
+mlock_allocator::mlock_allocator() :
+ m_poolsize(mlock_limit()),
+ m_pool(nullptr)
+ {
+#if !defined(MAP_NOCORE)
+ #define MAP_NOCORE 0
+#endif
+
+ if(m_poolsize)
+ {
+ m_pool = static_cast<byte*>(
+ ::mmap(
+ nullptr, m_poolsize,
+ PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_SHARED | MAP_NOCORE,
+ -1, 0));
+
+ if(m_pool == static_cast<byte*>(MAP_FAILED))
+ {
+ m_pool = nullptr;
+ throw std::runtime_error("Failed to mmap locking_allocator pool");
+ }
+
+ clear_mem(m_pool, m_poolsize);
+
+ if(::mlock(m_pool, m_poolsize) != 0)
+ {
+ ::munmap(m_pool, m_poolsize);
+ m_pool = nullptr;
+ throw std::runtime_error("Could not mlock " + std::to_string(m_poolsize) + " bytes");
+ }
+
+ m_freelist.push_back(std::make_pair(0, m_poolsize));
+ }
+ }
+
+mlock_allocator::~mlock_allocator()
+ {
+ if(m_pool)
+ {
+ clear_mem(m_pool, m_poolsize);
+ ::munlock(m_pool, m_poolsize);
+ ::munmap(m_pool, m_poolsize);
+ m_pool = nullptr;
+ }
+ }
+
+mlock_allocator& mlock_allocator::instance()
+ {
+ static mlock_allocator mlock;
+ return mlock;
+ }
+
+}
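
(To make the interface above concrete, a minimal usage sketch follows. It is not part of the patch and assumes a build with this locking_allocator module enabled. allocate() hands back mlocked, zeroed, element-aligned memory, or nullptr when the pool is absent, exhausted, or the request is oversized; deallocate() reports whether the pointer came from the pool:)

#include <botan/locking_allocator.h>
#include <iostream>

int main()
   {
   Botan::mlock_allocator& pool = Botan::mlock_allocator::instance();

   // 16 elements of 8 bytes each; the result is mlocked, zeroed,
   // and aligned on an 8 byte boundary (alignment = elem_size)
   void* p = pool.allocate(16, 8);

   if(!p)
      {
      // pool missing (RLIMIT_MEMLOCK of 0), exhausted, or request
      // above BOTAN_MLOCK_ALLOCATOR_MAX_ALLOCATION
      std::cout << "mlock pool unavailable, need a fallback\n";
      return 1;
      }

   // ... store key material in p ...

   // returns the range to the freelist (merging with neighbors);
   // false would mean p was not pool-managed
   if(!pool.deallocate(p, 16, 8))
      std::cout << "pointer was not from the pool\n";

   return 0;
   }
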
diff --git a/src/lib/alloc/locking_allocator/locking_allocator.h b/src/lib/alloc/locking_allocator/locking_allocator.h
new file mode 100644
index 000000000..3bebea5f2
--- /dev/null
+++ b/src/lib/alloc/locking_allocator/locking_allocator.h
@@ -0,0 +1,44 @@
+/*
+* Mlock Allocator
+* (C) 2012 Jack Lloyd
+*
+* Distributed under the terms of the Botan license
+*/
+
+#ifndef BOTAN_MLOCK_ALLOCATOR_H__
+#define BOTAN_MLOCK_ALLOCATOR_H__
+
+#include <botan/types.h>
+#include <vector>
+#include <mutex>
+
+namespace Botan {
+
+class BOTAN_DLL mlock_allocator
+ {
+ public:
+ static mlock_allocator& instance();
+
+ void* allocate(size_t num_elems, size_t elem_size);
+
+ bool deallocate(void* p, size_t num_elems, size_t elem_size);
+
+ mlock_allocator(const mlock_allocator&) = delete;
+
+ mlock_allocator& operator=(const mlock_allocator&) = delete;
+
+ private:
+ mlock_allocator();
+
+ ~mlock_allocator();
+
+ const size_t m_poolsize;
+
+ std::mutex m_mutex;
+ std::vector<std::pair<size_t, size_t>> m_freelist;
+ byte* m_pool;
+ };
+
+}
+
+#endif
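
(One design note on this header: instance() returns a function-local static, so under C++11 the constructor, and with it the mmap/mlock sequence, is guaranteed to run exactly once even if several threads race on first use, while the deleted copy and assignment operators keep the pool from ever being duplicated. A tiny sketch of that guarantee, as a hypothetical test that is not part of the patch:)

#include <botan/locking_allocator.h>
#include <thread>

int main()
   {
   // both threads may perform the first call; C++11 "magic statics"
   // ensure the pool is constructed exactly once
   std::thread t1([] { Botan::mlock_allocator::instance(); });
   std::thread t2([] { Botan::mlock_allocator::instance(); });

   t1.join();
   t2.join();

   return 0;
   }
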
diff --git a/src/lib/alloc/secmem.h b/src/lib/alloc/secmem.h
new file mode 100644
index 000000000..2f4d65f33
--- /dev/null
+++ b/src/lib/alloc/secmem.h
@@ -0,0 +1,185 @@
+/*
+* Secure Memory Buffers
+* (C) 1999-2007,2012 Jack Lloyd
+*
+* Distributed under the terms of the Botan license
+*/
+
+#ifndef BOTAN_SECURE_MEMORY_BUFFERS_H__
+#define BOTAN_SECURE_MEMORY_BUFFERS_H__
+
+#include <botan/mem_ops.h>
+#include <algorithm>
+#include <vector>
+
+#if defined(BOTAN_HAS_LOCKING_ALLOCATOR)
+ #include <botan/locking_allocator.h>
+#endif
+
+namespace Botan {
+
+template<typename T>
+class secure_allocator
+ {
+ public:
+ typedef T value_type;
+
+ typedef T* pointer;
+ typedef const T* const_pointer;
+
+ typedef T& reference;
+ typedef const T& const_reference;
+
+ typedef std::size_t size_type;
+ typedef std::ptrdiff_t difference_type;
+
+ secure_allocator() noexcept {}
+
+ ~secure_allocator() noexcept {}
+
+ pointer address(reference x) const noexcept
+ { return std::addressof(x); }
+
+ const_pointer address(const_reference x) const noexcept
+ { return std::addressof(x); }
+
+ pointer allocate(size_type n, const void* = nullptr)
+ {
+#if defined(BOTAN_HAS_LOCKING_ALLOCATOR)
+ if(pointer p = static_cast<pointer>(mlock_allocator::instance().allocate(n, sizeof(T))))
+ return p;
+#endif
+
+ pointer p = new T[n];
+ clear_mem(p, n);
+ return p;
+ }
+
+ void deallocate(pointer p, size_type n)
+ {
+ clear_mem(p, n);
+
+#if defined(BOTAN_HAS_LOCKING_ALLOCATOR)
+ if(mlock_allocator::instance().deallocate(p, n, sizeof(T)))
+ return;
+#endif
+
+ delete [] p;
+ }
+
+ size_type max_size() const noexcept
+ {
+ return static_cast<size_type>(-1) / sizeof(T);
+ }
+
+ template<typename U, typename... Args>
+ void construct(U* p, Args&&... args)
+ {
+ ::new(static_cast<void*>(p)) U(std::forward<Args>(args)...);
+ }
+
+ template<typename U> void destroy(U* p) { p->~U(); }
+ };
+
+template<typename T> inline bool
+operator==(const secure_allocator<T>&, const secure_allocator<T>&)
+ { return true; }
+
+template<typename T> inline bool
+operator!=(const secure_allocator<T>&, const secure_allocator<T>&)
+ { return false; }
+
+template<typename T> using secure_vector = std::vector<T, secure_allocator<T>>;
+
+template<typename T>
+std::vector<T> unlock(const secure_vector<T>& in)
+ {
+ std::vector<T> out(in.size());
+ std::vector<T> out(in.size());
+ if(!in.empty()) copy_mem(&out[0], &in[0], in.size());
+ }
+
+template<typename T, typename Alloc>
+size_t buffer_insert(std::vector<T, Alloc>& buf,
+ size_t buf_offset,
+ const T input[],
+ size_t input_length)
+ {
+ const size_t to_copy = std::min(input_length, buf.size() - buf_offset);
+ copy_mem(&buf[buf_offset], input, to_copy);
+ return to_copy;
+ }
+
+template<typename T, typename Alloc, typename Alloc2>
+size_t buffer_insert(std::vector<T, Alloc>& buf,
+ size_t buf_offset,
+ const std::vector<T, Alloc2>& input)
+ {
+ const size_t to_copy = std::min(input.size(), buf.size() - buf_offset);
+ copy_mem(&buf[buf_offset], &input[0], to_copy);
+ return to_copy;
+ }
+
+template<typename T, typename Alloc, typename Alloc2>
+std::vector<T, Alloc>&
+operator+=(std::vector<T, Alloc>& out,
+ const std::vector<T, Alloc2>& in)
+ {
+ const size_t copy_offset = out.size();
+ out.resize(out.size() + in.size());
+ copy_mem(&out[copy_offset], &in[0], in.size());
+ return out;
+ }
+
+template<typename T, typename Alloc>
+std::vector<T, Alloc>& operator+=(std::vector<T, Alloc>& out, T in)
+ {
+ out.push_back(in);
+ return out;
+ }
+
+template<typename T, typename Alloc, typename L>
+std::vector<T, Alloc>& operator+=(std::vector<T, Alloc>& out,
+ const std::pair<const T*, L>& in)
+ {
+ const size_t copy_offset = out.size();
+ out.resize(out.size() + in.second);
+ copy_mem(&out[copy_offset], in.first, in.second);
+ return out;
+ }
+
+template<typename T, typename Alloc, typename L>
+std::vector<T, Alloc>& operator+=(std::vector<T, Alloc>& out,
+ const std::pair<T*, L>& in)
+ {
+ const size_t copy_offset = out.size();
+ out.resize(out.size() + in.second);
+ copy_mem(&out[copy_offset], in.first, in.second);
+ return out;
+ }
+
+/**
+* Zeroise the values; length remains unchanged
+* @param vec the vector to zeroise
+*/
+template<typename T, typename Alloc>
+void zeroise(std::vector<T, Alloc>& vec)
+ {
+ if(!vec.empty()) clear_mem(&vec[0], vec.size());
+ }
+
+/**
+* Zeroise the values then free the memory
+* @param vec the vector to zeroise and free
+*/
+template<typename T, typename Alloc>
+void zap(std::vector<T, Alloc>& vec)
+ {
+ zeroise(vec);
+ vec.clear();
+ vec.shrink_to_fit();
+ }
+
+}
+
+#endif
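
(Finally, a short sketch of how these pieces fit together, again not part of the patch and with arbitrary byte values. secure_vector behaves like std::vector, but its storage is wiped on release and, when the locking_allocator module is compiled in, served from the mlocked pool:)

#include <botan/secmem.h>

int main()
   {
   using Botan::byte;

   // backed by mlock_allocator when available; wiped before release
   Botan::secure_vector<byte> key(32);

   Botan::zeroise(key);          // zero the contents, length unchanged

   key += byte(0xAA);            // single-element append

   Botan::secure_vector<byte> extra(16);
   key += extra;                 // append another buffer

   // copy into an ordinary vector once locked storage is not needed
   std::vector<byte> plain = Botan::unlock(key);

   Botan::zap(key);              // zero, clear, and release the storage

   return 0;
   }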