aboutsummaryrefslogtreecommitdiffstats
path: root/src/lib/alloc
diff options
context:
space:
mode:
authorlloyd <[email protected]>2014-04-05 12:05:49 +0000
committerlloyd <[email protected]>2014-04-05 12:05:49 +0000
commit93b196d09ab89fe057ffb7fb32914eb5718514df (patch)
tree3557c4bdb9966720ec0cf352dc1e56bd4468ab09 /src/lib/alloc
parent763dd827af5c1a481963316ab602141723432e49 (diff)
NetBSD portability fix and some performance tweaks in locking allocator
Diffstat (limited to 'src/lib/alloc')
-rw-r--r--src/lib/alloc/locking_allocator/locking_allocator.cpp12
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/src/lib/alloc/locking_allocator/locking_allocator.cpp b/src/lib/alloc/locking_allocator/locking_allocator.cpp
index 84ccc73a8..8e3f7e142 100644
--- a/src/lib/alloc/locking_allocator/locking_allocator.cpp
+++ b/src/lib/alloc/locking_allocator/locking_allocator.cpp
@@ -17,6 +17,12 @@ namespace Botan {
namespace {
+/**
+* Requests for objects of sizeof(T) will be aligned at
+* sizeof(T)*ALIGNMENT_MULTIPLE bytes.
+*/
+const size_t ALIGNMENT_MULTIPLE = 2;
+
size_t mlock_limit()
{
/*
@@ -74,7 +80,7 @@ void* mlock_allocator::allocate(size_t num_elems, size_t elem_size)
return nullptr;
const size_t n = num_elems * elem_size;
- const size_t alignment = elem_size;
+ const size_t alignment = ALIGNMENT_MULTIPLE * elem_size;
if(n / elem_size != num_elems)
return nullptr; // overflow!
@@ -216,6 +222,10 @@ mlock_allocator::mlock_allocator() :
#define MAP_NOCORE 0
#endif
+#if !defined(MAP_ANONYMOUS)
+ #define MAP_ANONYMOUS MAP_ANON
+#endif
+
if(m_poolsize)
{
m_pool = static_cast<byte*>(