Diffstat (limited to 'src')
-rw-r--r-- | src/lib/utils/mem_ops.cpp  | 39
-rw-r--r-- | src/lib/utils/os_utils.cpp | 28
2 files changed, 27 insertions, 40 deletions
diff --git a/src/lib/utils/mem_ops.cpp b/src/lib/utils/mem_ops.cpp
deleted file mode 100644
index c81d4fac2..000000000
--- a/src/lib/utils/mem_ops.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
-* Memory Scrubbing
-* (C) 2012,2015,2016 Jack Lloyd
-*
-* Botan is released under the Simplified BSD License (see license.txt)
-*/
-
-#include <botan/mem_ops.h>
-
-#if defined(BOTAN_TARGET_OS_HAS_RTLSECUREZEROMEMORY)
-  #define NOMINMAX 1
-  #include <windows.h>
-#endif
-
-namespace Botan {
-
-void secure_scrub_memory(void* ptr, size_t n)
-   {
-#if defined(BOTAN_TARGET_OS_HAS_RTLSECUREZEROMEMORY)
-   ::RtlSecureZeroMemory(ptr, n);
-#elif defined(BOTAN_USE_VOLATILE_MEMSET_FOR_ZERO) && (BOTAN_USE_VOLATILE_MEMSET_FOR_ZERO == 1)
-   /*
-   Call memset through a static volatile pointer, which the compiler
-   should not elide. This construct should be safe in conforming
-   compilers, but who knows. I did confirm that on x86-64 GCC 6.1 and
-   Clang 3.8 both create code that saves the memset address in the
-   data segment and uncondtionally loads and jumps to that address.
-   */
-   static void* (*const volatile memset_ptr)(void*, int, size_t) = std::memset;
-   (memset_ptr)(ptr, 0, n);
-#else
-   volatile uint8_t* p = reinterpret_cast<volatile uint8_t*>(ptr);
-
-   for(size_t i = 0; i != n; ++i)
-      p[i] = 0;
-#endif
-   }
-
-}
diff --git a/src/lib/utils/os_utils.cpp b/src/lib/utils/os_utils.cpp
index b71568328..8b27fff0e 100644
--- a/src/lib/utils/os_utils.cpp
+++ b/src/lib/utils/os_utils.cpp
@@ -34,7 +34,7 @@
   #include <netinet/in.h>
   #include <netdb.h>
 
-#elif defined(BOTAN_TARGET_OS_IS_WINDOWS) || defined(BOTAN_TARGET_OS_IS_MINGW)
+#elif defined(BOTAN_TARGET_OS_TYPE_IS_WINDOWS)
   #define NOMINMAX 1
   #include <winsock2.h>
   #include <WS2tcpip.h>
@@ -277,6 +277,32 @@ OS::open_socket(const std::string& hostname,
 #endif
    }
 
+// Not defined in OS namespace for historical reasons
+void secure_scrub_memory(void* ptr, size_t n)
+   {
+   // TODO support explicit_bzero
+
+#if defined(BOTAN_TARGET_OS_HAS_RTLSECUREZEROMEMORY)
+   ::RtlSecureZeroMemory(ptr, n);
+
+#elif defined(BOTAN_USE_VOLATILE_MEMSET_FOR_ZERO) && (BOTAN_USE_VOLATILE_MEMSET_FOR_ZERO == 1)
+   /*
+   Call memset through a static volatile pointer, which the compiler
+   should not elide. This construct should be safe in conforming
+   compilers, but who knows. I did confirm that on x86-64 GCC 6.1 and
+   Clang 3.8 both create code that saves the memset address in the
+   data segment and uncondtionally loads and jumps to that address.
+   */
+   static void* (*const volatile memset_ptr)(void*, int, size_t) = std::memset;
+   (memset_ptr)(ptr, 0, n);
+#else
+
+   volatile uint8_t* p = reinterpret_cast<volatile uint8_t*>(ptr);
+
+   for(size_t i = 0; i != n; ++i)
+      p[i] = 0;
+#endif
+   }
 
 uint32_t OS::get_process_id()
    {
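
For readers who want to try the portable fallback outside the Botan tree, a minimal self-contained sketch of the same volatile-function-pointer idea follows; scrub() and the key buffer are illustrative names only, not part of Botan's API.

#include <cstring>
#include <cstddef>

// Route memset through a const volatile function pointer. Because the compiler
// cannot assume what the pointer holds at the call site, it cannot treat the
// final write as dead and elide it.
static void* (*const volatile memset_fn)(void*, int, std::size_t) = std::memset;

static void scrub(void* ptr, std::size_t n)   // illustrative helper, not Botan's secure_scrub_memory
   {
   memset_fn(ptr, 0, n);
   }

int main()
   {
   unsigned char key[32];
   std::memset(key, 0xAB, sizeof(key));   // stand-in for sensitive material
   // ... use the key ...
   scrub(key, sizeof(key));               // wipe before the buffer goes out of scope
   return 0;
   }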
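
The relocated function also gains a "// TODO support explicit_bzero" note. Purely as a hedged sketch of what that could look like (it is not part of this commit), an extra branch might sit ahead of the existing ones; the macro name BOTAN_TARGET_OS_HAS_EXPLICIT_BZERO and the function name below are assumed for illustration, and explicit_bzero() itself exists only on platforms such as glibc 2.25+, OpenBSD, and FreeBSD, declared in <string.h>.

#include <string.h>   // declares explicit_bzero on supporting platforms
#include <cstddef>
#include <cstdint>

// Hedged sketch only: BOTAN_TARGET_OS_HAS_EXPLICIT_BZERO is an assumed macro
// name, not one introduced by this commit.
void scrub_with_explicit_bzero(void* ptr, std::size_t n)
   {
#if defined(BOTAN_TARGET_OS_HAS_EXPLICIT_BZERO)
   // glibc 2.25+, OpenBSD and FreeBSD document this call as never optimized away
   ::explicit_bzero(ptr, n);
#else
   // otherwise fall back to the volatile byte loop from the diff above
   volatile std::uint8_t* p = reinterpret_cast<volatile std::uint8_t*>(ptr);
   for(std::size_t i = 0; i != n; ++i)
      p[i] = 0;
#endif
   }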