8 #if defined(HAVE_CONFIG_H)
16 #define _WIN32_WINNT 0x0501
17 #define WIN32_LEAN_AND_MEAN 1
24 #include <sys/resource.h>
/** Round x up to the next multiple of align.
 *
 * @param x      Value to round up.
 * @param align  Alignment granularity; MUST be a power of two, because the
 *               mask trick below only works for powers of two.
 * @return Smallest multiple of align that is >= x.
 *
 * Note: (x + align - 1) can wrap around for x near SIZE_MAX; callers pass
 * page-granular sizes well below that.
 * (Braces restored; they were lost in extraction.)
 */
static inline size_t align_up(size_t x, size_t align)
{
    return (x + align - 1) & ~(align - 1);
}
// Arena constructor member-initializer list (fragment: the signature and body
// were dropped in extraction). Records the managed span as [base, end) and
// the minimum chunk alignment for this arena.
51 base(static_cast<char*>(base_in)), end(static_cast<char*>(base_in) + size_in), alignment(alignment_in)
// Arena::alloc fragment (extraction dropped the best-fit lookup, braces and
// several lines). Visible behavior: carve `size` bytes off the END of the
// selected free chunk, register the carved piece as used, and keep the
// remainder indexed by its end address.
// Bytes left over in the chosen free chunk after this allocation.
81 const size_t size_remaining = size_ptr_it->first - size;
// Record the allocated piece: key = start address of the used chunk, value = size.
82 auto alloced =
chunks_used.emplace(size_ptr_it->second + size_remaining, size).first;
// Exact fit: no remainder to re-index (branch body not visible here).
84 if (size_ptr_it->first == size) {
// Remainder stays free: map its end address to its size_to_free_chunk node
// (presumably `it_remaining` was created in an elided line — TODO confirm).
91 chunks_free_end.emplace(size_ptr_it->second + size_remaining, it_remaining);
// Return the start address of the newly used chunk to the caller.
95 return reinterpret_cast<void*
>(alloced->first);
// Arena::free fragment (extraction dropped braces and several lines).
// Visible behavior: null is handled specially (body elided — presumably a
// no-op return; TODO confirm), the pointer must be a currently used chunk,
// and the freed chunk is coalesced with adjacent free chunks.
101 if (ptr ==
nullptr) {
// Look up the chunk by its start address in the used-chunk map.
106 auto i =
chunks_used.find(
static_cast<char*
>(ptr));
// Unknown pointer or already freed: fail loudly rather than corrupt state.
108 throw std::runtime_error(
"Arena: invalid or double free");
// Copy out (start, size) of the chunk being freed.
110 std::pair<char*, size_t> freed = *i;
// Coalesce left: extend the freed span backwards over the preceding free
// chunk (`prev` located in an elided line; ->second->first is its size).
116 freed.first -= prev->second->first;
117 freed.second += prev->second->first;
// Coalesce right: a free chunk starting exactly where the freed span ends.
123 auto next =
chunks_free.find(freed.first + freed.second);
125 freed.second += next->second->first;
// Arena::stats fragment (loop headers elided). Sums chunk sizes:
// used-chunk map stores the size directly as the mapped value...
140 r.used += chunk.second;
// ...while the free-chunk map stores a node whose ->first is the size.
142 r.free += chunk.second->first;
// Total arena capacity is everything accounted for, used plus free.
143 r.total = r.used + r.free;
/** Debug helper: print one chunk as a single line
 *  "0x<addr> 0x<size> 0x<used-flag>" to standard output.
 *
 * @param base  Start address of the chunk.
 * @param sz    Chunk size in bytes (printed as zero-padded 16-digit hex).
 * @param used  true for a used chunk, false for a free chunk.
 *
 * (Reconstructed: the leading `std::cout <<` and closing brace were lost in
 * extraction; stream chain otherwise identical to the visible fragment.)
 */
static void printchunk(void* base, size_t sz, bool used) {
    std::cout <<
        "0x" << std::hex << std::setw(16) << std::setfill('0') << base <<
        " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz <<
        " 0x" << used << std::endl;
}
// Arena::walk fragment: debug dump of all chunks to stdout. The two range-for
// headers were lost in extraction — presumably one over chunks_used and one
// over chunks_free (TODO confirm against the full source).
154 void Arena::walk()
const
// Used chunks: mapped value is the size itself.
157 printchunk(chunk.first, chunk.second,
true);
158 std::cout << std::endl;
// Free chunks: mapped value is a node whose ->first is the size.
160 printchunk(chunk.first, chunk.second->first,
false);
161 std::cout << std::endl;
174 Win32LockedPageAllocator();
182 Win32LockedPageAllocator::Win32LockedPageAllocator()
185 SYSTEM_INFO sSysInfo;
186 GetSystemInfo(&sSysInfo);
187 page_size = sSysInfo.dwPageSize;
// Win32 AllocateLocked fragment (extraction dropped the null-check and
// return lines between the VirtualAlloc and VirtualLock calls — TODO confirm
// addr is checked before locking and returned to the caller).
189 void *Win32LockedPageAllocator::AllocateLocked(
size_t len,
bool *lockingSuccess)
// Round the request up to whole pages, as required for page-level locking.
191 len = align_up(len, page_size);
// Reserve and commit read/write pages in one call.
192 void *addr = VirtualAlloc(
nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
// Attempt to pin the pages; failure is reported via the out-parameter
// rather than failing the allocation.
198 *lockingSuccess = VirtualLock(
const_cast<void*
>(addr), len) != 0;
// Win32 FreeLocked fragment (extraction dropped intervening lines — the
// elided code presumably cleanses and releases the pages after unlocking;
// TODO confirm VirtualFree is called there).
202 void Win32LockedPageAllocator::FreeLocked(
void* addr,
size_t len)
// Use the same page-rounded length that AllocateLocked locked.
204 len = align_up(len, page_size);
// Unpin the pages so the OS may swap them again.
206 VirtualUnlock(
const_cast<void*
>(addr), len);
209 size_t Win32LockedPageAllocator::GetLimit()
212 return std::numeric_limits<size_t>::max();
237 #if defined(PAGESIZE)
246 #ifndef MAP_ANONYMOUS
247 #define MAP_ANONYMOUS MAP_ANON
// POSIX AllocateLocked fragment (signature and surrounding lines elided).
// Visible behavior: anonymous private mapping, best-effort mlock, and
// exclusion of the pages from core dumps where the platform supports it.
254 addr = mmap(
nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|
MAP_ANONYMOUS, -1, 0);
// Mapping failure branch (body elided — presumably returns null; TODO confirm).
255 if (addr == MAP_FAILED) {
// Pin the pages; mlock failure is reported, not fatal.
259 *lockingSuccess = mlock(addr, len) == 0;
// Keep secrets out of core dumps: Linux spells it MADV_DONTDUMP,
// FreeBSD spells it MADV_NOCORE.
260 #if defined(MADV_DONTDUMP)
261 madvise(addr, len, MADV_DONTDUMP);
262 #elif defined(MADV_NOCORE)
263 madvise(addr, len, MADV_NOCORE);
// POSIX GetLimit fragment (signature and rlim declaration elided).
// Returns the soft RLIMIT_MEMLOCK cap when it is finite, otherwise
// "unlimited".
277 #ifdef RLIMIT_MEMLOCK
279 if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
// Only a finite soft limit constrains us.
280 if (rlim.rlim_cur != RLIM_INFINITY) {
281 return rlim.rlim_cur;
// No limit available (or RLIMIT_MEMLOCK undefined on this platform).
285 return std::numeric_limits<size_t>::max();
// LockedPool constructor member-initializer list (fragment: signature and
// body elided). Takes ownership of the page allocator, stores the
// locking-failed callback, and starts the locked-byte counter at zero.
293 allocator(
std::move(allocator_in)), lf_cb(lf_cb_in), cumulative_bytes_locked(0)
// LockedPool::alloc fragment (size validation and new-arena path elided).
// All pool state is guarded by `mutex` for the duration of the call.
302 std::lock_guard<std::mutex> lock(
mutex);
// First-fit over existing arenas.
309 for (
auto &arena:
arenas) {
310 void *addr = arena.alloc(size);
// All arenas full: the elided lines presumably grew `arenas` via
// new_arena before this allocates from the newest one — TODO confirm.
317 return arenas.back().alloc(size);
// LockedPool::free fragment (loop body elided). Under the pool mutex,
// finds the arena whose address range contains ptr and delegates the free;
// an address belonging to no arena is a caller bug and throws.
324 std::lock_guard<std::mutex> lock(
mutex);
327 for (
auto &arena:
arenas) {
// Containment test: does ptr fall inside this arena's [base, end) span?
328 if (arena.addressInArena(ptr)) {
// Reached only when no arena claimed the pointer.
333 throw std::runtime_error(
"LockedPool: invalid address not pointing to any arena");
// LockedPool::stats fragment (accumulation body elided). Takes the pool
// mutex, then presumably aggregates per-arena Arena::stats() results into
// pool-wide totals — TODO confirm against the full source.
338 std::lock_guard<std::mutex> lock(
mutex);
340 for (
const auto &arena:
arenas) {
// LockedPool::new_arena fragment. Clamps the requested arena size to the
// allocator's lockable-memory limit, then asks the OS-specific allocator
// for locked pages (`locked` receives whether pinning succeeded).
361 size = std::min(size, limit);
364 void *addr =
allocator->AllocateLocked(size, &locked);
// LockedPageArena constructor member-initializer list (fragment). Builds
// the underlying Arena over the locked pages and remembers base, size and
// the allocator so the pages can be released later.
381 Arena(base_in, size_in, align_in), base(base_in), size(size_in), allocator(allocator_in)
// LockedPoolManager::CreateInstance fragment (Win32 branch): selects the
// OS-specific page allocator behind the LockedPageAllocator interface.
// Ownership passes into the manager via the unique_ptr.
411 std::unique_ptr<LockedPageAllocator>
allocator(
new Win32LockedPageAllocator());
char * base
Base address of arena.
size_t alignment
Minimum chunk alignment.
ChunkToSizeMap chunks_free_end
Map from end of free chunk to its node in size_to_free_chunk.
void * alloc(size_t size)
Allocate size bytes from this arena.
std::unordered_map< char *, size_t > chunks_used
Map from begin of used chunk to its size.
SizeToChunkSortedMap size_to_free_chunk
Map to enable O(log(n)) best-fit allocation, as it's sorted by size.
Arena(void *base, size_t size, size_t alignment)
Stats stats() const
Get arena usage statistics.
ChunkToSizeMap chunks_free
Map from begin of free chunk to its node in size_to_free_chunk.
void free(void *ptr)
Free a previously allocated chunk of memory.
OS-dependent allocation and deallocation of locked/pinned memory pages.
virtual void * AllocateLocked(size_t len, bool *lockingSuccess)=0
Allocate and lock memory pages.
virtual void FreeLocked(void *addr, size_t len)=0
Unlock and free memory pages.
virtual size_t GetLimit()=0
Get the total limit on the amount of memory that may be locked by this process, in bytes.
LockedPageArena(LockedPageAllocator *alloc_in, void *base_in, size_t size, size_t align)
Pool for locked memory chunks.
void free(void *ptr)
Free a previously allocated chunk of memory.
Stats stats() const
Get pool usage statistics.
std::unique_ptr< LockedPageAllocator > allocator
void * alloc(size_t size)
Allocate size bytes from this pool.
size_t cumulative_bytes_locked
LockingFailed_Callback lf_cb
std::list< LockedPageArena > arenas
bool new_arena(size_t size, size_t align)
static const size_t ARENA_ALIGN
Chunk alignment.
static const size_t ARENA_SIZE
Size of one arena of locked memory.
std::mutex mutex
Mutex protects access to this pool's data structures, including arenas.
LockedPool(std::unique_ptr< LockedPageAllocator > allocator, LockingFailed_Callback lf_cb_in=0)
Create a new LockedPool.
Singleton class to keep track of locked (ie, non-swappable) memory, for use in std::allocator templates.
LockedPoolManager(std::unique_ptr< LockedPageAllocator > allocator)
static std::once_flag init_flag
static bool LockingFailed()
Called when locking fails, warn the user here.
static LockedPoolManager * _instance
static void CreateInstance()
Create a new LockedPoolManager specialized to the OS.
LockedPageAllocator specialized for OSes that don't try to be special snowflakes.
size_t GetLimit()
Get the total limit on the amount of memory that may be locked by this process, in bytes.
void FreeLocked(void *addr, size_t len)
Unlock and free memory pages.
void * AllocateLocked(size_t len, bool *lockingSuccess)
Allocate and lock memory pages.
PosixLockedPageAllocator()
void memory_cleanse(void *ptr, size_t len)