mutable
A Database System for Research and Fast Prototyping
list_allocator.hpp
#pragma once

#include <algorithm>
#include <cerrno>
#include <climits>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <mutable/util/ADT.hpp>
#include <mutable/util/allocator.hpp>  /* assumed: provides the `allocator<>` CRTP base */
#include <stdexcept>

#if __linux
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#elif __APPLE__
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>
#endif


namespace m {

/** The allocation strategy: influences how the size of the next actual memory allocation evolves over time. */
enum class AllocationStrategy
{
    Linear,
    Exponential,
};

// forward declarations
struct list_allocator;

namespace {

/** A proxy allocator that forwards allocation requests for the chunk list's nodes to the owning `list_allocator`,
 * which emplaces the nodes directly inside the chunks it manages. */
struct list_allocator_proxy : allocator<list_allocator_proxy>
{
    friend struct ::m::list_allocator;

    using base_type = allocator<list_allocator_proxy>;
    using base_type::allocate;
    using base_type::deallocate;

    private:
    list_allocator *alloc_;

    public:
    list_allocator_proxy(list_allocator *alloc) : alloc_(M_notnull(alloc)) { }
    ~list_allocator_proxy() { }

    list_allocator * get_allocator() const { return alloc_; }

    void * allocate(size_type size, size_type alignment = 0);
    void deallocate(void *ptr, size_type size);
};

}

/** Implements a list allocator: unused (free) chunks of memory are kept in a list whose nodes are emplaced
 * directly within the chunks they manage. */
struct list_allocator : allocator<list_allocator>
{
    friend struct list_allocator_proxy;

    using base_type = allocator<list_allocator>;
    using base_type::allocate;
    using base_type::deallocate;

    private:
    /** The factor by which a chunk must be larger than the requested allocation to be considered for splitting. */
    static constexpr size_type CHUNK_SPLIT_FACTOR = 2;
    /** Mask of the high bit of a `size_type` (the type of the header of a used chunk). */
    static constexpr size_type DEALLOCATION_BIT_MASK = size_type(1U) << (sizeof(size_type) * CHAR_BIT - 1U);

    size_type min_allocation_size_;       ///< the size of the next actual memory allocation
    size_type min_allocation_alignment_;  ///< the minimum alignment of an actual memory allocation
    AllocationStrategy strategy_;         ///< the allocation strategy; influences how `min_allocation_size_` is updated with each actual allocation

    /* The exact container type is an assumption here: a doubly-linked list from `mutable/util/ADT.hpp` whose
     * elements are the (masked) chunk sizes and whose nodes are allocated through the proxy allocator. */
    using chunk_list_type = doubly_linked_list<size_type, list_allocator_proxy>;
    chunk_list_type chunks_;              ///< the list of unused (free) chunks

    /** The type of a list node, which is the "header" of an unused chunk. */
    using header_type = chunk_list_type::node_type;
    /** Used to override the behavior of allocation requests by `chunks_` by routing them through the proxy allocator. */
    void *override_addr_ = nullptr;

    public:
    friend void swap(list_allocator &first, list_allocator &second) {
        M_insist(first .override_addr_ == nullptr);
        M_insist(second.override_addr_ == nullptr);
        M_insist(first .chunks_.get_allocator().get_allocator() == &first);
        M_insist(second.chunks_.get_allocator().get_allocator() == &second);

        using std::swap;
        swap(first.min_allocation_size_, second.min_allocation_size_);
        swap(first.min_allocation_alignment_, second.min_allocation_alignment_);
        swap(first.strategy_, second.strategy_);
        swap(first.chunks_, second.chunks_);

        /* Update allocator proxies. */
        first .chunks_.get_allocator() = list_allocator_proxy(&first);
        second.chunks_.get_allocator() = list_allocator_proxy(&second);

        M_insist(first .override_addr_ == nullptr);
        M_insist(second.override_addr_ == nullptr);
    }

    list_allocator(size_type initial_size = 0, AllocationStrategy strategy = AllocationStrategy::Linear)
        : strategy_(strategy)
        , chunks_(list_allocator_proxy(this))
    {
        min_allocation_size_ = ceil_to_multiple_of_pow_2(
            initial_size ? initial_size : sizeof(header_type),
            get_pagesize()
        );
        min_allocation_alignment_ = std::max(alignof(header_type), get_pagesize());

        if (initial_size) {
            add_fresh_chunk(1, 0);
            M_insist(chunks_.size() == 1);
        } else {
            /* nothing to be done */
        }
    }

    ~list_allocator() {
        while (not chunks_.empty()) {
            auto it = chunks_.begin();
            header_type *header = header_of_unused_chunk(&*it);

            const bool marked_for_deallocation = is_marked_for_deallocation(header);
            override_addr_ = header; // don't deallocate when unlinking node
            chunks_.erase(it); // unlink node
            if (marked_for_deallocation)
                munmap(header, extract_size(header));
        }
    }

    /** Copy c'tor. */
    explicit list_allocator(const list_allocator &other)
        : list_allocator(other.min_allocation_size_, other.strategy_)
    { }

    list_allocator(list_allocator &&other) : list_allocator() { swap(*this, other); }

    list_allocator & operator=(list_allocator other) { swap(*this, other); return *this; }

    void * allocate(const size_type size, const size_type alignment = 0) {
        if (size == 0) return nullptr;

        M_insist(alignment == 0 or is_pow_2(alignment), "alignment must be 0 or a power of 2");

        /* Search for a suitable chunk. */
        auto it = chunks_.begin();
        for (; it != chunks_.end(); ++it) {
            /* Get the size of the chunk. */
            M_insist(*it > sizeof(size_type), "allocation of invalid size (too small)");
            header_type *header = header_of_unused_chunk(&*it);
            size_type usable_size = extract_size(header) - sizeof(size_type);

            /* Check chunk size and alignment. */
            if (usable_size < size or is_unaligned(header, alignment))
                continue;

            /* Found chunk. */
            goto found_chunk;
        }
        /* No chunk found, so add a fresh chunk. */
        M_insist(it == chunks_.end());
        it = add_fresh_chunk(size, alignment);

found_chunk:
        header_type *header = header_of_unused_chunk(&*it);
        size_type effective_size = extract_size(header);
        M_insist(effective_size >= size + sizeof(size_type));

#if 1
        /* See whether we can split the chunk. */
        if (effective_size >= sizeof(header_type) + size + sizeof(size_type) and
            size * CHUNK_SPLIT_FACTOR <= effective_size - sizeof(size_type))
        {
            it = split_unused_chunk(it, size);
            effective_size = extract_size(header);
            M_insist(effective_size >= size + sizeof(size_type));
        }
#endif

        unlink_chunk(it);
        size_of_used_chunk(header, size) = header->value_;
        return header;
    }
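    /* Illustration of the split heuristic in `allocate()` (numbers chosen for illustration only): with
     * CHUNK_SPLIT_FACTOR = 2, a request of 64 bytes served from a free chunk of effective size 4096 bytes satisfies
     * both conditions, so the chunk is split and the remainder stays in the free list; a request of 3000 bytes from
     * the same chunk fails the `size * CHUNK_SPLIT_FACTOR` test and consumes the chunk as a whole. */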

    void deallocate(void *ptr, const size_type size) {
        if (ptr == nullptr) return;
        const size_type masked_size = size_of_used_chunk(ptr, size);
        reclaim_chunk(ptr, masked_size);
    }

    size_type num_chunks_available() const { return chunks_.size(); }

M_LCOV_EXCL_START
    friend std::ostream & operator<<(std::ostream &out, const list_allocator &A) {
        out << "allocation size = " << A.min_allocation_size_
            << ", alignment = " << A.min_allocation_alignment_
            << ", available chunks: ";
        for (auto it = A.chunks_.begin(); it != A.chunks_.end(); ++it) {
            if (it != A.chunks_.begin())
                out << " -> ";
            const header_type *header = header_of_unused_chunk(&*it);
            out << '[';
            if (is_marked_for_deallocation(header))
                out << '*';
            out << ((void*) header) << ", " << extract_size(header) << "B]";
        }
        return out;
    }

    void dump(std::ostream &out) const { out << *this << std::endl; }
    void dump() const { dump(std::cerr); }
M_LCOV_EXCL_STOP

    private:
    /*==================================================================================================================
     * Helper functions
     *================================================================================================================*/

    static bool is_unaligned(void *ptr, size_type alignment) {
        M_insist(alignment == 0 or is_pow_2(alignment), "alignment must be 0 or a power of 2");
        return alignment == 0 ? false : reinterpret_cast<std::uintptr_t>(ptr) & (alignment - size_type(1));
    }

    static bool is_aligned(void *ptr, size_type alignment) { return not is_unaligned(ptr, alignment); }


    /*==================================================================================================================
     * Chunk operations
     *================================================================================================================*/

    /** Returns a reference to the effective size field of a chunk in use.  The field lives in the first suitably
     * aligned `size_type` slot directly behind the chunk's usable memory. */
    static size_type & size_of_used_chunk(void *chunk, const size_type size) {
        return *reinterpret_cast<size_type*>(
            ceil_to_multiple_of_pow_2(reinterpret_cast<std::uintptr_t>(chunk) + size, alignof(size_type))
        );
    }
    static size_type size_of_used_chunk(const void *chunk, const size_type size) {
        return *reinterpret_cast<const size_type*>(
            ceil_to_multiple_of_pow_2(reinterpret_cast<std::uintptr_t>(chunk) + size, alignof(size_type))
        );
    }
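    /* Illustration: for a used chunk starting at address `p` that was requested with size 24, the effective size is
     * stored at `ceil_to_multiple_of_pow_2(p + 24, alignof(size_type))`, i.e. in the first suitably aligned slot
     * directly behind the usable bytes.  `allocate()` writes the chunk's masked size there, so `deallocate()` can
     * recover it from nothing but the pointer and the requested size. */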

    /** Returns a pointer to the header of an unused chunk.  The header is the list node emplaced at the beginning
     * of the chunk; `value_addr` points to the node's value field, which stores the (masked) chunk size. */
    static header_type * header_of_unused_chunk(size_type *value_addr) {
        return reinterpret_cast<header_type*>(
            reinterpret_cast<uint8_t*>(value_addr) + sizeof(*value_addr) - sizeof(header_type)
        );
    }
    static const header_type * header_of_unused_chunk(const size_type *value_addr) {
        return reinterpret_cast<const header_type*>(
            reinterpret_cast<const uint8_t*>(value_addr) + sizeof(*value_addr) - sizeof(header_type)
        );
    }

    static bool is_marked_for_deallocation(const header_type *header) { return header->value_ & DEALLOCATION_BIT_MASK; }
    static size_type extract_size(const header_type *header) { return header->value_ & ~DEALLOCATION_BIT_MASK; }
    static size_type mark_for_deallocation(size_type n) { return n | DEALLOCATION_BIT_MASK; }
    static size_type preserve_deallocation(size_type n, const header_type *header) {
        return n | (header->value_ & DEALLOCATION_BIT_MASK);
    }
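    /* Illustration of the size encoding (assuming a 64-bit `size_type`): a chunk of 4096 bytes obtained directly
     * via `mmap()` stores 0x8000'0000'0000'1000 in its header; `extract_size()` yields 4096 and
     * `is_marked_for_deallocation()` yields `true`, so the destructor knows the chunk starts an `mmap()`ed region
     * that must be `munmap()`ed as a whole.  The remainder chunk produced by a split never carries the mark. */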

    /*==================================================================================================================
     * Allocation helpers
     *================================================================================================================*/

    /** Removes the referenced chunk from the list of available chunks *without* deallocating its memory. */
    chunk_list_type::iterator unlink_chunk(chunk_list_type::iterator it) {
        header_type *header = header_of_unused_chunk(&*it);
        override_addr_ = header; // unlink *without* deallocate
        return chunks_.erase(it);
    }

    /** Adds the given chunk to the list of unused chunks by emplacing the list node within the chunk's memory. */
    chunk_list_type::iterator link_chunk(chunk_list_type::iterator pos, void *chunk, size_type masked_size) {
        override_addr_ = chunk; // emplace in chunk
        pos = chunks_.insert(pos, masked_size);
        M_insist(pos != chunks_.end());
        M_insist(*pos == masked_size);
        return pos;
    }

    /** Inserts the given chunk into the list of unused chunks, keeping the list sorted by ascending address. */
    chunk_list_type::iterator insert_chunk_sorted(void *chunk, size_type masked_size) {
        auto it = chunks_.begin();
        for (; it != chunks_.end() and &*it < chunk; ++it);
        return link_chunk(it, chunk, masked_size);
    }

    /* Reclaims a chunk by adding it to the list of unused chunks and attempting to coalesce it with its immediate
     * neighbors. The chunk may have been used or be an entirely new allocation. */
    void reclaim_chunk(void *chunk, size_type masked_size) {
        auto pos = insert_chunk_sorted(chunk, masked_size);
        M_insist(pos != chunks_.end());
        M_insist(header_of_unused_chunk(&*pos)->value_ == masked_size);
        pos = coalesce(pos);
        M_insist(pos != chunks_.end());
        if (pos != chunks_.begin())
            coalesce(std::prev(pos));
    }

    /** Allocates fresh memory that contains enough space to fit a chunk of `size` bytes aligned according to
     * `alignment`, and links that chunk into the list of unused chunks. */
    chunk_list_type::iterator add_fresh_chunk(const size_type size, const size_type alignment) {
        M_insist(alignment == 0 or is_pow_2(alignment), "alignment must be a power of 2");

        /* Ensure the alignment satisfies the requirements of `header_type`. */
        const size_type effective_alignment = std::max(alignment, min_allocation_alignment_);
        M_insist(effective_alignment % min_allocation_alignment_ == 0, "size does not guarantee alignment");

        /* Ceil the size to a multiple of `header_type`'s alignment.  This avoids wasting space. */
        const size_type effective_size = ceil_to_multiple_of_pow_2(
            std::max(size + sizeof(size_type), min_allocation_size_),
            min_allocation_alignment_
        );

        /* Allocate the chunk with space for the header. */
        M_insist(override_addr_ == nullptr);
        void *chunk = aligned_mmap(effective_size, effective_alignment);
        M_insist(is_aligned(chunk, effective_alignment), "allocated a misaligned chunk");
        M_insist(is_aligned(chunk, alignment), "allocated a misaligned chunk");

        /* Insert chunk sorted. */
        auto pos = insert_chunk_sorted(chunk, mark_for_deallocation(effective_size));

        /* Update `min_allocation_size_` according to `strategy_`. */
        switch (strategy_) {
            case AllocationStrategy::Exponential:
                min_allocation_size_ *= 2;  /* assumed growth rule: double the size of the next allocation */
                break;

            case AllocationStrategy::Linear: /* nothing to be done */;
        }

        /* Return chunk. */
        return pos;
    }

    /** Splits the given chunk into two chunks: the first chunk is resized to fit `required_size` plus its size
     * field, the remainder becomes a new unused chunk that is linked into the list right after the first one. */
    chunk_list_type::iterator split_unused_chunk(chunk_list_type::iterator it, size_type required_size) {
        header_type *header_first_chunk = header_of_unused_chunk(&*it);
        const size_type effective_size_original_chunk = extract_size(header_first_chunk);
        M_insist(effective_size_original_chunk >= 2 * sizeof(header_type),
                 "cannot split chunk that does not fit two headers");

        /* Calculate the effective size of the first chunk. */
        const size_type effective_size_first_chunk = ceil_to_multiple_of_pow_2(
            std::max(required_size + sizeof(size_type), sizeof(header_type)),
            alignof(header_type)
        );

        /* Update the effective size of the first chunk. */
#ifndef NDEBUG
        const bool was_marked_for_deallocation = is_marked_for_deallocation(header_first_chunk);
#endif
        header_first_chunk->value_ = preserve_deallocation(effective_size_first_chunk, header_first_chunk);
#ifndef NDEBUG
        M_insist(was_marked_for_deallocation == is_marked_for_deallocation(header_first_chunk));
#endif

        /* Add the second chunk to the list of unused chunks. */
        header_type *second_chunk = reinterpret_cast<header_type*>(
            reinterpret_cast<std::uintptr_t>(header_first_chunk) + effective_size_first_chunk
        );
        const size_type effective_size_second_chunk = effective_size_original_chunk - effective_size_first_chunk;
        M_insist(effective_size_second_chunk % alignof(header_type) == 0, "effective size violates alignment");
        M_insist(effective_size_second_chunk >= sizeof(header_type), "effective size is too small");
        link_chunk(std::next(it), second_chunk, effective_size_second_chunk);
        M_insist(not is_marked_for_deallocation(second_chunk),
                 "the second split chunk must never be marked for deallocation");

        return it;
    }

    /** Attempt to coalesce the chunk pointed to by `it` and its successor. */
    chunk_list_type::iterator coalesce(chunk_list_type::iterator it) {
        header_type *header_first_chunk = header_of_unused_chunk(&*it);
        auto successor_it = std::next(it);

        /* No next chunk to coalesce with. */
        if (successor_it == chunks_.end())
            return it;

#ifndef NDEBUG
        const bool is_first_chunk_marked_for_deallocation = is_marked_for_deallocation(header_first_chunk);
#endif

        header_type *header_second_chunk = header_of_unused_chunk(&*successor_it);

        /* If the second chunk is marked for deallocation, it cannot be coalesced with. */
        if (is_marked_for_deallocation(header_second_chunk))
            return it;

        const size_type size_first_chunk = extract_size(header_first_chunk);
        void * const addr_end_first_chunk = reinterpret_cast<uint8_t*>(header_first_chunk) + size_first_chunk;

        if (addr_end_first_chunk != header_second_chunk)
            return it;

        /* Coalesce the second chunk into the first chunk. */
        M_insist(not is_marked_for_deallocation(header_second_chunk));
        const size_type size_second_chunk = extract_size(header_second_chunk);
        auto next = unlink_chunk(successor_it);
        header_first_chunk->value_ += size_second_chunk;

#ifndef NDEBUG
        M_insist(is_marked_for_deallocation(header_first_chunk) == is_first_chunk_marked_for_deallocation,
                 "coalescing must not alter the deallocation mark");
#endif

        return std::prev(next);
    }
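    /* Illustration: two free chunks that are adjacent in memory, e.g. a 1024-byte chunk ending exactly where a
     * 4096-byte chunk begins, are merged into a single free chunk of 5120 bytes.  A chunk marked for deallocation
     * is never absorbed into its predecessor, because it starts a separate `mmap()`ed region that the destructor
     * must be able to `munmap()` on its own. */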

    /** Serves an allocation request issued by `chunks_` through the proxy allocator: hands out the address
     * previously stored in `override_addr_` instead of performing an actual allocation. */
    void * proxy_allocate(size_type, size_type) {
        M_insist(override_addr_, "override address must be set before calling this function");
        void *ptr = override_addr_;
        override_addr_ = nullptr;
        return ptr;
    }

    void proxy_deallocate(void *ptr, size_type) {
        M_insist(ptr == override_addr_, "override address must be set before calling this function");
        override_addr_ = nullptr;
        /* nothing to be done */
    }

    /** Allocates `size` bytes of virtual memory with (at least) the given `alignment` via `mmap()`. */
    void * aligned_mmap(size_type size, size_type alignment) {
        M_insist(alignment % get_pagesize() == 0, "alignment must be page aligned");
        M_insist(size % get_pagesize() == 0, "size must be a whole multiple of a page");

        if (alignment != get_pagesize()) { // over-aligned
            errno = 0;
            void *ptr = mmap(nullptr, size + alignment, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
                             /* fd= */ -1, /* offset= */ 0);
            if (ptr == MAP_FAILED) {
                const auto errsv = errno;
                throw std::runtime_error(strerror(errsv));
            }

            const std::uintptr_t unaligned_ptr = reinterpret_cast<std::uintptr_t>(ptr);
            const std::uintptr_t aligned_ptr = ceil_to_multiple_of_pow_2(unaligned_ptr, alignment);
            M_insist(aligned_ptr >= unaligned_ptr);
            M_insist(aligned_ptr < unaligned_ptr + alignment);
            void * const end_of_aligned_allocation = reinterpret_cast<void*>(aligned_ptr + size);

            munmap(ptr, aligned_ptr - unaligned_ptr); // unmap preceding memory
            munmap(end_of_aligned_allocation, alignment - (aligned_ptr - unaligned_ptr)); // unmap succeeding memory

            return reinterpret_cast<void*>(aligned_ptr);
        } else {
            errno = 0;
            void *ptr = mmap(nullptr, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
                             /* fd= */ -1, /* offset= */ 0);
            if (ptr == MAP_FAILED) {
                const auto errsv = errno;
                throw std::runtime_error(strerror(errsv));
            }
            M_insist(is_aligned(ptr, alignment), "misaligned allocation");
            return ptr;
        }
    }
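    /* Illustration of the over-aligned path (assuming 4 KiB pages): requesting 8 KiB aligned to 8 KiB maps
     * 8 KiB + 8 KiB = 16 KiB, rounds the returned address up to the next 8 KiB boundary, and `munmap()`s the
     * unused head and tail, leaving exactly 8 KiB mapped at an 8 KiB boundary. */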
};

inline void * list_allocator_proxy::allocate(size_type size, size_type alignment)
{
    return alloc_->proxy_allocate(size, alignment);
}

inline void list_allocator_proxy::deallocate(void *ptr, size_type size)
{
    return alloc_->proxy_deallocate(ptr, size);
}

}
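
A minimal usage sketch (illustrative only; the include path is an assumption, everything else follows the interface shown above):

#include <mutable/util/list_allocator.hpp>  /* assumed install path of this header */

int main()
{
    /* A list allocator with a 4 KiB initial chunk and the linear growth strategy. */
    m::list_allocator A(4096, m::AllocationStrategy::Linear);

    /* Raw allocation of 64 bytes with 16-byte alignment. */
    void *p = A.allocate(64, 16);
    /* ... use p ... */
    A.deallocate(p, 64);  // the size must match the size passed to allocate()

    A.dump();  // prints the list of available chunks to stderr
}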