Anklang 0.3.0-460-gc4ef46ba
ASE — Anklang Sound Engine (C++)

memory.cc
1 // This Source Code Form is licensed MPL-2.0: http://mozilla.org/MPL/2.0
2#include "memory.hh"
3#include "ase/internal.hh"
4#include <ase/testing.hh>
5#include <sys/mman.h>
6#include <unistd.h> // _SC_PAGESIZE
7#include <algorithm>
8#include <shared_mutex>
9#include <atomic>
10
11#define MEM_ALIGN(addr, alignment) (alignment * size_t ((size_t (addr) + alignment - 1) / alignment))
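// MEM_ALIGN() rounds addr up to the next multiple of alignment, e.g.
// MEM_ALIGN (4097, 4096) == 8192 and MEM_ALIGN (4096, 4096) == 4096.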
12#ifdef NDEBUG
13#define CHECK_FREE_OVERLAPS 0 // avoid paranoid check
14#else
15#define CHECK_FREE_OVERLAPS 1 // slow check to catch overlaps from invalid release_ext
16#endif
17
18inline constexpr size_t MINIMUM_ARENA_SIZE = 4 * 1024 * 1024;
19inline constexpr size_t MINIMUM_HUGEPAGE = 2 * 1024 * 1024;
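// 2 MiB is the common x86-64 huge page size; arenas created on demand by
// fast_mem_alloc() below are at least MINIMUM_ARENA_SIZE (4 MiB).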
20
21namespace Ase {
22
23namespace FastMemory {
24
25HugePage::HugePage (void *m, size_t s) :
26 start_(m), size_ (s)
27{}
28
29struct LinuxHugePage : public HugePage {
30 using ReleaseF = void (LinuxHugePage::*) ();
31 ReleaseF release_ = nullptr;
32 LinuxHugePage (void *m, size_t s, ReleaseF r) : HugePage (m, s), release_ (r) {}
33 void free_start () { free (start_); }
34 void unadvise_free_start () { madvise (start_, size_, MADV_NOHUGEPAGE); free_start(); }
35 void unadvise_munmap_start () { madvise (start_, size_, MADV_NOHUGEPAGE); munmap_start(); }
36 void
37 munmap_start ()
38 {
39 munlock (start_, size_);
40 munmap (start_, size_);
41 }
 42 ~LinuxHugePage()
 43 {
44 auto release = release_;
45 release_ = nullptr;
46 (this->*release) ();
47 }
48};
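// A LinuxHugePage remembers how its memory was obtained (mmap with or without
// MADV_HUGEPAGE, or aligned_alloc) and undoes exactly that in the destructor via release_.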
49
50/// Try to allocate a HugePage >= bytelength with minimum_alignment, usual sizes are 2MB.
51HugePageP
52HugePage::allocate (size_t minimum_alignment, size_t bytelength)
53{
54 assert_return (0 == (minimum_alignment & (minimum_alignment - 1)), {}); // require power of 2
55 constexpr int protection = PROT_READ | PROT_WRITE;
56 constexpr int flags = MAP_PRIVATE | MAP_ANONYMOUS;
57 // try reserved hugepages for large allocations
58 if (bytelength == MEM_ALIGN (bytelength, MINIMUM_HUGEPAGE) && minimum_alignment <= MINIMUM_HUGEPAGE)
59 {
60 void *memory = mmap (nullptr, bytelength, protection, flags | MAP_HUGETLB, -1, 0);
61 if (memory != MAP_FAILED)
62 {
63 assert_return ((size_t (memory) & (minimum_alignment - 1)) == 0, {}); // ensure alignment
64 // try mlock
65 const int mlret = mlock (memory, bytelength);
66 if (mlret < 0)
67 printerr ("%s: mlock(%p,%u) failed: %s\n", __func__, memory, bytelength, strerror (errno));
68 return std::make_shared<LinuxHugePage> (memory, bytelength, &LinuxHugePage::munmap_start);
69 }
70 }
71 // try transparent hugepages for large allocations and large alignments
72 if (bytelength == MEM_ALIGN (bytelength, std::max (minimum_alignment, MINIMUM_HUGEPAGE)))
73 {
74 static const size_t pagesize = sysconf (_SC_PAGESIZE);
75 minimum_alignment = std::max (minimum_alignment, MINIMUM_HUGEPAGE);
76 size_t areasize = minimum_alignment - pagesize + bytelength;
77 char *memory = (char*) mmap (nullptr, areasize, protection, flags, -1, 0);
 78 if (memory != MAP_FAILED)
79 {
80 // discard unaligned head
81 const uintptr_t start = uintptr_t (memory);
82 size_t extra = MEM_ALIGN (start, minimum_alignment) - start;
83 if (extra && munmap (memory, extra) != 0)
84 printerr ("%s: munmap(%p,%u) failed: %s\n", __func__, memory, extra, strerror (errno));
85 memory += extra;
86 areasize -= extra;
87 // discard unaligned tail
88 extra = areasize - size_t (areasize / minimum_alignment) * minimum_alignment;
89 areasize -= extra;
90 if (extra && munmap (memory + areasize, extra) != 0)
91 printerr ("%s: munmap(%p,%u) failed: %s\n", __func__, memory + areasize, extra, strerror (errno));
92 // double check, use THP
93 assert_return (areasize == bytelength, {});
94 assert_return ((size_t (memory) & (minimum_alignment - 1)) == 0, {}); // ensure alignment
95 // try mlock
96 const int mlret = mlock (memory, bytelength);
97 if (mlret < 0)
98 printerr ("%s: mlock(%p,%u) failed: %s\n", __func__, memory, bytelength, strerror (errno));
99 LinuxHugePage::ReleaseF release;
100 // linux/Documentation/admin-guide/mm/transhuge.rst
101 if (madvise (memory, areasize, MADV_HUGEPAGE) >= 0)
102 release = &LinuxHugePage::unadvise_munmap_start;
103 else
104 release = &LinuxHugePage::munmap_start;
105 return std::make_shared<LinuxHugePage> (memory, areasize, release);
106 }
107 }
108 // fallback to aligned_alloc with hugepages
109 if (bytelength == MEM_ALIGN (bytelength, MINIMUM_HUGEPAGE) && minimum_alignment <= MINIMUM_HUGEPAGE)
110 {
111 void *memory = std::aligned_alloc (std::max (MINIMUM_HUGEPAGE, minimum_alignment), bytelength);
112 if (memory)
113 {
114 assert_return ((size_t (memory) & (minimum_alignment - 1)) == 0, {}); // ensure alignment
115 LinuxHugePage::ReleaseF release;
116 // linux/Documentation/admin-guide/mm/transhuge.rst
117 if (madvise (memory, bytelength, MADV_HUGEPAGE) >= 0)
118 release = &LinuxHugePage::unadvise_free_start;
119 else
120 release = &LinuxHugePage::free_start;
121 return std::make_shared<LinuxHugePage> (memory, bytelength, release);
122 }
123 }
124 // otherwise fallback to just aligned_alloc
125 void *memory = std::aligned_alloc (minimum_alignment, bytelength);
126 if (!memory)
127 return {};
128 assert_return ((size_t (memory) & (minimum_alignment - 1)) == 0, {}); // ensure alignment
129 return std::make_shared<LinuxHugePage> (memory, bytelength, &LinuxHugePage::free_start);
130}
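// Allocation strategy above, in order: (1) MAP_HUGETLB mmap from the kernel's reserved
// hugepage pool, (2) an over-sized anonymous mmap trimmed to the requested alignment and
// marked MADV_HUGEPAGE (transparent huge pages), (3) aligned_alloc + MADV_HUGEPAGE,
// (4) plain aligned_alloc. A typical call, as used by create_arena() below, is e.g.:
//   HugePageP page = HugePage::allocate (64, 4 * 1024 * 1024);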
131
132struct Extent32 {
133 uint32 start = 0;
134 uint32 length = 0;
135 explicit Extent32 (uint32 sz = 0) : length (sz) {}
136 explicit Extent32 (uint32 st, uint32 len) : start (st), length (len) {}
137 void reset (uint32 sz = 0) { start = 0; length = sz; }
138 void zero (char *area) const { memset (__builtin_assume_aligned (area + start, cache_line_size), 0, length); }
139};
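// An Extent32 describes a block as offset + length relative to the arena base; zero()
// clears the range, so memory handed out by the allocator below always reads as zeroed.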
140
141// SequentialFitAllocator
142struct SequentialFitAllocator {
143 HugePageP blob;
144 std::vector<Extent32> extents; // free list
145 const uint32 mem_alignment;
146 SequentialFitAllocator (HugePageP newblob, uint32 alignment) :
147 blob (newblob), mem_alignment (alignment)
148 {
149 assert_return (size() > 0);
150 assert_return (mem_alignment <= blob->alignment());
151 assert_return ((size_t (blob->mem()) & (blob->alignment() - 1)) == 0);
152 if (size() >= 1024 * 1024)
153 extents.reserve (1024);
154 assert_return (size() <= 4294967295);
155 Extent32 area { 0, uint32_t (size()) };
156 area.zero (blob->mem());
157 release_ext (area);
158 assert_return (area.length == blob->size());
159 }
160 ~SequentialFitAllocator()
161 {
162 const size_t s = sum();
163 if (s != blob->size())
164 warning ("%s:%s: deleting area while bytes are unreleased: %zd", __FILE__, __func__, blob->size() - s);
165 extents.clear();
166 }
167 char*
168 memory () const
169 {
170 return blob->mem();
171 }
172 size_t
173 size () const
174 {
175 return blob->size();
176 }
177 size_t
178 sum () const
179 {
180 size_t s = 0;
181 for (const auto b : extents)
182 s += b.length;
183 return s;
184 }
185 void
186 release_ext (const Extent32 &ext)
187 {
188 assert_return (ext.length > 0);
189 assert_return (ext.start + ext.length <= blob->size());
190 ext.zero (blob->mem());
191 ssize_t overlaps_existing = -1, before = -1, after = -1;
192 for (size_t i = 0; i < extents.size(); i++)
193 if (ext.start == extents[i].start + extents[i].length)
194 {
195 after = i;
196 if (UNLIKELY (before >= 0))
197 break;
198 }
199 else if (ext.start + ext.length == extents[i].start)
200 {
201 before = i;
202 if (UNLIKELY (after >= 0))
203 break;
204 }
205 else if (CHECK_FREE_OVERLAPS &&
206 ext.start + ext.length > extents[i].start &&
207 ext.start < extents[i].start + extents[i].length)
208 overlaps_existing = i;
209 assert_return (overlaps_existing == -1);
210 // merge with existing extents
211 if (after >= 0)
212 {
213 extents[after].length += ext.length;
214 if (before >= 0)
215 {
216 extents[after].length += extents[before].length;
217 extents.erase (extents.begin() + before);
218 }
219 return;
220 }
221 if (before >= 0)
222 {
223 extents[before].length += ext.length;
224 extents[before].start = ext.start;
225 return;
226 }
227 // add isolated block to free list
228 extents.push_back (ext);
229 }
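 // release_ext() zeroes the returned range and merges it with directly adjacent free
 // extents on either side, so the free list stays coalesced without a separate compaction pass.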
230 ssize_t
231 best_fit (size_t length) const
232 {
233 ssize_t candidate = -1;
234 for (size_t j = 0; j < extents.size(); j++)
235 {
236 const size_t i = extents.size() - 1 - j; // recent blocks are at the end
237 if (ISLIKELY (length > extents[i].length))
238 continue; // profiled, ISLIKELY saves ~7%
239 if (ISLIKELY (length == extents[i].length)) // profiled, ISLIKELY saves ~20%
240 return i;
241 // length < extents[i].length
242 if (ISLIKELY (candidate < 0) or // profiled, ISLIKELY saves ~20%
243 ISLIKELY (extents[i].length < extents[candidate].length) or
244 (ISLIKELY (extents[i].length == extents[candidate].length) and
245 ISLIKELY (extents[i].start < extents[candidate].start)))
246 candidate = i;
247 }
248 return candidate;
249 }
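 // best_fit() walks the free list from the end (recently released extents live there),
 // returning an exact size match immediately, otherwise the smallest extent that still
 // fits (ties broken by lower start offset).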
250 bool
251 alloc_ext (Extent32 &ext)
252 {
253 assert_return (ext.start == 0, false);
254 assert_return (ext.length > 0, false);
255 const uint32 aligned_length = MEM_ALIGN (ext.length, mem_alignment);
256 // find block
257 const ssize_t candidate = best_fit (aligned_length);
258 if (candidate < 0)
259 return false; // OOM
260 // allocate from start of larger block (to facilitate future Arena growth)
261 ext.start = extents[candidate].start;
262 ext.length = aligned_length;
263 if (UNLIKELY (extents[candidate].length > aligned_length))
264 {
265 extents[candidate].start += aligned_length;
266 extents[candidate].length -= aligned_length;
267 }
268 else // unlist if block wasn't larger
269 {
270 // extents.erase (extents.begin() + candidate);
271 extents[candidate] = extents.back();
272 extents.resize (extents.size() - 1);
273 }
274 return true;
275 }
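 // alloc_ext() rounds the request up to mem_alignment and carves it from the start of the
 // best fitting extent; any remainder stays on the free list.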
276#if 0 // only needed for deferred coalescing which rarely speeds things up
277 void
278 coalesce_extents()
279 {
280 if (extents.size())
281 {
282 auto isless_start = [] (const Extent32 &a, const Extent32 &b) -> bool {
283 return a.start < b.start;
284 };
285 std::sort (extents.begin(), extents.end(), isless_start);
286 for (size_t i = extents.size() - 1; i > 0; i--)
287 if (extents[i-1].start + extents[i-1].length == extents[i].start) // adjacent
288 {
289 extents[i-1].length += extents[i].length;
290 extents.erase (extents.begin() + i);
291 }
292 }
293 }
294#endif
295};
296
297struct Allocator : SequentialFitAllocator {
298 Allocator (HugePageP newblob, uint32 alignment) :
299 SequentialFitAllocator (newblob, alignment)
300 {}
301};
302
303struct FastMemoryArena : Arena {
304 FastMemoryArena (AllocatorP a) : Arena (a) {}
305 static Allocator*
306 allocator (const Arena &base)
307 {
308 static_assert (sizeof (Arena) == sizeof (FastMemoryArena));
309 return reinterpret_cast<const FastMemoryArena*> (&base)->fma.get();
310 }
311};
312static Allocator* fmallocator (const Arena &a) { return FastMemoryArena::allocator (a); }
313
314// == Arena ==
315Arena::Arena (AllocatorP xfma) :
316 fma (xfma)
317{}
318
319static Arena
320create_arena (uint32 alignment, uint32 mem_size)
321{
322 alignment = std::max (alignment, uint32 (cache_line_size));
323 mem_size = MEM_ALIGN (mem_size, alignment);
324 auto blob = HugePage::allocate (alignment, mem_size);
325 if (!blob->mem())
326 fatal_error ("ASE: failed to allocate aligned memory (%u bytes): %s", mem_size, strerror (errno));
327 FastMemory::AllocatorP fmap = std::make_shared<FastMemory::Allocator> (std::move (blob), alignment);
328 return FastMemoryArena (fmap);
329}
330
331Arena::Arena (uint32 mem_size, uint32 alignment)
332{
333 assert_return (alignment <= 2147483648);
334 assert_return (0 == (alignment & (alignment - 1)));
335 assert_return (mem_size <= 2147483648);
336 *this = create_arena (alignment, mem_size);
337}
338
339uint64
340Arena::location () const
341{
342 return fma ? uint64 (fma->memory()) : 0;
343}
344
345uint64
346Arena::reserved () const
347{
348 return fma ? fma->size() : 0;
349}
350
351size_t
352Arena::alignment () const
353{
354 return fma ? fma->mem_alignment : 0;
355}
356
357Block
358Arena::allocate (uint32 length, std::nothrow_t) const
359{
360 const Block zeroblock = { nullptr, 0 };
361 assert_return (fma, zeroblock);
362 return_unless (length > 0, zeroblock);
363 Extent32 ext { 0, length };
364 if (fma->alloc_ext (ext))
365 return Block { fma->memory() + ext.start, ext.length };
366 // TODO: does it make sense to try growing here?
367 return zeroblock;
368}
369
370Block
371Arena::allocate (uint32 length) const
372{
373 Block ab = allocate (length, std::nothrow);
374 if (!ab.block_start)
375 throw std::bad_alloc();
376 return ab;
377}
378
379void
380Arena::release (Block ab) const
381{
382 assert_return (fma);
383 assert_return (ab.block_start >= fma->memory());
384 assert_return (ab.block_start < fma->memory() + fma->size());
385 assert_return (0 == (size_t (ab.block_start) & (alignment() - 1)));
386 assert_return (0 == (size_t (ab.block_length) & (alignment() - 1)));
387 const size_t block_offset = ((char*) ab.block_start) - fma->memory();
388 assert_return (block_offset + ab.block_length <= fma->size());
389 Extent32 ext { uint32 (block_offset), ab.block_length };
390 fma->release_ext (ext);
391}
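// Rough usage sketch of the Arena block API (sizes here are only illustrative):
//   FastMemory::Arena arena (1024 * 1024);        // 1 MiB, cache line aligned
//   FastMemory::Block b = arena.allocate (300);   // length is rounded up to the alignment
//   arena.release (b);                            // pass back the exact Block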
392
393struct EmptyArena : Arena {
394 EmptyArena() :
395 Arena (AllocatorP (nullptr))
396 {}
397};
398
399// == NewDeleteBase ==
400static constexpr bool trace_NewDeleteBase = false;
401
402void
403NewDeleteBase::delete_ (void *ptr, std::size_t sz, std::align_val_t al)
404{
405 if (trace_NewDeleteBase)
406 Ase::printerr ("del: %p (%d, %d)\n", ptr, sz, al);
407 // sz and al are both bogus if delete is called via base class
408 fast_mem_free (ptr);
409 //::operator delete[] (ptr, al);
410}
411
412void*
413NewDeleteBase::new_ (std::size_t sz, std::align_val_t al)
414{
415 //auto ptr = ::operator new[] (sz, al);
416 void *ptr = nullptr;
417 if (size_t (al) <= FastMemory::cache_line_size)
418 ptr = fast_mem_alloc (sz);
419 if (trace_NewDeleteBase)
420 Ase::printerr ("new: %p (%d, %d)\n", ptr, sz, al);
421 if (!ptr)
422 throw std::bad_alloc();
423 return ptr;
424}
425
426// == ArenaBlock ==
427static std::mutex fast_mem_mutex;
428static std::vector<Arena> fast_mem_arenas;
429
430struct ArenaBlock {
431 void *block_start = nullptr;
432 uint32 block_length = 0;
433 uint32 arena_index = ~0;
434 ArenaBlock () = default;
435 ArenaBlock (void *ptr, uint32 length, uint32 index) :
436 block_start (ptr), block_length (length), arena_index (index)
437 {
438 assert_return (index < fast_mem_arenas.size());
439 }
440 ArenaBlock& operator= (const ArenaBlock &src) = default;
441 /*copy*/ ArenaBlock (const ArenaBlock &src) = default;
442 Block block () const { return { block_start, block_length }; }
443};
444
445static ArenaBlock
446fast_mem_allocate_aligned_block_L (uint32 length)
447{
448 // try to allocate from existing arenas
449 Extent32 ext { 0, length };
450 for (uint32 i = 0; i < fast_mem_arenas.size(); i++)
451 {
452 FastMemory::Allocator *fma = fmallocator (fast_mem_arenas[i]);
453 if (fma->alloc_ext (ext))
454 {
455 void *const ptr = fma->memory() + ext.start;
456 return { ptr, ext.length, i };
457 }
458 }
459 // allocate a new area
460 Arena arena = create_arena (cache_line_size, std::max (size_t (length), MINIMUM_ARENA_SIZE));
461 assert_return (fmallocator (arena), {});
462 const uint32 arena_index = fast_mem_arenas.size();
463 fast_mem_arenas.push_back (arena);
464 FastMemory::Allocator *fma = fmallocator (arena);
465 if (fma->alloc_ext (ext))
466 {
467 void *const ptr = fma->memory() + ext.start;
468 return { ptr, ext.length, arena_index };
469 }
470 fatal_error ("newly allocated arena too short for request: %u < %u", fma->size(), ext.length);
471}
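// Arenas are created on demand and never freed; each new arena is at least
// MINIMUM_ARENA_SIZE or large enough for the single request, so one block never spans arenas.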
472
473// == MemoryMetaTable ==
474struct MemoryMetaInfo {
475 std::mutex mutex;
476 std::vector<ArenaBlock> ablocks;
477};
478
479static MemoryMetaInfo&
480mm_info_lookup (void *ptr)
481{
482 static MemoryMetaInfo mm_info[1024];
483 const size_t arrsz = sizeof (mm_info) / sizeof (mm_info[0]);
484 union { uint64_t v; uint8_t a[8]; } u { uintptr_t (ptr) };
485 const uint64_t M = 11400714819323198487ull; // golden ratio, rounded up to next odd
486 const uint64_t S = 0xcbf29ce484222325;
487 size_t hash = S; // swap a[0]..a[7] on big-endian for good avalanche effect
488 hash = (u.a[0] ^ hash) * M;
489 hash = (u.a[1] ^ hash) * M;
490 hash = (u.a[2] ^ hash) * M;
491 hash = (u.a[3] ^ hash) * M;
492 hash = (u.a[4] ^ hash) * M;
493 hash = (u.a[5] ^ hash) * M;
494 hash = (u.a[6] ^ hash) * M;
495 hash = (u.a[7] ^ hash) * M;
496 return mm_info[hash % arrsz];
497}
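// The FNV-style byte mixing with a golden-ratio multiplier spreads pointer values evenly
// over the 1024 buckets; each bucket has its own mutex, so metadata lookups only contend
// when two pointers hash to the same bucket.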
498
499static void
500mm_info_push_mt (const ArenaBlock &ablock) // MT-Safe
501{
502 MemoryMetaInfo &mi = mm_info_lookup (ablock.block_start);
503 std::lock_guard<std::mutex> locker (mi.mutex);
504 mi.ablocks.push_back (ablock);
505}
506
507static ArenaBlock
508mm_info_pop_mt (void *block_start) // MT-Safe
509{
510 MemoryMetaInfo &mi = mm_info_lookup (block_start);
511 std::lock_guard<std::mutex> locker (mi.mutex);
512 auto it = std::find_if (mi.ablocks.begin(), mi.ablocks.end(),
513 [block_start] (const auto &ab) {
514 return ab.block_start == block_start;
515 });
516 if (it != mi.ablocks.end()) // found it, now pop
517 {
518 const ArenaBlock ab = *it;
519 if (it < mi.ablocks.end() - 1)
520 *it = mi.ablocks.back(); // swap with tail for quick shrinking
521 mi.ablocks.resize (mi.ablocks.size() - 1);
522 return ab;
523 }
524 return {};
525}
526
527} // FastMemory
528
529// == aligned malloc/calloc/free ==
530void*
531fast_mem_alloc (size_t size)
532{
533 std::unique_lock<std::mutex> shortlock (FastMemory::fast_mem_mutex);
534 FastMemory::ArenaBlock ab = FastMemory::fast_mem_allocate_aligned_block_L (size); // MT-Guarded
535 shortlock.unlock();
536 void *const ptr = ab.block_start;
537 if (ptr)
538 mm_info_push_mt (ab);
539 else
540 fatal_error ("%s: failed to allocate %u bytes\n", __func__, size);
541 return ptr;
542}
543
544void
545fast_mem_free (void *mem)
546{
547 return_unless (mem);
548 FastMemory::ArenaBlock ab = FastMemory::mm_info_pop_mt (mem);
549 if (!ab.block_start)
550 fatal_error ("%s: invalid memory pointer: %p\n", __func__, mem);
551 std::lock_guard<std::mutex> locker (FastMemory::fast_mem_mutex);
552 FastMemory::fast_mem_arenas[ab.arena_index].release (ab.block()); // MT-Guarded
553}
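// fast_mem_alloc() / fast_mem_free() form the malloc/free style interface used by
// NewDeleteBase above, e.g.:
//   float *scratch = (float*) fast_mem_alloc (4096); // cache line aligned, zeroed
//   fast_mem_free (scratch);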
554
555// == CString ==
556#ifndef NDEBUG
557static CString cstring_early_test = "NULL"; // initialization must precede cstring_globals
558#endif
559/// Map std::string <-> uint IDs, thread safe.
560class CStringTable {
561 struct StrPtrHash {
562 std::size_t operator() (const String *k) const noexcept { return std::hash<String>{} (*k); }
563 };
564 struct StrPtrEqual {
565 bool operator() (const String *a, const String *b) const noexcept { return *a == *b; }
566 };
567 using StrPtrMap = std::unordered_map<const String*, uint, StrPtrHash, StrPtrEqual>;
568 StrPtrMap quarks_;
569 std::vector<const String*> strings_;
570 std::shared_mutex mutex_;
571 CStringTable()
572 {
573 static String empty_string;
574 strings_ = { &empty_string }; // ID==0
575 quarks_[&empty_string] = 0;
576 }
577public:
578 uint add (const String &s) noexcept;
579 uint find (const String &s) noexcept;
580 const String& lookup (uint quark) noexcept;
581 static CStringTable& the () noexcept { static auto &g = *new CStringTable; return g; }
582};
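// CStringTable interns strings: add() returns a stable uint quark per distinct string,
// find() is the non-inserting variant, quark 0 is always the empty string, and interned
// strings are never freed, so references handed out by lookup() stay valid.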
583
584uint
585CStringTable::add (const String &s) noexcept
586{
587 const std::unique_lock ulock (mutex_);
588 auto it = quarks_.find (&s);
589 if (it != quarks_.end()) [[likely]]
590 return it->second;
591 const uint quark = strings_.size();
592 strings_.push_back (new String (s));
593 quarks_[strings_.back()] = quark;
594 return quark;
595}
596
597uint
598CStringTable::find (const String &s) noexcept
599{
600 const std::unique_lock ulock (mutex_);
601 auto it = quarks_.find (&s);
602 if (it == quarks_.end()) return 0;
603 return it->second;
604}
605
606const String&
607CStringTable::lookup (uint quark) noexcept
608{
609 const std::unique_lock ulock (mutex_);
610 if (quark < strings_.size()) [[likely]]
611 return *strings_[quark];
612 return *strings_[0]; // empty_string;
613}
614
620CString&
621CString::assign (const String &s) noexcept
622{
623 quark_ = CStringTable::the().add (s);
624 return *this;
625}
626
630CString
631CString::lookup (const std::string &s)
632{
633 CString cstring;
634 cstring.quark_ = CStringTable::the().find (s);
635 return cstring;
636}
637
638/// Convert CString into a std::string.
639const std::string&
640CString::string () const
641{
642 return CStringTable::the().lookup (quark_);
643}
644
645uint
646CString::temp_quark_impl (CString c)
647{
648 return c.quark_;
649}
650
651CString
652CString::temp_quark_impl (uint maybequark)
653{
654 CString cstring;
655 const std::string &stdstring = CStringTable::the().lookup (maybequark);
656 cstring.quark_ = stdstring.empty() ? 0 : maybequark;
657 return cstring;
658}
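// Typical CString usage (identifiers are illustrative):
//   CString key = "velocity";   // interns "velocity" once
//   CString dup = "velocity";   // same quark, equality checks are cheap
//   assert (key.c_str() == dup.c_str()); // equal CStrings share storage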
659
660} // Ase
661
662// == Allocator Tests ==
663#include "randomhash.hh"
664namespace { // Anon
665using namespace Ase;
666
667TEST_INTEGRITY (aligned_allocator_tests);
668static void
669aligned_allocator_tests()
670{
671 using FastMemory::Extent32;
672 const ssize_t kb = 1024, asz = 4 * 1024;
673 // create small area
674 FastMemory::Arena arena (asz, FastMemory::cache_line_size);
675 FastMemory::Allocator *fmap = fmallocator (arena);
676 assert_return (fmap != nullptr);
677 FastMemory::Allocator &fma = *fmap;
678 assert_return (fma.sum() == asz);
679 // allocate 4 * 1kb
680 bool success;
681 Extent32 s1 (kb);
682 success = fma.alloc_ext (s1);
683 assert_return (success);
684 assert_return (fma.sum() == asz - kb);
685 Extent32 s2 (kb - 1);
686 success = fma.alloc_ext (s2);
687 assert_return (success && s2.length == kb); // check alignment
688 assert_return (fma.sum() == asz - 2 * kb);
689 Extent32 s3 (kb);
690 success = fma.alloc_ext (s3);
691 assert_return (success);
692 assert_return (fma.sum() == asz - 3 * kb);
693 Extent32 s4 (kb);
694 success = fma.alloc_ext (s4);
695 assert_return (success);
696 assert_return (fma.sum() == 0);
697 // release with fragmentation
698 fma.release_ext (s1);
699 assert_return (fma.sum() == kb);
700 fma.release_ext (s3);
701 assert_return (fma.sum() == 2 * kb);
702 // fail allocation due to fragmentation
703 s1.reset (2 * kb);
704 success = fma.alloc_ext (s1);
705 assert_return (success == false);
706 // release middle block and allocate coalesced result
707 fma.release_ext (s2);
708 assert_return (fma.sum() == 3 * kb);
709 s1.reset (3 * kb);
710 success = fma.alloc_ext (s1);
711 assert_return (success);
712 assert_return (fma.sum() == 0);
713 // release all
714 fma.release_ext (s1);
715 fma.release_ext (s4);
716 assert_return (fma.sum() == asz);
717 // test general purpose allocations exceeding a single FastMemory::Arena
718 std::vector<void*> ptrs;
719 size_t sum = 0;
720 while (sum < 37 * 1024 * 1024)
721 {
722 const size_t sz = random_irange (8, 98304);
723 ptrs.push_back (fast_mem_alloc (sz));
724 assert_return (ptrs.back() != nullptr);
725 sum += sz;
726 }
727 while (!ptrs.empty())
728 {
729 fast_mem_free (ptrs.back());
730 ptrs.pop_back();
731 }
732}
733
734TEST_INTEGRITY (memory_cstring_tests);
735static void
736memory_cstring_tests()
737{
738 // test CString
739#ifndef NDEBUG
740 const bool equality_checks =
741 cstring_early_test == CString ("NULL") &&
742 cstring_early_test == String ("NULL") &&
743 cstring_early_test == "NULL" &&
744 cstring_early_test != CString ("u") &&
745 cstring_early_test != String ("u") &&
746 cstring_early_test != "u" &&
747 1;
748 assert_return (equality_checks == true);
749 const bool lesser_checks =
750 CString ("1") < CString ("2") && CString ("x") <= CString ("x") &&
751 CString ("1") < String ("2") && CString ("x") <= String ("x") &&
752 CString ("1") < "2" && CString ("x") <= "x" &&
753 1;
754 assert_return (lesser_checks == true);
755 const bool greater_checks =
756 CString ("2") > CString ("1") && CString ("x") >= CString ("x") &&
757 CString ("2") > String ("1") && CString ("x") >= String ("x") &&
758 CString ("2") > "1" && CString ("x") >= "x" &&
759 1;
760 assert_return (greater_checks == true);
761#endif
762 CString c;
763 assert_return (c == "");
764 assert_return (c == CString (""));
765 c = "foo";
766 assert_return (c == "foo");
767 assert_return (c != "");
768 assert_return (c == CString ("foo", 3));
769 assert_return (c == CString::lookup ("foo"));
770 c = "bar";
771 assert_return (c == "bar");
772 assert_return (c == CString (std::string ("bar")));
773 c = "three";
774 assert_return (c == "three");
775 assert_return (c == CString (CString ("three")));
776 CString d = "four";
777 assert_return (d == "four");
778 assert_return (CString ("four") == d.c_str());
779 assert_return (std::string ("four") == d.c_str());
780 std::string stdstring = d;
781 assert_return (stdstring == d);
782 assert_return (std::hash<CString>{} ("four") == std::hash<std::string>{} (stdstring));
783 assert_return (d != c);
784 std::ostringstream os;
785 os << c;
786 assert_return (os.str() == "three");
787 os << d;
788 assert_return (os.str() == "threefour");
789 // assert_return (c + d == "threefour"); // not-ok: avoid implicit CString creation
790 assert_return (c + "FOO" == "threeFOO"); // ok, just allocates a std::string
791 assert_return ("FOO" + d == "FOOfour"); // ok, just allocates a std::string
792 assert_return (string_format ("%s+%s", c, d) == "three+four");
793 c = "four";
794 assert_return (d == c);
795 assert_return (c.c_str() == d.c_str()); // works only for CString, not std::string
796 const char *unique_str = "Af00-61c34bc5fd7c#nosuchthing";
797 c = CString::lookup (unique_str); // yields empty, unique_str has never been seen before
798 assert_return (c.empty() == true);
799 d = unique_str; // unique_str forced assignment
800 assert_return (d.empty() == false);
801 c = CString::lookup (unique_str); // succeeds, unique_str has been seen before
802 assert_return (c.empty() == false);
803 struct TwoCStrings { CString a, b; };
804 static_assert (sizeof (TwoCStrings) <= 2 * 4);
805 // CString temporary comparisons
806 assert_return (CString ("a") == String ("a"));
807 assert_return (String ("a") == CString ("a"));
808 assert_return (CString ("a") == CString ("a"));
809 assert_return ("a" == CString ("a"));
810 assert_return (CString ("a") == "a");
811 assert_return (CString ("a") != String ("b"));
812 assert_return (String ("a") != CString ("b"));
813 assert_return (CString ("a") != CString ("b"));
814 assert_return ("b" != CString ("a"));
815 assert_return (CString ("a") != "b");
816 // CString const comparisons
817 const CString ac ("a"), bc ("b");
818 CString a ("a"), b ("b");
819 assert_return (a == a);
820 assert_return (ac == ac);
821 assert_return (a == ac);
822 assert_return (ac == a);
823 assert_return (a != b);
824 assert_return (ac != bc);
825 assert_return (a != bc);
826 assert_return (ac != b);
827 assert_return ("foo" == CString::temp_quark_impl (CString::temp_quark_impl ("foo")));
828}
829
830} // Anon