#ifndef EASTL_INTERNAL_FIXED_POOL_H
#define EASTL_INTERNAL_FIXED_POOL_H


#include <stk_util/util/config_eastl.h>
#include <stk_util/util/functional_eastl.h>
#include <stk_util/util/memory_eastl.h>
#include <stk_util/util/allocator_eastl.h>
#include <stk_util/util/type_traits_eastl.h>

#pragma warning(push, 0)


#ifndef EASTL_FIXED_POOL_DEFAULT_NAME
    #define EASTL_FIXED_POOL_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_pool" // Unless the user overrides something, this is "EASTL fixed_pool".
#endif


///////////////////////////////////////////////////////////////////////////
// aligned_buffer
//
// aligned_buffer<size, alignment> provides uninitialized storage of the
// given size and alignment, implemented via explicit specializations for
// each supported alignment value.
//
typedef char EASTL_MAY_ALIAS aligned_buffer_char;

template <size_t size, size_t alignment>
struct aligned_buffer { aligned_buffer_char buffer[size]; };
template<size_t size>
struct aligned_buffer<size, 2>    { EA_PREFIX_ALIGN(2)    aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(2);    };

template<size_t size>
struct aligned_buffer<size, 4>    { EA_PREFIX_ALIGN(4)    aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(4);    };

template<size_t size>
struct aligned_buffer<size, 8>    { EA_PREFIX_ALIGN(8)    aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(8);    };

template<size_t size>
struct aligned_buffer<size, 16>   { EA_PREFIX_ALIGN(16)   aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(16);   };

template<size_t size>
struct aligned_buffer<size, 32>   { EA_PREFIX_ALIGN(32)   aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(32);   };

template<size_t size>
struct aligned_buffer<size, 64>   { EA_PREFIX_ALIGN(64)   aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(64);   };

template<size_t size>
struct aligned_buffer<size, 128>  { EA_PREFIX_ALIGN(128)  aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(128);  };
#if !defined(EA_PLATFORM_PSP) // This platform's compiler fails to compile alignments >= 256 and reports an error.

template<size_t size>
struct aligned_buffer<size, 256>  { EA_PREFIX_ALIGN(256)  aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(256);  };

template<size_t size>
struct aligned_buffer<size, 512>  { EA_PREFIX_ALIGN(512)  aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(512);  };

template<size_t size>
struct aligned_buffer<size, 1024> { EA_PREFIX_ALIGN(1024) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(1024); };

template<size_t size>
struct aligned_buffer<size, 2048> { EA_PREFIX_ALIGN(2048) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(2048); };

template<size_t size>
struct aligned_buffer<size, 4096> { EA_PREFIX_ALIGN(4096) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(4096); };
#endif // EA_PLATFORM_PSP


///////////////////////////////////////////////////////////////////////////
// fixed_pool_base
//
// Common base for the fixed pool implementations below. Nodes are handed
// out from a user-supplied memory block and recycled through an intrusive
// free list.
//
struct fixed_pool_base
{
public:
    fixed_pool_base(void* pMemory = NULL)
        : mpHead((Link*)pMemory)
        , mpNext((Link*)pMemory)
        , mpCapacity((Link*)pMemory)
    {
        #if EASTL_FIXED_SIZE_TRACKING_ENABLED
            mnCurrentSize = 0;
            mnPeakSize    = 0;
        #endif
    }

    void init(void* pMemory, size_t memorySize, size_t nodeSize,
              size_t alignment, size_t alignmentOffset = 0);

    size_t peak_size() const
    {
        #if EASTL_FIXED_SIZE_TRACKING_ENABLED
            return mnPeakSize;
        #else
            return 0;
        #endif
    }

    bool can_allocate() const
    {
        return (mpHead != NULL) || (mpNext != mpCapacity);
    }

public:
    struct Link { Link* mpNext; }; // Singly linked list node overlaid on freed pool nodes.

    Link*  mpHead;     // Head of the free list of returned nodes.
    Link*  mpNext;     // Next unused node in the reserved memory block.
    Link*  mpCapacity; // End of the reserved memory block.
    size_t mnNodeSize; // Size of each node; set by init().

    #if EASTL_FIXED_SIZE_TRACKING_ENABLED
        uint32_t mnCurrentSize; // Number of nodes currently allocated.
        uint32_t mnPeakSize;    // Highest number of nodes allocated at any one time.
    #endif
};
///////////////////////////////////////////////////////////////////////////
// fixed_pool
//
// A simple fixed-size pool allocator used by the fixed node containers.
//
class fixed_pool : public fixed_pool_base
{
public:
    fixed_pool(void* pMemory = NULL)
        : fixed_pool_base(pMemory)
    {
    }

    fixed_pool(void* pMemory, size_t memorySize, size_t nodeSize,
               size_t alignment, size_t alignmentOffset = 0)
    {
        init(pMemory, memorySize, nodeSize, alignment, alignmentOffset);
    }

    void* allocate()
    {
        Link* pLink = mpHead;

        if(pLink) // Reuse a node from the free list if one is available...
        {
            #if EASTL_FIXED_SIZE_TRACKING_ENABLED
                if(++mnCurrentSize > mnPeakSize)
                    mnPeakSize = mnCurrentSize;
            #endif

            mpHead = pLink->mpNext;
            return pLink;
        }

        // ...otherwise carve a fresh node from the reserved memory area.
        if(mpNext != mpCapacity)
        {
            pLink  = mpNext;
            mpNext = reinterpret_cast<Link*>(reinterpret_cast<char8_t*>(mpNext) + mnNodeSize);

            #if EASTL_FIXED_SIZE_TRACKING_ENABLED
                if(++mnCurrentSize > mnPeakSize)
                    mnPeakSize = mnCurrentSize;
            #endif

            return pLink;
        }

        return NULL; // The pool is exhausted.
    }

    void deallocate(void* p)
    {
        #if EASTL_FIXED_SIZE_TRACKING_ENABLED
            --mnCurrentSize;
        #endif

        // Push the node onto the front of the free list.
        ((Link*)p)->mpNext = mpHead;
        mpHead = (Link*)p;
    }

    const char* get_name() const
    {
        return EASTL_FIXED_POOL_DEFAULT_NAME;
    }

    void set_name(const char*)
    {
        // Nothing to do; the name is constant.
    }
};
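// Example usage (an illustrative sketch, not part of the original header).
// Widget, the node count of 100, and the alignment of 8 are assumptions made
// for the example, and the eastl:: qualification assumes these types live in
// namespace eastl as they do in stock EASTL:
//
//     eastl::aligned_buffer<100 * sizeof(Widget), 8> buffer;
//     eastl::fixed_pool pool(buffer.buffer, sizeof(buffer.buffer), sizeof(Widget), 8);
//
//     Widget* const pWidget = new(pool.allocate()) Widget; // Placement-construct in a pool node.
//     pWidget->~Widget();
//     pool.deallocate(pWidget);                            // The node returns to the free list.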
///////////////////////////////////////////////////////////////////////////
// fixed_pool_with_overflow
//
// Like fixed_pool, but falls back to an overflow allocator once the local
// pool is exhausted.
//
template <typename Allocator = EASTLAllocatorType>
class fixed_pool_with_overflow : public fixed_pool_base
{
public:
    fixed_pool_with_overflow(void* pMemory = NULL)
        : fixed_pool_base(pMemory),
          mOverflowAllocator(EASTL_FIXED_POOL_DEFAULT_NAME)
    {
    }

    fixed_pool_with_overflow(void* pMemory, size_t memorySize, size_t nodeSize,
                             size_t alignment, size_t alignmentOffset = 0)
        : mOverflowAllocator(EASTL_FIXED_POOL_DEFAULT_NAME)
    {
        fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset);

        mpPoolBegin = pMemory;
    }

    fixed_pool_with_overflow& operator=(const fixed_pool_with_overflow& x)
    {
        #if EASTL_ALLOCATOR_COPY_ENABLED
            mOverflowAllocator = x.mOverflowAllocator;
        #else
            (void)x;
        #endif

        return *this;
    }

    void init(void* pMemory, size_t memorySize, size_t nodeSize,
              size_t alignment, size_t alignmentOffset = 0)
    {
        fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset);

        mpPoolBegin = pMemory;
    }

    void* allocate()
    {
        void* p     = NULL;
        Link* pLink = mpHead;

        if(pLink) // Reuse a node from the free list if one is available...
        {
            p      = pLink;
            mpHead = pLink->mpNext;
        }
        else if(mpNext != mpCapacity) // ...else carve a fresh node from the reserved area...
        {
            p      = mpNext;
            mpNext = reinterpret_cast<Link*>(reinterpret_cast<char8_t*>(mpNext) + mnNodeSize);
        }
        else // ...else fall back to the overflow allocator.
            p = mOverflowAllocator.allocate(mnNodeSize);

        #if EASTL_FIXED_SIZE_TRACKING_ENABLED
            if(p && (++mnCurrentSize > mnPeakSize))
                mnPeakSize = mnCurrentSize;
        #endif

        return p;
    }

    void deallocate(void* p)
    {
        #if EASTL_FIXED_SIZE_TRACKING_ENABLED
            --mnCurrentSize;
        #endif

        // Nodes inside the local pool go back onto the free list; anything
        // else must have come from the overflow allocator.
        if((p >= mpPoolBegin) && (p < mpCapacity))
        {
            ((Link*)p)->mpNext = mpHead;
            mpHead = (Link*)p;
        }
        else
            mOverflowAllocator.deallocate(p, (size_t)mnNodeSize);
    }

    const char* get_name() const
    {
        return mOverflowAllocator.get_name();
    }

    void set_name(const char* pName)
    {
        mOverflowAllocator.set_name(pName);
    }

public:
    Allocator mOverflowAllocator;
    void*     mpPoolBegin; // Lets deallocate() tell pool nodes from overflow allocations.
};
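// Example usage (an illustrative sketch, not part of the original header).
// The node size of 32, the count of 4, and the alignment of 8 are assumptions
// made for the example:
//
//     eastl::aligned_buffer<4 * 32, 8> buffer;
//     eastl::fixed_pool_with_overflow<EASTLAllocatorType> pool(buffer.buffer, sizeof(buffer.buffer), 32, 8);
//
//     void* p[5];
//     for(int i = 0; i < 5; ++i)
//         p[i] = pool.allocate();  // The fifth allocation exceeds the local pool and
//                                  // is served by mOverflowAllocator instead.
//     for(int i = 0; i < 5; ++i)
//         pool.deallocate(p[i]);   // Local nodes return to the free list; the overflow
//                                  // node is released through the overflow allocator.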
///////////////////////////////////////////////////////////////////////////
// fixed_node_allocator
//
// Allocates fixed-size nodes for the node-based fixed containers. When
// bEnableOverflow is true, the pool falls back to the supplied Allocator
// once the local buffer is exhausted.
//
template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator = EASTLAllocatorType>
class fixed_node_allocator
{
public:
    typedef typename type_select<bEnableOverflow, fixed_pool_with_overflow<Allocator>, fixed_pool>::type pool_type;
    typedef fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator> this_type;
    typedef Allocator overflow_allocator_type;

    enum
    {
        kNodeSize            = nodeSize,
        kNodeCount           = nodeCount,
        kNodesSize           = nodeCount * nodeSize,
        kBufferSize          = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
        kNodeAlignment       = nodeAlignment,
        kNodeAlignmentOffset = nodeAlignmentOffset
    };

public:
    pool_type mPool;

public:
    fixed_node_allocator(void* pNodeBuffer)
        : mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
    {
    }

    fixed_node_allocator(const this_type& x)
        : mPool(x.mPool.mpNext, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
    {
    }

    this_type& operator=(const this_type& x)
    {
        mPool = x.mPool;
        return *this;
    }

    void* allocate(size_t n, int = 0)
    {
        EASTL_ASSERT(n == kNodeSize); (void)n;
        return mPool.allocate();
    }

    void* allocate(size_t n, size_t, size_t, int = 0)
    {
        EASTL_ASSERT(n == kNodeSize); (void)n;
        return mPool.allocate();
    }

    void deallocate(void* p, size_t)
    {
        mPool.deallocate(p);
    }

    bool can_allocate() const
    {
        return mPool.can_allocate();
    }

    void reset(void* pNodeBuffer)
    {
        mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
    }

    const char* get_name() const
    {
        return mPool.get_name();
    }

    void set_name(const char* pName)
    {
        mPool.set_name(pName);
    }

    overflow_allocator_type& get_overflow_allocator()
    {
        return mPool.mOverflowAllocator;
    }

    void set_overflow_allocator(const overflow_allocator_type& allocator)
    {
        mPool.mOverflowAllocator = allocator;
    }
};
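// Example usage (an illustrative sketch, not part of the original header).
// Node and the node count of 64 are assumptions; in practice the fixed node
// containers instantiate this allocator themselves:
//
//     typedef eastl::fixed_node_allocator<sizeof(Node), 64, EASTL_ALIGN_OF(Node), 0, true, EASTLAllocatorType> Alloc;
//     eastl::aligned_buffer<Alloc::kBufferSize, Alloc::kNodeAlignment> buffer;
//
//     Alloc alloc(buffer.buffer);
//     void* pNode = alloc.allocate(sizeof(Node)); // Requests must be exactly kNodeSize bytes.
//     alloc.deallocate(pNode, sizeof(Node));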
///////////////////////////////////////////////////////////////////////////
// fixed_node_allocator (bEnableOverflow == false specialization)
//
// Identical to the primary template except that the pool cannot overflow;
// allocation simply fails (returns NULL) once the local buffer is exhausted.
//
template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, typename Allocator>
class fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, Allocator>
{
public:
    typedef fixed_pool pool_type;
    typedef fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, Allocator> this_type;
    typedef Allocator overflow_allocator_type;

    enum
    {
        kNodeSize            = nodeSize,
        kNodeCount           = nodeCount,
        kNodesSize           = nodeCount * nodeSize,
        kBufferSize          = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
        kNodeAlignment       = nodeAlignment,
        kNodeAlignmentOffset = nodeAlignmentOffset
    };

public:
    pool_type mPool;

public:
    fixed_node_allocator(void* pNodeBuffer)
        : mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
    {
    }

    fixed_node_allocator(const this_type& x)
        : mPool(x.mPool.mpNext, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
    {
    }

    this_type& operator=(const this_type& x)
    {
        mPool = x.mPool;
        return *this;
    }

    void* allocate(size_t n, int = 0)
    {
        EASTL_ASSERT(n == kNodeSize); (void)n;
        return mPool.allocate();
    }

    void* allocate(size_t n, size_t, size_t, int = 0)
    {
        EASTL_ASSERT(n == kNodeSize); (void)n;
        return mPool.allocate();
    }

    void deallocate(void* p, size_t)
    {
        mPool.deallocate(p);
    }

    bool can_allocate() const
    {
        return mPool.can_allocate();
    }

    void reset(void* pNodeBuffer)
    {
        mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
    }

    const char* get_name() const
    {
        return mPool.get_name();
    }

    void set_name(const char* pName)
    {
        mPool.set_name(pName);
    }

    overflow_allocator_type& get_overflow_allocator()
    {
        EASTL_ASSERT(false);
        return *(overflow_allocator_type*)NULL; // Not pretty, but this should never be called.
    }

    void set_overflow_allocator(const overflow_allocator_type&)
    {
        // We don't have an overflow allocator.
        EASTL_ASSERT(false);
    }
};
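// Illustrative sketch (not part of the original header): with bEnableOverflow
// set to false there is no heap fallback, so callers must handle exhaustion.
// Node and the node count of 32 are assumptions made for the example:
//
//     typedef eastl::fixed_node_allocator<sizeof(Node), 32, EASTL_ALIGN_OF(Node), 0, false, EASTLAllocatorType> Alloc;
//     eastl::aligned_buffer<Alloc::kBufferSize, Alloc::kNodeAlignment> buffer;
//     Alloc alloc(buffer.buffer);
//
//     void* p = alloc.can_allocate() ? alloc.allocate(sizeof(Node)) : NULL;
//     // Once all 32 nodes are in use, allocate() returns NULL; there is no overflow path.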
template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator>
inline bool operator==(const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& a,
                       const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& b)
{
    return (&a == &b); // They are only equal if they are the same object.
}


template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator>
inline bool operator!=(const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& a,
                       const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& b)
{
    return (&a != &b); // They are only equal if they are the same object.
}
///////////////////////////////////////////////////////////////////////////
// fixed_hashtable_allocator
//
// Provides a fixed-size node pool for hash containers, plus a separate
// fixed-size buffer for the container's bucket array. Bucket allocations
// are distinguished from node allocations by the kAllocFlagBuckets flag.
//
template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator = EASTLAllocatorType>
class fixed_hashtable_allocator
{
public:
    typedef typename type_select<bEnableOverflow, fixed_pool_with_overflow<Allocator>, fixed_pool>::type pool_type;
    typedef fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator> this_type;
    typedef Allocator overflow_allocator_type;

    enum
    {
        kBucketCount         = bucketCount + 1, // '+1' because the hash table needs a null-terminating bucket.
        kBucketsSize         = bucketCount * sizeof(void*),
        kNodeSize            = nodeSize,
        kNodeCount           = nodeCount,
        kNodesSize           = nodeCount * nodeSize,
        kBufferSize          = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
        kNodeAlignment       = nodeAlignment,
        kNodeAlignmentOffset = nodeAlignmentOffset,
        kAllocFlagBuckets    = 0x00400000 // Flag which indicates that we are allocating buckets and not nodes.
    };

public:
    pool_type mPool;
    void*     mpBucketBuffer;

public:
    fixed_hashtable_allocator(void* pNodeBuffer)
        : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
          mpBucketBuffer(NULL)
    {
    }

    fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer)
        : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
          mpBucketBuffer(pBucketBuffer)
    {
    }

    fixed_hashtable_allocator(const this_type& x)
        : mPool(x.mPool.mpHead, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
          mpBucketBuffer(x.mpBucketBuffer)
    {
    }

    void* allocate(size_t n, int flags = 0)
    {
        // We expect the caller to pass kAllocFlagBuckets when asking for bucket memory instead of nodes.
        EASTL_CT_ASSERT(kAllocFlagBuckets == 0x00400000); // We expect this, as the hashtable has a copy of this enum.
        if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
        {
            EASTL_ASSERT(n == kNodeSize); (void)n; // Make unused var warning go away.
            return mPool.allocate();
        }

        EASTL_ASSERT(n <= kBucketsSize);
        return mpBucketBuffer;
    }

    void* allocate(size_t n, size_t, size_t, int flags = 0)
    {
        if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
        {
            EASTL_ASSERT(n == kNodeSize); (void)n;
            return mPool.allocate();
        }

        EASTL_ASSERT(n <= kBucketsSize);
        return mpBucketBuffer;
    }

    void deallocate(void* p, size_t)
    {
        if(p != mpBucketBuffer) // The bucket buffer is fixed; only nodes are recycled.
            mPool.deallocate(p);
    }

    bool can_allocate() const
    {
        return mPool.can_allocate();
    }

    void reset(void* pNodeBuffer)
    {
        mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
    }

    const char* get_name() const
    {
        return mPool.get_name();
    }

    void set_name(const char* pName)
    {
        mPool.set_name(pName);
    }

    overflow_allocator_type& get_overflow_allocator()
    {
        return mPool.mOverflowAllocator;
    }

    void set_overflow_allocator(const overflow_allocator_type& allocator)
    {
        mPool.mOverflowAllocator = allocator;
    }
};
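// Illustrative sketch (not part of the original header): a hash container
// built on this allocator passes kAllocFlagBuckets when allocating its bucket
// array, which routes the request to the dedicated bucket buffer rather than
// the node pool. Node, the bucket count of 37, and the node count of 32 are
// assumptions made for the example:
//
//     typedef eastl::fixed_hashtable_allocator<37, sizeof(Node), 32, EASTL_ALIGN_OF(Node), 0, true, EASTLAllocatorType> Alloc;
//     eastl::aligned_buffer<Alloc::kBufferSize, Alloc::kNodeAlignment> nodeBuffer;
//     void* bucketBuffer[Alloc::kBucketCount];
//
//     Alloc alloc(nodeBuffer.buffer, bucketBuffer);
//     void* pBuckets = alloc.allocate(Alloc::kBucketsSize, Alloc::kAllocFlagBuckets); // Returns bucketBuffer.
//     void* pNode    = alloc.allocate(sizeof(Node));                                  // Comes from the node pool.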
///////////////////////////////////////////////////////////////////////////
// fixed_hashtable_allocator (bEnableOverflow == false specialization)
//
// Identical to the primary template except that the node pool cannot
// overflow; node allocation fails (returns NULL) once the buffer is full.
//
template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, typename Allocator>
class fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, Allocator>
{
public:
    typedef fixed_pool pool_type;
    typedef fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, Allocator> this_type;
    typedef Allocator overflow_allocator_type;

    enum
    {
        kBucketCount         = bucketCount + 1, // '+1' because the hash table needs a null-terminating bucket.
        kBucketsSize         = bucketCount * sizeof(void*),
        kNodeSize            = nodeSize,
        kNodeCount           = nodeCount,
        kNodesSize           = nodeCount * nodeSize,
        kBufferSize          = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
        kNodeAlignment       = nodeAlignment,
        kNodeAlignmentOffset = nodeAlignmentOffset,
        kAllocFlagBuckets    = 0x00400000 // Flag which indicates that we are allocating buckets and not nodes.
    };

public:
    pool_type mPool;
    void*     mpBucketBuffer;

public:
    fixed_hashtable_allocator(void* pNodeBuffer)
        : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
          mpBucketBuffer(NULL)
    {
    }

    fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer)
        : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
          mpBucketBuffer(pBucketBuffer)
    {
    }

    fixed_hashtable_allocator(const this_type& x)
        : mPool(x.mPool.mpHead, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
          mpBucketBuffer(x.mpBucketBuffer)
    {
    }

    fixed_hashtable_allocator& operator=(const fixed_hashtable_allocator& x)
    {
        mPool = x.mPool;
        return *this;
    }

    void* allocate(size_t n, int flags = 0)
    {
        EASTL_CT_ASSERT(kAllocFlagBuckets == 0x00400000); // We expect this, as the hashtable has a copy of this enum.
        if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
        {
            EASTL_ASSERT(n == kNodeSize); (void)n; // Make unused var warning go away.
            return mPool.allocate();
        }

        EASTL_ASSERT(n <= kBucketsSize);
        return mpBucketBuffer;
    }

    void* allocate(size_t n, size_t, size_t, int flags = 0)
    {
        if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
        {
            EASTL_ASSERT(n == kNodeSize); (void)n;
            return mPool.allocate();
        }

        EASTL_ASSERT(n <= kBucketsSize);
        return mpBucketBuffer;
    }

    void deallocate(void* p, size_t)
    {
        if(p != mpBucketBuffer) // The bucket buffer is fixed; only nodes are recycled.
            mPool.deallocate(p);
    }

    bool can_allocate() const
    {
        return mPool.can_allocate();
    }

    void reset(void* pNodeBuffer)
    {
        mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
    }

    const char* get_name() const
    {
        return mPool.get_name();
    }

    void set_name(const char* pName)
    {
        mPool.set_name(pName);
    }

    overflow_allocator_type& get_overflow_allocator()
    {
        EASTL_ASSERT(false);
        return *(overflow_allocator_type*)NULL; // Not pretty, but this should never be called.
    }

    void set_overflow_allocator(const overflow_allocator_type&)
    {
        // We don't have an overflow allocator.
        EASTL_ASSERT(false);
    }
};
template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator>
inline bool operator==(const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& a,
                       const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& b)
{
    return (&a == &b); // They are only equal if they are the same object.
}


template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator>
inline bool operator!=(const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& a,
                       const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& b)
{
    return (&a != &b); // They are only equal if they are the same object.
}
///////////////////////////////////////////////////////////////////////////
// fixed_vector_allocator
//
// Used by fixed_vector-style containers: the fixed buffer itself is managed
// by the container, and this allocator only handles growth beyond it via
// the overflow allocator.
//
template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator = EASTLAllocatorType>
class fixed_vector_allocator
{
public:
    typedef fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator> this_type;
    typedef Allocator overflow_allocator_type;

    enum
    {
        kNodeSize            = nodeSize,
        kNodeCount           = nodeCount,
        kNodesSize           = nodeCount * nodeSize,
        kBufferSize          = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
        kNodeAlignment       = nodeAlignment,
        kNodeAlignmentOffset = nodeAlignmentOffset
    };

public:
    overflow_allocator_type mOverflowAllocator;
    void*                   mpPoolBegin; // Lets deallocate() recognize the container's fixed buffer.

public:
    fixed_vector_allocator(void* pNodeBuffer)
        : mpPoolBegin(pNodeBuffer)
    {
    }

    fixed_vector_allocator& operator=(const fixed_vector_allocator& x)
    {
        #if EASTL_ALLOCATOR_COPY_ENABLED
            mOverflowAllocator = x.mOverflowAllocator;
        #else
            (void)x;
        #endif

        return *this;
    }

    void* allocate(size_t n, int flags = 0)
    {
        return mOverflowAllocator.allocate(n, flags);
    }

    void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0)
    {
        return mOverflowAllocator.allocate(n, alignment, offset, flags);
    }

    void deallocate(void* p, size_t n)
    {
        if(p != mpPoolBegin) // Never free the container's own fixed buffer.
            mOverflowAllocator.deallocate(p, n);
    }

    const char* get_name() const
    {
        return mOverflowAllocator.get_name();
    }

    void set_name(const char* pName)
    {
        mOverflowAllocator.set_name(pName);
    }

    overflow_allocator_type& get_overflow_allocator()
    {
        return mOverflowAllocator;
    }

    void set_overflow_allocator(const overflow_allocator_type& allocator)
    {
        mOverflowAllocator = allocator;
    }
};
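// Illustrative sketch (not part of the original header): the container passes
// its internal buffer address at construction, and deallocate() then quietly
// ignores that address, freeing only memory obtained from the overflow
// allocator. The element type and counts below are assumptions:
//
//     typedef eastl::fixed_vector_allocator<sizeof(int), 16, EASTL_ALIGN_OF(int), 0, true, EASTLAllocatorType> Alloc;
//     eastl::aligned_buffer<Alloc::kBufferSize, Alloc::kNodeAlignment> buffer;
//
//     Alloc alloc(buffer.buffer);
//     void* p = alloc.allocate(32 * sizeof(int));       // Growth past the fixed capacity goes to the overflow allocator.
//     alloc.deallocate(p, 32 * sizeof(int));            // Freed via the overflow allocator.
//     alloc.deallocate(buffer.buffer, sizeof(buffer));  // No-op: matches mpPoolBegin.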
///////////////////////////////////////////////////////////////////////////
// fixed_vector_allocator (bEnableOverflow == false specialization)
//
// With overflow disabled the container must never grow beyond its fixed
// buffer, so every allocation request is an error.
//
template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, typename Allocator>
class fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, Allocator>
{
public:
    typedef fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, Allocator> this_type;
    typedef Allocator overflow_allocator_type;

    enum
    {
        kNodeSize            = nodeSize,
        kNodeCount           = nodeCount,
        kNodesSize           = nodeCount * nodeSize,
        kBufferSize          = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
        kNodeAlignment       = nodeAlignment,
        kNodeAlignmentOffset = nodeAlignmentOffset
    };

public:
    fixed_vector_allocator(void*)
    {
    }

    void* allocate(size_t, int = 0)
    {
        EASTL_ASSERT(false); // A fixed_vector with overflow disabled should never need to reallocate.
        return NULL;
    }

    void* allocate(size_t, size_t, size_t, int = 0)
    {
        EASTL_ASSERT(false); // A fixed_vector with overflow disabled should never need to reallocate.
        return NULL;
    }

    void deallocate(void*, size_t)
    {
    }

    const char* get_name() const
    {
        return EASTL_FIXED_POOL_DEFAULT_NAME;
    }

    void set_name(const char*)
    {
    }

    overflow_allocator_type& get_overflow_allocator()
    {
        EASTL_ASSERT(false);
        overflow_allocator_type* pNULL = NULL;
        return *pNULL; // Not pretty, but this should never be called.
    }

    void set_overflow_allocator(const overflow_allocator_type&)
    {
        // We don't have an overflow allocator.
        EASTL_ASSERT(false);
    }
};
template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator>
inline bool operator==(const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& a,
                       const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& b)
{
    return (&a == &b); // They are only equal if they are the same object.
}


template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator>
inline bool operator!=(const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& a,
                       const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& b)
{
    return (&a != &b); // They are only equal if they are the same object.
}
///////////////////////////////////////////////////////////////////////////
// fixed_swap
//
// Implements a swap suitable for fixed containers, which cannot swap by
// exchanging pointers to their member storage. The swap goes through a
// temporary: on the stack when the container is small enough, otherwise
// through a temporary heap allocation.
//
template <typename Container>
void fixed_swap(Container& a, Container& b)
{
    // We must do a brute-force swap, because fixed containers cannot share memory allocations.
    eastl::less<size_t> compare;

    if(compare(sizeof(a), EASTL_MAX_STACK_USAGE)) // Using compare instead of operator< avoids a stubborn compiler warning.
    {
        const Container temp(a); // Can't use the global swap, as that could itself call this function.
        a = b;
        b = temp;
    }
    else
    {
        EASTLAllocatorType allocator(*EASTLAllocatorDefault(), EASTL_TEMP_DEFAULT_NAME);
        void* const pMemory = allocator.allocate(sizeof(a));

        if(pMemory)
        {
            Container* const pTemp = ::new(pMemory) Container(a);
            a = b;
            b = *pTemp;

            pTemp->~Container();
            allocator.deallocate(pMemory, sizeof(a));
        }
    }
}
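// Example usage (an illustrative sketch, not part of the original header).
// fixed_vector is assumed here as a representative fixed container from the
// wider library:
//
//     eastl::fixed_vector<int, 64, false> a, b;
//     eastl::fixed_swap(a, b); // Swaps via a temporary: on the stack if
//                              // sizeof(a) < EASTL_MAX_STACK_USAGE, else via
//                              // a temporary heap allocation.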
#endif // Header include guard