#ifndef WTF_PartitionAlloc_h
#define WTF_PartitionAlloc_h
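
// DESCRIPTION
// partitionAlloc() / partitionAllocGeneric() and partitionFree() /
// partitionFreeGeneric() are approximately analogous to malloc() and free().
//
// The main difference is that a PartitionRoot / PartitionRootGeneric object
// must be supplied to these functions, representing a specific "heap
// partition" that will be used to satisfy the allocation. Different partitions
// are guaranteed to exist in separate address spaces, including being separate
// from the main system heap. If the contained objects are all freed, physical
// memory is returned to the system but the address space remains reserved.
//
// Partitions are obtained through the SizeSpecificPartitionAllocator and
// PartitionAllocatorGeneric classes declared at the bottom of this header.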
#include "wtf/Assertions.h"
#include "wtf/BitwiseOperations.h"
#include "wtf/ByteSwap.h"
#include "wtf/CPU.h"
#include "wtf/PageAllocator.h"
#include "wtf/SpinLock.h"
#include <limits.h>
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#include <stdlib.h>
#endif
#ifndef NDEBUG
#include <string.h>
#endif
namespace WTF {
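// Maximum size of a partition's mappings. 2046MB. Note that the total amount
// of bytes allocatable at the API will be smaller, because guard pages,
// metadata, page headers and wasted space all come out of the total.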
static const size_t kMaxPartitionSize = 2046u * 1024u * 1024u;
static const size_t kAllocationGranularity = sizeof(void*);
static const size_t kAllocationGranularityMask = kAllocationGranularity - 1;
static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2;
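// Underlying partition storage pages are a power-of-two size. It is typical
// for a partition page to be based on multiple system pages. Most references
// to "page" in this file mean these 16KB partition pages, not (typically 4KB)
// system pages.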
static const size_t kPartitionPageShift = 14;
static const size_t kPartitionPageSize = 1 << kPartitionPageShift;
static const size_t kPartitionPageOffsetMask = kPartitionPageSize - 1;
static const size_t kPartitionPageBaseMask = ~kPartitionPageOffsetMask;
static const size_t kMaxPartitionPagesPerSlotSpan = 4;
static const size_t kNumSystemPagesPerPartitionPage = kPartitionPageSize / kSystemPageSize;
static const size_t kMaxSystemPagesPerSlotSpan = kNumSystemPagesPerPartitionPage * kMaxPartitionPagesPerSlotSpan;
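// Virtual address space is reserved in 2MB chunks, aligned to 2MB: these are
// the "super pages". A super page holds multiple partition pages, plus a
// kPageMetadataSize metadata entry for each of them (see
// partitionSuperPageToMetadataArea() below).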
static const size_t kSuperPageShift = 21;
static const size_t kSuperPageSize = 1 << kSuperPageShift;
static const size_t kSuperPageOffsetMask = kSuperPageSize - 1;
static const size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
static const size_t kNumPartitionPagesPerSuperPage = kSuperPageSize / kPartitionPageSize;
static const size_t kPageMetadataShift = 5;
static const size_t kPageMetadataSize = 1 << kPageMetadataShift;
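// Constants for the generic bucketed allocator. Buckets are grouped by
// "order" (the bit position of the size's most significant bit), with
// kGenericNumBucketsPerOrder evenly spaced buckets inside each order. For
// example, order 11 covers sizes 1024..2047 with buckets at 1024, 1152,
// 1280, ..., 1920. The smallest bucket is 8 bytes and the largest bucketed
// size works out to kGenericMaxBucketed = 983040 bytes (960KB); anything
// larger is direct mapped.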
static const size_t kGenericMinBucketedOrder = 4;
static const size_t kGenericMaxBucketedOrder = 20;
static const size_t kGenericNumBucketedOrders = (kGenericMaxBucketedOrder - kGenericMinBucketedOrder) + 1;
static const size_t kGenericNumBucketsPerOrderBits = 3;
static const size_t kGenericNumBucketsPerOrder = 1 << kGenericNumBucketsPerOrderBits;
static const size_t kGenericSmallestBucket = 1 << (kGenericMinBucketedOrder - 1);
static const size_t kGenericMaxBucketSpacing = 1 << ((kGenericMaxBucketedOrder - 1) - kGenericNumBucketsPerOrderBits);
static const size_t kGenericMaxBucketed = (1 << (kGenericMaxBucketedOrder - 1)) + ((kGenericNumBucketsPerOrder - 1) * kGenericMaxBucketSpacing);
static const size_t kGenericMinDirectMappedDownsize = 16 * kPartitionPageSize;
static const size_t kGenericMaxDirectMapped = INT_MAX - kSystemPageSize;
static const size_t kBitsPerSizet = sizeof(size_t) * CHAR_BIT;
static const size_t kMaxFreeableSpans = 16; // Constant for the memory reclaim logic.
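// In debug builds, newly allocated memory is filled with kUninitializedByte
// and freed memory with kFreedByte, and every allocation is bracketed by two
// kCookieSize cookie regions that are verified on free to catch underflows
// and overflows.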
#ifndef NDEBUG
static const unsigned char kUninitializedByte = 0xAB;
static const unsigned char kFreedByte = 0xCD;
static const uint32_t kCookieValue = 0xDEADBEEFu;
static const size_t kCookieSize = 16;
#endif
struct PartitionBucket;
struct PartitionRootBase;
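// A freed slot holds a freelist entry; the 'next' pointer is stored in a
// transformed form (see partitionFreelistMask() below) to make freelist
// corruption harder to exploit.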
struct PartitionFreelistEntry {
    PartitionFreelistEntry* next;
};
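// Per-partition-page metadata, stored in the owning super page's metadata
// area rather than in the page itself.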
struct PartitionPage {
    PartitionFreelistEntry* freelistHead;
    PartitionPage* nextPage;
    PartitionBucket* bucket;
    int16_t numAllocatedSlots; // Deliberately signed, so the slow path can use negative values (e.g. to mark full pages).
    uint16_t numUnprovisionedSlots;
    uint16_t pageOffset; // Non-zero for the trailing entries of a multi-page slot span: distance back to the span's first entry.
    int16_t freeCacheIndex;
};
struct PartitionBucket {
    PartitionPage* activePagesHead; // Accessed most in hot path => goes first.
    PartitionPage* freePagesHead;
    uint32_t slotSize;
    uint16_t numSystemPagesPerSlotSpan; // Zero for the direct-mapped sentinel bucket.
    uint16_t numFullPages;
};
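// Describes a contiguous run of super pages owned by one partition root.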
struct PartitionSuperPageExtentEntry {
    PartitionRootBase* root;
    char* superPageBase;
    char* superPagesEnd;
    PartitionSuperPageExtentEntry* next;
};
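// Never instantiate a PartitionRootBase directly, instead use PartitionRoot
// or PartitionRootGeneric.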
struct WTF_EXPORT PartitionRootBase {
    size_t totalSizeOfSuperPages;
    unsigned numBuckets;
    unsigned maxAllocation;
    bool initialized;
    char* nextSuperPage;
    char* nextPartitionPage;
    char* nextPartitionPageEnd;
    PartitionSuperPageExtentEntry* currentExtent;
    PartitionSuperPageExtentEntry* firstExtent;
    PartitionPage* globalEmptyPageRing[kMaxFreeableSpans];
    size_t globalEmptyPageRingIndex;
    uintptr_t invertedSelf;

    static int gInitializedLock;
    static bool gInitialized;
    static PartitionPage gSeedPage;
    static PartitionBucket gPagedBucket;
};
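// PartitionRoot is the header of a size-specific partition: its array of
// buckets is laid out immediately after the root object itself (see
// SizeSpecificPartitionAllocator below), which is what buckets() relies on.
// It performs no locking; callers must provide their own synchronization.
// PartitionRootGeneric instead embeds its buckets, together with lookup
// tables that map an arbitrary size to a bucket, and a spinlock that makes
// the generic entry points safe to call from multiple threads.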
struct PartitionRoot : public PartitionRootBase {
    ALWAYS_INLINE PartitionBucket* buckets() { return reinterpret_cast<PartitionBucket*>(this + 1); }
    ALWAYS_INLINE const PartitionBucket* buckets() const { return reinterpret_cast<const PartitionBucket*>(this + 1); }
};
struct PartitionRootGeneric : public PartitionRootBase {
    int lock; // Must be held (via spinLockLock()) around allocation and free operations on this root.
    size_t orderIndexShifts[kBitsPerSizet + 1];
    size_t orderSubIndexMasks[kBitsPerSizet + 1];
    PartitionBucket* bucketLookups[((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder) + 1];
    PartitionBucket buckets[kGenericNumBucketedOrders * kGenericNumBucketsPerOrder];
};
enum PartitionAllocFlags {
    PartitionAllocReturnNull = 1 << 0,
};
WTF_EXPORT void partitionAllocInit(PartitionRoot*, size_t numBuckets, size_t maxAllocation);
WTF_EXPORT bool partitionAllocShutdown(PartitionRoot*);
WTF_EXPORT void partitionAllocGenericInit(PartitionRootGeneric*);
WTF_EXPORT bool partitionAllocGenericShutdown(PartitionRootGeneric*);
WTF_EXPORT NEVER_INLINE void* partitionAllocSlowPath(PartitionRootBase*, int, size_t, PartitionBucket*);
WTF_EXPORT NEVER_INLINE void partitionFreeSlowPath(PartitionPage*);
WTF_EXPORT NEVER_INLINE void* partitionReallocGeneric(PartitionRootGeneric*, void*, size_t);
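// We use bswap on little endian as a fast mask for two reasons:
// 1) If an object is freed and its vtable used where the attacker doesn't
// get the chance to run allocations between the free and use, the vtable
// dereference is likely to fault.
// 2) If the attacker has a linear buffer overflow and elects to try and
// corrupt a freelist pointer, partial pointer overwrite attacks are
// thwarted.
// For big endian, similar guarantees are obtained with a full bit inversion.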
ALWAYS_INLINE PartitionFreelistEntry* partitionFreelistMask(PartitionFreelistEntry* ptr)
{
#if CPU(BIG_ENDIAN)
    uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
#else
    uintptr_t masked = bswapuintptrt(reinterpret_cast<uintptr_t>(ptr));
#endif
    return reinterpret_cast<PartitionFreelistEntry*>(masked);
}
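// The helpers below convert between the size the caller asked for and the
// slightly larger debug-build size that leaves room for a cookie at each end
// of the allocation; in release builds they are no-ops.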
ALWAYS_INLINE size_t partitionCookieSizeAdjustAdd(size_t size)
{
#ifndef NDEBUG
    ASSERT(size + (2 * kCookieSize) > size);
    size += 2 * kCookieSize;
#endif
    return size;
}
ALWAYS_INLINE size_t partitionCookieSizeAdjustSubtract(size_t size)
{
#ifndef NDEBUG
    ASSERT(size >= 2 * kCookieSize);
    size -= 2 * kCookieSize;
#endif
    return size;
}
ALWAYS_INLINE void* partitionCookieFreePointerAdjust(void* ptr)
{
#ifndef NDEBUG
    // The value handed out to the application sits just past the leading cookie.
    ptr = static_cast<char*>(ptr) - kCookieSize;
#endif
    return ptr;
}
ALWAYS_INLINE void partitionCookieWriteValue(void* ptr)
{
#ifndef NDEBUG
    uint32_t* cookiePtr = reinterpret_cast<uint32_t*>(ptr);
    for (size_t i = 0; i < kCookieSize / sizeof(kCookieValue); ++i, ++cookiePtr)
        *cookiePtr = kCookieValue;
#endif
}
ALWAYS_INLINE void partitionCookieCheckValue(void* ptr)
{
#ifndef NDEBUG
    uint32_t* cookiePtr = reinterpret_cast<uint32_t*>(ptr);
    for (size_t i = 0; i < kCookieSize / sizeof(kCookieValue); ++i, ++cookiePtr)
        ASSERT(*cookiePtr == kCookieValue);
#endif
}
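// Each super page's metadata area lives in the system page at offset
// kSystemPageSize from the super page base. The entry for partition page
// index 0 sits exactly on that system page base and doubles as the
// PartitionSuperPageExtentEntry; this is why valid payload partition page
// indices start at 1 (the last index is a guard page), and why
// partitionPageToRoot() can recover the owning root by masking a metadata
// pointer down to its system page base.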
ALWAYS_INLINE char* partitionSuperPageToMetadataArea(char* ptr)
{
    uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr);
    ASSERT(!(pointerAsUint & kSuperPageOffsetMask));
    return reinterpret_cast<char*>(pointerAsUint + kSystemPageSize);
}
ALWAYS_INLINE PartitionPage* partitionPointerToPageNoAlignmentCheck(void* ptr)
{
    uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr);
    char* superPagePtr = reinterpret_cast<char*>(pointerAsUint & kSuperPageBaseMask);
    uintptr_t partitionPageIndex = (pointerAsUint & kSuperPageOffsetMask) >> kPartitionPageShift;
    // Index 0 (the metadata area) and the last index (a guard page) are not valid payload pages.
    ASSERT(partitionPageIndex);
    ASSERT(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1);
    PartitionPage* page = reinterpret_cast<PartitionPage*>(partitionSuperPageToMetadataArea(superPagePtr) + (partitionPageIndex << kPageMetadataShift));
    // Many partition pages can share the same page object. Adjust for that.
    size_t delta = page->pageOffset << kPageMetadataShift;
    page = reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta);
    return page;
}
ALWAYS_INLINE void* partitionPageToPointer(PartitionPage* page)
{
    uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(page);
    uintptr_t superPageOffset = (pointerAsUint & kSuperPageOffsetMask);
    ASSERT(superPageOffset > kSystemPageSize);
    ASSERT(superPageOffset < kSystemPageSize + (kNumPartitionPagesPerSuperPage * kPageMetadataSize));
    uintptr_t partitionPageIndex = (superPageOffset - kSystemPageSize) >> kPageMetadataShift;
    ASSERT(partitionPageIndex);
    ASSERT(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1);
    uintptr_t superPageBase = (pointerAsUint & kSuperPageBaseMask);
    void* ret = reinterpret_cast<void*>(superPageBase + (partitionPageIndex << kPartitionPageShift));
    return ret;
}
ALWAYS_INLINE PartitionPage* partitionPointerToPage(void* ptr)
{
    PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ptr);
    // Checks that the pointer is a multiple of bucket size.
    ASSERT(!((reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(partitionPageToPointer(page))) % page->bucket->slotSize));
    return page;
}
ALWAYS_INLINE PartitionRootBase* partitionPageToRoot(PartitionPage* page)
{
    PartitionSuperPageExtentEntry* extentEntry = reinterpret_cast<PartitionSuperPageExtentEntry*>(reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask);
    return extentEntry->root;
}
ALWAYS_INLINE bool partitionPointerIsValid(void* ptr)
{
    PartitionPage* page = partitionPointerToPage(ptr);
    PartitionRootBase* root = partitionPageToRoot(page);
    return root->invertedSelf == ~reinterpret_cast<uintptr_t>(root);
}
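// Allocation fast path: pop the head of the active page's freelist. If the
// freelist is empty, partitionAllocSlowPath() takes over; the slow path may
// provision more slots on the current page, switch to another page, or set
// up a direct mapping for a large request.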
ALWAYS_INLINE void* partitionBucketAlloc(PartitionRootBase* root, int flags, size_t size, PartitionBucket* bucket)
{
    PartitionPage* page = bucket->activePagesHead;
    ASSERT(page->numAllocatedSlots >= 0);
    void* ret = page->freelistHead;
    if (LIKELY(ret != 0)) {
        // If these asserts fire, you probably corrupted memory.
        ASSERT(partitionPointerIsValid(ret));
        PartitionFreelistEntry* newHead = partitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next);
        page->freelistHead = newHead;
        ASSERT(!newHead || partitionPointerIsValid(newHead));
        page->numAllocatedSlots++;
    } else {
        ret = partitionAllocSlowPath(root, flags, size, bucket);
    }
#ifndef NDEBUG
    if (!ret)
        return 0;
    // Fill the region with a known pattern and bracket it with cookies. The
    // pointer handed back to the caller sits just past the leading cookie.
    page = partitionPointerToPage(ret);
    size_t bucketSize = page->bucket->slotSize;
    memset(ret, kUninitializedByte, bucketSize);
    partitionCookieWriteValue(ret);
    partitionCookieWriteValue(reinterpret_cast<char*>(ret) + bucketSize - kCookieSize);
    ret = static_cast<char*>(ret) + kCookieSize;
#endif
    return ret;
}
ALWAYS_INLINE void* partitionAlloc(PartitionRoot* root, size_t size)
{
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
    void* result = malloc(size);
    RELEASE_ASSERT(result);
    return result;
#else
    size = partitionCookieSizeAdjustAdd(size);
    ASSERT(root->initialized);
    size_t index = size >> kBucketShift;
    ASSERT(index < root->numBuckets);
    ASSERT(size == index << kBucketShift);
    PartitionBucket* bucket = &root->buckets()[index];
    return partitionBucketAlloc(root, 0, size, bucket);
#endif
}
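// Free fast path: push the slot back onto its page's freelist, deferring all
// bookkeeping (such as returning an empty page to the free list) to
// partitionFreeSlowPath().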
ALWAYS_INLINE void partitionFreeWithPage(void* ptr, PartitionPage* page)
{
    // If these asserts fire, you probably corrupted memory.
#ifndef NDEBUG
    size_t bucketSize = page->bucket->slotSize;
    partitionCookieCheckValue(ptr);
    partitionCookieCheckValue(reinterpret_cast<char*>(ptr) + bucketSize - kCookieSize);
    memset(ptr, kFreedByte, bucketSize);
#endif
    ASSERT(page->numAllocatedSlots);
    PartitionFreelistEntry* freelistHead = page->freelistHead;
    ASSERT(!freelistHead || partitionPointerIsValid(freelistHead));
    RELEASE_ASSERT(ptr != freelistHead); // Catches an immediate double free.
    ASSERT(!freelistHead || ptr != partitionFreelistMask(freelistHead->next)); // Looks for double free one level deeper in debug.
    PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr);
    entry->next = partitionFreelistMask(freelistHead);
    page->freelistHead = entry;
    --page->numAllocatedSlots;
    if (UNLIKELY(page->numAllocatedSlots <= 0))
        partitionFreeSlowPath(page);
}
ALWAYS_INLINE void partitionFree(void* ptr)
{
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
    free(ptr);
#else
    ptr = partitionCookieFreePointerAdjust(ptr);
    ASSERT(partitionPointerIsValid(ptr));
    PartitionPage* page = partitionPointerToPage(ptr);
    partitionFreeWithPage(ptr, page);
#endif
}
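// Maps a size to a bucket in constant time. "order" is the bit position of
// the size's most significant bit; orderIndex picks one of the evenly spaced
// buckets within that order, and a non-zero remainder (subOrderIndex) rounds
// the size up to the next bucket. Worked example for size 4416 on a 64-bit
// build: order is 13 (msb value 4096), the bucket spacing for that order is
// 512, orderIndex is 0 and the remainder is 320, so the "+ !!subOrderIndex"
// bumps the lookup to the 4608-byte bucket.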
ALWAYS_INLINE PartitionBucket* partitionGenericSizeToBucket(PartitionRootGeneric* root, size_t size)
{
    size_t order = kBitsPerSizet - countLeadingZerosSizet(size);
    // The order index is simply the next few bits after the most significant bit.
    size_t orderIndex = (size >> root->orderIndexShifts[order]) & (kGenericNumBucketsPerOrder - 1);
    // And if the remaining bits are non-zero we must bump the bucket up.
    size_t subOrderIndex = size & root->orderSubIndexMasks[order];
    PartitionBucket* bucket = root->bucketLookups[(order << kGenericNumBucketsPerOrderBits) + orderIndex + !!subOrderIndex];
    ASSERT(!bucket->slotSize || bucket->slotSize >= size);
    ASSERT(!(bucket->slotSize % kGenericSmallestBucket));
    return bucket;
}
ALWAYS_INLINE void* partitionAllocGenericFlags(PartitionRootGeneric* root, int flags, size_t size)
{
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
    void* result = malloc(size);
    RELEASE_ASSERT(result);
    return result;
#else
    ASSERT(root->initialized);
    size = partitionCookieSizeAdjustAdd(size);
    PartitionBucket* bucket = partitionGenericSizeToBucket(root, size);
    spinLockLock(&root->lock);
    void* ret = partitionBucketAlloc(root, flags, size, bucket);
    spinLockUnlock(&root->lock);
    return ret;
#endif
}
ALWAYS_INLINE void* partitionAllocGeneric(PartitionRootGeneric* root, size_t size)
{
    return partitionAllocGenericFlags(root, 0, size);
}
ALWAYS_INLINE void partitionFreeGeneric(PartitionRootGeneric* root, void* ptr)
{
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
    free(ptr);
#else
    ASSERT(root->initialized);
    if (UNLIKELY(!ptr))
        return;
    ptr = partitionCookieFreePointerAdjust(ptr);
    ASSERT(partitionPointerIsValid(ptr));
    PartitionPage* page = partitionPointerToPage(ptr);
    spinLockLock(&root->lock);
    partitionFreeWithPage(ptr, page);
    spinLockUnlock(&root->lock);
#endif
}
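// Allocations too large for the bucketed ranges are "direct mapped": each
// one gets its own system-page-aligned mapping, tracked through the sentinel
// gPagedBucket, whose numSystemPagesPerSlotSpan is zero.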
ALWAYS_INLINE bool partitionBucketIsDirectMapped(PartitionBucket* bucket)
{
    return !bucket->numSystemPagesPerSlotSpan;
}
ALWAYS_INLINE size_t partitionDirectMapSize(size_t size)
{
    // Caller must check that the size is not above the kGenericMaxDirectMapped
    // limit before calling. This also guards against integer overflow in the
    // calculation here.
    ASSERT(size <= kGenericMaxDirectMapped);
    // Round up to the nearest system page boundary.
    return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
}
ALWAYS_INLINE size_t partitionAllocActualSize(PartitionRootGeneric* root, size_t size)
{
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
    return size;
#else
    ASSERT(root->initialized);
    size = partitionCookieSizeAdjustAdd(size);
    PartitionBucket* bucket = partitionGenericSizeToBucket(root, size);
    if (LIKELY(!partitionBucketIsDirectMapped(bucket))) {
        size = bucket->slotSize;
    } else if (size > kGenericMaxDirectMapped) {
        // Too large to allocate => return the size unchanged.
    } else {
        ASSERT(bucket == &PartitionRootBase::gPagedBucket);
        size = partitionDirectMapSize(size);
    }
    return partitionCookieSizeAdjustSubtract(size);
#endif
}
ALWAYS_INLINE bool partitionAllocSupportsGetSize()
{
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
    return false;
#else
    return true;
#endif
}
ALWAYS_INLINE size_t partitionAllocGetSize(void* ptr)
{
    // No need to lock here. Only 'ptr' being freed by another thread could
    // cause trouble, and the caller is responsible for that not happening.
    ASSERT(partitionAllocSupportsGetSize());
    ptr = partitionCookieFreePointerAdjust(ptr);
    ASSERT(partitionPointerIsValid(ptr));
    PartitionPage* page = partitionPointerToPage(ptr);
    size_t size = page->bucket->slotSize;
    return partitionCookieSizeAdjustSubtract(size);
}
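// The two allocator classes below are the intended way to obtain a partition
// root: SizeSpecificPartitionAllocator<N> serves allocations up to
// N - kAllocationGranularity bytes from one bucket per granularity step,
// while PartitionAllocatorGeneric handles arbitrary sizes. A minimal usage
// sketch (the names match the declarations in this header; pairing init()
// with shutdown() is the caller's responsibility):
//
//   static PartitionAllocatorGeneric allocator;
//   allocator.init();
//   void* p = partitionAllocGeneric(allocator.root(), 64);
//   partitionFreeGeneric(allocator.root(), p);
//   allocator.shutdown();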
template <size_t N>
class SizeSpecificPartitionAllocator {
public:
    static const size_t kMaxAllocation = N - kAllocationGranularity;
    static const size_t kNumBuckets = N / kAllocationGranularity;
    void init() { partitionAllocInit(&m_partitionRoot, kNumBuckets, kMaxAllocation); }
    bool shutdown() { return partitionAllocShutdown(&m_partitionRoot); }
    ALWAYS_INLINE PartitionRoot* root() { return &m_partitionRoot; }

private:
    PartitionRoot m_partitionRoot;
    PartitionBucket m_actualBuckets[kNumBuckets]; // Must immediately follow m_partitionRoot: PartitionRoot::buckets() addresses past the root object.
};
class PartitionAllocatorGeneric {
public:
    void init() { partitionAllocGenericInit(&m_partitionRoot); }
    bool shutdown() { return partitionAllocGenericShutdown(&m_partitionRoot); }
    ALWAYS_INLINE PartitionRootGeneric* root() { return &m_partitionRoot; }

private:
    PartitionRootGeneric m_partitionRoot;
};
} // namespace WTF
using WTF::SizeSpecificPartitionAllocator;
using WTF::PartitionAllocatorGeneric;
using WTF::PartitionRoot;
using WTF::partitionAllocInit;
using WTF::partitionAllocShutdown;
using WTF::partitionAlloc;
using WTF::partitionFree;
using WTF::partitionAllocGeneric;
using WTF::partitionFreeGeneric;
using WTF::partitionReallocGeneric;
using WTF::partitionAllocActualSize;
using WTF::partitionAllocSupportsGetSize;
using WTF::partitionAllocGetSize;
#endif // WTF_PartitionAlloc_h