This source file includes the following definitions.
- CheckIfKernelSupportsTLS
- CheckIfKernelSupportsTLS
- CheckIfKernelSupportsTLS
- Init
- Cleanup
- FetchFromCentralCache
- ListTooLong
- ReleaseToCentralCache
- Scavenge
- IncreaseCacheLimit
- IncreaseCacheLimitLocked
- GetSamplePeriod
- InitModule
- InitTSD
- CreateCacheIfNecessary
- NewHeap
- BecomeIdle
- DestroyThreadCache
- DeleteCache
- RecomputePerThreadCacheSize
- GetThreadStats
- set_overall_thread_cache_size
#include <config.h>
#include "thread_cache.h"
#include <errno.h>
#include <string.h>
#include <algorithm>
#include "base/commandlineflags.h"
#include "base/spinlock.h"
#include "central_freelist.h"
#include "maybe_threads.h"
using std::min;
using std::max;
DEFINE_int64(tcmalloc_max_total_thread_cache_bytes,
EnvToInt64("TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES",
kDefaultOverallThreadCacheSize),
"Bound on the total amount of bytes allocated to "
"thread caches. This bound is not strict, so it is possible "
"for the cache to go over this bound in certain circumstances. "
"Maximum value of this flag is capped to 1 GB.");
namespace tcmalloc {
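// Whether the module-level initialization in InitModule() has run.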
static bool phinited = false;
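// Bookkeeping for the global thread-cache budget: the per-thread limit,
// the overall limit across all threads, and the portion of the overall
// budget not yet claimed by any thread (may go negative under pressure).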
volatile size_t ThreadCache::per_thread_cache_size_ = kMaxThreadCacheSize;
size_t ThreadCache::overall_thread_cache_size_ = kDefaultOverallThreadCacheSize;
ssize_t ThreadCache::unclaimed_cache_space_ = kDefaultOverallThreadCacheSize;
PageHeapAllocator<ThreadCache> threadcache_allocator;
ThreadCache* ThreadCache::thread_heaps_ = NULL;
int ThreadCache::thread_heap_count_ = 0;
ThreadCache* ThreadCache::next_memory_steal_ = NULL;
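// With TLS, a __thread copy of the cache pointer avoids a
// pthread_getspecific call on the fast path; the initial-exec model
// keeps the access cheap (no dynamic TLS resolver call).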
#ifdef HAVE_TLS
__thread ThreadCache* ThreadCache::threadlocal_heap_
# ifdef HAVE___ATTRIBUTE__
__attribute__ ((tls_model ("initial-exec")))
# endif
;
#endif
bool ThreadCache::tsd_inited_ = false;
pthread_key_t ThreadCache::heap_key_;
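// A TLS-enabled build can still run on a kernel without working TLS
// support, so probe the running kernel once and record the answer;
// stay conservative until the check has run.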
#if defined(HAVE_TLS)
bool kernel_supports_tls = false;
# if defined(_WIN32)
void CheckIfKernelSupportsTLS() {
kernel_supports_tls = true;
}
# elif !HAVE_DECL_UNAME
void CheckIfKernelSupportsTLS() {
kernel_supports_tls = false;
}
# else
# include <sys/utsname.h>
void CheckIfKernelSupportsTLS() {
struct utsname buf;
if (uname(&buf) < 0) {
Log(kLog, __FILE__, __LINE__,
"uname failed assuming no TLS support (errno)", errno);
kernel_supports_tls = false;
} else if (strcasecmp(buf.sysname, "linux") == 0) {
if (buf.release[0] < '2' && buf.release[1] == '.')
kernel_supports_tls = false;
else if (buf.release[0] == '2' && buf.release[1] == '.' &&
buf.release[2] >= '0' && buf.release[2] < '6' &&
buf.release[3] == '.')
kernel_supports_tls = false;
else
kernel_supports_tls = true;
} else if (strcasecmp(buf.sysname, "CYGWIN_NT-6.1-WOW64") == 0) {
kernel_supports_tls = false;
} else {
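// Some other kernel; be optimistic.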
kernel_supports_tls = true;
}
}
# endif
#endif
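// Set up a freshly allocated cache for the thread with the given id.
// Claims initial cache space from the global budget, so pageheap_lock
// must be held.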
void ThreadCache::Init(pthread_t tid) {
size_ = 0;
max_size_ = 0;
IncreaseCacheLimitLocked();
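// Nothing could be claimed or stolen: grant the minimum anyway and
// let unclaimed_cache_space_ go negative; the bound is not strict.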
if (max_size_ == 0) {
max_size_ = kMinThreadCacheSize;
unclaimed_cache_space_ -= kMinThreadCacheSize;
ASSERT(unclaimed_cache_space_ < 0);
}
next_ = NULL;
prev_ = NULL;
tid_ = tid;
in_setspecific_ = false;
for (size_t cl = 0; cl < kNumClasses; ++cl) {
list_[cl].Init();
}
uint32_t sampler_seed;
memcpy(&sampler_seed, &tid, sizeof(sampler_seed));
sampler_.Init(sampler_seed);
}
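// Return every object on every free list to the central caches.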
void ThreadCache::Cleanup() {
for (int cl = 0; cl < kNumClasses; ++cl) {
if (list_[cl].length() > 0) {
ReleaseToCentralCache(&list_[cl], cl, list_[cl].length());
}
}
}
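// Refill the empty free list for size class cl from the central cache.
// Returns one object for the caller and keeps the rest; byte_size is
// the object size for this class.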
void* ThreadCache::FetchFromCentralCache(size_t cl, size_t byte_size) {
FreeList* list = &list_[cl];
ASSERT(list->empty());
const int batch_size = Static::sizemap()->num_objects_to_move(cl);
const int num_to_move = min<int>(list->max_length(), batch_size);
void *start, *end;
int fetch_count = Static::central_cache()[cl].RemoveRange(
&start, &end, num_to_move);
ASSERT((start == NULL) == (fetch_count == 0));
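// Hand the first fetched object to the caller and push the remaining
// fetch_count - 1 objects onto the free list.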
if (--fetch_count >= 0) {
size_ += byte_size * fetch_count;
list->PushRange(fetch_count, SLL_Next(start), end);
}
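// Adapt max_length: grow by one while below batch_size (slow start),
// then by a batch at a time, capped at kMaxDynamicFreeListLength and
// rounded down to a multiple of batch_size.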
if (list->max_length() < batch_size) {
list->set_max_length(list->max_length() + 1);
} else {
int new_length = min<int>(list->max_length() + batch_size,
kMaxDynamicFreeListLength);
new_length -= new_length % batch_size;
ASSERT(new_length % batch_size == 0);
list->set_max_length(new_length);
}
return start;
}
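// Called when the free list for class cl grows past max_length: release
// one batch to the central cache, then adjust max_length (grow while
// still below batch_size, shrink after repeated overages above it).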
void ThreadCache::ListTooLong(FreeList* list, size_t cl) {
const int batch_size = Static::sizemap()->num_objects_to_move(cl);
ReleaseToCentralCache(list, cl, batch_size);
if (list->max_length() < batch_size) {
list->set_max_length(list->max_length() + 1);
} else if (list->max_length() > batch_size) {
list->set_length_overages(list->length_overages() + 1);
if (list->length_overages() > kMaxOverages) {
ASSERT(list->max_length() > batch_size);
list->set_max_length(list->max_length() - batch_size);
list->set_length_overages(0);
}
}
}
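// Move up to N objects from the front of src (the free list for class
// cl) to the central cache, a batch at a time where possible.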
void ThreadCache::ReleaseToCentralCache(FreeList* src, size_t cl, int N) {
ASSERT(src == &list_[cl]);
if (N > src->length()) N = src->length();
size_t delta_bytes = N * Static::sizemap()->ByteSizeForClass(cl);
int batch_size = Static::sizemap()->num_objects_to_move(cl);
while (N > batch_size) {
void *tail, *head;
src->PopRange(batch_size, &head, &tail);
Static::central_cache()[cl].InsertRange(head, tail, batch_size);
N -= batch_size;
}
void *tail, *head;
src->PopRange(N, &head, &tail);
Static::central_cache()[cl].InsertRange(head, tail, N);
size_ -= delta_bytes;
}
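// Release idle memory to the central cache.  If a free list's low-water
// mark since the last scavenge is L, the thread never needed its last L
// cached objects, so returning L/2 of them is safe.  max_length is also
// nudged back down, one batch at a time.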
void ThreadCache::Scavenge() {
for (int cl = 0; cl < kNumClasses; cl++) {
FreeList* list = &list_[cl];
const int lowmark = list->lowwatermark();
if (lowmark > 0) {
const int drop = (lowmark > 1) ? lowmark/2 : 1;
ReleaseToCentralCache(list, cl, drop);
const int batch_size = Static::sizemap()->num_objects_to_move(cl);
if (list->max_length() > batch_size) {
list->set_max_length(
max<int>(list->max_length() - batch_size, batch_size));
}
}
list->clear_lowwatermark();
}
IncreaseCacheLimit();
}
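// Take pageheap_lock and try to grow this thread's cache budget.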
void ThreadCache::IncreaseCacheLimit() {
SpinLockHolder h(Static::pageheap_lock());
IncreaseCacheLimitLocked();
}
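// Grow max_size_ by kStealAmount: claim unclaimed budget if any is
// left, otherwise steal budget from another thread's cache.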
void ThreadCache::IncreaseCacheLimitLocked() {
if (unclaimed_cache_space_ > 0) {
unclaimed_cache_space_ -= kStealAmount;
max_size_ += kStealAmount;
return;
}
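// No unclaimed space: steal from up to 10 other caches, round robin
// through next_memory_steal_.  The bound keeps pageheap_lock hold
// times short and terminates when no cache is worth stealing from.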
for (int i = 0; i < 10;
++i, next_memory_steal_ = next_memory_steal_->next_) {
if (next_memory_steal_ == NULL) {
ASSERT(thread_heaps_ != NULL);
next_memory_steal_ = thread_heaps_;
}
if (next_memory_steal_ == this ||
next_memory_steal_->max_size_ <= kMinThreadCacheSize) {
continue;
}
next_memory_steal_->max_size_ -= kStealAmount;
max_size_ += kStealAmount;
next_memory_steal_ = next_memory_steal_->next_;
return;
}
}
int ThreadCache::GetSamplePeriod() {
return sampler_.GetSamplePeriod();
}
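// One-time initialization of the shared allocator state; pageheap_lock
// makes a race between two first-time callers safe.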
void ThreadCache::InitModule() {
SpinLockHolder h(Static::pageheap_lock());
if (!phinited) {
Static::InitStaticVars();
threadcache_allocator.Init();
phinited = true;
}
}
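// Create the pthread TSD key whose destructor, DestroyThreadCache,
// reclaims a thread's cache at thread exit.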
void ThreadCache::InitTSD() {
ASSERT(!tsd_inited_);
perftools_pthread_key_create(&heap_key_, DestroyThreadCache);
tsd_inited_ = true;
#ifdef PTHREADS_CRASHES_IF_RUN_TOO_EARLY
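// Caches created before pthreads was usable were tagged with a zeroed
// placeholder tid (see CreateCacheIfNecessary); adopt them now that
// pthread_self() works.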
pthread_t zero;
memset(&zero, 0, sizeof(zero));
SpinLockHolder h(Static::pageheap_lock());
for (ThreadCache* h = thread_heaps_; h != NULL; h = h->next_) {
if (h->tid_ == zero) {
h->tid_ = pthread_self();
}
}
#endif
}
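// Slow path: find this thread's cache, creating it on first use.  The
// linear scan over thread_heaps_ is unavoidable because we can be
// called before TSD (and even pthread_self) is usable.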
ThreadCache* ThreadCache::CreateCacheIfNecessary() {
ThreadCache* heap = NULL;
{
SpinLockHolder h(Static::pageheap_lock());
#ifdef PTHREADS_CRASHES_IF_RUN_TOO_EARLY
pthread_t me;
if (!tsd_inited_) {
memset(&me, 0, sizeof(me));
} else {
me = pthread_self();
}
#else
const pthread_t me = pthread_self();
#endif
for (ThreadCache* h = thread_heaps_; h != NULL; h = h->next_) {
if (h->tid_ == me) {
heap = h;
break;
}
}
if (heap == NULL) heap = NewHeap(me);
}
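// Store the pointer in TSD (and the TLS copy) for fast lookups later.
// in_setspecific_ breaks the recursion if pthread_setspecific itself
// allocates.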
if (!heap->in_setspecific_ && tsd_inited_) {
heap->in_setspecific_ = true;
perftools_pthread_setspecific(heap_key_, heap);
#ifdef HAVE_TLS
threadlocal_heap_ = heap;
#endif
heap->in_setspecific_ = false;
}
return heap;
}
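// Allocate a cache for tid and link it into the global list; caller
// must hold pageheap_lock.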
ThreadCache* ThreadCache::NewHeap(pthread_t tid) {
ThreadCache *heap = threadcache_allocator.New();
heap->Init(tid);
heap->next_ = thread_heaps_;
heap->prev_ = NULL;
if (thread_heaps_ != NULL) {
thread_heaps_->prev_ = heap;
} else {
ASSERT(next_memory_steal_ == NULL);
next_memory_steal_ = heap;
}
thread_heaps_ = heap;
thread_heap_count_++;
return heap;
}
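// Hint that this thread will be idle for a while: detach and delete its
// cache so the memory can be reused.  A later allocation on this thread
// simply recreates the cache.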
void ThreadCache::BecomeIdle() {
if (!tsd_inited_) return;
ThreadCache* heap = GetThreadHeap();
if (heap == NULL) return;
if (heap->in_setspecific_) return;
heap->in_setspecific_ = true;
perftools_pthread_setspecific(heap_key_, NULL);
#ifdef HAVE_TLS
threadlocal_heap_ = NULL;
#endif
heap->in_setspecific_ = false;
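// pthread_setspecific may have re-entered malloc and reinstated the
// heap; in that case give up rather than delete a live cache.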
if (GetThreadHeap() == heap) {
return;
}
DeleteCache(heap);
}
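// TSD destructor: runs at thread exit with the value stored under
// heap_key_.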
void ThreadCache::DestroyThreadCache(void* ptr) {
if (ptr == NULL) return;
#ifdef HAVE_TLS
threadlocal_heap_ = NULL;
#endif
DeleteCache(reinterpret_cast<ThreadCache*>(ptr));
}
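// Flush a dying heap's objects to the central caches, unlink it from
// the global list, and return its budget to the unclaimed pool.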
void ThreadCache::DeleteCache(ThreadCache* heap) {
heap->Cleanup();
SpinLockHolder h(Static::pageheap_lock());
if (heap->next_ != NULL) heap->next_->prev_ = heap->prev_;
if (heap->prev_ != NULL) heap->prev_->next_ = heap->next_;
if (thread_heaps_ == heap) thread_heaps_ = heap->next_;
thread_heap_count_--;
if (next_memory_steal_ == heap) next_memory_steal_ = heap->next_;
if (next_memory_steal_ == NULL) next_memory_steal_ = thread_heaps_;
unclaimed_cache_space_ += heap->max_size_;
threadcache_allocator.Delete(heap);
}
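// Divide the overall budget evenly among threads, clipped to
// [kMinThreadCacheSize, kMaxThreadCacheSize].  If the per-thread slice
// shrank, scale every existing cache down by the same ratio; growth
// happens lazily via IncreaseCacheLimit().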
void ThreadCache::RecomputePerThreadCacheSize() {
int n = thread_heap_count_ > 0 ? thread_heap_count_ : 1;
size_t space = overall_thread_cache_size_ / n;
if (space < kMinThreadCacheSize) space = kMinThreadCacheSize;
if (space > kMaxThreadCacheSize) space = kMaxThreadCacheSize;
double ratio = space / max<double>(1, per_thread_cache_size_);
size_t claimed = 0;
for (ThreadCache* h = thread_heaps_; h != NULL; h = h->next_) {
if (ratio < 1.0) {
h->max_size_ = static_cast<size_t>(h->max_size_ * ratio);
}
claimed += h->max_size_;
}
unclaimed_cache_space_ = overall_thread_cache_size_ - claimed;
per_thread_cache_size_ = space;
}
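// Add each thread's cached bytes to *total_bytes and, when class_count
// is non-NULL, each thread's per-class free-list lengths to it.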
void ThreadCache::GetThreadStats(uint64_t* total_bytes, uint64_t* class_count) {
for (ThreadCache* h = thread_heaps_; h != NULL; h = h->next_) {
*total_bytes += h->Size();
if (class_count) {
for (int cl = 0; cl < kNumClasses; ++cl) {
class_count[cl] += h->freelist_length(cl);
}
}
}
}
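// Set the total budget shared by all thread caches, clipped to at least
// kMinThreadCacheSize and at most 1GB (1 << 30), then rebalance.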
void ThreadCache::set_overall_thread_cache_size(size_t new_size) {
if (new_size < kMinThreadCacheSize) new_size = kMinThreadCacheSize;
if (new_size > (1<<30)) new_size = (1<<30);
overall_thread_cache_size_ = new_size;
RecomputePerThreadCacheSize();
}
}  // namespace tcmalloc