This source file includes the following definitions:
- fallback_
- AllocInternal
- Initialize
#ifdef __linux
#include <config.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stddef.h>
#ifdef HAVE_STDINT_H
#include <stdint.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/statfs.h>
#include <unistd.h>
#include <new>
#include <string>
#include <gperftools/malloc_extension.h>
#include "base/basictypes.h"
#include "base/googleinit.h"
#include "base/sysinfo.h"
#include "internal_logging.h"
using tcmalloc::kLog;
using tcmalloc::kCrash;
using tcmalloc::Log;
using std::string;
DEFINE_string(memfs_malloc_path, EnvToString("TCMALLOC_MEMFS_MALLOC_PATH", ""),
"Path where hugetlbfs or tmpfs is mounted. The caller is "
"responsible for ensuring that the path is unique and does "
"not conflict with another process");
DEFINE_int64(memfs_malloc_limit_mb,
EnvToInt("TCMALLOC_MEMFS_LIMIT_MB", 0),
"Limit total allocation size to the "
"specified number of MiB. 0 == no limit.");
DEFINE_bool(memfs_malloc_abort_on_fail,
EnvToBool("TCMALLOC_MEMFS_ABORT_ON_FAIL", false),
"abort() whenever memfs_malloc fails to satisfy an allocation "
"for any reason.");
DEFINE_bool(memfs_malloc_ignore_mmap_fail,
EnvToBool("TCMALLOC_MEMFS_IGNORE_MMAP_FAIL", false),
"Ignore failures from mmap");
DEFINE_bool(memfs_malloc_map_private,
EnvToBool("TCMALLOC_MEMFS_MAP_PRIVATE", false),
"Use MAP_PRIVATE with mmap");
class HugetlbSysAllocator: public SysAllocator {
public:
explicit HugetlbSysAllocator(SysAllocator* fallback)
: failed_(true),
big_page_size_(0),
hugetlb_fd_(-1),
hugetlb_base_(0),
fallback_(fallback) {
}
void* Alloc(size_t size, size_t *actual_size, size_t alignment);
bool Initialize();
bool failed_;
private:
void* AllocInternal(size_t size, size_t *actual_size, size_t alignment);
int64 big_page_size_;
int hugetlb_fd_;
off_t hugetlb_base_;
SysAllocator* fallback_;
};
static char hugetlb_space[sizeof(HugetlbSysAllocator)];
void* HugetlbSysAllocator::Alloc(size_t size, size_t *actual_size,
size_t alignment) {
if (failed_) {
return fallback_->Alloc(size, actual_size, alignment);
}
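  // Small requests are served here only when the caller can accept more than
  // it asked for (actual_size != NULL); otherwise defer to the fallback
  // rather than spend a whole big page on them.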
if (actual_size == NULL && size < big_page_size_) {
return fallback_->Alloc(size, actual_size, alignment);
}
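  // Round the request up to a multiple of the alignment, which is itself
  // raised to at least one big page. For example, with a 2 MiB huge page a
  // 3 MiB request becomes 4 MiB. If the rounding overflows, aligned_size
  // wraps below size and the request goes to the fallback.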
size_t new_alignment = alignment;
if (new_alignment < big_page_size_) new_alignment = big_page_size_;
size_t aligned_size = ((size + new_alignment - 1) /
new_alignment) * new_alignment;
if (aligned_size < size) {
return fallback_->Alloc(size, actual_size, alignment);
}
void* result = AllocInternal(aligned_size, actual_size, new_alignment);
if (result != NULL) {
return result;
}
Log(kLog, __FILE__, __LINE__,
"HugetlbSysAllocator: (failed, allocated)", failed_, hugetlb_base_);
if (FLAGS_memfs_malloc_abort_on_fail) {
Log(kCrash, __FILE__, __LINE__,
"memfs_malloc_abort_on_fail is set");
}
return fallback_->Alloc(size, actual_size, alignment);
}
void* HugetlbSysAllocator::AllocInternal(size_t size, size_t* actual_size,
size_t alignment) {
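  // When the caller wants alignment stricter than a big page, map extra bytes
  // so the returned pointer can be advanced to an aligned address below.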
size_t extra = 0;
if (alignment > big_page_size_) {
extra = alignment - big_page_size_;
}
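  // Enforce the optional size cap: refuse to grow the backing file past
  // memfs_malloc_limit_mb. If less than one big page remains under the cap,
  // mark the allocator as failed so future requests skip straight to the
  // fallback; otherwise this particular request was simply too large.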
off_t limit = FLAGS_memfs_malloc_limit_mb*1024*1024;
if (limit > 0 && hugetlb_base_ + size + extra > limit) {
if (limit - hugetlb_base_ < big_page_size_) {
Log(kLog, __FILE__, __LINE__, "reached memfs_malloc_limit_mb");
failed_ = true;
}
else {
Log(kLog, __FILE__, __LINE__,
"alloc too large (size, bytes left)", size, limit-hugetlb_base_);
}
return NULL;
}
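  // Grow the backing file to cover the new region. tmpfs needs the
  // ftruncate; hugetlbfs has been known to reject it with EINVAL, which is
  // why EINVAL is tolerated here.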
int ret = ftruncate(hugetlb_fd_, hugetlb_base_ + size + extra);
if (ret != 0 && errno != EINVAL) {
Log(kLog, __FILE__, __LINE__,
"ftruncate failed", strerror(errno));
failed_ = true;
return NULL;
}
void *result;
result = mmap(0, size + extra, PROT_WRITE|PROT_READ,
FLAGS_memfs_malloc_map_private ? MAP_PRIVATE : MAP_SHARED,
hugetlb_fd_, hugetlb_base_);
if (result == reinterpret_cast<void*>(MAP_FAILED)) {
if (!FLAGS_memfs_malloc_ignore_mmap_fail) {
Log(kLog, __FILE__, __LINE__,
"mmap failed (size, error)", size + extra, strerror(errno));
failed_ = true;
}
return NULL;
}
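  // Advance the returned pointer to the requested alignment if necessary; the
  // extra bytes mapped above guarantee there is room. The skipped prefix
  // stays mapped but unused, and *actual_size reports how many usable bytes
  // follow the aligned pointer.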
uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
size_t adjust = 0;
if ((ptr & (alignment - 1)) != 0) {
adjust = alignment - (ptr & (alignment - 1));
}
ptr += adjust;
hugetlb_base_ += (size + extra);
if (actual_size) {
*actual_size = size + extra - adjust;
}
return reinterpret_cast<void*>(ptr);
}
bool HugetlbSysAllocator::Initialize() {
char path[PATH_MAX];
const int pathlen = FLAGS_memfs_malloc_path.size();
if (pathlen + 8 > sizeof(path)) {
Log(kCrash, __FILE__, __LINE__, "XX fatal: memfs_malloc_path too long");
return false;
}
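  // Build "<memfs_malloc_path>.XXXXXX" for mkstemp(); the 8-byte copy of
  // ".XXXXXX" includes the terminating NUL.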
memcpy(path, FLAGS_memfs_malloc_path.data(), pathlen);
memcpy(path + pathlen, ".XXXXXX", 8);
int hugetlb_fd = mkstemp(path);
if (hugetlb_fd == -1) {
Log(kLog, __FILE__, __LINE__,
"warning: unable to create memfs_malloc_path",
path, strerror(errno));
return false;
}
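  // Unlink the file right away: the open descriptor keeps it alive, and the
  // backing memory is reclaimed automatically when the process exits.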
if (unlink(path) == -1) {
Log(kCrash, __FILE__, __LINE__,
"fatal: error unlinking memfs_malloc_path", path, strerror(errno));
return false;
}
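  // Ask the filesystem for its block size; on hugetlbfs this is the huge
  // page size, and it becomes the allocator's big_page_size_.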
struct statfs sfs;
if (fstatfs(hugetlb_fd, &sfs) == -1) {
Log(kCrash, __FILE__, __LINE__,
"fatal: error fstatfs of memfs_malloc_path", strerror(errno));
return false;
}
int64 page_size = sfs.f_bsize;
hugetlb_fd_ = hugetlb_fd;
big_page_size_ = page_size;
failed_ = false;
return true;
}
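// At startup, install HugetlbSysAllocator as the system allocator whenever a
// memfs path is configured and Initialize() succeeds, keeping the previous
// system allocator around as the fallback.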
REGISTER_MODULE_INITIALIZER(memfs_malloc, {
if (FLAGS_memfs_malloc_path.length()) {
SysAllocator* alloc = MallocExtension::instance()->GetSystemAllocator();
HugetlbSysAllocator* hp = new (hugetlb_space) HugetlbSysAllocator(alloc);
if (hp->Initialize()) {
MallocExtension::instance()->SetSystemAllocator(hp);
}
}
});
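// Note: using hugetlbfs requires huge pages to be reserved and the filesystem
// to be mounted beforehand. One common setup (illustrative only, not
// something this file performs) is roughly:
//
//   echo 64 > /proc/sys/vm/nr_hugepages
//   mount -t hugetlbfs nodev /mnt/hugetlbfs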
#endif