// This source file includes the following definitions:
// - current_thread_is
// - Init
// - Shutdown
// - Lock
// - Unlock
// - LockIsHeld
// - DoFindRegionLocked
// - FindRegion
// - FindAndMarkStackRegion
// - BeginRegionLocked
// - EndRegionLocked
// - DoInsertRegionLocked
// - HandleSavedRegionsLocked
// - InsertRegionLocked
// - RecordRegionAddition
// - RecordRegionRemoval
// - MmapHook
// - MunmapHook
// - MremapHook
// - SbrkHook
// - LogAllLocked
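//
// A minimal usage sketch (illustrative only, assuming the interface declared
// in memory_region_map.h matches the definitions below): a client calls
// Init() once, takes Lock() while walking the recorded regions, and calls
// Shutdown() when done.  Note that BeginRegionLocked()/EndRegionLocked()
// require that at least one region has already been recorded:
//
//   MemoryRegionMap::Init(MemoryRegionMap::kMaxStackDepth);
//   ...
//   MemoryRegionMap::Lock();
//   for (MemoryRegionMap::RegionIterator r =
//            MemoryRegionMap::BeginRegionLocked();
//        r != MemoryRegionMap::EndRegionLocked(); ++r) {
//     // inspect r->start_addr, r->end_addr, r->caller(), r->is_stack ...
//   }
//   MemoryRegionMap::Unlock();
//   ...
//   MemoryRegionMap::Shutdown();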
#include <config.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif
#ifdef HAVE_MMAP
#include <sys/mman.h>
#elif !defined(MAP_FAILED)
#define MAP_FAILED -1
#endif
#ifdef HAVE_PTHREAD
#include <pthread.h>
#endif
#include <stddef.h>
#include <algorithm>
#include <set>
#include "memory_region_map.h"
#include "base/logging.h"
#include "base/low_level_alloc.h"
#include "malloc_hook-inl.h"
#include <gperftools/stacktrace.h>
#include <gperftools/malloc_hook.h>
#ifndef MREMAP_FIXED
# define MREMAP_FIXED 0
#endif
using std::max;
int MemoryRegionMap::client_count_ = 0;
int MemoryRegionMap::max_stack_depth_ = 0;
MemoryRegionMap::RegionSet* MemoryRegionMap::regions_ = NULL;
LowLevelAlloc::Arena* MemoryRegionMap::arena_ = NULL;
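// lock_ protects regions_, map_size_ and unmap_size_; owner_lock_ protects
// recursion_count_ and lock_owner_tid_ below.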
SpinLock MemoryRegionMap::lock_(SpinLock::LINKER_INITIALIZED);
SpinLock MemoryRegionMap::owner_lock_(
SpinLock::LINKER_INITIALIZED);
int MemoryRegionMap::recursion_count_ = 0;
pthread_t MemoryRegionMap::lock_owner_tid_;
int64 MemoryRegionMap::map_size_ = 0;
int64 MemoryRegionMap::unmap_size_ = 0;
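// current_thread_is() compares against pthread_self() only once
// libpthread_initialized has been set by the dynamic initializer below;
// before that (during early static initialization, when pthread_self()
// may not yet be usable) it conservatively returns true.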
static bool libpthread_initialized = false;
static bool initializer = (libpthread_initialized = true, true);
static inline bool current_thread_is(pthread_t should_be) {
if (!libpthread_initialized) return true;
return pthread_equal(pthread_self(), should_be);
}
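// Raw storage (pointer-aligned via the align_it member) for the RegionSet.
// It has no constructor of its own; regions_ is placement-new-constructed
// into it the first time InsertRegionLocked() needs it, so global
// construction order does not interfere.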
union MemoryRegionMap::RegionSetRep {
char rep[sizeof(RegionSet)];
void* align_it;
RegionSet* region_set() { return reinterpret_cast<RegionSet*>(rep); }
};
static MemoryRegionMap::RegionSetRep regions_rep;
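// Set while an insert may trigger further hooked allocations (e.g. the
// RegionSet or the LowLevelAlloc arena allocating memory for itself):
// regions recorded during that window go into saved_regions instead of
// regions_ and are later flushed by HandleSavedRegionsLocked().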
static bool recursive_insert = false;
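// The first Init() call registers the mmap/mremap/sbrk/munmap hooks and
// creates the LowLevelAlloc arena (with recursive_insert set so that any
// mmaps made while creating it are saved and inserted afterwards);
// subsequent calls only bump client_count_ and possibly max_stack_depth_.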
void MemoryRegionMap::Init(int max_stack_depth) {
RAW_VLOG(10, "MemoryRegionMap Init");
RAW_CHECK(max_stack_depth >= 0, "");
RAW_CHECK(max_stack_depth <= kMaxStackDepth,
"need to increase kMaxStackDepth?");
Lock();
client_count_ += 1;
max_stack_depth_ = max(max_stack_depth_, max_stack_depth);
if (client_count_ > 1) {
Unlock();
RAW_VLOG(10, "MemoryRegionMap Init increment done");
return;
}
RAW_CHECK(MallocHook::AddMmapHook(&MmapHook), "");
RAW_CHECK(MallocHook::AddMremapHook(&MremapHook), "");
RAW_CHECK(MallocHook::AddSbrkHook(&SbrkHook), "");
RAW_CHECK(MallocHook::AddMunmapHook(&MunmapHook), "");
recursive_insert = true;
arena_ = LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
recursive_insert = false;
HandleSavedRegionsLocked(&InsertRegionLocked);
Unlock();
RAW_VLOG(10, "MemoryRegionMap Init done");
}
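// Shutdown() undoes one Init(); the hooks, the region set and the arena are
// torn down only when the last client shuts down.  Returns false if the
// arena could not be deleted because it is still in use.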
bool MemoryRegionMap::Shutdown() {
RAW_VLOG(10, "MemoryRegionMap Shutdown");
Lock();
RAW_CHECK(client_count_ > 0, "");
client_count_ -= 1;
if (client_count_ != 0) {
Unlock();
RAW_VLOG(10, "MemoryRegionMap Shutdown decrement done");
return true;
}
RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook), "");
RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook), "");
RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook), "");
RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook), "");
if (regions_) regions_->~RegionSet();
regions_ = NULL;
bool deleted_arena = LowLevelAlloc::DeleteArena(arena_);
if (deleted_arena) {
arena_ = 0;
} else {
RAW_LOG(WARNING, "Can't delete LowLevelAlloc arena: it's being used");
}
Unlock();
RAW_VLOG(10, "MemoryRegionMap Shutdown done");
return deleted_arena;
}
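// Lock()/Unlock() implement a recursive lock on top of the non-reentrant
// SpinLock lock_: owner_lock_ guards recursion_count_ and lock_owner_tid_,
// so a thread that already holds lock_ re-enters instead of deadlocking.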
void MemoryRegionMap::Lock() {
{
SpinLockHolder l(&owner_lock_);
if (recursion_count_ > 0 && current_thread_is(lock_owner_tid_)) {
RAW_CHECK(lock_.IsHeld(), "Invariants violated");
recursion_count_++;
RAW_CHECK(recursion_count_ <= 5,
"recursive lock nesting unexpectedly deep");
return;
}
}
lock_.Lock();
{
SpinLockHolder l(&owner_lock_);
RAW_CHECK(recursion_count_ == 0,
"Last Unlock didn't reset recursion_count_");
if (libpthread_initialized)
lock_owner_tid_ = pthread_self();
recursion_count_ = 1;
}
}
void MemoryRegionMap::Unlock() {
SpinLockHolder l(&owner_lock_);
RAW_CHECK(recursion_count_ > 0, "unlock when not held");
RAW_CHECK(lock_.IsHeld(),
"unlock when not held, and recursion_count_ is wrong");
RAW_CHECK(current_thread_is(lock_owner_tid_), "unlock by non-holder");
recursion_count_--;
if (recursion_count_ == 0) {
lock_.Unlock();
}
}
bool MemoryRegionMap::LockIsHeld() {
SpinLockHolder l(&owner_lock_);
return lock_.IsHeld() && current_thread_is(lock_owner_tid_);
}
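// regions_ is kept ordered by Region::end_addr (LogAllLocked() checks this
// invariant), so lower_bound() on a key built from 'addr' returns the first
// region that ends at or after addr; that region contains addr iff
// start_addr <= addr < end_addr.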
const MemoryRegionMap::Region*
MemoryRegionMap::DoFindRegionLocked(uintptr_t addr) {
RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
if (regions_ != NULL) {
Region sample;
sample.SetRegionSetKey(addr);
RegionSet::iterator region = regions_->lower_bound(sample);
if (region != regions_->end()) {
RAW_CHECK(addr <= region->end_addr, "");
if (region->start_addr <= addr && addr < region->end_addr) {
return &(*region);
}
}
}
return NULL;
}
bool MemoryRegionMap::FindRegion(uintptr_t addr, Region* result) {
Lock();
const Region* region = DoFindRegionLocked(addr);
if (region != NULL) *result = *region;
Unlock();
return region != NULL;
}
bool MemoryRegionMap::FindAndMarkStackRegion(uintptr_t stack_top,
Region* result) {
Lock();
const Region* region = DoFindRegionLocked(stack_top);
if (region != NULL) {
RAW_VLOG(10, "Stack at %p is inside region %p..%p",
reinterpret_cast<void*>(stack_top),
reinterpret_cast<void*>(region->start_addr),
reinterpret_cast<void*>(region->end_addr));
const_cast<Region*>(region)->set_is_stack();
*result = *region;
}
Unlock();
return region != NULL;
}
MemoryRegionMap::RegionIterator MemoryRegionMap::BeginRegionLocked() {
RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
RAW_CHECK(regions_ != NULL, "");
return regions_->begin();
}
MemoryRegionMap::RegionIterator MemoryRegionMap::EndRegionLocked() {
RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
RAW_CHECK(regions_ != NULL, "");
return regions_->end();
}
inline void MemoryRegionMap::DoInsertRegionLocked(const Region& region) {
RAW_VLOG(12, "Inserting region %p..%p from %p",
reinterpret_cast<void*>(region.start_addr),
reinterpret_cast<void*>(region.end_addr),
reinterpret_cast<void*>(region.caller()));
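// lower_bound() finds the first recorded region whose end is not before
// region's end; if that region also starts at or before region.start_addr,
// 'region' is already fully covered and nothing needs to be inserted.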
RegionSet::const_iterator i = regions_->lower_bound(region);
if (i != regions_->end() && i->start_addr <= region.start_addr) {
RAW_DCHECK(region.end_addr <= i->end_addr, "");
return;
}
if (DEBUG_MODE) {
RAW_CHECK(i == regions_->end() || !region.Overlaps(*i),
"Wow, overlapping memory regions");
Region sample;
sample.SetRegionSetKey(region.start_addr);
i = regions_->lower_bound(sample);
RAW_CHECK(i == regions_->end() || !region.Overlaps(*i),
"Wow, overlapping memory regions");
}
region.AssertIsConsistent();
regions_->insert(region);
RAW_VLOG(12, "Inserted region %p..%p :",
reinterpret_cast<void*>(region.start_addr),
reinterpret_cast<void*>(region.end_addr));
if (VLOG_IS_ON(12)) LogAllLocked();
}
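// Regions recorded while recursive_insert is set (or before regions_ exists)
// are parked in this fixed-size array and later replayed through
// HandleSavedRegionsLocked(); the RAW_CHECK in InsertRegionLocked() guards
// against overflowing these slots.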
static int saved_regions_count = 0;
static MemoryRegionMap::Region saved_regions[20];
inline void MemoryRegionMap::HandleSavedRegionsLocked(
void (*insert_func)(const Region& region)) {
while (saved_regions_count > 0) {
Region r = saved_regions[--saved_regions_count];
(*insert_func)(r);
}
}
inline void MemoryRegionMap::InsertRegionLocked(const Region& region) {
RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
if (recursive_insert) {
RAW_VLOG(12, "Saving recursive insert of region %p..%p from %p",
reinterpret_cast<void*>(region.start_addr),
reinterpret_cast<void*>(region.end_addr),
reinterpret_cast<void*>(region.caller()));
RAW_CHECK(saved_regions_count < arraysize(saved_regions), "");
saved_regions[saved_regions_count++] = region;
} else {
if (regions_ == NULL) {
RAW_VLOG(12, "Initializing region set");
regions_ = regions_rep.region_set();
recursive_insert = true;
new(regions_) RegionSet();
HandleSavedRegionsLocked(&DoInsertRegionLocked);
recursive_insert = false;
}
recursive_insert = true;
DoInsertRegionLocked(region);
HandleSavedRegionsLocked(&DoInsertRegionLocked);
recursive_insert = false;
}
}
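// Number of our own stack frames to skip when capturing the caller's stack
// in RecordRegionAddition(): debug builds strip more frames because less
// inlining happens there.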
#ifdef NDEBUG
static const int kStripFrames = 1;
#else
static const int kStripFrames = 3;
#endif
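// Records a new memory region [start, start+size): captures the caller's
// stack (skipping our own frames), then inserts the region and grows
// map_size_ under Lock().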
void MemoryRegionMap::RecordRegionAddition(const void* start, size_t size) {
Region region;
region.Create(start, size);
const int depth =
max_stack_depth_ > 0
? MallocHook::GetCallerStackTrace(const_cast<void**>(region.call_stack),
max_stack_depth_, kStripFrames + 1)
: 0;
region.set_call_stack_depth(depth);
RAW_VLOG(10, "New global region %p..%p from %p",
reinterpret_cast<void*>(region.start_addr),
reinterpret_cast<void*>(region.end_addr),
reinterpret_cast<void*>(region.caller()));
Lock();
map_size_ += size;
InsertRegionLocked(region);
Unlock();
}
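// Removes [start, start+size) from the recorded regions.  If we are inside
// a recursive insert, matching regions still parked in saved_regions are
// dropped directly.  Otherwise each recorded region overlapping the range
// is either erased (fully covered), split in two (it strictly contains the
// range), or has its start or end chopped off.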
void MemoryRegionMap::RecordRegionRemoval(const void* start, size_t size) {
Lock();
if (recursive_insert) {
uintptr_t start_addr = reinterpret_cast<uintptr_t>(start);
uintptr_t end_addr = start_addr + size;
int put_pos = 0;
int old_count = saved_regions_count;
for (int i = 0; i < old_count; ++i, ++put_pos) {
Region& r = saved_regions[i];
if (r.start_addr == start_addr && r.end_addr == end_addr) {
--saved_regions_count;
--put_pos;
RAW_VLOG(10, ("Insta-Removing saved region %p..%p; "
"now have %d saved regions"),
reinterpret_cast<void*>(start_addr),
reinterpret_cast<void*>(end_addr),
saved_regions_count);
} else {
if (put_pos < i) {
saved_regions[put_pos] = saved_regions[i];
}
}
}
}
if (regions_ == NULL) {
Unlock();
return;
}
if (!recursive_insert) {
HandleSavedRegionsLocked(&InsertRegionLocked);
}
uintptr_t start_addr = reinterpret_cast<uintptr_t>(start);
uintptr_t end_addr = start_addr + size;
RAW_VLOG(10, "Removing global region %p..%p; have %"PRIuS" regions",
reinterpret_cast<void*>(start_addr),
reinterpret_cast<void*>(end_addr),
regions_->size());
Region sample;
sample.SetRegionSetKey(start_addr);
for (RegionSet::iterator region = regions_->lower_bound(sample);
region != regions_->end() && region->start_addr < end_addr;
) {
RAW_VLOG(13, "Looking at region %p..%p",
reinterpret_cast<void*>(region->start_addr),
reinterpret_cast<void*>(region->end_addr));
if (start_addr <= region->start_addr &&
region->end_addr <= end_addr) {
RAW_VLOG(12, "Deleting region %p..%p",
reinterpret_cast<void*>(region->start_addr),
reinterpret_cast<void*>(region->end_addr));
RegionSet::iterator d = region;
++region;
regions_->erase(d);
continue;
} else if (region->start_addr < start_addr &&
end_addr < region->end_addr) {
RAW_VLOG(12, "Splitting region %p..%p in two",
reinterpret_cast<void*>(region->start_addr),
reinterpret_cast<void*>(region->end_addr));
Region r = *region;
r.set_end_addr(start_addr);
InsertRegionLocked(r);
const_cast<Region&>(*region).set_start_addr(end_addr);
} else if (end_addr > region->start_addr &&
start_addr <= region->start_addr) {
RAW_VLOG(12, "Start-chopping region %p..%p",
reinterpret_cast<void*>(region->start_addr),
reinterpret_cast<void*>(region->end_addr));
const_cast<Region&>(*region).set_start_addr(end_addr);
} else if (start_addr > region->start_addr &&
start_addr < region->end_addr) {
RAW_VLOG(12, "End-chopping region %p..%p",
reinterpret_cast<void*>(region->start_addr),
reinterpret_cast<void*>(region->end_addr));
Region r = *region;
r.set_end_addr(start_addr);
RegionSet::iterator d = region;
++region;
regions_->erase(d);
InsertRegionLocked(r);
continue;
}
++region;
}
RAW_VLOG(12, "Removed region %p..%p; have %"PRIuS" regions",
reinterpret_cast<void*>(start_addr),
reinterpret_cast<void*>(end_addr),
regions_->size());
if (VLOG_IS_ON(12)) LogAllLocked();
unmap_size_ += size;
Unlock();
}
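// MallocHook callbacks: they translate successful mmap/munmap/mremap/sbrk
// calls into RecordRegionAddition()/RecordRegionRemoval(); failed calls
// (MAP_FAILED or -1 results) and zero-sized mmap/munmap requests are ignored.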
void MemoryRegionMap::MmapHook(const void* result,
const void* start, size_t size,
int prot, int flags,
int fd, off_t offset) {
RAW_VLOG(10, "MMap = 0x%"PRIxPTR" of %"PRIuS" at %"PRIu64" "
"prot %d flags %d fd %d offs %"PRId64,
reinterpret_cast<uintptr_t>(result), size,
reinterpret_cast<uint64>(start), prot, flags, fd,
static_cast<int64>(offset));
if (result != reinterpret_cast<void*>(MAP_FAILED) && size != 0) {
RecordRegionAddition(result, size);
}
}
void MemoryRegionMap::MunmapHook(const void* ptr, size_t size) {
RAW_VLOG(10, "MUnmap of %p %"PRIuS"", ptr, size);
if (size != 0) {
RecordRegionRemoval(ptr, size);
}
}
void MemoryRegionMap::MremapHook(const void* result,
const void* old_addr, size_t old_size,
size_t new_size, int flags,
const void* new_addr) {
RAW_VLOG(10, "MRemap = 0x%"PRIxPTR" of 0x%"PRIxPTR" %"PRIuS" "
"to %"PRIuS" flags %d new_addr=0x%"PRIxPTR,
(uintptr_t)result, (uintptr_t)old_addr,
old_size, new_size, flags,
flags & MREMAP_FIXED ? (uintptr_t)new_addr : 0);
if (result != reinterpret_cast<void*>(-1)) {
RecordRegionRemoval(old_addr, old_size);
RecordRegionAddition(result, new_size);
}
}
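// SbrkHook computes the affected range by querying the new program break
// with sbrk(0): on growth the added region runs from the old break
// ('result') to the new break; on shrink the removed region runs from the
// new break back to the old one.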
extern "C" void* __sbrk(ptrdiff_t increment);
void MemoryRegionMap::SbrkHook(const void* result, ptrdiff_t increment) {
RAW_VLOG(10, "Sbrk = 0x%"PRIxPTR" of %"PRIdS"", (uintptr_t)result, increment);
if (result != reinterpret_cast<void*>(-1)) {
if (increment > 0) {
void* new_end = sbrk(0);
RecordRegionAddition(result, reinterpret_cast<uintptr_t>(new_end) -
reinterpret_cast<uintptr_t>(result));
} else if (increment < 0) {
void* new_end = sbrk(0);
RecordRegionRemoval(new_end, reinterpret_cast<uintptr_t>(result) -
reinterpret_cast<uintptr_t>(new_end));
}
}
}
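// Logs every recorded region and checks that the set is ordered by strictly
// increasing end_addr.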
void MemoryRegionMap::LogAllLocked() {
RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
RAW_LOG(INFO, "List of regions:");
uintptr_t previous = 0;
for (RegionSet::const_iterator r = regions_->begin();
r != regions_->end(); ++r) {
RAW_LOG(INFO, "Memory region 0x%"PRIxPTR"..0x%"PRIxPTR" "
"from 0x%"PRIxPTR" stack=%d",
r->start_addr, r->end_addr, r->caller(), r->is_stack);
RAW_CHECK(previous < r->end_addr, "wow, we messed up the set order");
previous = r->end_addr;
}
RAW_LOG(INFO, "End of regions list");
}