This source file includes the following definitions:
- IsDebuggerAttached
- ProfilingIsEnabledForAllThreads
- Init
- Shutdown
- alloc_count
- Allocate
- Free
- Free
- DeleteAndNull
- DeleteAndNullIfNot
- get_thread_disable_counter
- set_thread_disable_counter
- get_thread_disable_counter
- set_thread_disable_counter
- AsPtr
- AsInt
- hc_strstr
- NewHook
- DeleteHook
- GetStackDirection
- RegisterStackLocked
- MakeIgnoredObjectsLiveCallbackLocked
- MakeDisabledLiveCallbackLocked
- RecordGlobalDataLocked
- IsLibraryNamed
- DisableLibraryAllocsLocked
- UseProcMapsLocked
- IgnoreLiveThreadsLocked
- IgnoreNonThreadLiveObjectsLocked
- IsOneThread
- IgnoreAllLiveObjectsLocked
- IgnoreLiveObjectsLocked
- DisableChecksIn
- DoIgnoreObject
- UnIgnoreObject
- MakeProfileNameLocked
- Create
- BytesLeaked
- ObjectsLeaked
- invocation_name
- invocation_path
- invocation_name
- invocation_path
- SuggestPprofCommand
- DoNoLeaks
- IsActive
- RunHeapCleanups
- HeapLeakChecker_RunHeapCleanups
- HeapLeakChecker_InternalInitStart
- NoGlobalLeaksMaybeSymbolize
- DoMainHeapCheck
- GlobalChecker
- NoGlobalLeaks
- CancelGlobalCheck
- BeforeConstructorsLocked
- TurnItselfOffLocked
- HeapLeakChecker_BeforeConstructors
- MallocHook_InitAtFirstAllocation_HeapLeakChecker
- HeapLeakChecker_AfterDestructors
- DisableChecksFromToLocked
- HaveOnHeapLocked
- GetAllocCaller
#include "config.h"
#include <fcntl.h>
#include <string.h>
#include <errno.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_MMAP
#include <sys/mman.h>
#endif
#ifdef HAVE_PTHREAD
#include <pthread.h>
#endif
#include <sys/stat.h>
#include <sys/types.h>
#include <time.h>
#include <assert.h>
#if defined(HAVE_LINUX_PTRACE_H)
#include <linux/ptrace.h>
#endif
#ifdef HAVE_SYS_SYSCALL_H
#include <sys/syscall.h>
#endif
#if defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__) || defined(__MINGW32__)
#include <wtypes.h>
#include <winbase.h>
#undef ERROR
#undef max
#undef min
#endif
#include <string>
#include <vector>
#include <map>
#include <set>
#include <algorithm>
#include <functional>
#include <gperftools/heap-checker.h>
#include "base/basictypes.h"
#include "base/googleinit.h"
#include "base/logging.h"
#include <gperftools/stacktrace.h>
#include "base/commandlineflags.h"
#include "base/elfcore.h"
#include "base/thread_lister.h"
#include "heap-profile-table.h"
#include "base/low_level_alloc.h"
#include "malloc_hook-inl.h"
#include <gperftools/malloc_hook.h>
#include <gperftools/malloc_extension.h>
#include "maybe_threads.h"
#include "memory_region_map.h"
#include "base/spinlock.h"
#include "base/sysinfo.h"
#include "base/stl_allocator.h"
using std::string;
using std::basic_string;
using std::pair;
using std::map;
using std::set;
using std::vector;
using std::swap;
using std::make_pair;
using std::min;
using std::max;
using std::less;
using std::char_traits;
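// Returns true iff a tracer (e.g. a debugger) is attached to this process,
// determined by reading the "TracerPid:" field of /proc/self/status.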
static bool IsDebuggerAttached(void) {
char buf[256];
int fd = open("/proc/self/status", O_RDONLY);
if (fd == -1) {
return false;
}
const int len = read(fd, buf, sizeof(buf));
bool rc = false;
if (len > 0) {
const char *const kTracerPid = "TracerPid:\t";
buf[len - 1] = '\0';
const char *p = strstr(buf, kTracerPid);
if (p != NULL) {
rc = (strncmp(p + strlen(kTracerPid), "0\n", 2) != 0);
}
}
close(fd);
return rc;
}
extern "C" {
ATTRIBUTE_WEAK PERFTOOLS_DLL_DECL bool ProfilingIsEnabledForAllThreads();
bool ProfilingIsEnabledForAllThreads() { return false; }
}
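// Flags controlling the heap checker; each one whose default uses an EnvTo*
// helper below can also be set via the environment variable of that name.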
DEFINE_string(heap_check,
EnvToString("HEAPCHECK", ""),
"The heap leak checking to be done over the whole executable: "
"\"minimal\", \"normal\", \"strict\", "
"\"draconian\", \"as-is\", and \"local\" "
" or the empty string are the supported choices. "
"(See HeapLeakChecker_InternalInitStart for details.)");
DEFINE_bool(heap_check_report, true, "Obsolete");
DEFINE_bool(heap_check_before_constructors,
true,
"deprecated; pretty much always true now");
DEFINE_bool(heap_check_after_destructors,
EnvToBool("HEAP_CHECK_AFTER_DESTRUCTORS", false),
"If overall heap check is to end after global destructors "
"or right after all REGISTER_HEAPCHECK_CLEANUP's");
DEFINE_bool(heap_check_strict_check, true, "Obsolete");
DEFINE_bool(heap_check_ignore_global_live,
EnvToBool("HEAP_CHECK_IGNORE_GLOBAL_LIVE", true),
"If overall heap check is to ignore heap objects reachable "
"from the global data");
DEFINE_bool(heap_check_identify_leaks,
EnvToBool("HEAP_CHECK_IDENTIFY_LEAKS", false),
"If heap check should generate the addresses of the leaked "
"objects in the memory leak profiles. This may be useful "
"in tracking down leaks where only a small fraction of "
"objects allocated at the same stack trace are leaked.");
DEFINE_bool(heap_check_ignore_thread_live,
EnvToBool("HEAP_CHECK_IGNORE_THREAD_LIVE", true),
"If set to true, objects reachable from thread stacks "
"and registers are not reported as leaks");
DEFINE_bool(heap_check_test_pointer_alignment,
EnvToBool("HEAP_CHECK_TEST_POINTER_ALIGNMENT", false),
"Set to true to check if the found leak can be due to "
"use of unaligned pointers");
static const size_t kPointerSourceAlignment = sizeof(void*);
DEFINE_int32(heap_check_pointer_source_alignment,
EnvToInt("HEAP_CHECK_POINTER_SOURCE_ALIGNMENT",
kPointerSourceAlignment),
"Alignment at which all pointers in memory are supposed to be "
"located. Use 1 if any alignment is ok.");
static const int64 kHeapCheckMaxPointerOffset = 1024;
DEFINE_int64(heap_check_max_pointer_offset,
EnvToInt("HEAP_CHECK_MAX_POINTER_OFFSET",
kHeapCheckMaxPointerOffset),
"Largest pointer offset for which we traverse "
"pointers going inside of heap allocated objects. "
"Set to -1 to use the actual largest heap object size.");
DEFINE_bool(heap_check_run_under_gdb,
EnvToBool("HEAP_CHECK_RUN_UNDER_GDB", false),
"If false, turns off heap-checking library when running under gdb "
"(normally, set to 'true' only when debugging the heap-checker)");
DEFINE_int32(heap_check_delay_seconds, 0,
"Number of seconds to delay on-exit heap checking."
" If you set this flag,"
" you may also want to set exit_timeout_seconds in order to"
" avoid exit timeouts.\n"
"NOTE: This flag is to be used only to help diagnose issues"
" where it is suspected that the heap checker is reporting"
" false leaks that will disappear if the heap checker delays"
" its checks. Report any such issues to the heap-checker"
" maintainer(s).");
DEFINE_int32(heap_check_error_exit_code,
EnvToInt("HEAP_CHECK_ERROR_EXIT_CODE", 1),
"Exit code to return if any leaks were detected.");
DEFINE_string(heap_profile_pprof,
EnvToString("PPROF_PATH", "pprof"),
"OBSOLETE; not used");
DEFINE_string(heap_check_dump_directory,
EnvToString("HEAP_CHECK_DUMP_DIRECTORY", "/tmp"),
"Directory to put heap-checker leak dump information");
static SpinLock heap_checker_lock(SpinLock::LINKER_INITIALIZED);
static const string* profile_name_prefix = NULL;
static HeapLeakChecker* main_heap_checker = NULL;
static bool do_main_heap_check = false;
static HeapProfileTable* heap_profile = NULL;
static bool heap_checker_on = false;
static pid_t heap_checker_pid = 0;
static bool constructor_heap_profiling = false;
static const int heap_checker_info_level = 0;
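// Allocator for the heap checker's own internal data structures.  It draws
// from a dedicated LowLevelAlloc arena so that this bookkeeping memory does
// not itself go through the hooked heap that is being checked.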
class HeapLeakChecker::Allocator {
public:
static void Init() {
RAW_DCHECK(heap_checker_lock.IsHeld(), "");
RAW_DCHECK(arena_ == NULL, "");
arena_ = LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
}
static void Shutdown() {
RAW_DCHECK(heap_checker_lock.IsHeld(), "");
if (!LowLevelAlloc::DeleteArena(arena_) || alloc_count_ != 0) {
RAW_LOG(FATAL, "Internal heap checker leak of %d objects", alloc_count_);
}
}
static int alloc_count() {
RAW_DCHECK(heap_checker_lock.IsHeld(), "");
return alloc_count_;
}
static void* Allocate(size_t n) {
RAW_DCHECK(arena_ && heap_checker_lock.IsHeld(), "");
void* p = LowLevelAlloc::AllocWithArena(n, arena_);
if (p) alloc_count_ += 1;
return p;
}
static void Free(void* p) {
RAW_DCHECK(heap_checker_lock.IsHeld(), "");
if (p) alloc_count_ -= 1;
LowLevelAlloc::Free(p);
}
static void Free(void* p, size_t ) {
Free(p);
}
template<typename T> static void DeleteAndNull(T** p) {
(*p)->~T();
Free(*p);
*p = NULL;
}
template<typename T> static void DeleteAndNullIfNot(T** p) {
if (*p != NULL) DeleteAndNull(p);
}
private:
static LowLevelAlloc::Arena* arena_;
static int alloc_count_;
};
LowLevelAlloc::Arena* HeapLeakChecker::Allocator::arena_ = NULL;
int HeapLeakChecker::Allocator::alloc_count_ = 0;
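// How an entry on a live-objects stack was obtained and hence how it should
// be treated when it is scanned for heap pointers.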
enum ObjectPlacement {
MUST_BE_ON_HEAP,
IGNORED_ON_HEAP,
MAYBE_LIVE,
IN_GLOBAL_DATA,
THREAD_DATA,
THREAD_REGISTERS,
};
struct AllocObject {
const void* ptr;
uintptr_t size;
ObjectPlacement place;
AllocObject(const void* p, size_t s, ObjectPlacement l)
: ptr(p), size(s), place(l) { }
};
typedef map<uintptr_t, size_t, less<uintptr_t>,
STL_Allocator<pair<const uintptr_t, size_t>,
HeapLeakChecker::Allocator>
> IgnoredObjectsMap;
static IgnoredObjectsMap* ignored_objects = NULL;
typedef vector<AllocObject,
STL_Allocator<AllocObject, HeapLeakChecker::Allocator>
> LiveObjectsStack;
static LiveObjectsStack* live_objects = NULL;
typedef basic_string<char, char_traits<char>,
STL_Allocator<char, HeapLeakChecker::Allocator>
> HCL_string;
typedef map<HCL_string, LiveObjectsStack, less<HCL_string>,
STL_Allocator<pair<const HCL_string, LiveObjectsStack>,
HeapLeakChecker::Allocator>
> LibraryLiveObjectsStacks;
static LibraryLiveObjectsStacks* library_live_objects = NULL;
struct HeapLeakChecker::RangeValue {
uintptr_t start_address;
int max_depth;
};
typedef map<uintptr_t, HeapLeakChecker::RangeValue, less<uintptr_t>,
STL_Allocator<pair<const uintptr_t, HeapLeakChecker::RangeValue>,
HeapLeakChecker::Allocator>
> DisabledRangeMap;
static DisabledRangeMap* disabled_ranges = NULL;
typedef set<uintptr_t, less<uintptr_t>,
STL_Allocator<uintptr_t, HeapLeakChecker::Allocator>
> StackTopSet;
static StackTopSet* stack_tops = NULL;
typedef map<uintptr_t, uintptr_t, less<uintptr_t>,
STL_Allocator<pair<const uintptr_t, uintptr_t>,
HeapLeakChecker::Allocator>
> GlobalRegionCallerRangeMap;
static GlobalRegionCallerRangeMap* global_region_caller_ranges = NULL;
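// Per-thread count of currently active HeapLeakChecker::Disabler objects;
// allocations made while it is non-zero are marked as ignored.  Implemented
// with a __thread variable when TLS is available, a pthread key otherwise.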
#ifdef HAVE_TLS
static __thread int thread_disable_counter
# ifdef HAVE___ATTRIBUTE__
__attribute__ ((tls_model ("initial-exec")))
# endif
;
inline int get_thread_disable_counter() {
return thread_disable_counter;
}
inline void set_thread_disable_counter(int value) {
thread_disable_counter = value;
}
#else
static pthread_key_t thread_disable_counter_key;
static int main_thread_counter;
static bool use_main_thread_counter = true;
inline int get_thread_disable_counter() {
if (use_main_thread_counter)
return main_thread_counter;
void* p = perftools_pthread_getspecific(thread_disable_counter_key);
return (intptr_t)p;
}
inline void set_thread_disable_counter(int value) {
if (use_main_thread_counter) {
main_thread_counter = value;
return;
}
intptr_t pointer_sized_value = value;
void* p = (void*)pointer_sized_value;
perftools_pthread_setspecific(thread_disable_counter_key, p);
}
class InitThreadDisableCounter {
public:
InitThreadDisableCounter() {
perftools_pthread_key_create(&thread_disable_counter_key, NULL);
void* p = (void*)main_thread_counter;
perftools_pthread_setspecific(thread_disable_counter_key, p);
use_main_thread_counter = false;
}
};
InitThreadDisableCounter init_thread_disable_counter;
#endif
HeapLeakChecker::Disabler::Disabler() {
int counter = get_thread_disable_counter();
set_thread_disable_counter(counter + 1);
RAW_VLOG(10, "Increasing thread disable counter to %d", counter + 1);
}
HeapLeakChecker::Disabler::~Disabler() {
int counter = get_thread_disable_counter();
RAW_DCHECK(counter > 0, "");
if (counter > 0) {
set_thread_disable_counter(counter - 1);
RAW_VLOG(10, "Decreasing thread disable counter to %d", counter);
} else {
RAW_VLOG(0, "Thread disable counter underflow : %d", counter);
}
}
static size_t max_heap_object_size = 0;
static uintptr_t min_heap_address = uintptr_t(-1LL);
static uintptr_t max_heap_address = 0;
template<typename T>
inline static const void* AsPtr(T addr) {
return reinterpret_cast<void*>(addr);
}
inline static uintptr_t AsInt(const void* ptr) {
return reinterpret_cast<uintptr_t>(ptr);
}
static const char* hc_strstr(const char* s1, const char* s2) {
const size_t len = strlen(s2);
RAW_CHECK(len > 0, "Unexpected empty string passed to strstr()");
for (const char* p = strchr(s1, *s2); p != NULL; p = strchr(p+1, *s2)) {
if (strncmp(p, s2, len) == 0) {
return p;
}
}
return NULL;
}
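// Malloc hooks: while the checker is on they record every allocation and
// deallocation into heap_profile and track the overall heap address range
// and the largest heap object size seen.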
static void NewHook(const void* ptr, size_t size) {
if (ptr != NULL) {
const int counter = get_thread_disable_counter();
const bool ignore = (counter > 0);
RAW_VLOG(16, "Recording Alloc: %p of %"PRIuS "; %d", ptr, size,
int(counter));
void* stack[HeapProfileTable::kMaxStackDepth];
int depth = HeapProfileTable::GetCallerStackTrace(0, stack);
{ SpinLockHolder l(&heap_checker_lock);
if (size > max_heap_object_size) max_heap_object_size = size;
uintptr_t addr = AsInt(ptr);
if (addr < min_heap_address) min_heap_address = addr;
addr += size;
if (addr > max_heap_address) max_heap_address = addr;
if (heap_checker_on) {
heap_profile->RecordAlloc(ptr, size, depth, stack);
if (ignore) {
heap_profile->MarkAsIgnored(ptr);
}
}
}
RAW_VLOG(17, "Alloc Recorded: %p of %"PRIuS"", ptr, size);
}
}
static void DeleteHook(const void* ptr) {
if (ptr != NULL) {
RAW_VLOG(16, "Recording Free %p", ptr);
{ SpinLockHolder l(&heap_checker_lock);
if (heap_checker_on) heap_profile->RecordFree(ptr);
}
RAW_VLOG(17, "Free Recorded: %p", ptr);
}
}
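// Direction in which thread stacks grow, determined at runtime by comparing
// the address of a local variable in a deeper frame against the caller's.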
enum StackDirection {
GROWS_TOWARDS_HIGH_ADDRESSES,
GROWS_TOWARDS_LOW_ADDRESSES,
UNKNOWN_DIRECTION
};
static StackDirection ATTRIBUTE_NOINLINE GetStackDirection(
const uintptr_t *const ptr) {
uintptr_t x;
if (&x < ptr)
return GROWS_TOWARDS_LOW_ADDRESSES;
if (ptr < &x)
return GROWS_TOWARDS_HIGH_ADDRESSES;
RAW_CHECK(0, "");
return UNKNOWN_DIRECTION;
}
static StackDirection stack_direction = UNKNOWN_DIRECTION;
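// Registers the stack region containing top_ptr (the stack top of some
// thread) as live THREAD_DATA, carving it out of any recorded
// /proc/self/maps global-data chunk it happens to fall into.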
static void RegisterStackLocked(const void* top_ptr) {
RAW_DCHECK(heap_checker_lock.IsHeld(), "");
RAW_DCHECK(MemoryRegionMap::LockIsHeld(), "");
RAW_VLOG(10, "Thread stack at %p", top_ptr);
uintptr_t top = AsInt(top_ptr);
stack_tops->insert(top);
if (stack_direction == UNKNOWN_DIRECTION) {
stack_direction = GetStackDirection(&top);
}
MemoryRegionMap::Region region;
if (MemoryRegionMap::FindAndMarkStackRegion(top, &region)) {
if (stack_direction == GROWS_TOWARDS_LOW_ADDRESSES) {
RAW_VLOG(11, "Live stack at %p of %"PRIuPTR" bytes",
top_ptr, region.end_addr - top);
live_objects->push_back(AllocObject(top_ptr, region.end_addr - top,
THREAD_DATA));
} else {
RAW_VLOG(11, "Live stack at %p of %"PRIuPTR" bytes",
AsPtr(region.start_addr),
top - region.start_addr);
live_objects->push_back(AllocObject(AsPtr(region.start_addr),
top - region.start_addr,
THREAD_DATA));
}
} else if (FLAGS_heap_check_ignore_global_live) {
for (LibraryLiveObjectsStacks::iterator lib = library_live_objects->begin();
lib != library_live_objects->end(); ++lib) {
for (LiveObjectsStack::iterator span = lib->second.begin();
span != lib->second.end(); ++span) {
uintptr_t start = AsInt(span->ptr);
uintptr_t end = start + span->size;
if (start <= top && top < end) {
RAW_VLOG(11, "Stack at %p is inside /proc/self/maps chunk %p..%p",
top_ptr, AsPtr(start), AsPtr(end));
uintptr_t stack_start = start;
uintptr_t stack_end = end;
RAW_DCHECK(MemoryRegionMap::LockIsHeld(), "");
for (MemoryRegionMap::RegionIterator r =
MemoryRegionMap::BeginRegionLocked();
r != MemoryRegionMap::EndRegionLocked(); ++r) {
if (top < r->start_addr && r->start_addr < stack_end) {
stack_end = r->start_addr;
}
if (stack_start < r->end_addr && r->end_addr <= top) {
stack_start = r->end_addr;
}
}
if (stack_start != start || stack_end != end) {
RAW_VLOG(11, "Stack at %p is actually inside memory chunk %p..%p",
top_ptr, AsPtr(stack_start), AsPtr(stack_end));
}
if (stack_direction == GROWS_TOWARDS_LOW_ADDRESSES) {
RAW_VLOG(11, "Live stack at %p of %"PRIuPTR" bytes",
top_ptr, stack_end - top);
live_objects->push_back(
AllocObject(top_ptr, stack_end - top, THREAD_DATA));
} else {
RAW_VLOG(11, "Live stack at %p of %"PRIuPTR" bytes",
AsPtr(stack_start), top - stack_start);
live_objects->push_back(
AllocObject(AsPtr(stack_start), top - stack_start, THREAD_DATA));
}
lib->second.erase(span);
if (stack_start != start) {
lib->second.push_back(AllocObject(AsPtr(start), stack_start - start,
MAYBE_LIVE));
}
if (stack_end != end) {
lib->second.push_back(AllocObject(AsPtr(stack_end), end - stack_end,
MAYBE_LIVE));
}
return;
}
}
}
RAW_LOG(ERROR, "Memory region for stack at %p not found. "
"Will likely report false leak positives.", top_ptr);
}
}
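// Callbacks for heap_profile->IterateAllocs: re-add as live-object roots any
// allocations that were explicitly ignored, or whose allocation call stack
// falls inside a disabled address range.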
static void MakeIgnoredObjectsLiveCallbackLocked(
const void* ptr, const HeapProfileTable::AllocInfo& info) {
RAW_DCHECK(heap_checker_lock.IsHeld(), "");
if (info.ignored) {
live_objects->push_back(AllocObject(ptr, info.object_size,
MUST_BE_ON_HEAP));
}
}
static void MakeDisabledLiveCallbackLocked(
const void* ptr, const HeapProfileTable::AllocInfo& info) {
RAW_DCHECK(heap_checker_lock.IsHeld(), "");
bool stack_disable = false;
bool range_disable = false;
for (int depth = 0; depth < info.stack_depth; depth++) {
uintptr_t addr = AsInt(info.call_stack[depth]);
if (disabled_ranges) {
DisabledRangeMap::const_iterator iter
= disabled_ranges->upper_bound(addr);
if (iter != disabled_ranges->end()) {
RAW_DCHECK(iter->first > addr, "");
if (iter->second.start_address < addr &&
iter->second.max_depth > depth) {
range_disable = true;
break;
}
}
}
}
if (stack_disable || range_disable) {
uintptr_t start_address = AsInt(ptr);
uintptr_t end_address = start_address + info.object_size;
StackTopSet::const_iterator iter
= stack_tops->lower_bound(start_address);
if (iter != stack_tops->end()) {
RAW_DCHECK(*iter >= start_address, "");
if (*iter < end_address) {
RAW_VLOG(11, "Not %s-disabling %"PRIuS" bytes at %p"
": have stack inside: %p",
(stack_disable ? "stack" : "range"),
info.object_size, ptr, AsPtr(*iter));
return;
}
}
RAW_VLOG(11, "%s-disabling %"PRIuS" bytes at %p",
(stack_disable ? "Stack" : "Range"), info.object_size, ptr);
live_objects->push_back(AllocObject(ptr, info.object_size,
MUST_BE_ON_HEAP));
}
}
static const char kUnnamedProcSelfMapEntry[] = "UNNAMED";
static void RecordGlobalDataLocked(uintptr_t start_address,
uintptr_t end_address,
const char* permissions,
const char* filename) {
RAW_DCHECK(heap_checker_lock.IsHeld(), "");
if (strchr(permissions, 'w') == NULL) return;
if (filename == NULL || *filename == '\0') {
filename = kUnnamedProcSelfMapEntry;
}
RAW_VLOG(11, "Looking into %s: 0x%" PRIxPTR "..0x%" PRIxPTR,
filename, start_address, end_address);
(*library_live_objects)[filename].
push_back(AllocObject(AsPtr(start_address),
end_address - start_address,
MAYBE_LIVE));
}
static bool IsLibraryNamed(const char* library, const char* library_base) {
const char* p = hc_strstr(library, library_base);
size_t sz = strlen(library_base);
return p != NULL && (p[sz] == '.' || p[sz] == '-');
}
void HeapLeakChecker::DisableLibraryAllocsLocked(const char* library,
uintptr_t start_address,
uintptr_t end_address) {
RAW_DCHECK(heap_checker_lock.IsHeld(), "");
int depth = 0;
if (IsLibraryNamed(library, "/libpthread") ||
IsLibraryNamed(library, "/libdl") ||
IsLibraryNamed(library, "/libcrypto") ||
IsLibraryNamed(library, "/libjvm") ||
IsLibraryNamed(library, "/libzip")
) {
depth = 1;
} else if (IsLibraryNamed(library, "/ld")
) {
depth = 2;
}
if (depth) {
RAW_VLOG(10, "Disabling allocations from %s at depth %d:", library, depth);
DisableChecksFromToLocked(AsPtr(start_address), AsPtr(end_address), depth);
if (IsLibraryNamed(library, "/libpthread") ||
IsLibraryNamed(library, "/libdl") ||
IsLibraryNamed(library, "/ld")) {
RAW_VLOG(10, "Global memory regions made by %s will be live data",
library);
if (global_region_caller_ranges == NULL) {
global_region_caller_ranges =
new(Allocator::Allocate(sizeof(GlobalRegionCallerRangeMap)))
GlobalRegionCallerRangeMap;
}
global_region_caller_ranges
->insert(make_pair(end_address, start_address));
}
}
}
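// Iterates over /proc/self/maps, executing proc_maps_task on each entry:
// either disabling allocations made from known system libraries, or
// recording writable global-data regions as potentially-live roots.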
HeapLeakChecker::ProcMapsResult HeapLeakChecker::UseProcMapsLocked(
ProcMapsTask proc_maps_task) {
RAW_DCHECK(heap_checker_lock.IsHeld(), "");
ProcMapsIterator::Buffer buffer;
ProcMapsIterator it(0, &buffer);
if (!it.Valid()) {
int errsv = errno;
RAW_LOG(ERROR, "Could not open /proc/self/maps: errno=%d. "
"Libraries will not be handled correctly.", errsv);
return CANT_OPEN_PROC_MAPS;
}
uint64 start_address, end_address, file_offset;
int64 inode;
char *permissions, *filename;
bool saw_shared_lib = false;
bool saw_nonzero_inode = false;
bool saw_shared_lib_with_nonzero_inode = false;
while (it.Next(&start_address, &end_address, &permissions,
&file_offset, &inode, &filename)) {
if (start_address >= end_address) {
if (inode != 0) {
RAW_LOG(ERROR, "Errors reading /proc/self/maps. "
"Some global memory regions will not "
"be handled correctly.");
}
continue;
}
if (inode != 0) {
saw_nonzero_inode = true;
}
if ((hc_strstr(filename, "lib") && hc_strstr(filename, ".so")) ||
hc_strstr(filename, ".dll") ||
hc_strstr(filename, ".dylib") || hc_strstr(filename, ".bundle")) {
saw_shared_lib = true;
if (inode != 0) {
saw_shared_lib_with_nonzero_inode = true;
}
}
switch (proc_maps_task) {
case DISABLE_LIBRARY_ALLOCS:
if (inode != 0 && strncmp(permissions, "r-xp", 4) == 0) {
DisableLibraryAllocsLocked(filename, start_address, end_address);
}
break;
case RECORD_GLOBAL_DATA:
RecordGlobalDataLocked(start_address, end_address,
permissions, filename);
break;
default:
RAW_CHECK(0, "");
}
}
if (saw_nonzero_inode) {
saw_shared_lib = saw_shared_lib_with_nonzero_inode;
}
if (!saw_shared_lib) {
RAW_LOG(ERROR, "No shared libs detected. Will likely report false leak "
"positives for statically linked executables.");
return NO_SHARED_LIBS_IN_PROC_MAPS;
}
return PROC_MAPS_USED;
}
static int64 live_objects_total;
static int64 live_bytes_total;
static pid_t self_thread_pid = 0;
static enum {
CALLBACK_NOT_STARTED,
CALLBACK_STARTED,
CALLBACK_COMPLETED,
} thread_listing_status = CALLBACK_NOT_STARTED;
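// ListAllProcessThreads callback: while all other threads are stopped,
// registers each thread's stack (and, where ptrace register access is
// available, its register values) as live roots, then resumes the threads.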
int HeapLeakChecker::IgnoreLiveThreadsLocked(void* parameter,
int num_threads,
pid_t* thread_pids,
va_list ) {
RAW_DCHECK(heap_checker_lock.IsHeld(), "");
thread_listing_status = CALLBACK_STARTED;
RAW_VLOG(11, "Found %d threads (from pid %d)", num_threads, getpid());
if (FLAGS_heap_check_ignore_global_live) {
UseProcMapsLocked(RECORD_GLOBAL_DATA);
}
vector<void*, STL_Allocator<void*, Allocator> > thread_registers;
int failures = 0;
for (int i = 0; i < num_threads; ++i) {
if (thread_pids[i] == self_thread_pid) continue;
RAW_VLOG(11, "Handling thread with pid %d", thread_pids[i]);
#if (defined(__i386__) || defined(__x86_64)) && \
defined(HAVE_LINUX_PTRACE_H) && defined(HAVE_SYS_SYSCALL_H) && defined(DUMPER)
i386_regs thread_regs;
#define sys_ptrace(r, p, a, d) syscall(SYS_ptrace, (r), (p), (a), (d))
if (sys_ptrace(PTRACE_GETREGS, thread_pids[i], NULL, &thread_regs) == 0) {
COMPILE_ASSERT(sizeof(thread_regs.SP) == sizeof(void*),
SP_register_does_not_look_like_a_pointer);
RegisterStackLocked(reinterpret_cast<void*>(thread_regs.SP));
for (void** p = reinterpret_cast<void**>(&thread_regs);
p < reinterpret_cast<void**>(&thread_regs + 1); ++p) {
RAW_VLOG(12, "Thread register %p", *p);
thread_registers.push_back(*p);
}
} else {
failures += 1;
}
#else
failures += 1;
#endif
}
IgnoreLiveObjectsLocked("threads stack data", "");
if (thread_registers.size()) {
RAW_VLOG(11, "Live registers at %p of %"PRIuS" bytes",
&thread_registers[0], thread_registers.size() * sizeof(void*));
live_objects->push_back(AllocObject(&thread_registers[0],
thread_registers.size() * sizeof(void*),
THREAD_REGISTERS));
IgnoreLiveObjectsLocked("threads register data", "");
}
IgnoreNonThreadLiveObjectsLocked();
ResumeAllProcessThreads(num_threads, thread_pids);
thread_listing_status = CALLBACK_COMPLETED;
return failures;
}
static const void* self_thread_stack_top;
void HeapLeakChecker::IgnoreNonThreadLiveObjectsLocked() {
RAW_DCHECK(heap_checker_lock.IsHeld(), "");
RAW_DCHECK(MemoryRegionMap::LockIsHeld(), "");
RAW_VLOG(11, "Handling self thread with pid %d", self_thread_pid);
RegisterStackLocked(self_thread_stack_top);
IgnoreLiveObjectsLocked("stack data", "");
if (ignored_objects) {
for (IgnoredObjectsMap::const_iterator object = ignored_objects->begin();
object != ignored_objects->end(); ++object) {
const void* ptr = AsPtr(object->first);
RAW_VLOG(11, "Ignored live object at %p of %"PRIuS" bytes",
ptr, object->second);
live_objects->
push_back(AllocObject(ptr, object->second, MUST_BE_ON_HEAP));
size_t object_size;
if (!(heap_profile->FindAlloc(ptr, &object_size) &&
object->second == object_size)) {
RAW_LOG(FATAL, "Object at %p of %"PRIuS" bytes from an"
" IgnoreObject() has disappeared", ptr, object->second);
}
}
IgnoreLiveObjectsLocked("ignored objects", "");
}
heap_profile->IterateAllocs(MakeIgnoredObjectsLiveCallbackLocked);
IgnoreLiveObjectsLocked("disabled objects", "");
heap_profile->IterateAllocs(MakeDisabledLiveCallbackLocked);
IgnoreLiveObjectsLocked("disabled code", "");
if (FLAGS_heap_check_ignore_global_live) {
bool have_null_region_callers = false;
for (LibraryLiveObjectsStacks::iterator l = library_live_objects->begin();
l != library_live_objects->end(); ++l) {
RAW_CHECK(live_objects->empty(), "");
RAW_DCHECK(MemoryRegionMap::LockIsHeld(), "");
for (MemoryRegionMap::RegionIterator region =
MemoryRegionMap::BeginRegionLocked();
region != MemoryRegionMap::EndRegionLocked(); ++region) {
bool subtract = true;
if (!region->is_stack && global_region_caller_ranges) {
if (region->caller() == static_cast<uintptr_t>(NULL)) {
have_null_region_callers = true;
} else {
GlobalRegionCallerRangeMap::const_iterator iter
= global_region_caller_ranges->upper_bound(region->caller());
if (iter != global_region_caller_ranges->end()) {
RAW_DCHECK(iter->first > region->caller(), "");
if (iter->second < region->caller()) {
subtract = false;
}
}
}
}
if (subtract) {
for (LiveObjectsStack::const_iterator i = l->second.begin();
i != l->second.end(); ++i) {
uintptr_t start = AsInt(i->ptr);
uintptr_t end = start + i->size;
if (region->start_addr <= start && end <= region->end_addr) {
} else if (start < region->start_addr &&
region->end_addr < end) {
live_objects->push_back(AllocObject(i->ptr,
region->start_addr - start,
IN_GLOBAL_DATA));
live_objects->push_back(AllocObject(AsPtr(region->end_addr),
end - region->end_addr,
IN_GLOBAL_DATA));
} else if (region->end_addr > start &&
region->start_addr <= start) {
live_objects->push_back(AllocObject(AsPtr(region->end_addr),
end - region->end_addr,
IN_GLOBAL_DATA));
} else if (region->start_addr > start &&
region->start_addr < end) {
live_objects->push_back(AllocObject(i->ptr,
region->start_addr - start,
IN_GLOBAL_DATA));
} else {
live_objects->push_back(AllocObject(i->ptr, i->size,
IN_GLOBAL_DATA));
}
}
live_objects->swap(l->second);
live_objects->clear();
}
}
if (VLOG_IS_ON(11)) {
for (LiveObjectsStack::const_iterator i = l->second.begin();
i != l->second.end(); ++i) {
RAW_VLOG(11, "Library live region at %p of %"PRIuPTR" bytes",
i->ptr, i->size);
}
}
live_objects->swap(l->second);
IgnoreLiveObjectsLocked("in globals of\n ", l->first.c_str());
}
if (have_null_region_callers) {
RAW_LOG(ERROR, "Have memory regions w/o callers: "
"might report false leaks");
}
Allocator::DeleteAndNull(&library_live_objects);
}
}
static int IsOneThread(void* parameter, int num_threads,
pid_t* thread_pids, va_list ap) {
if (num_threads != 1) {
RAW_LOG(WARNING, "Have threads: Won't CPU-profile the bulk of leak "
"checking work happening in IgnoreLiveThreadsLocked!");
}
ResumeAllProcessThreads(num_threads, thread_pids);
return num_threads;
}
static va_list dummy_ap;
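// Top-level pass that marks everything reachable from thread stacks,
// registers, global data, and explicitly ignored objects as live, so that
// only unreachable heap objects remain as leak candidates.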
void HeapLeakChecker::IgnoreAllLiveObjectsLocked(const void* self_stack_top) {
RAW_DCHECK(heap_checker_lock.IsHeld(), "");
RAW_CHECK(live_objects == NULL, "");
live_objects = new(Allocator::Allocate(sizeof(LiveObjectsStack)))
LiveObjectsStack;
stack_tops = new(Allocator::Allocate(sizeof(StackTopSet))) StackTopSet;
live_objects_total = 0;
live_bytes_total = 0;
const size_t old_max_heap_object_size = max_heap_object_size;
max_heap_object_size = (
FLAGS_heap_check_max_pointer_offset != -1
? min(size_t(FLAGS_heap_check_max_pointer_offset), max_heap_object_size)
: max_heap_object_size);
if (FLAGS_heap_check_ignore_global_live) {
library_live_objects =
new(Allocator::Allocate(sizeof(LibraryLiveObjectsStacks)))
LibraryLiveObjectsStacks;
}
thread_listing_status = CALLBACK_NOT_STARTED;
bool need_to_ignore_non_thread_objects = true;
self_thread_pid = getpid();
self_thread_stack_top = self_stack_top;
if (FLAGS_heap_check_ignore_thread_live) {
bool want_and_can_run_in_main_thread =
ProfilingIsEnabledForAllThreads() &&
ListAllProcessThreads(NULL, IsOneThread) == 1;
int r = want_and_can_run_in_main_thread
? IgnoreLiveThreadsLocked(NULL, 1, &self_thread_pid, dummy_ap)
: ListAllProcessThreads(NULL, IgnoreLiveThreadsLocked);
need_to_ignore_non_thread_objects = r < 0;
if (r < 0) {
RAW_LOG(WARNING, "Thread finding failed with %d errno=%d", r, errno);
if (thread_listing_status == CALLBACK_COMPLETED) {
RAW_LOG(INFO, "Thread finding callback "
"finished ok; hopefully everything is fine");
need_to_ignore_non_thread_objects = false;
} else if (thread_listing_status == CALLBACK_STARTED) {
RAW_LOG(FATAL, "Thread finding callback was "
"interrupted or crashed; can't fix this");
} else {
RAW_LOG(ERROR, "Could not find thread stacks. "
"Will likely report false leak positives.");
}
} else if (r != 0) {
RAW_LOG(ERROR, "Thread stacks not found for %d threads. "
"Will likely report false leak positives.", r);
} else {
RAW_VLOG(11, "Thread stacks appear to be found for all threads");
}
} else {
RAW_LOG(WARNING, "Not looking for thread stacks; "
"objects reachable only from there "
"will be reported as leaks");
}
if (need_to_ignore_non_thread_objects) {
if (FLAGS_heap_check_ignore_global_live) {
UseProcMapsLocked(RECORD_GLOBAL_DATA);
}
IgnoreNonThreadLiveObjectsLocked();
}
if (live_objects_total) {
RAW_VLOG(10, "Ignoring %"PRId64" reachable objects of %"PRId64" bytes",
live_objects_total, live_bytes_total);
}
Allocator::DeleteAndNull(&live_objects);
Allocator::DeleteAndNull(&stack_tops);
max_heap_object_size = old_max_heap_object_size;
}
static size_t pointer_source_alignment = kPointerSourceAlignment;
static SpinLock alignment_checker_lock(SpinLock::LINKER_INITIALIZED);
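// Flood-fill step: scans every object currently on the live_objects stack
// for suitably aligned values that look like pointers into the heap, marks
// the pointed-to allocations as live, and pushes them for further scanning.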
void HeapLeakChecker::IgnoreLiveObjectsLocked(const char* name,
const char* name2) {
RAW_DCHECK(heap_checker_lock.IsHeld(), "");
int64 live_object_count = 0;
int64 live_byte_count = 0;
while (!live_objects->empty()) {
const char* object =
reinterpret_cast<const char*>(live_objects->back().ptr);
size_t size = live_objects->back().size;
const ObjectPlacement place = live_objects->back().place;
live_objects->pop_back();
if (place == MUST_BE_ON_HEAP && heap_profile->MarkAsLive(object)) {
live_object_count += 1;
live_byte_count += size;
}
RAW_VLOG(13, "Looking for heap pointers in %p of %"PRIuS" bytes",
object, size);
const char* const whole_object = object;
size_t const whole_size = size;
const size_t remainder = AsInt(object) % pointer_source_alignment;
if (remainder) {
object += pointer_source_alignment - remainder;
if (size >= pointer_source_alignment - remainder) {
size -= pointer_source_alignment - remainder;
} else {
size = 0;
}
}
if (size < sizeof(void*)) continue;
#ifdef NO_FRAME_POINTER
if (name2 == kUnnamedProcSelfMapEntry) {
static const uintptr_t page_mask = ~(getpagesize() - 1);
const uintptr_t addr = reinterpret_cast<uintptr_t>(object);
if ((addr & page_mask) == 0 && (size & page_mask) == 0) {
if (msync(const_cast<char*>(object), size, MS_ASYNC) != 0) {
RAW_VLOG(0, "Ignoring inaccessible object [%p, %p) "
"(msync error %d (%s))",
object, object + size, errno, strerror(errno));
continue;
}
}
}
#endif
const char* const max_object = object + size - sizeof(void*);
while (object <= max_object) {
const uintptr_t addr = *reinterpret_cast<const uintptr_t*>(object);
const bool can_be_on_heap =
#if defined(__x86_64__)
addr <= max_heap_address &&
min_heap_address <= addr;
#else
min_heap_address <= addr &&
addr <= max_heap_address;
#endif
if (can_be_on_heap) {
const void* ptr = reinterpret_cast<const void*>(addr);
size_t object_size;
if (HaveOnHeapLocked(&ptr, &object_size) &&
heap_profile->MarkAsLive(ptr)) {
RAW_VLOG(14, "Found pointer to %p of %"PRIuS" bytes at %p "
"inside %p of size %"PRIuS"",
ptr, object_size, object, whole_object, whole_size);
if (VLOG_IS_ON(15)) {
HeapProfileTable::AllocInfo alloc;
bool r = heap_profile->FindAllocDetails(ptr, &alloc);
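// 'r' is only consumed by RAW_DCHECK below; the self-assignment silences
// unused-variable warnings in builds where RAW_DCHECK compiles to nothing.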
r = r;
RAW_DCHECK(r, "");
RAW_LOG(INFO, "New live %p object's alloc stack:", ptr);
for (int i = 0; i < alloc.stack_depth; ++i) {
RAW_LOG(INFO, " @ %p", alloc.call_stack[i]);
}
}
live_object_count += 1;
live_byte_count += object_size;
live_objects->push_back(AllocObject(ptr, object_size,
IGNORED_ON_HEAP));
}
}
object += pointer_source_alignment;
}
}
live_objects_total += live_object_count;
live_bytes_total += live_byte_count;
if (live_object_count) {
RAW_VLOG(10, "Removed %"PRId64" live heap objects of %"PRId64" bytes: %s%s",
live_object_count, live_byte_count, name, name2);
}
}
void HeapLeakChecker::DisableChecksIn(const char* pattern) {
RAW_LOG(WARNING, "DisableChecksIn(%s) is ignored", pattern);
}
void HeapLeakChecker::DoIgnoreObject(const void* ptr) {
SpinLockHolder l(&heap_checker_lock);
if (!heap_checker_on) return;
size_t object_size;
if (!HaveOnHeapLocked(&ptr, &object_size)) {
RAW_LOG(ERROR, "No live heap object at %p to ignore", ptr);
} else {
RAW_VLOG(10, "Going to ignore live object at %p of %"PRIuS" bytes",
ptr, object_size);
if (ignored_objects == NULL) {
ignored_objects = new(Allocator::Allocate(sizeof(IgnoredObjectsMap)))
IgnoredObjectsMap;
}
if (!ignored_objects->insert(make_pair(AsInt(ptr), object_size)).second) {
RAW_LOG(WARNING, "Object at %p is already being ignored", ptr);
}
}
}
void HeapLeakChecker::UnIgnoreObject(const void* ptr) {
SpinLockHolder l(&heap_checker_lock);
if (!heap_checker_on) return;
size_t object_size;
if (!HaveOnHeapLocked(&ptr, &object_size)) {
RAW_LOG(FATAL, "No live heap object at %p to un-ignore", ptr);
} else {
bool found = false;
if (ignored_objects) {
IgnoredObjectsMap::iterator object = ignored_objects->find(AsInt(ptr));
if (object != ignored_objects->end() && object_size == object->second) {
ignored_objects->erase(object);
found = true;
RAW_VLOG(10, "Now not going to ignore live object "
"at %p of %"PRIuS" bytes", ptr, object_size);
}
}
if (!found) RAW_LOG(FATAL, "Object at %p has not been ignored", ptr);
}
}
char* HeapLeakChecker::MakeProfileNameLocked() {
RAW_DCHECK(lock_->IsHeld(), "");
RAW_DCHECK(heap_checker_lock.IsHeld(), "");
const int len = profile_name_prefix->size() + strlen(name_) + 5 +
strlen(HeapProfileTable::kFileExt) + 1;
char* file_name = reinterpret_cast<char*>(Allocator::Allocate(len));
snprintf(file_name, len, "%s.%s-end%s",
profile_name_prefix->c_str(), name_,
HeapProfileTable::kFileExt);
return file_name;
}
void HeapLeakChecker::Create(const char *name, bool make_start_snapshot) {
SpinLockHolder l(lock_);
name_ = NULL;
start_snapshot_ = NULL;
has_checked_ = false;
inuse_bytes_increase_ = 0;
inuse_allocs_increase_ = 0;
keep_profiles_ = false;
char* n = new char[strlen(name) + 1];
IgnoreObject(n);
{
SpinLockHolder al(&alignment_checker_lock);
SpinLockHolder hl(&heap_checker_lock);
MemoryRegionMap::LockHolder ml;
if (heap_checker_on && profile_name_prefix != NULL) {
RAW_DCHECK(strchr(name, '/') == NULL, "must be a simple name");
memcpy(n, name, strlen(name) + 1);
name_ = n;
if (make_start_snapshot) {
start_snapshot_ = heap_profile->TakeSnapshot();
}
const HeapProfileTable::Stats& t = heap_profile->total();
const size_t start_inuse_bytes = t.alloc_size - t.free_size;
const size_t start_inuse_allocs = t.allocs - t.frees;
RAW_VLOG(10, "Start check \"%s\" profile: %"PRIuS" bytes "
"in %"PRIuS" objects",
name_, start_inuse_bytes, start_inuse_allocs);
} else {
RAW_LOG(WARNING, "Heap checker is not active, "
"hence checker \"%s\" will do nothing!", name);
RAW_LOG(WARNING, "To activate set the HEAPCHECK environment variable.\n");
}
}
if (name_ == NULL) {
UnIgnoreObject(n);
delete[] n;
}
}
HeapLeakChecker::HeapLeakChecker(const char *name) : lock_(new SpinLock) {
RAW_DCHECK(strcmp(name, "_main_") != 0, "_main_ is reserved");
Create(name, true);
}
HeapLeakChecker::HeapLeakChecker() : lock_(new SpinLock) {
if (FLAGS_heap_check_before_constructors) {
Create("_main_", false);
} else {
Create("_main_", true);
}
}
ssize_t HeapLeakChecker::BytesLeaked() const {
SpinLockHolder l(lock_);
if (!has_checked_) {
RAW_LOG(FATAL, "*NoLeaks|SameHeap must execute before this call");
}
return inuse_bytes_increase_;
}
ssize_t HeapLeakChecker::ObjectsLeaked() const {
SpinLockHolder l(lock_);
if (!has_checked_) {
RAW_LOG(FATAL, "*NoLeaks|SameHeap must execute before this call");
}
return inuse_allocs_increase_;
}
static int32 main_thread_pid = getpid();
#ifdef HAVE_PROGRAM_INVOCATION_NAME
extern char* program_invocation_name;
extern char* program_invocation_short_name;
static const char* invocation_name() { return program_invocation_short_name; }
static string invocation_path() { return program_invocation_name; }
#else
static const char* invocation_name() { return "<your binary>"; }
static string invocation_path() { return "<your binary>"; }
#endif
static void SuggestPprofCommand(const char* pprof_file_arg) {
string extra_help;
const string remote_header =
"This program is being executed remotely and therefore the pprof\n"
"command printed above will not work. Either run this program\n"
"locally, or adjust the pprof command as follows to allow it to\n"
"work on your local machine:\n";
string fetch_cmd;
RAW_LOG(WARNING,
"\n\n"
"If the preceding stack traces are not enough to find "
"the leaks, try running THIS shell command:\n\n"
"%s%s %s \"%s\" --inuse_objects --lines --heapcheck "
" --edgefraction=1e-10 --nodefraction=1e-10 --gv\n"
"\n"
"%s"
"If you are still puzzled about why the leaks are "
"there, try rerunning this program with "
"HEAP_CHECK_TEST_POINTER_ALIGNMENT=1 and/or with "
"HEAP_CHECK_MAX_POINTER_OFFSET=-1\n"
"If the leak report occurs in a small fraction of runs, "
"try running with TCMALLOC_MAX_FREE_QUEUE_SIZE of few hundred MB "
"or with TCMALLOC_RECLAIM_MEMORY=false, "
"it might help find leaks more repeatably\n",
fetch_cmd.c_str(),
"pprof",
invocation_path().c_str(),
pprof_file_arg,
extra_help.c_str()
);
}
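// Performs the actual leak check for this checker: ignores all live data,
// takes a non-live snapshot relative to start_snapshot_, optionally retries
// with pointer alignment 1, and reports any remaining leaks with a profile
// dump and a suggested pprof command.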
bool HeapLeakChecker::DoNoLeaks(ShouldSymbolize should_symbolize) {
SpinLockHolder l(lock_);
SpinLockHolder al(&alignment_checker_lock);
static bool have_disabled_hooks_for_symbolize = false;
if (have_disabled_hooks_for_symbolize) {
RAW_LOG(FATAL, "Must not call heap leak checker manually after "
" program-exit's automatic check.");
}
HeapProfileTable::Snapshot* leaks = NULL;
char* pprof_file = NULL;
{
SpinLockHolder hl(&heap_checker_lock);
if (heap_checker_on == false) {
if (name_ != NULL) {
RAW_LOG(WARNING, "Heap leak checker got turned off after checker "
"\"%s\" has been created, no leak check is being done for it!",
name_);
}
return true;
}
Allocator::DeleteAndNullIfNot(&global_region_caller_ranges);
ProcMapsResult pm_result = UseProcMapsLocked(DISABLE_LIBRARY_ALLOCS);
RAW_CHECK(pm_result == PROC_MAPS_USED, "");
const int initial_allocs = Allocator::alloc_count();
if (name_ == NULL) {
RAW_LOG(FATAL, "Heap leak checker must not be turned on "
"after construction of a HeapLeakChecker");
}
MemoryRegionMap::LockHolder ml;
int a_local_var;
HeapProfileTable::Snapshot* base =
reinterpret_cast<HeapProfileTable::Snapshot*>(start_snapshot_);
RAW_DCHECK(FLAGS_heap_check_pointer_source_alignment > 0, "");
pointer_source_alignment = FLAGS_heap_check_pointer_source_alignment;
IgnoreAllLiveObjectsLocked(&a_local_var);
leaks = heap_profile->NonLiveSnapshot(base);
inuse_bytes_increase_ = static_cast<ssize_t>(leaks->total().alloc_size);
inuse_allocs_increase_ = static_cast<ssize_t>(leaks->total().allocs);
if (leaks->Empty()) {
heap_profile->ReleaseSnapshot(leaks);
leaks = NULL;
if (Allocator::alloc_count() != initial_allocs) {
RAW_LOG(FATAL, "Internal HeapChecker leak of %d objects ; %d -> %d",
Allocator::alloc_count() - initial_allocs,
initial_allocs, Allocator::alloc_count());
}
} else if (FLAGS_heap_check_test_pointer_alignment) {
if (pointer_source_alignment == 1) {
RAW_LOG(WARNING, "--heap_check_test_pointer_alignment has no effect: "
"--heap_check_pointer_source_alignment was already set to 1");
} else {
pointer_source_alignment = 1;
IgnoreAllLiveObjectsLocked(&a_local_var);
HeapProfileTable::Snapshot* leaks_wo_align =
heap_profile->NonLiveSnapshot(base);
pointer_source_alignment = FLAGS_heap_check_pointer_source_alignment;
if (leaks_wo_align->Empty()) {
RAW_LOG(WARNING, "Found no leaks without pointer alignment: "
"something might be placing pointers at "
"unaligned addresses! This needs to be fixed.");
} else {
RAW_LOG(INFO, "Found leaks without pointer alignment as well: "
"unaligned pointers must not be the cause of leaks.");
RAW_LOG(INFO, "--heap_check_test_pointer_alignment did not help "
"to diagnose the leaks.");
}
heap_profile->ReleaseSnapshot(leaks_wo_align);
}
}
if (leaks != NULL) {
pprof_file = MakeProfileNameLocked();
}
}
has_checked_ = true;
if (leaks == NULL) {
if (FLAGS_heap_check_max_pointer_offset == -1) {
RAW_LOG(WARNING,
"Found no leaks without max_pointer_offset restriction: "
"it's possible that the default value of "
"heap_check_max_pointer_offset flag is too low. "
"Do you use pointers with larger than that offsets "
"pointing in the middle of heap-allocated objects?");
}
const HeapProfileTable::Stats& stats = heap_profile->total();
RAW_VLOG(heap_checker_info_level,
"No leaks found for check \"%s\" "
"(but no 100%% guarantee that there aren't any): "
"found %"PRId64" reachable heap objects of %"PRId64" bytes",
name_,
int64(stats.allocs - stats.frees),
int64(stats.alloc_size - stats.free_size));
} else {
if (should_symbolize == SYMBOLIZE) {
if (MallocHook::GetNewHook() == NewHook)
MallocHook::SetNewHook(NULL);
if (MallocHook::GetDeleteHook() == DeleteHook)
MallocHook::SetDeleteHook(NULL);
MemoryRegionMap::Shutdown();
RAW_CHECK(MallocHook::GetNewHook() == NULL, "");
RAW_CHECK(MallocHook::GetDeleteHook() == NULL, "");
RAW_CHECK(MallocHook::GetMmapHook() == NULL, "");
RAW_CHECK(MallocHook::GetSbrkHook() == NULL, "");
have_disabled_hooks_for_symbolize = true;
leaks->ReportLeaks(name_, pprof_file, true);
} else {
leaks->ReportLeaks(name_, pprof_file, false);
}
if (FLAGS_heap_check_identify_leaks) {
leaks->ReportIndividualObjects();
}
SuggestPprofCommand(pprof_file);
{
SpinLockHolder hl(&heap_checker_lock);
heap_profile->ReleaseSnapshot(leaks);
Allocator::Free(pprof_file);
}
}
return (leaks == NULL);
}
HeapLeakChecker::~HeapLeakChecker() {
if (name_ != NULL) {
if (!has_checked_) {
RAW_LOG(FATAL, "Some *NoLeaks|SameHeap method"
" must be called on any created HeapLeakChecker");
}
if (start_snapshot_ != NULL) {
SpinLockHolder l(&heap_checker_lock);
heap_profile->ReleaseSnapshot(
reinterpret_cast<HeapProfileTable::Snapshot*>(start_snapshot_));
}
UnIgnoreObject(name_);
delete[] name_;
name_ = NULL;
}
delete lock_;
}
bool HeapLeakChecker::IsActive() {
SpinLockHolder l(&heap_checker_lock);
return heap_checker_on;
}
vector<HeapCleaner::void_function>* HeapCleaner::heap_cleanups_ = NULL;
HeapCleaner::HeapCleaner(void_function f) {
if (heap_cleanups_ == NULL)
heap_cleanups_ = new vector<HeapCleaner::void_function>;
heap_cleanups_->push_back(f);
}
void HeapCleaner::RunHeapCleanups() {
if (!heap_cleanups_)
return;
for (int i = 0; i < heap_cleanups_->size(); i++) {
void (*f)(void) = (*heap_cleanups_)[i];
f();
}
delete heap_cleanups_;
heap_cleanups_ = NULL;
}
void HeapLeakChecker_RunHeapCleanups() {
if (FLAGS_heap_check == "local")
return;
{ SpinLockHolder l(&heap_checker_lock);
if (heap_checker_pid != getpid()) return;
}
HeapCleaner::RunHeapCleanups();
if (!FLAGS_heap_check_after_destructors) HeapLeakChecker::DoMainHeapCheck();
}
static bool internal_init_start_has_run = false;
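// Runs as a module initializer: interprets the HEAPCHECK / --heap_check mode
// ("minimal", "normal", "strict", "draconian", "as-is", "local"), sets the
// corresponding flags, and creates the main whole-program checker.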
void HeapLeakChecker_InternalInitStart() {
{ SpinLockHolder l(&heap_checker_lock);
RAW_CHECK(!internal_init_start_has_run,
"Heap-check constructor called twice. Perhaps you both linked"
" in the heap checker, and also used LD_PRELOAD to load it?");
internal_init_start_has_run = true;
#ifdef ADDRESS_SANITIZER
FLAGS_heap_check = "";
#endif
if (FLAGS_heap_check.empty()) {
HeapLeakChecker::TurnItselfOffLocked();
return;
} else if (RunningOnValgrind()) {
RAW_LOG(WARNING, "Can't run under Valgrind; will turn itself off");
HeapLeakChecker::TurnItselfOffLocked();
return;
}
}
if (!FLAGS_heap_check_run_under_gdb && IsDebuggerAttached()) {
RAW_LOG(WARNING, "Someone is ptrace()ing us; will turn itself off");
SpinLockHolder l(&heap_checker_lock);
HeapLeakChecker::TurnItselfOffLocked();
return;
}
{ SpinLockHolder l(&heap_checker_lock);
if (!constructor_heap_profiling) {
RAW_LOG(FATAL, "Can not start so late. You have to enable heap checking "
"with HEAPCHECK=<mode>.");
}
}
RAW_DCHECK(FLAGS_heap_check_pointer_source_alignment > 0, "");
if (FLAGS_heap_check == "minimal") {
FLAGS_heap_check_before_constructors = false;
FLAGS_heap_check_after_destructors = false;
FLAGS_heap_check_ignore_thread_live = true;
FLAGS_heap_check_ignore_global_live = true;
} else if (FLAGS_heap_check == "normal") {
FLAGS_heap_check_before_constructors = true;
FLAGS_heap_check_after_destructors = false;
FLAGS_heap_check_ignore_thread_live = true;
FLAGS_heap_check_ignore_global_live = true;
} else if (FLAGS_heap_check == "strict") {
FLAGS_heap_check_before_constructors = true;
FLAGS_heap_check_after_destructors = true;
FLAGS_heap_check_ignore_thread_live = true;
FLAGS_heap_check_ignore_global_live = true;
} else if (FLAGS_heap_check == "draconian") {
FLAGS_heap_check_before_constructors = true;
FLAGS_heap_check_after_destructors = true;
FLAGS_heap_check_ignore_thread_live = false;
FLAGS_heap_check_ignore_global_live = false;
} else if (FLAGS_heap_check == "as-is") {
} else if (FLAGS_heap_check == "local") {
} else {
RAW_LOG(FATAL, "Unsupported heap_check flag: %s",
FLAGS_heap_check.c_str());
}
#ifdef __FreeBSD__
FLAGS_heap_check_after_destructors = true;
#endif
{ SpinLockHolder l(&heap_checker_lock);
RAW_DCHECK(heap_checker_pid == getpid(), "");
heap_checker_on = true;
RAW_DCHECK(heap_profile, "");
HeapLeakChecker::ProcMapsResult pm_result = HeapLeakChecker::UseProcMapsLocked(HeapLeakChecker::DISABLE_LIBRARY_ALLOCS);
if (pm_result != HeapLeakChecker::PROC_MAPS_USED) {
HeapLeakChecker::TurnItselfOffLocked();
return;
}
}
string* profile_prefix =
new string(FLAGS_heap_check_dump_directory + "/" + invocation_name());
const int32 our_pid = getpid();
{ SpinLockHolder l(&heap_checker_lock);
if (main_thread_pid == 0)
main_thread_pid = our_pid;
}
char pid_buf[15];
snprintf(pid_buf, sizeof(pid_buf), ".%d", main_thread_pid);
*profile_prefix += pid_buf;
{ SpinLockHolder l(&heap_checker_lock);
RAW_DCHECK(profile_name_prefix == NULL, "");
profile_name_prefix = profile_prefix;
}
char* test_str = new char[5];
size_t size;
{ SpinLockHolder l(&heap_checker_lock);
RAW_CHECK(heap_profile->FindAlloc(test_str, &size),
"our own new/delete not linked?");
}
delete[] test_str;
{ SpinLockHolder l(&heap_checker_lock);
RAW_CHECK(!heap_profile->FindAlloc(test_str, &size),
"our own new/delete not linked?");
}
RAW_VLOG(heap_checker_info_level,
"WARNING: Perftools heap leak checker is active "
"-- Performance may suffer");
if (FLAGS_heap_check != "local") {
HeapLeakChecker* main_hc = new HeapLeakChecker();
SpinLockHolder l(&heap_checker_lock);
RAW_DCHECK(main_heap_checker == NULL,
"Repeated creation of main_heap_checker");
main_heap_checker = main_hc;
do_main_heap_check = true;
}
{ SpinLockHolder l(&heap_checker_lock);
RAW_CHECK(heap_checker_on && constructor_heap_profiling,
"Leak checking is expected to be fully turned on now");
}
#if 0
SetCommandLineOptionWithMode("max_free_queue_size", "104857600",
SET_FLAG_IF_DEFAULT);
#endif
}
REGISTER_MODULE_INITIALIZER(init_start, HeapLeakChecker_InternalInitStart());
REGISTER_MODULE_DESTRUCTOR(init_start, HeapLeakChecker_RunHeapCleanups());
bool HeapLeakChecker::NoGlobalLeaksMaybeSymbolize(
ShouldSymbolize should_symbolize) {
HeapLeakChecker* main_hc = GlobalChecker();
if (main_hc) {
RAW_VLOG(10, "Checking for whole-program memory leaks");
return main_hc->DoNoLeaks(should_symbolize);
}
return true;
}
bool HeapLeakChecker::DoMainHeapCheck() {
if (FLAGS_heap_check_delay_seconds > 0) {
sleep(FLAGS_heap_check_delay_seconds);
}
{ SpinLockHolder l(&heap_checker_lock);
if (!do_main_heap_check) return false;
RAW_DCHECK(heap_checker_pid == getpid(), "");
do_main_heap_check = false;
}
if (!NoGlobalLeaksMaybeSymbolize(SYMBOLIZE)) {
if (FLAGS_heap_check_identify_leaks) {
RAW_LOG(FATAL, "Whole-program memory leaks found.");
}
RAW_LOG(ERROR, "Exiting with error code (instead of crashing) "
"because of whole-program memory leaks");
_exit(FLAGS_heap_check_error_exit_code);
}
return true;
}
HeapLeakChecker* HeapLeakChecker::GlobalChecker() {
SpinLockHolder l(&heap_checker_lock);
return main_heap_checker;
}
bool HeapLeakChecker::NoGlobalLeaks() {
return NoGlobalLeaksMaybeSymbolize(DO_NOT_SYMBOLIZE);
}
void HeapLeakChecker::CancelGlobalCheck() {
SpinLockHolder l(&heap_checker_lock);
if (do_main_heap_check) {
RAW_VLOG(heap_checker_info_level,
"Canceling the automatic at-exit whole-program memory leak check");
do_main_heap_check = false;
}
}
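// Turns heap tracking on as early as possible: installs the New/Delete
// malloc hooks, initializes MemoryRegionMap and the internal Allocator, and
// creates heap_profile so that allocations made by global constructors are
// already tracked.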
void HeapLeakChecker::BeforeConstructorsLocked() {
RAW_DCHECK(heap_checker_lock.IsHeld(), "");
RAW_CHECK(!constructor_heap_profiling,
"BeforeConstructorsLocked called multiple times");
#ifdef ADDRESS_SANITIZER
return;
#endif
RAW_CHECK(MallocHook::AddNewHook(&NewHook), "");
RAW_CHECK(MallocHook::AddDeleteHook(&DeleteHook), "");
constructor_heap_profiling = true;
MemoryRegionMap::Init(1, false);
Allocator::Init();
RAW_CHECK(heap_profile == NULL, "");
heap_profile = new(Allocator::Allocate(sizeof(HeapProfileTable)))
HeapProfileTable(&Allocator::Allocate, &Allocator::Free,
false);
RAW_VLOG(10, "Starting tracking the heap");
heap_checker_on = true;
}
void HeapLeakChecker::TurnItselfOffLocked() {
RAW_DCHECK(heap_checker_lock.IsHeld(), "");
if (!FLAGS_heap_check.empty())
FLAGS_heap_check.clear();
if (constructor_heap_profiling) {
RAW_CHECK(heap_checker_on, "");
RAW_VLOG(heap_checker_info_level, "Turning perftools heap leak checking off");
heap_checker_on = false;
RAW_CHECK(MallocHook::RemoveNewHook(&NewHook), "");
RAW_CHECK(MallocHook::RemoveDeleteHook(&DeleteHook), "");
Allocator::DeleteAndNull(&heap_profile);
Allocator::DeleteAndNullIfNot(&ignored_objects);
Allocator::DeleteAndNullIfNot(&disabled_ranges);
Allocator::DeleteAndNullIfNot(&global_region_caller_ranges);
Allocator::Shutdown();
MemoryRegionMap::Shutdown();
}
RAW_CHECK(!heap_checker_on, "");
}
extern bool heap_leak_checker_bcad_variable;
static bool has_called_before_constructors = false;
void HeapLeakChecker_BeforeConstructors() {
SpinLockHolder l(&heap_checker_lock);
if (has_called_before_constructors) return;
has_called_before_constructors = true;
heap_checker_pid = getpid();
heap_leak_checker_bcad_variable = true;
const char* verbose_str = GetenvBeforeMain("PERFTOOLS_VERBOSE");
if (verbose_str && atoi(verbose_str)) {
FLAGS_verbose = atoi(verbose_str);
}
bool need_heap_check = true;
if (!GetenvBeforeMain("HEAPCHECK")) {
need_heap_check = false;
}
#ifdef HAVE_GETEUID
if (need_heap_check && getuid() != geteuid()) {
RAW_LOG(WARNING, ("HeapChecker: ignoring HEAPCHECK because "
"program seems to be setuid\n"));
need_heap_check = false;
}
#endif
if (need_heap_check) {
HeapLeakChecker::BeforeConstructorsLocked();
}
}
extern "C" void MallocHook_InitAtFirstAllocation_HeapLeakChecker() {
HeapLeakChecker_BeforeConstructors();
}
void HeapLeakChecker_AfterDestructors() {
{ SpinLockHolder l(&heap_checker_lock);
if (heap_checker_pid != getpid()) return;
}
if (FLAGS_heap_check_after_destructors) {
if (HeapLeakChecker::DoMainHeapCheck()) {
const struct timespec sleep_time = { 0, 500000000 };
nanosleep(&sleep_time, NULL);
}
}
SpinLockHolder l(&heap_checker_lock);
RAW_CHECK(!do_main_heap_check, "should have done it");
}
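// Records [start_address, end_address) as a code range whose allocations are
// not reported as leaks when an address in it appears within max_depth
// frames of an allocation's call stack.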
void HeapLeakChecker::DisableChecksFromToLocked(const void* start_address,
const void* end_address,
int max_depth) {
RAW_DCHECK(heap_checker_lock.IsHeld(), "");
RAW_DCHECK(start_address < end_address, "");
if (disabled_ranges == NULL) {
disabled_ranges = new(Allocator::Allocate(sizeof(DisabledRangeMap)))
DisabledRangeMap;
}
RangeValue value;
value.start_address = AsInt(start_address);
value.max_depth = max_depth;
if (disabled_ranges->insert(make_pair(AsInt(end_address), value)).second) {
RAW_VLOG(10, "Disabling leak checking in stack traces "
"under frame addresses between %p..%p",
start_address, end_address);
} else {
RangeValue const& val = disabled_ranges->find(AsInt(end_address))->second;
if (val.max_depth != value.max_depth ||
val.start_address != value.start_address) {
RAW_LOG(FATAL, "Two DisableChecksToHereFrom calls conflict: "
"(%p, %p, %d) vs. (%p, %p, %d)",
AsPtr(val.start_address), end_address, val.max_depth,
start_address, end_address, max_depth);
}
}
}
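// If *ptr points inside some recorded heap object (within
// max_heap_object_size of its start), rewrites *ptr to the object's start
// address, sets *object_size, and returns true.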
inline bool HeapLeakChecker::HaveOnHeapLocked(const void** ptr,
size_t* object_size) {
const uintptr_t addr = AsInt(*ptr);
if (heap_profile->FindInsideAlloc(
*ptr, max_heap_object_size, ptr, object_size)) {
RAW_VLOG(16, "Got pointer into %p at +%"PRIuPTR" offset",
*ptr, addr - AsInt(*ptr));
return true;
}
return false;
}
const void* HeapLeakChecker::GetAllocCaller(void* ptr) {
HeapProfileTable::AllocInfo info;
{ SpinLockHolder l(&heap_checker_lock);
RAW_CHECK(heap_profile->FindAllocDetails(ptr, &info), "");
}
RAW_CHECK(info.stack_depth >= 1, "");
return info.call_stack[0];
}