This source file includes the following definitions:
- ByAllocatedSpace
- dealloc_
- DeallocateAllocationMap
- DeallocateBucketTable
- GetBucket
- GetCallerStackTrace
- RecordAlloc
- RecordFree
- FindAlloc
- FindAllocDetails
- FindInsideAlloc
- MarkAsLive
- MarkAsIgnored
- UnparseBucket
- MakeSortedBucketList
- RefreshMMapData
- ClearMMapData
- IterateOrderedAllocContexts
- FillOrderedProfile
- DumpNonLiveIterator
- ZeroBucketCountsIterator
- AddIfNonLive
- WriteProfile
- CleanupOldProfiles
- TakeSnapshot
- ReleaseSnapshot
- AddToSnapshot
- NonLiveSnapshot
- ReportCallback
- ReportLeaks
- ReportObject
- ReportIndividualObjects
#include <config.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>   
#endif
#include <fcntl.h>    
#ifdef HAVE_GLOB_H
#include <glob.h>
#ifndef GLOB_NOMATCH  
# define GLOB_NOMATCH 0
#endif
#endif
#ifdef HAVE_INTTYPES_H
#include <inttypes.h> 
#endif
#ifdef HAVE_POLL_H
#include <poll.h>
#endif
#include <errno.h>
#include <stdarg.h>
#include <string>
#include <map>
#include <algorithm>  
#include "heap-profile-table.h"
#include "base/logging.h"
#include "raw_printer.h"
#include "symbolize.h"
#include <gperftools/stacktrace.h>
#include <gperftools/malloc_hook.h>
#include "memory_region_map.h"
#include "base/commandlineflags.h"
#include "base/sysinfo.h"
using std::sort;
using std::equal;
using std::copy;
using std::string;
using std::map;
using tcmalloc::FillProcSelfMaps;   
using tcmalloc::DumpProcSelfMaps;   
DEFINE_bool(cleanup_old_heap_profiles,
            EnvToBool("HEAP_PROFILE_CLEANUP", true),
            "At initialization time, delete old heap profiles.");
DEFINE_int32(heap_check_max_leaks,
             EnvToInt("HEAP_CHECK_MAX_LEAKS", 20),
             "The maximum number of leak reports to print.");
static const char kProfileHeader[] = "heap profile: ";
static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n";
const char HeapProfileTable::kFileExt[] = ".heap";
static const int kHashTableSize = 179999;   // size of the bucket hash tables
const int HeapProfileTable::kMaxStackDepth;
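// Number of our own stack frames to strip from recorded caller traces.
// Debug builds strip one extra frame, presumably because less inlining
// leaves an additional wrapper frame on the stack.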
#ifdef NDEBUG
static const int kStripFrames = 2;
#else
static const int kStripFrames = 3;
#endif
static bool ByAllocatedSpace(HeapProfileTable::Stats* a,
                             HeapProfileTable::Stats* b) {
  // Order buckets by decreasing net allocated space.
  return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size);
}
HeapProfileTable::HeapProfileTable(Allocator alloc, DeAllocator dealloc)
    : alloc_(alloc), dealloc_(dealloc) {
  // Initialize the overall stats.
  memset(&total_, 0, sizeof(total_));
  // Make the hash table for malloc buckets.
  const int alloc_table_bytes = kHashTableSize * sizeof(*alloc_table_);
  alloc_table_ = reinterpret_cast<Bucket**>(alloc_(alloc_table_bytes));
  memset(alloc_table_, 0, alloc_table_bytes);
  num_alloc_buckets_ = 0;
  // The mmap bucket table is created lazily by RefreshMMapData().
  mmap_table_ = NULL;
  num_available_mmap_buckets_ = 0;
  // Make the map from allocation address to AllocValue.
  alloc_address_map_ =
      new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_);
  mmap_address_map_ = NULL;
}
HeapProfileTable::~HeapProfileTable() {
  DeallocateBucketTable(alloc_table_);
  alloc_table_ = NULL;
  DeallocateBucketTable(mmap_table_);
  mmap_table_ = NULL;
  DeallocateAllocationMap(alloc_address_map_);
  alloc_address_map_ = NULL;
  DeallocateAllocationMap(mmap_address_map_);
  mmap_address_map_ = NULL;
}
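// Destroys and frees an AllocationMap previously created with alloc_.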
void HeapProfileTable::DeallocateAllocationMap(AllocationMap* allocation) {
  if (allocation != NULL) {
    allocation->~AllocationMap();
    dealloc_(allocation);
  }
}
void HeapProfileTable::DeallocateBucketTable(Bucket** table) {
  if (table != NULL) {
    for (int b = 0; b < kHashTableSize; b++) {
      for (Bucket* x = table[b]; x != 0; ) {
        Bucket* bucket = x;
        x = x->next;
        dealloc_(bucket->stack);
        dealloc_(bucket);
      }
    }
    dealloc_(table);
  }
}
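// Returns the bucket for the call stack key[0..depth-1] in 'table',
// creating a new zero-initialized bucket (and incrementing *bucket_count
// when it is non-NULL) if no matching bucket exists yet.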
HeapProfileTable::Bucket* HeapProfileTable::GetBucket(
    int depth, const void* const key[], Bucket** table,
    int* bucket_count) {
  // Hash the call stack.
  uintptr_t h = 0;
  for (int i = 0; i < depth; i++) {
    h += reinterpret_cast<uintptr_t>(key[i]);
    h += h << 10;
    h ^= h >> 6;
  }
  h += h << 3;
  h ^= h >> 11;
  // Look for an existing bucket for this stack.
  unsigned int buck = ((unsigned int) h) % kHashTableSize;
  for (Bucket* b = table[buck]; b != 0; b = b->next) {
    if ((b->hash == h) &&
        (b->depth == depth) &&
        equal(key, key + depth, b->stack)) {
      return b;
    }
  }
  // Create a new bucket holding a copy of the stack.
  const size_t key_size = sizeof(key[0]) * depth;
  const void** kcopy = reinterpret_cast<const void**>(alloc_(key_size));
  copy(key, key + depth, kcopy);
  Bucket* b = reinterpret_cast<Bucket*>(alloc_(sizeof(Bucket)));
  memset(b, 0, sizeof(*b));
  b->hash  = h;
  b->depth = depth;
  b->stack = kcopy;
  b->next  = table[buck];
  table[buck] = b;
  if (bucket_count != NULL) {
    ++(*bucket_count);
  }
  return b;
}
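// Captures the stack trace of our caller into 'stack', skipping our own
// profiling frames plus 'skip_count' additional frames; returns the
// number of frames captured.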
int HeapProfileTable::GetCallerStackTrace(
    int skip_count, void* stack[kMaxStackDepth]) {
  return MallocHook::GetCallerStackTrace(
      stack, kMaxStackDepth, kStripFrames + skip_count + 1);
}
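// Records an allocation of 'bytes' at 'ptr' against the bucket for
// 'call_stack', updating both the per-bucket and the overall statistics.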
void HeapProfileTable::RecordAlloc(
    const void* ptr, size_t bytes, int stack_depth,
    const void* const call_stack[]) {
  Bucket* b = GetBucket(stack_depth, call_stack, alloc_table_,
                        &num_alloc_buckets_);
  b->allocs++;
  b->alloc_size += bytes;
  total_.allocs++;
  total_.alloc_size += bytes;
  AllocValue v;
  v.set_bucket(b);      // also sets the live and ignore flags to false
  v.bytes = bytes;
  alloc_address_map_->Insert(ptr, v);
}
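// Records the deallocation of 'ptr' if its allocation was recorded,
// crediting the freed bytes back to its bucket and the overall totals.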
void HeapProfileTable::RecordFree(const void* ptr) {
  AllocValue v;
  if (alloc_address_map_->FindAndRemove(ptr, &v)) {
    Bucket* b = v.bucket();
    b->frees++;
    b->free_size += v.bytes;
    total_.frees++;
    total_.free_size += v.bytes;
  }
}
bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const {
  const AllocValue* alloc_value = alloc_address_map_->Find(ptr);
  if (alloc_value != NULL) *object_size = alloc_value->bytes;
  return alloc_value != NULL;
}
bool HeapProfileTable::FindAllocDetails(const void* ptr,
                                        AllocInfo* info) const {
  const AllocValue* alloc_value = alloc_address_map_->Find(ptr);
  if (alloc_value != NULL) {
    info->object_size = alloc_value->bytes;
    info->call_stack = alloc_value->bucket()->stack;
    info->stack_depth = alloc_value->bucket()->depth;
  }
  return alloc_value != NULL;
}
bool HeapProfileTable::FindInsideAlloc(const void* ptr,
                                       size_t max_size,
                                       const void** object_ptr,
                                       size_t* object_size) const {
  const AllocValue* alloc_value =
    alloc_address_map_->FindInside(&AllocValueSize, max_size, ptr, object_ptr);
  if (alloc_value != NULL) *object_size = alloc_value->bytes;
  return alloc_value != NULL;
}
bool HeapProfileTable::MarkAsLive(const void* ptr) {
  AllocValue* alloc = alloc_address_map_->FindMutable(ptr);
  if (alloc && !alloc->live()) {
    alloc->set_live(true);
    return true;
  }
  return false;
}
void HeapProfileTable::MarkAsIgnored(const void* ptr) {
  AllocValue* alloc = alloc_address_map_->FindMutable(ptr);
  if (alloc) {
    alloc->set_ignore(true);
  }
}
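// Appends a text line describing bucket 'b' (its counters, 'extra', and
// its stack) to 'buf' at offset 'buflen' without exceeding 'bufsize';
// returns the new offset.  Also adds b's counters into *profile_stats
// when it is non-NULL.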
int HeapProfileTable::UnparseBucket(const Bucket& b,
                                    char* buf, int buflen, int bufsize,
                                    const char* extra,
                                    Stats* profile_stats) {
  if (profile_stats != NULL) {
    profile_stats->allocs += b.allocs;
    profile_stats->alloc_size += b.alloc_size;
    profile_stats->frees += b.frees;
    profile_stats->free_size += b.free_size;
  }
  int printed =
    snprintf(buf + buflen, bufsize - buflen, "%6d: %8" PRId64 " [%6d: %8" PRId64 "] @%s",
             b.allocs - b.frees,
             b.alloc_size - b.free_size,
             b.allocs,
             b.alloc_size,
             extra);
  // If snprintf failed or its output was truncated, return what we have.
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;
  for (int d = 0; d < b.depth; d++) {
    printed = snprintf(buf + buflen, bufsize - buflen, " 0x%08" PRIxPTR,
                       reinterpret_cast<uintptr_t>(b.stack[d]));
    if (printed < 0 || printed >= bufsize - buflen) return buflen;
    buflen += printed;
  }
  printed = snprintf(buf + buflen, bufsize - buflen, "\n");
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;
  return buflen;
}
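// Returns a freshly allocated array of all malloc buckets (plus all mmap
// buckets when mmap profiling data is present), sorted by decreasing net
// allocated space.  The caller must release the array with dealloc_().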
HeapProfileTable::Bucket**
HeapProfileTable::MakeSortedBucketList() const {
  Bucket** list = reinterpret_cast<Bucket**>(alloc_(sizeof(Bucket*) *
      (num_alloc_buckets_ + num_available_mmap_buckets_)));
  RAW_DCHECK(mmap_table_ != NULL || num_available_mmap_buckets_ == 0, "");
  int n = 0;
  for (int b = 0; b < kHashTableSize; b++) {
    for (Bucket* x = alloc_table_[b]; x != 0; x = x->next) {
      list[n++] = x;
    }
  }
  RAW_DCHECK(n == num_alloc_buckets_, "");
  if (mmap_table_ != NULL) {
    for (int b = 0; b < kHashTableSize; b++) {
      for (Bucket* x = mmap_table_[b]; x != 0; x = x->next) {
        list[n++] = x;
      }
    }
  }
  RAW_DCHECK(n == num_alloc_buckets_ + num_available_mmap_buckets_, "");
  sort(list, list + num_alloc_buckets_ + num_available_mmap_buckets_,
       ByAllocatedSpace);
  return list;
}
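// Rebuilds the mmap bucket table and the mmap address map from the
// regions currently tracked by MemoryRegionMap.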
void HeapProfileTable::RefreshMMapData() {
  // Make the mmap bucket table if we do not have one yet.
  static const int mmap_table_bytes = kHashTableSize * sizeof(*mmap_table_);
  if (mmap_table_ == NULL) {
    mmap_table_ = reinterpret_cast<Bucket**>(alloc_(mmap_table_bytes));
    memset(mmap_table_, 0, mmap_table_bytes);
  }
  num_available_mmap_buckets_ = 0;
  ClearMMapData();
  mmap_address_map_ =
      new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_);
  MemoryRegionMap::LockHolder l;
  for (MemoryRegionMap::RegionIterator r =
           MemoryRegionMap::BeginRegionLocked();
       r != MemoryRegionMap::EndRegionLocked(); ++r) {
    Bucket* b =
        GetBucket(r->call_stack_depth, r->call_stack, mmap_table_, NULL);
    if (b->alloc_size == 0) {
      num_available_mmap_buckets_ += 1;
    }
    b->allocs += 1;
    b->alloc_size += r->end_addr - r->start_addr;
    AllocValue v;
    v.set_bucket(b);
    v.bytes = r->end_addr - r->start_addr;
    mmap_address_map_->Insert(reinterpret_cast<const void*>(r->start_addr), v);
  }
}
void HeapProfileTable::ClearMMapData() {
  if (mmap_address_map_ != NULL) {
    mmap_address_map_->Iterate(ZeroBucketCountsIterator, this);
    mmap_address_map_->~AllocationMap();
    dealloc_(mmap_address_map_);
    mmap_address_map_ = NULL;
  }
}
void HeapProfileTable::IterateOrderedAllocContexts(
    AllocContextIterator callback) const {
  Bucket** list = MakeSortedBucketList();
  AllocContextInfo info;
  for (int i = 0; i < num_alloc_buckets_; ++i) {
    *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]);
    info.stack_depth = list[i]->depth;
    info.call_stack = list[i]->stack;
    callback(info);
  }
  dealloc_(list);
}
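// Fills 'buf' (of 'size' bytes) with the textual profile: a header line
// with the overall totals, one line per bucket in sorted order, and the
// MAPPED_LIBRARIES section at the end.  Returns the number of bytes
// written, or 0 if even the headers do not fit.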
int HeapProfileTable::FillOrderedProfile(char buf[], int size) const {
  Bucket** list = MakeSortedBucketList();
  // Strategy: first write the MAPPED_LIBRARIES section into the front of
  // 'buf' and move it to the very end of the buffer.  That leaves the
  // front free for the profile header and the bucket lines, after which
  // the maps data is moved back down so it immediately follows the
  // bucket data.
  int map_length = snprintf(buf, size, "%s", kProcSelfMapsHeader);
  if (map_length < 0 || map_length >= size) return 0;
  bool dummy;   // ignored output parameter of FillProcSelfMaps
  map_length += FillProcSelfMaps(buf + map_length, size - map_length, &dummy);
  RAW_DCHECK(map_length <= size, "");
  char* const map_start = buf + size - map_length;  // maps data goes at the very end
  memmove(map_start, buf, map_length);
  size -= map_length;
  Stats stats;
  memset(&stats, 0, sizeof(stats));
  int bucket_length = snprintf(buf, size, "%s", kProfileHeader);
  if (bucket_length < 0 || bucket_length >= size) return 0;
  Bucket total_with_mmap(total_);
  if (mmap_table_ != NULL) {
    total_with_mmap.alloc_size += MemoryRegionMap::MapSize();
    total_with_mmap.free_size += MemoryRegionMap::UnmapSize();
  }
  bucket_length = UnparseBucket(total_with_mmap, buf, bucket_length, size,
                                " heapprofile", &stats);
  for (int i = 0; i < num_alloc_buckets_; i++) {
    bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "",
                                  &stats);
  }
  RAW_DCHECK(bucket_length < size, "");
  dealloc_(list);
  RAW_DCHECK(buf + bucket_length <= map_start, "");
  memmove(buf + bucket_length, map_start, map_length);  // close the gap between buckets and maps data
  return bucket_length + map_length;
}
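// Iterator callback used by WriteProfile: clears the live mark on live
// objects, skips ignored objects, and writes every remaining object to
// args.fd as a single-allocation bucket.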
inline
void HeapProfileTable::DumpNonLiveIterator(const void* ptr, AllocValue* v,
                                           const DumpArgs& args) {
  if (v->live()) {
    v->set_live(false);
    return;
  }
  if (v->ignore()) {
    return;
  }
  Bucket b;
  memset(&b, 0, sizeof(b));
  b.allocs = 1;
  b.alloc_size = v->bytes;
  b.depth = v->bucket()->depth;
  b.stack = v->bucket()->stack;
  char buf[1024];
  int len = UnparseBucket(b, buf, 0, sizeof(buf), "", args.profile_stats);
  RawWrite(args.fd, buf, len);
}
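// Iterator callback that resets the counters of the bucket owning each
// mmap region; used when the mmap data is cleared.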
inline void HeapProfileTable::ZeroBucketCountsIterator(
    const void* ptr, AllocValue* v, HeapProfileTable* heap_profile) {
  Bucket* b = v->bucket();
  if (b != NULL) {
    b->allocs = 0;
    b->alloc_size = 0;
    b->free_size = 0;
    b->frees = 0;
  }
}
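// Iterator callback used by NonLiveSnapshot: clears the live mark on live
// objects; non-live objects are copied into arg->dest unless they already
// appear in arg->base.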
void HeapProfileTable::AddIfNonLive(const void* ptr, AllocValue* v,
                                    AddNonLiveArgs* arg) {
  if (v->live()) {
    v->set_live(false);
  } else {
    if (arg->base != NULL && arg->base->map_.Find(ptr) != NULL) {
      // Present in the base snapshot, so do not copy it.
    } else {
      arg->dest->Add(ptr, *v);
    }
  }
}
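// Writes a profile of the non-live objects in 'allocations', using 'total'
// for the header totals, to 'file_name'; returns false if the file cannot
// be opened for writing.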
bool HeapProfileTable::WriteProfile(const char* file_name,
                                    const Bucket& total,
                                    AllocationMap* allocations) {
  RAW_VLOG(1, "Dumping non-live heap profile to %s", file_name);
  RawFD fd = RawOpenForWriting(file_name);
  if (fd != kIllegalRawFD) {
    RawWrite(fd, kProfileHeader, strlen(kProfileHeader));
    char buf[512];
    int len = UnparseBucket(total, buf, 0, sizeof(buf), " heapprofile",
                            NULL);
    RawWrite(fd, buf, len);
    const DumpArgs args(fd, NULL);
    allocations->Iterate<const DumpArgs&>(DumpNonLiveIterator, args);
    RawWrite(fd, kProcSelfMapsHeader, strlen(kProcSelfMapsHeader));
    DumpProcSelfMaps(fd);
    RawClose(fd);
    return true;
  } else {
    RAW_LOG(ERROR, "Failed dumping filtered heap profile to %s", file_name);
    return false;
  }
}
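// Deletes leftover profile files matching "<prefix>.*<kFileExt>" from
// earlier runs, unless FLAGS_cleanup_old_heap_profiles is false or glob()
// is unavailable on this platform.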
void HeapProfileTable::CleanupOldProfiles(const char* prefix) {
  if (!FLAGS_cleanup_old_heap_profiles)
    return;
  string pattern = string(prefix) + ".*" + kFileExt;
#if defined(HAVE_GLOB_H)
  glob_t g;
  const int r = glob(pattern.c_str(), GLOB_ERR, NULL, &g);
  if (r == 0 || r == GLOB_NOMATCH) {
    const size_t prefix_length = strlen(prefix);
    for (size_t i = 0; i < g.gl_pathc; i++) {
      const char* fname = g.gl_pathv[i];
      if ((strlen(fname) >= prefix_length) &&
          (memcmp(fname, prefix, prefix_length) == 0)) {
        RAW_VLOG(1, "Removing old heap profile %s", fname);
        unlink(fname);
      }
    }
  }
  globfree(&g);
#else   
  RAW_LOG(WARNING, "Unable to remove old heap profiles (can't run glob())");
#endif
}
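// Takes a snapshot of every currently recorded allocation; the caller must
// eventually release it with ReleaseSnapshot().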
HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() {
  Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
  alloc_address_map_->Iterate(AddToSnapshot, s);
  return s;
}
void HeapProfileTable::ReleaseSnapshot(Snapshot* s) {
  s->~Snapshot();
  dealloc_(s);
}
void HeapProfileTable::AddToSnapshot(const void* ptr, AllocValue* v,
                                     Snapshot* snapshot) {
  snapshot->Add(ptr, *v);
}
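// Returns a snapshot of the objects that are not marked live and do not
// appear in *base, clearing live marks along the way; release the result
// with ReleaseSnapshot().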
HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot(
    Snapshot* base) {
  RAW_VLOG(2, "NonLiveSnapshot input: %d %d\n",
           int(total_.allocs - total_.frees),
           int(total_.alloc_size - total_.free_size));
  Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
  AddNonLiveArgs args;
  args.dest = s;
  args.base = base;
  alloc_address_map_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args);
  RAW_VLOG(2, "NonLiveSnapshot output: %d %d\n",
           int(s->total_.allocs - s->total_.frees),
           int(s->total_.alloc_size - s->total_.free_size));
  return s;
}
struct HeapProfileTable::Snapshot::Entry {
  int count;
  int bytes;
  Bucket* bucket;
  Entry() : count(0), bytes(0) { }
  // Order entries by decreasing byte count.
  bool operator<(const Entry& x) const {
    return this->bytes > x.bytes;
  }
};
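// Per-bucket summaries of the leaked objects, accumulated by
// ReportCallback.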
struct HeapProfileTable::Snapshot::ReportState {
  map<Bucket*, Entry> buckets_;
};
void HeapProfileTable::Snapshot::ReportCallback(const void* ptr,
                                                AllocValue* v,
                                                ReportState* state) {
  Entry* e = &state->buckets_[v->bucket()];  // creates a default Entry on first use
  e->bucket = v->bucket();
  e->count++;
  e->bytes += v->bytes;
}
void HeapProfileTable::Snapshot::ReportLeaks(const char* checker_name,
                                             const char* filename,
                                             bool should_symbolize) {
  // Report the grand total first, then the largest individual leaks, and
  // finally dump a profile of the leaked objects to 'filename'.
  RAW_LOG(ERROR, "Leak check %s detected leaks of %" PRIuS " bytes "
          "in %" PRIuS " objects",
          checker_name,
          size_t(total_.alloc_size),
          size_t(total_.allocs));
  // Group the leaked objects by allocation bucket.
  ReportState state;
  map_.Iterate(&ReportCallback, &state);
  // Copy the per-bucket summaries into an array so they can be sorted.
  const int n = state.buckets_.size();
  Entry* entries = new Entry[n];
  int dst = 0;
  for (map<Bucket*,Entry>::const_iterator iter = state.buckets_.begin();
       iter != state.buckets_.end();
       ++iter) {
    entries[dst++] = iter->second;
  }
  sort(entries, entries + n);
  // Report only the largest leaks, at most FLAGS_heap_check_max_leaks of them.
  const int to_report =
      (FLAGS_heap_check_max_leaks > 0 &&
       n > FLAGS_heap_check_max_leaks) ? FLAGS_heap_check_max_leaks : n;
  RAW_LOG(ERROR, "The %d largest leaks:", to_report);
  // Collect the program counters that may need symbolization.
  SymbolTable symbolization_table;
  for (int i = 0; i < to_report; i++) {
    const Entry& e = entries[i];
    for (int j = 0; j < e.bucket->depth; j++) {
      symbolization_table.Add(e.bucket->stack[j]);
    }
  }
  static const int kBufSize = 2<<10;
  char buffer[kBufSize];
  if (should_symbolize)
    symbolization_table.Symbolize();
  for (int i = 0; i < to_report; i++) {
    const Entry& e = entries[i];
    base::RawPrinter printer(buffer, kBufSize);
    printer.Printf("Leak of %d bytes in %d objects allocated from:\n",
                   e.bytes, e.count);
    for (int j = 0; j < e.bucket->depth; j++) {
      const void* pc = e.bucket->stack[j];
      printer.Printf("\t@ %" PRIxPTR " %s\n",
          reinterpret_cast<uintptr_t>(pc), symbolization_table.GetSymbol(pc));
    }
    RAW_LOG(ERROR, "%s", buffer);
  }
  if (to_report < n) {
    RAW_LOG(ERROR, "Skipping leaks numbered %d..%d",
            to_report, n-1);
  }
  delete[] entries;
  // Also dump the leaked objects as a heap profile that pprof can analyze.
  if (!HeapProfileTable::WriteProfile(filename, total_, &map_)) {
    RAW_LOG(ERROR, "Could not write pprof profile to %s", filename);
  }
}
void HeapProfileTable::Snapshot::ReportObject(const void* ptr,
                                              AllocValue* v,
                                              char* unused) {
  // Log one line for this leaked object.
  RAW_LOG(ERROR, "leaked %" PRIuS " byte object %p", v->bytes, ptr);
}
void HeapProfileTable::Snapshot::ReportIndividualObjects() {
  char unused;
  map_.Iterate(ReportObject, &unused);
}