// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Platform specific code for Linux goes here. For the POSIX compatible parts
// the implementation is in platform-posix.cc.
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <sys/prctl.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <stdlib.h>
// Ubuntu Dapper requires memory pages to be marked as
// executable. Otherwise, the OS raises an exception when executing code
// in that page.
#include <sys/types.h> // mmap & munmap
#include <sys/mman.h> // mmap & munmap
#include <sys/stat.h> // open
#include <fcntl.h> // open
#include <unistd.h> // sysconf
#if defined(__GLIBC__) && !defined(__UCLIBC__)
#include <execinfo.h> // backtrace, backtrace_symbols
#endif // defined(__GLIBC__) && !defined(__UCLIBC__)
#include <strings.h> // index
#include <errno.h>
#include <stdarg.h>
#undef MAP_TYPE
#include "v8.h"
#include "platform-posix.h"
#include "platform.h"
#include "v8threads.h"
#include "vm-state-inl.h"
namespace v8 {
namespace internal {
// 0 is never a valid thread id on Linux since tids and pids share a
// name space and pid 0 is reserved (see man 2 kill).
static const pthread_t kNoThread = (pthread_t) 0;
double ceiling(double x) {
return ceil(x);
}
static Mutex* limit_mutex = NULL;
void OS::PostSetUp() {
POSIXPostSetUp();
}
uint64_t OS::CpuFeaturesImpliedByPlatform() {
return 0; // Linux runs on anything.
}
#ifdef __arm__
static bool CPUInfoContainsString(const char * search_string) {
const char* file_name = "/proc/cpuinfo";
// This is written as a straight shot one pass parser
// and not using STL string and ifstream because,
// on Linux, it's reading from a (non-mmap-able)
// character special device.
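// Note: after a partial match fails, the characters already consumed are not
// re-examined, so overlapping occurrences of the search string could be
// missed. This is acceptable for the short feature names looked up here.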
FILE* f = NULL;
const char* what = search_string;
if (NULL == (f = fopen(file_name, "r")))
return false;
int k;
while (EOF != (k = fgetc(f))) {
if (k == *what) {
++what;
while ((*what != '\0') && (*what == fgetc(f))) {
++what;
}
if (*what == '\0') {
fclose(f);
return true;
} else {
what = search_string;
}
}
}
fclose(f);
// Did not find string in the proc file.
return false;
}
bool OS::ArmCpuHasFeature(CpuFeature feature) {
const char* search_string = NULL;
// Simple detection of VFP at runtime for Linux.
// It is based on /proc/cpuinfo, which reveals hardware configuration
// to user-space applications. According to ARM (mid 2009), no similar
// facility is universally available on the ARM architectures,
// so it's up to individual OSes to provide such.
switch (feature) {
case VFP3:
search_string = "vfpv3";
break;
case ARMv7:
search_string = "ARMv7";
break;
default:
UNREACHABLE();
}
if (CPUInfoContainsString(search_string)) {
return true;
}
if (feature == VFP3) {
// Some old kernels will report vfp not vfpv3. Here we make a last attempt
// to detect vfpv3 by checking for vfp *and* neon, since neon is only
// available on architectures with vfpv3.
// Checking neon on its own is not enough as it is possible to have neon
// without vfp.
if (CPUInfoContainsString("vfp") && CPUInfoContainsString("neon")) {
return true;
}
}
return false;
}
bool OS::ArmUsingHardFloat() {
// GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
// the Floating Point ABI used (PCS stands for Procedure Call Standard).
// We use these as well as a couple of other defines to statically determine
// what FP ABI is used.
// GCC versions 4.4 and below don't support hard-fp.
// GCC 4.5 may support hard-fp without defining __ARM_PCS or
// __ARM_PCS_VFP.
#define GCC_VERSION (__GNUC__ * 10000 \
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
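// For example, GCC 4.6.3 yields 4 * 10000 + 6 * 100 + 3 = 40603.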
#if GCC_VERSION >= 40600
#if defined(__ARM_PCS_VFP)
return true;
#else
return false;
#endif
#elif GCC_VERSION < 40500
return false;
#else
#if defined(__ARM_PCS_VFP)
return true;
#elif defined(__ARM_PCS) || defined(__SOFTFP) || !defined(__VFP_FP__)
return false;
#else
#error "Your version of GCC does not report the FP ABI compiled for." \
"Please report it on this issue" \
"http://code.google.com/p/v8/issues/detail?id=2140"
#endif
#endif
#undef GCC_VERSION
}
#endif // def __arm__
#ifdef __mips__
bool OS::MipsCpuHasFeature(CpuFeature feature) {
const char* search_string = NULL;
const char* file_name = "/proc/cpuinfo";
// Simple detection of FPU at runtime for Linux.
// It is based on /proc/cpuinfo, which reveals hardware configuration
// to user-space applications. According to MIPS (early 2010), no similar
// facility is universally available on the MIPS architectures,
// so it's up to individual OSes to provide such.
//
// This is written as a straight shot one pass parser
// and not using STL string and ifstream because,
// on Linux, it's reading from a (non-mmap-able)
// character special device.
switch (feature) {
case FPU:
search_string = "FPU";
break;
default:
UNREACHABLE();
}
FILE* f = NULL;
const char* what = search_string;
if (NULL == (f = fopen(file_name, "r")))
return false;
int k;
while (EOF != (k = fgetc(f))) {
if (k == *what) {
++what;
while ((*what != '\0') && (*what == fgetc(f))) {
++what;
}
if (*what == '\0') {
fclose(f);
return true;
} else {
what = search_string;
}
}
}
fclose(f);
// Did not find string in the proc file.
return false;
}
#endif // def __mips__
int OS::ActivationFrameAlignment() {
#ifdef V8_TARGET_ARCH_ARM
// On EABI ARM targets this is required for fp correctness in the
// runtime system.
return 8;
#elif V8_TARGET_ARCH_MIPS
return 8;
#endif
// With gcc 4.4 the tree vectorization optimizer can generate code
// that requires 16-byte alignment, such as movdqa on x86.
return 16;
}
void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
#if (defined(V8_TARGET_ARCH_ARM) && defined(__arm__)) || \
(defined(V8_TARGET_ARCH_MIPS) && defined(__mips__))
// Only use on ARM or MIPS hardware.
MemoryBarrier();
#else
__asm__ __volatile__("" : : : "memory");
// An x86 store acts as a release barrier.
#endif
*ptr = value;
}
const char* OS::LocalTimezone(double time) {
if (isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
return t->tm_zone;
}
double OS::LocalTimeOffset() {
time_t tv = time(NULL);
struct tm* t = localtime(&tv);
// tm_gmtoff includes any daylight savings offset, so subtract it.
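// For example, with CEST (UTC+2, DST active) tm_gmtoff is 7200, so the
// result is (7200 - 3600) * 1000 ms, i.e. the UTC+1 standard-time offset.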
return static_cast<double>(t->tm_gmtoff * msPerSecond -
(t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification). The estimate is conservative, i.e., not all addresses in
// 'allocated' space are actually allocated to our heap. The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);
static void UpdateAllocatedSpaceLimits(void* address, int size) {
ASSERT(limit_mutex != NULL);
ScopedLock lock(limit_mutex);
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
Max(highest_ever_allocated,
reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}
bool OS::IsOutsideAllocatedSpace(void* address) {
return address < lowest_ever_allocated || address >= highest_ever_allocated;
}
size_t OS::AllocateAlignment() {
return sysconf(_SC_PAGESIZE);
}
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
const size_t msize = RoundUp(requested, AllocateAlignment());
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
void* addr = OS::GetRandomMmapAddr();
void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED) {
LOG(i::Isolate::Current(),
StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
void OS::Free(void* address, const size_t size) {
// TODO(1240712): munmap has a return value which is ignored here.
int result = munmap(address, size);
USE(result);
ASSERT(result == 0);
}
void OS::Sleep(int milliseconds) {
unsigned int ms = static_cast<unsigned int>(milliseconds);
usleep(1000 * ms);
}
void OS::Abort() {
// Redirect to std abort to signal abnormal program termination.
if (FLAG_break_on_abort) {
DebugBreak();
}
abort();
}
void OS::DebugBreak() {
// TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
// which is the architecture of generated code).
#if (defined(__arm__) || defined(__thumb__))
# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
asm("bkpt 0");
# endif
#elif defined(__mips__)
asm("break");
#else
asm("int $3");
#endif
}
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
: file_(file), memory_(memory), size_(size) { }
virtual ~PosixMemoryMappedFile();
virtual void* memory() { return memory_; }
virtual int size() { return size_; }
private:
FILE* file_;
void* memory_;
int size_;
};
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
FILE* file = fopen(name, "r+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);
int size = ftell(file);
void* memory =
mmap(OS::GetRandomMmapAddr(),
size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
fileno(file),
0);
return new PosixMemoryMappedFile(file, memory, size);
}
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
void* initial) {
FILE* file = fopen(name, "w+");
if (file == NULL) return NULL;
int result = fwrite(initial, size, 1, file);
if (result < 1) {
fclose(file);
return NULL;
}
void* memory =
mmap(OS::GetRandomMmapAddr(),
size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
fileno(file),
0);
return new PosixMemoryMappedFile(file, memory, size);
}
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
if (memory_) OS::Free(memory_, size_);
fclose(file_);
}
void OS::LogSharedLibraryAddresses() {
// This function assumes that the layout of the file is as follows:
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// If we encounter an unexpected situation we abort scanning further entries.
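// A typical line looks like this (illustrative):
// 08048000-08056000 r-xp 00000000 08:01 12345 /usr/bin/some_binary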
FILE* fp = fopen("/proc/self/maps", "r");
if (fp == NULL) return;
// Allocate enough room to be able to store a full file name.
const int kLibNameLen = FILENAME_MAX + 1;
char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
i::Isolate* isolate = ISOLATE;
// This loop will terminate once the scanning hits an EOF.
while (true) {
uintptr_t start, end;
char attr_r, attr_w, attr_x, attr_p;
// Parse the addresses and permission bits at the beginning of the line.
if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
int c;
if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
// Found a read-only executable entry. Skip characters until we reach
// the beginning of the filename or the end of the line.
do {
c = getc(fp);
} while ((c != EOF) && (c != '\n') && (c != '/'));
if (c == EOF) break; // EOF: Was unexpected, just exit.
// Process the filename if found.
if (c == '/') {
ungetc(c, fp); // Push the '/' back into the stream to be read below.
// Read to the end of the line. Exit if the read fails.
if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
// Drop the newline character read by fgets. We do not need to check
// for a zero-length string because we know that we at least read the
// '/' character.
lib_name[strlen(lib_name) - 1] = '\0';
} else {
// No library name found, just record the raw address range.
snprintf(lib_name, kLibNameLen,
"%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
}
LOG(isolate, SharedLibraryEvent(lib_name, start, end));
} else {
// Entry not describing executable data. Skip to end of line to set up
// reading the next entry.
do {
c = getc(fp);
} while ((c != EOF) && (c != '\n'));
if (c == EOF) break;
}
}
free(lib_name);
fclose(fp);
}
static const char kGCFakeMmap[] = "/tmp/__v8_gc__";
void OS::SignalCodeMovingGC() {
// Support for ll_prof.py.
//
// The Linux profiler built into the kernel logs all mmap's with
// PROT_EXEC so that analysis tools can properly attribute ticks. We
// do a mmap with a name known by ll_prof.py and immediately munmap
// it. This injects a GC marker into the stream of events generated
// by the kernel and allows us to synchronize the V8 code log and the
// kernel log.
int size = sysconf(_SC_PAGESIZE);
FILE* f = fopen(kGCFakeMmap, "w+");
void* addr = mmap(OS::GetRandomMmapAddr(),
size,
PROT_READ | PROT_EXEC,
MAP_PRIVATE,
fileno(f),
0);
ASSERT(addr != MAP_FAILED);
OS::Free(addr, size);
fclose(f);
}
int OS::StackWalk(Vector<OS::StackFrame> frames) {
// backtrace is a glibc extension.
#if defined(__GLIBC__) && !defined(__UCLIBC__)
int frames_size = frames.length();
ScopedVector<void*> addresses(frames_size);
int frames_count = backtrace(addresses.start(), frames_size);
char** symbols = backtrace_symbols(addresses.start(), frames_count);
if (symbols == NULL) {
return kStackWalkError;
}
for (int i = 0; i < frames_count; i++) {
frames[i].address = addresses[i];
// Format a text representation of the frame based on the information
// available.
SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
"%s",
symbols[i]);
// Make sure line termination is in place.
frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
}
free(symbols);
return frames_count;
#else // defined(__GLIBC__) && !defined(__UCLIBC__)
return 0;
#endif // defined(__GLIBC__) && !defined(__UCLIBC__)
}
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
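// Reservations below are made with PROT_NONE and MAP_NORESERVE so that
// address space is set aside without committing physical memory or swap;
// committing later remaps the pages in place (MAP_FIXED) with the real
// protection bits.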
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
VirtualMemory::VirtualMemory(size_t size) {
address_ = ReserveRegion(size);
size_ = size;
}
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
: address_(NULL), size_(0) {
ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
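// Over-reserve by |alignment| bytes, then trim. Illustrative example with a
// 4 KB page size and a 64 KB alignment request for 40 KB: reserve 104 KB,
// round the base up to the next 64 KB boundary, and release the unused
// prefix and suffix so that exactly 40 KB remain at an aligned address.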
void* reservation = mmap(OS::GetRandomMmapAddr(),
request_size,
PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
kMmapFd,
kMmapFdOffset);
if (reservation == MAP_FAILED) return;
Address base = static_cast<Address>(reservation);
Address aligned_base = RoundUp(base, alignment);
ASSERT_LE(base, aligned_base);
// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
size_t prefix_size = static_cast<size_t>(aligned_base - base);
OS::Free(base, prefix_size);
request_size -= prefix_size;
}
size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
ASSERT_LE(aligned_size, request_size);
if (aligned_size != request_size) {
size_t suffix_size = request_size - aligned_size;
OS::Free(aligned_base + aligned_size, suffix_size);
request_size -= suffix_size;
}
ASSERT(aligned_size == request_size);
address_ = static_cast<void*>(aligned_base);
size_ = aligned_size;
}
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
bool result = ReleaseRegion(address(), size());
ASSERT(result);
USE(result);
}
}
bool VirtualMemory::IsReserved() {
return address_ != NULL;
}
void VirtualMemory::Reset() {
address_ = NULL;
size_ = 0;
}
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
return CommitRegion(address, size, is_executable);
}
bool VirtualMemory::Uncommit(void* address, size_t size) {
return UncommitRegion(address, size);
}
bool VirtualMemory::Guard(void* address) {
OS::Guard(address, OS::CommitPageSize());
return true;
}
void* VirtualMemory::ReserveRegion(size_t size) {
void* result = mmap(OS::GetRandomMmapAddr(),
size,
PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
kMmapFd,
kMmapFdOffset);
if (result == MAP_FAILED) return NULL;
return result;
}
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
if (MAP_FAILED == mmap(base,
size,
prot,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
kMmapFd,
kMmapFdOffset)) {
return false;
}
UpdateAllocatedSpaceLimits(base, size);
return true;
}
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
return mmap(base,
size,
PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
kMmapFd,
kMmapFdOffset) != MAP_FAILED;
}
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
return munmap(base, size) == 0;
}
class Thread::PlatformData : public Malloced {
public:
PlatformData() : thread_(kNoThread) {}
pthread_t thread_; // Thread handle for pthread.
};
Thread::Thread(const Options& options)
: data_(new PlatformData()),
stack_size_(options.stack_size()) {
set_name(options.name());
}
Thread::~Thread() {
delete data_;
}
static void* ThreadEntry(void* arg) {
Thread* thread = reinterpret_cast<Thread*>(arg);
// This is also initialized by the first argument to pthread_create() but we
// don't know which thread will run first (the original thread or the new
// one) so we initialize it here too.
#ifdef PR_SET_NAME
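// The kernel keeps at most 16 bytes of the name (including the terminating
// NUL); longer names are silently truncated.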
prctl(PR_SET_NAME,
reinterpret_cast<unsigned long>(thread->name()), // NOLINT
0, 0, 0);
#endif
thread->data()->thread_ = pthread_self();
ASSERT(thread->data()->thread_ != kNoThread);
thread->Run();
return NULL;
}
void Thread::set_name(const char* name) {
strncpy(name_, name, sizeof(name_));
name_[sizeof(name_) - 1] = '\0';
}
void Thread::Start() {
pthread_attr_t* attr_ptr = NULL;
pthread_attr_t attr;
if (stack_size_ > 0) {
pthread_attr_init(&attr);
pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
attr_ptr = &attr;
}
int result = pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
CHECK_EQ(0, result);
ASSERT(data_->thread_ != kNoThread);
}
void Thread::Join() {
pthread_join(data_->thread_, NULL);
}
Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
pthread_key_t key;
int result = pthread_key_create(&key, NULL);
USE(result);
ASSERT(result == 0);
return static_cast<LocalStorageKey>(key);
}
void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
int result = pthread_key_delete(pthread_key);
USE(result);
ASSERT(result == 0);
}
void* Thread::GetThreadLocal(LocalStorageKey key) {
pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
return pthread_getspecific(pthread_key);
}
void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
pthread_setspecific(pthread_key, value);
}
void Thread::YieldCPU() {
sched_yield();
}
class LinuxMutex : public Mutex {
public:
LinuxMutex() {
pthread_mutexattr_t attrs;
int result = pthread_mutexattr_init(&attrs);
ASSERT(result == 0);
result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
ASSERT(result == 0);
result = pthread_mutex_init(&mutex_, &attrs);
ASSERT(result == 0);
USE(result);
}
virtual ~LinuxMutex() { pthread_mutex_destroy(&mutex_); }
virtual int Lock() {
int result = pthread_mutex_lock(&mutex_);
return result;
}
virtual int Unlock() {
int result = pthread_mutex_unlock(&mutex_);
return result;
}
virtual bool TryLock() {
int result = pthread_mutex_trylock(&mutex_);
// Return false if the lock is busy and locking failed.
if (result == EBUSY) {
return false;
}
ASSERT(result == 0); // Verify no other errors.
return true;
}
private:
pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
};
Mutex* OS::CreateMutex() {
return new LinuxMutex();
}
class LinuxSemaphore : public Semaphore {
public:
explicit LinuxSemaphore(int count) { sem_init(&sem_, 0, count); }
virtual ~LinuxSemaphore() { sem_destroy(&sem_); }
virtual void Wait();
virtual bool Wait(int timeout);
virtual void Signal() { sem_post(&sem_); }
private:
sem_t sem_;
};
void LinuxSemaphore::Wait() {
while (true) {
int result = sem_wait(&sem_);
if (result == 0) return; // Successfully got semaphore.
CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
}
}
#ifndef TIMEVAL_TO_TIMESPEC
#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
(ts)->tv_sec = (tv)->tv_sec; \
(ts)->tv_nsec = (tv)->tv_usec * 1000; \
} while (false)
#endif
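// |timeout| is given in microseconds; it is split into whole seconds and the
// microsecond remainder below and converted to an absolute deadline.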
bool LinuxSemaphore::Wait(int timeout) {
const long kOneSecondMicros = 1000000; // NOLINT
// Split timeout into second and nanosecond parts.
struct timeval delta;
delta.tv_usec = timeout % kOneSecondMicros;
delta.tv_sec = timeout / kOneSecondMicros;
struct timeval current_time;
// Get the current time.
if (gettimeofday(&current_time, NULL) == -1) {
return false;
}
// Calculate time for end of timeout.
struct timeval end_time;
timeradd(&current_time, &delta, &end_time);
struct timespec ts;
TIMEVAL_TO_TIMESPEC(&end_time, &ts);
// Wait until the semaphore is signalled or the timeout expires.
while (true) {
int result = sem_timedwait(&sem_, &ts);
if (result == 0) return true; // Successfully got semaphore.
if (result > 0) {
// For glibc prior to 2.3.4 sem_timedwait returns the error instead of -1.
errno = result;
result = -1;
}
if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
}
}
Semaphore* OS::CreateSemaphore(int count) {
return new LinuxSemaphore(count);
}
#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
// Android runs a fairly new Linux kernel, so signal info is there,
// but the C library doesn't have the structs defined.
struct sigcontext {
uint32_t trap_no;
uint32_t error_code;
uint32_t oldmask;
uint32_t gregs[16];
uint32_t arm_cpsr;
uint32_t fault_address;
};
typedef uint32_t __sigset_t;
typedef struct sigcontext mcontext_t;
typedef struct ucontext {
uint32_t uc_flags;
struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
__sigset_t uc_sigmask;
} ucontext_t;
enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11};
#elif !defined(__GLIBC__) && defined(__mips__)
// MIPS version of sigcontext, for Android bionic.
struct sigcontext {
uint32_t regmask;
uint32_t status;
uint64_t pc;
uint64_t gregs[32];
uint64_t fpregs[32];
uint32_t acx;
uint32_t fpc_csr;
uint32_t fpc_eir;
uint32_t used_math;
uint32_t dsp;
uint64_t mdhi;
uint64_t mdlo;
uint32_t hi1;
uint32_t lo1;
uint32_t hi2;
uint32_t lo2;
uint32_t hi3;
uint32_t lo3;
};
typedef uint32_t __sigset_t;
typedef struct sigcontext mcontext_t;
typedef struct ucontext {
uint32_t uc_flags;
struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
__sigset_t uc_sigmask;
} ucontext_t;
#elif !defined(__GLIBC__) && defined(__i386__)
// x86 version for Android.
struct sigcontext {
uint32_t gregs[19];
void* fpregs;
uint32_t oldmask;
uint32_t cr2;
};
typedef uint32_t __sigset_t;
typedef struct sigcontext mcontext_t;
typedef struct ucontext {
uint32_t uc_flags;
struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
__sigset_t uc_sigmask;
} ucontext_t;
enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
#endif
static int GetThreadID() {
// Glibc doesn't provide a wrapper for gettid(2).
#if defined(ANDROID)
return syscall(__NR_gettid);
#else
return syscall(SYS_gettid);
#endif
}
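// Runs on the sampled thread in response to the SIGPROF sent by the
// SignalSender thread below; it extracts the program counter, stack and
// frame pointers from the signal context and hands them to the sampler.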
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
USE(info);
if (signal != SIGPROF) return;
Isolate* isolate = Isolate::UncheckedCurrent();
if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
// We require a fully initialized and entered isolate.
return;
}
if (v8::Locker::IsActive() &&
!isolate->thread_manager()->IsLockedByCurrentThread()) {
return;
}
Sampler* sampler = isolate->logger()->sampler();
if (sampler == NULL || !sampler->IsActive()) return;
TickSample sample_obj;
TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
mcontext_t& mcontext = ucontext->uc_mcontext;
sample->state = isolate->current_vm_state();
#if V8_HOST_ARCH_IA32
sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
#elif V8_HOST_ARCH_X64
sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
#elif V8_HOST_ARCH_ARM
// An undefined macro evaluates to 0, so this applies to Android's Bionic also.
#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3) && \
!defined(__UCLIBC__))
sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
#else
sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif // (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3) &&
// !defined(__UCLIBC__))
#elif V8_HOST_ARCH_MIPS
sample->pc = reinterpret_cast<Address>(mcontext.pc);
sample->sp = reinterpret_cast<Address>(mcontext.gregs[29]);
sample->fp = reinterpret_cast<Address>(mcontext.gregs[30]);
#endif // V8_HOST_ARCH_*
sampler->SampleStack(sample);
sampler->Tick(sample);
}
class Sampler::PlatformData : public Malloced {
public:
PlatformData() : vm_tid_(GetThreadID()) {}
int vm_tid() const { return vm_tid_; }
private:
const int vm_tid_;
};
class SignalSender : public Thread {
public:
enum SleepInterval {
HALF_INTERVAL,
FULL_INTERVAL
};
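// When both CPU profiling and the runtime profiler are active, Run() below
// sleeps twice per sampling interval (HALF_INTERVAL) so that each consumer
// is serviced once per full interval.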
static const int kSignalSenderStackSize = 64 * KB;
explicit SignalSender(int interval)
: Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
vm_tgid_(getpid()),
interval_(interval) {}
static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
static void TearDown() { delete mutex_; }
static void InstallSignalHandler() {
struct sigaction sa;
sa.sa_sigaction = ProfilerSignalHandler;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_RESTART | SA_SIGINFO;
signal_handler_installed_ =
(sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
}
static void RestoreSignalHandler() {
if (signal_handler_installed_) {
sigaction(SIGPROF, &old_signal_handler_, 0);
signal_handler_installed_ = false;
}
}
static void AddActiveSampler(Sampler* sampler) {
ScopedLock lock(mutex_);
SamplerRegistry::AddActiveSampler(sampler);
if (instance_ == NULL) {
// Start a thread that will send SIGPROF signals to VM threads
// when CPU profiling is enabled.
instance_ = new SignalSender(sampler->interval());
instance_->Start();
} else {
ASSERT(instance_->interval_ == sampler->interval());
}
}
static void RemoveActiveSampler(Sampler* sampler) {
ScopedLock lock(mutex_);
SamplerRegistry::RemoveActiveSampler(sampler);
if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
delete instance_;
instance_ = NULL;
RestoreSignalHandler();
}
}
// Implement Thread::Run().
virtual void Run() {
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
bool cpu_profiling_enabled =
(state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
if (cpu_profiling_enabled && !signal_handler_installed_) {
InstallSignalHandler();
} else if (!cpu_profiling_enabled && signal_handler_installed_) {
RestoreSignalHandler();
}
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
if (!cpu_profiling_enabled) {
if (rate_limiter_.SuspendIfNecessary()) continue;
}
if (cpu_profiling_enabled && runtime_profiler_enabled) {
if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
return;
}
Sleep(HALF_INTERVAL);
if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
return;
}
Sleep(HALF_INTERVAL);
} else {
if (cpu_profiling_enabled) {
if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
this)) {
return;
}
}
if (runtime_profiler_enabled) {
if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
NULL)) {
return;
}
}
Sleep(FULL_INTERVAL);
}
}
}
static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
if (!sampler->IsProfiling()) return;
SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
}
static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
if (!sampler->isolate()->IsInitialized()) return;
sampler->isolate()->runtime_profiler()->NotifyTick();
}
void SendProfilingSignal(int tid) {
if (!signal_handler_installed_) return;
// Glibc doesn't provide a wrapper for tgkill(2).
#if defined(ANDROID)
syscall(__NR_tgkill, vm_tgid_, tid, SIGPROF);
#else
syscall(SYS_tgkill, vm_tgid_, tid, SIGPROF);
#endif
}
void Sleep(SleepInterval full_or_half) {
// Convert ms to us and subtract 100 us to compensate for delays
// occurring during signal delivery.
useconds_t interval = interval_ * 1000 - 100;
if (full_or_half == HALF_INTERVAL) interval /= 2;
#if defined(ANDROID)
usleep(interval);
#else
int result = usleep(interval);
#ifdef DEBUG
if (result != 0 && errno != EINTR) {
fprintf(stderr,
"SignalSender usleep error; interval = %u, errno = %d\n",
interval,
errno);
ASSERT(result == 0 || errno == EINTR);
}
#endif // DEBUG
USE(result);
#endif // ANDROID
}
const int vm_tgid_;
const int interval_;
RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
static Mutex* mutex_;
static SignalSender* instance_;
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
private:
DISALLOW_COPY_AND_ASSIGN(SignalSender);
};
Mutex* SignalSender::mutex_ = NULL;
SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
bool SignalSender::signal_handler_installed_ = false;
void OS::SetUp() {
// Seed the random number generator. We preserve microsecond resolution.
uint64_t seed = Ticks() ^ (getpid() << 16);
srandom(static_cast<unsigned int>(seed));
limit_mutex = CreateMutex();
#ifdef __arm__
// When running on ARM hardware check that the EABI used by V8 and
// by the C code is the same.
bool hard_float = OS::ArmUsingHardFloat();
if (hard_float) {
#if !USE_EABI_HARDFLOAT
PrintF("ERROR: Binary compiled with -mfloat-abi=hard but without "
"-DUSE_EABI_HARDFLOAT\n");
exit(1);
#endif
} else {
#if USE_EABI_HARDFLOAT
PrintF("ERROR: Binary not compiled with -mfloat-abi=hard but with "
"-DUSE_EABI_HARDFLOAT\n");
exit(1);
#endif
}
#endif
SignalSender::SetUp();
}
void OS::TearDown() {
SignalSender::TearDown();
delete limit_mutex;
}
Sampler::Sampler(Isolate* isolate, int interval)
: isolate_(isolate),
interval_(interval),
profiling_(false),
active_(false),
samples_taken_(0) {
data_ = new PlatformData;
}
Sampler::~Sampler() {
ASSERT(!IsActive());
delete data_;
}
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
SignalSender::AddActiveSampler(this);
}
void Sampler::Stop() {
ASSERT(IsActive());
SignalSender::RemoveActiveSampler(this);
SetActive(false);
}
} } // namespace v8::internal