This source file includes the following definitions:
- runtime·schedinit
- runtime·main
- runtime·goroutineheader
- runtime·tracebackothers
- checkmcount
- mcommoninit
- runtime·ready
- runtime·gcprocs
- needaddgcproc
- runtime·helpgc
- runtime·freezetheworld
- runtime·stoptheworld
- mhelpgc
- runtime·starttheworld
- runtime·mstart
- runtime·allocm
- allocg
- runtime·needm
- runtime·newextram
- runtime·dropm
- lockextra
- unlockextra
- newm
- stopm
- mspinning
- startm
- handoffp
- wakep
- stoplockedm
- startlockedm
- gcstopm
- execute
- findrunnable
- resetspinning
- injectglist
- schedule
- runtime·park
- parkunlock
- runtime·parkunlock
- park0
- runtime·gosched
- runtime·gosched0
- runtime·goexit
- goexit0
- save
- ·entersyscall
- ·entersyscallblock
- runtime·exitsyscall
- exitsyscallfast
- exitsyscall0
- syscall·runtime_BeforeFork
- syscall·runtime_AfterFork
- mstackalloc
- runtime·malg
- runtime·newproc
- runtime·newproc1
- allgadd
- gfput
- gfget
- gfpurge
- runtime·Breakpoint
- runtime·Gosched
- runtime·gomaxprocsfunc
- lockOSThread
- runtime·LockOSThread
- runtime·lockOSThread
- unlockOSThread
- runtime·UnlockOSThread
- runtime·unlockOSThread
- runtime·lockedOSThread
- runtime·gcount
- runtime·mcount
- runtime·badmcall
- runtime·badmcall2
- runtime·badreflectcall
- System
- ExternalCode
- GC
- runtime·sigprof
- runtime·setcpuprofilerate
- procresize
- acquirep
- releasep
- incidlelocked
- checkdead
- sysmon
- retake
- preemptall
- preemptone
- runtime·schedtrace
- mput
- mget
- globrunqput
- globrunqputbatch
- globrunqget
- pidleput
- pidleget
- runqput
- runqputslow
- runqget
- runqgrab
- runqsteal
- runtime·testSchedLocalQueue
- runtime·testSchedLocalQueueSteal
- runtime·topofstack
- runtime·setmaxthreads
- haveexperiment
#include "runtime.h"
#include "arch_GOARCH.h"
#include "zaexperiment.h"
#include "malloc.h"
#include "stack.h"
#include "race.h"
#include "type.h"
#include "../../cmd/ld/textflag.h"
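// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     M must have an associated P to execute Go code, however it can be
//     blocked or in a syscall without an associated P.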
typedef struct Sched Sched;
struct Sched {
Lock;
uint64 goidgen;
M* midle; // idle m's waiting for work
int32 nmidle; // number of idle m's waiting for work
int32 nmidlelocked; // number of locked m's waiting for work
int32 mcount; // number of m's that have been created
int32 maxmcount; // maximum number of m's allowed (or die)
P* pidle; // idle P's
uint32 npidle;
uint32 nmspinning;
// Global runnable queue.
G* runqhead;
G* runqtail;
int32 runqsize;
// Global cache of dead G's.
Lock gflock;
G* gfree;
uint32 gcwaiting; // gc is waiting to run
int32 stopwait;
Note stopnote;
uint32 sysmonwait;
Note sysmonnote;
uint64 lastpoll;
int32 profilehz; // cpu profiling rate
};
enum
{
MaxGomaxprocs = 1<<8, // maximum value of GOMAXPROCS
GoidCacheBatch = 16, // number of goroutine ids grabbed from sched.goidgen into a per-P cache at a time
};
Sched runtime·sched;
int32 runtime·gomaxprocs;
uint32 runtime·needextram;
bool runtime·iscgo;
M runtime·m0;
G runtime·g0;
G* runtime·lastg;
M* runtime·allm;
M* runtime·extram;
int8* runtime·goos;
int32 runtime·ncpu;
static int32 newprocs;
static Lock allglock; // the following vars are protected by this lock or by stoptheworld
G** runtime·allg;
uintptr runtime·allglen;
static uintptr allgcap;
void runtime·mstart(void);
static void runqput(P*, G*);
static G* runqget(P*);
static bool runqputslow(P*, G*, uint32, uint32);
static G* runqsteal(P*, P*);
static void mput(M*);
static M* mget(void);
static void mcommoninit(M*);
static void schedule(void);
static void procresize(int32);
static void acquirep(P*);
static P* releasep(void);
static void newm(void(*)(void), P*);
static void stopm(void);
static void startm(P*, bool);
static void handoffp(P*);
static void wakep(void);
static void stoplockedm(void);
static void startlockedm(G*);
static void sysmon(void);
static uint32 retake(int64);
static void incidlelocked(int32);
static void checkdead(void);
static void exitsyscall0(G*);
static void park0(G*);
static void goexit0(G*);
static void gfput(P*, G*);
static G* gfget(P*);
static void gfpurge(P*);
static void globrunqput(G*);
static void globrunqputbatch(G*, G*, int32);
static G* globrunqget(P*, int32);
static P* pidleget(void);
static void pidleput(P*);
static void injectglist(G*);
static bool preemptall(void);
static bool preemptone(P*);
static bool exitsyscallfast(void);
static bool haveexperiment(int8*);
static void allgadd(G*);
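// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.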
void
runtime·schedinit(void)
{
int32 n, procs;
byte *p;
Eface i;
runtime·sched.maxmcount = 10000;
runtime·precisestack = true;
runtime·symtabinit();
runtime·mallocinit();
mcommoninit(m);
runtime·newErrorCString(0, &i);
runtime·gotraceback(nil);
runtime·goargs();
runtime·goenvs();
runtime·parsedebugvars();
runtime·sched.lastpoll = runtime·nanotime();
procs = 1;
p = runtime·getenv("GOMAXPROCS");
if(p != nil && (n = runtime·atoi(p)) > 0) {
if(n > MaxGomaxprocs)
n = MaxGomaxprocs;
procs = n;
}
runtime·allp = runtime·malloc((MaxGomaxprocs+1)*sizeof(runtime·allp[0]));
procresize(procs);
runtime·copystack = runtime·precisestack;
p = runtime·getenv("GOCOPYSTACK");
if(p != nil && !runtime·strcmp(p, (byte*)"0"))
runtime·copystack = false;
mstats.enablegc = 1;
if(raceenabled)
g->racectx = runtime·raceinit();
}
extern void main·init(void);
extern void main·main(void);
static FuncVal scavenger = {runtime·MHeap_Scavenger};
static FuncVal initDone = { runtime·unlockOSThread };
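// The main goroutine.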
void
runtime·main(void)
{
Defer d;
// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
// Using decimal instead of binary GB and MB because
// they look nicer in the stack overflow failure message.
if(sizeof(void*) == 8)
runtime·maxstacksize = 1000000000;
else
runtime·maxstacksize = 250000000;
newm(sysmon, nil);
runtime·lockOSThread();
d.fn = &initDone;
d.siz = 0;
d.link = g->defer;
d.argp = NoArgs;
d.special = true;
g->defer = &d;
if(m != &runtime·m0)
runtime·throw("runtime·main not on m0");
runtime·newproc1(&scavenger, nil, 0, 0, runtime·main);
main·init();
if(g->defer != &d || d.fn != &initDone)
runtime·throw("runtime: bad defer entry after init");
g->defer = d.link;
runtime·unlockOSThread();
main·main();
if(raceenabled)
runtime·racefini();
if(runtime·panicking)
runtime·park(nil, nil, "panicwait");
runtime·exit(0);
for(;;)
*(int32*)runtime·main = 0;
}
void
runtime·goroutineheader(G *gp)
{
int8 *status;
int64 waitfor;
switch(gp->status) {
case Gidle:
status = "idle";
break;
case Grunnable:
status = "runnable";
break;
case Grunning:
status = "running";
break;
case Gsyscall:
status = "syscall";
break;
case Gwaiting:
if(gp->waitreason)
status = gp->waitreason;
else
status = "waiting";
break;
default:
status = "???";
break;
}
waitfor = 0;
if((gp->status == Gwaiting || gp->status == Gsyscall) && gp->waitsince != 0)
waitfor = (runtime·nanotime() - gp->waitsince) / (60LL*1000*1000*1000);
if(waitfor < 1)
runtime·printf("goroutine %D [%s]:\n", gp->goid, status);
else
runtime·printf("goroutine %D [%s, %D minutes]:\n", gp->goid, status, waitfor);
}
void
runtime·tracebackothers(G *me)
{
G *gp;
int32 traceback;
uintptr i;
traceback = runtime·gotraceback(nil);
if((gp = m->curg) != nil && gp != me) {
runtime·printf("\n");
runtime·goroutineheader(gp);
runtime·traceback(~(uintptr)0, ~(uintptr)0, 0, gp);
}
runtime·lock(&allglock);
for(i = 0; i < runtime·allglen; i++) {
gp = runtime·allg[i];
if(gp == me || gp == m->curg || gp->status == Gdead)
continue;
if(gp->issystem && traceback < 2)
continue;
runtime·printf("\n");
runtime·goroutineheader(gp);
if(gp->status == Grunning) {
runtime·printf("\tgoroutine running on other thread; stack unavailable\n");
runtime·printcreatedby(gp);
} else
runtime·traceback(~(uintptr)0, ~(uintptr)0, 0, gp);
}
runtime·unlock(&allglock);
}
static void
checkmcount(void)
{
if(runtime·sched.mcount > runtime·sched.maxmcount) {
runtime·printf("runtime: program exceeds %d-thread limit\n", runtime·sched.maxmcount);
runtime·throw("thread exhaustion");
}
}
static void
mcommoninit(M *mp)
{
// If there is no mcache runtime·callers() will crash,
// and we are most likely in the sysmon thread so the stack is senseless anyway.
if(m->mcache)
runtime·callers(1, mp->createstack, nelem(mp->createstack));
mp->fastrand = 0x49f6428aUL + mp->id + runtime·cputicks();
runtime·lock(&runtime·sched);
mp->id = runtime·sched.mcount++;
checkmcount();
runtime·mpreinit(mp);
mp->alllink = runtime·allm;
runtime·atomicstorep(&runtime·allm, mp);
runtime·unlock(&runtime·sched);
}
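// Mark gp ready to run.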
void
runtime·ready(G *gp)
{
m->locks++;
if(gp->status != Gwaiting) {
runtime·printf("goroutine %D has status %d\n", gp->goid, gp->status);
runtime·throw("bad g->status in ready");
}
gp->status = Grunnable;
runqput(m->p, gp);
if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload(&runtime·sched.nmspinning) == 0)
wakep();
m->locks--;
if(m->locks == 0 && g->preempt)
g->stackguard0 = StackPreempt;
}
int32
runtime·gcprocs(void)
{
int32 n;
runtime·lock(&runtime·sched);
n = runtime·gomaxprocs;
if(n > runtime·ncpu)
n = runtime·ncpu;
if(n > MaxGcproc)
n = MaxGcproc;
if(n > runtime·sched.nmidle+1)
n = runtime·sched.nmidle+1;
runtime·unlock(&runtime·sched);
return n;
}
static bool
needaddgcproc(void)
{
int32 n;
runtime·lock(&runtime·sched);
n = runtime·gomaxprocs;
if(n > runtime·ncpu)
n = runtime·ncpu;
if(n > MaxGcproc)
n = MaxGcproc;
n -= runtime·sched.nmidle+1;
runtime·unlock(&runtime·sched);
return n > 0;
}
void
runtime·helpgc(int32 nproc)
{
M *mp;
int32 n, pos;
runtime·lock(&runtime·sched);
pos = 0;
for(n = 1; n < nproc; n++) {
if(runtime·allp[pos]->mcache == m->mcache)
pos++;
mp = mget();
if(mp == nil)
runtime·throw("runtime·gcprocs inconsistency");
mp->helpgc = n;
mp->mcache = runtime·allp[pos]->mcache;
pos++;
runtime·notewakeup(&mp->park);
}
runtime·unlock(&runtime·sched);
}
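// Similar to stoptheworld but best-effort and can be called several times.
// There is no reverse operation, used during crashing.
// This function must not lock any mutexes.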
void
runtime·freezetheworld(void)
{
int32 i;
if(runtime·gomaxprocs == 1)
return;
for(i = 0; i < 5; i++) {
runtime·sched.stopwait = 0x7fffffff;
runtime·atomicstore((uint32*)&runtime·sched.gcwaiting, 1);
if(!preemptall())
break;
runtime·usleep(1000);
}
runtime·usleep(1000);
preemptall();
runtime·usleep(1000);
}
void
runtime·stoptheworld(void)
{
int32 i;
uint32 s;
P *p;
bool wait;
runtime·lock(&runtime·sched);
runtime·sched.stopwait = runtime·gomaxprocs;
runtime·atomicstore((uint32*)&runtime·sched.gcwaiting, 1);
preemptall();
m->p->status = Pgcstop;
runtime·sched.stopwait--;
for(i = 0; i < runtime·gomaxprocs; i++) {
p = runtime·allp[i];
s = p->status;
if(s == Psyscall && runtime·cas(&p->status, s, Pgcstop))
runtime·sched.stopwait--;
}
while(p = pidleget()) {
p->status = Pgcstop;
runtime·sched.stopwait--;
}
wait = runtime·sched.stopwait > 0;
runtime·unlock(&runtime·sched);
if(wait) {
for(;;) {
if(runtime·notetsleep(&runtime·sched.stopnote, 100*1000)) {
runtime·noteclear(&runtime·sched.stopnote);
break;
}
preemptall();
}
}
if(runtime·sched.stopwait)
runtime·throw("stoptheworld: not stopped");
for(i = 0; i < runtime·gomaxprocs; i++) {
p = runtime·allp[i];
if(p->status != Pgcstop)
runtime·throw("stoptheworld: not stopped");
}
}
static void
mhelpgc(void)
{
m->helpgc = -1;
}
void
runtime·starttheworld(void)
{
P *p, *p1;
M *mp;
G *gp;
bool add;
m->locks++;
gp = runtime·netpoll(false);
injectglist(gp);
add = needaddgcproc();
runtime·lock(&runtime·sched);
if(newprocs) {
procresize(newprocs);
newprocs = 0;
} else
procresize(runtime·gomaxprocs);
runtime·sched.gcwaiting = 0;
p1 = nil;
while(p = pidleget()) {
if(p->runqhead == p->runqtail) {
pidleput(p);
break;
}
p->m = mget();
p->link = p1;
p1 = p;
}
if(runtime·sched.sysmonwait) {
runtime·sched.sysmonwait = false;
runtime·notewakeup(&runtime·sched.sysmonnote);
}
runtime·unlock(&runtime·sched);
while(p1) {
p = p1;
p1 = p1->link;
if(p->m) {
mp = p->m;
p->m = nil;
if(mp->nextp)
runtime·throw("starttheworld: inconsistent mp->nextp");
mp->nextp = p;
runtime·notewakeup(&mp->park);
} else {
newm(nil, p);
add = false;
}
}
if(add) {
newm(mhelpgc, nil);
}
m->locks--;
if(m->locks == 0 && g->preempt)
g->stackguard0 = StackPreempt;
}
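// Called to start an M.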
void
runtime·mstart(void)
{
if(g != m->g0)
runtime·throw("bad runtime·mstart");
runtime·gosave(&m->g0->sched);
m->g0->sched.pc = (uintptr)-1;
m->g0->stackguard = m->g0->stackguard0;
runtime·asminit();
runtime·minit();
if(m == &runtime·m0)
runtime·initsig();
if(m->mstartfn)
m->mstartfn();
if(m->helpgc) {
m->helpgc = 0;
stopm();
} else if(m != &runtime·m0) {
acquirep(m->nextp);
m->nextp = nil;
}
schedule();
}
void (*_cgo_thread_start)(void*);
typedef struct CgoThreadStart CgoThreadStart;
struct CgoThreadStart
{
M *m;
G *g;
uintptr *tls;
void (*fn)(void);
};
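// Allocate a new m unassociated with any thread.
// Can use p for allocation context if needed.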
M*
runtime·allocm(P *p)
{
M *mp;
static Type *mtype;
m->locks++;
if(m->p == nil)
acquirep(p);
if(mtype == nil) {
Eface e;
runtime·gc_m_ptr(&e);
mtype = ((PtrType*)e.type)->elem;
}
mp = runtime·cnew(mtype);
mcommoninit(mp);
// In case of cgo or Solaris, pthread_create will make us a stack.
// Windows will lay out the sched stack on the OS stack.
if(runtime·iscgo || Solaris || Windows)
mp->g0 = runtime·malg(-1);
else
mp->g0 = runtime·malg(8192);
if(p == m->p)
releasep();
m->locks--;
if(m->locks == 0 && g->preempt)
g->stackguard0 = StackPreempt;
return mp;
}
static G*
allocg(void)
{
G *gp;
static Type *gtype;
if(gtype == nil) {
Eface e;
runtime·gc_g_ptr(&e);
gtype = ((PtrType*)e.type)->elem;
}
gp = runtime·cnew(gtype);
return gp;
}
static M* lockextra(bool nilokay);
static void unlockextra(M*);
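// needm is called when a cgo callback happens on a
// thread without an m (a thread not created by Go).
// It grabs an m from the extra list (filled by newextram)
// and installs it as the current m for the duration of the callback.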
#pragma textflag NOSPLIT
void
runtime·needm(byte x)
{
M *mp;
if(runtime·needextram) {
runtime·write(2, "fatal error: cgo callback before cgo call\n",
sizeof("fatal error: cgo callback before cgo call\n")-1);
runtime·exit(1);
}
mp = lockextra(false);
mp->needextram = mp->schedlink == nil;
unlockextra(mp->schedlink);
runtime·setmg(mp, mp->g0);
g->stackbase = (uintptr)(&x + 1024);
g->stackguard = (uintptr)(&x - 32*1024);
g->stackguard0 = g->stackguard;
runtime·asminit();
runtime·minit();
}
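// newextram allocates an m and puts it on the extra list.
// It is called with a working local m, so that it can do things
// like call schedlock and allocate.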
void
runtime·newextram(void)
{
M *mp, *mnext;
G *gp;
mp = runtime·allocm(nil);
gp = runtime·malg(4096);
gp->sched.pc = (uintptr)runtime·goexit;
gp->sched.sp = gp->stackbase;
gp->sched.lr = 0;
gp->sched.g = gp;
gp->syscallpc = gp->sched.pc;
gp->syscallsp = gp->sched.sp;
gp->syscallstack = gp->stackbase;
gp->syscallguard = gp->stackguard;
gp->status = Gsyscall;
mp->curg = gp;
mp->locked = LockInternal;
mp->lockedg = gp;
gp->lockedm = mp;
gp->goid = runtime·xadd64(&runtime·sched.goidgen, 1);
if(raceenabled)
gp->racectx = runtime·racegostart(runtime·newextram);
allgadd(gp);
mnext = lockextra(true);
mp->schedlink = mnext;
unlockextra(mp);
}
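// dropm is called when a cgo callback has called needm but is now
// done with the callback and returning back into the non-Go thread.
// It puts the current m back onto the extra list.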
void
runtime·dropm(void)
{
M *mp, *mnext;
runtime·unminit();
mp = m;
runtime·setmg(nil, nil);
mnext = lockextra(true);
mp->schedlink = mnext;
unlockextra(mp);
}
#define MLOCKED ((M*)1)
#pragma textflag NOSPLIT
static M*
lockextra(bool nilokay)
{
M *mp;
void (*yield)(void);
for(;;) {
mp = runtime·atomicloadp(&runtime·extram);
if(mp == MLOCKED) {
yield = runtime·osyield;
yield();
continue;
}
if(mp == nil && !nilokay) {
runtime·usleep(1);
continue;
}
if(!runtime·casp(&runtime·extram, mp, MLOCKED)) {
yield = runtime·osyield;
yield();
continue;
}
break;
}
return mp;
}
#pragma textflag NOSPLIT
static void
unlockextra(M *mp)
{
runtime·atomicstorep(&runtime·extram, mp);
}
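// Create a new m. It will start off with a call to fn, or else the scheduler.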
static void
newm(void(*fn)(void), P *p)
{
M *mp;
mp = runtime·allocm(p);
mp->nextp = p;
mp->mstartfn = fn;
if(runtime·iscgo) {
CgoThreadStart ts;
if(_cgo_thread_start == nil)
runtime·throw("_cgo_thread_start missing");
ts.m = mp;
ts.g = mp->g0;
ts.tls = mp->tls;
ts.fn = runtime·mstart;
runtime·asmcgocall(_cgo_thread_start, &ts);
return;
}
runtime·newosproc(mp, (byte*)mp->g0->stackbase);
}
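// Stops execution of the current m until new work is available.
// Returns with acquired P.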
static void
stopm(void)
{
if(m->locks)
runtime·throw("stopm holding locks");
if(m->p)
runtime·throw("stopm holding p");
if(m->spinning) {
m->spinning = false;
runtime·xadd(&runtime·sched.nmspinning, -1);
}
retry:
runtime·lock(&runtime·sched);
mput(m);
runtime·unlock(&runtime·sched);
runtime·notesleep(&m->park);
runtime·noteclear(&m->park);
if(m->helpgc) {
runtime·gchelper();
m->helpgc = 0;
m->mcache = nil;
goto retry;
}
acquirep(m->nextp);
m->nextp = nil;
}
static void
mspinning(void)
{
m->spinning = true;
}
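// Schedules some M to run the p (creates an M if necessary).
// If p==nil, tries to get an idle P; if there are no idle P's, does nothing.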
static void
startm(P *p, bool spinning)
{
M *mp;
void (*fn)(void);
runtime·lock(&runtime·sched);
if(p == nil) {
p = pidleget();
if(p == nil) {
runtime·unlock(&runtime·sched);
if(spinning)
runtime·xadd(&runtime·sched.nmspinning, -1);
return;
}
}
mp = mget();
runtime·unlock(&runtime·sched);
if(mp == nil) {
fn = nil;
if(spinning)
fn = mspinning;
newm(fn, p);
return;
}
if(mp->spinning)
runtime·throw("startm: m is spinning");
if(mp->nextp)
runtime·throw("startm: m has p");
mp->spinning = spinning;
mp->nextp = p;
runtime·notewakeup(&mp->park);
}
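// Hands off P from syscall or locked M.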
static void
handoffp(P *p)
{
if(p->runqhead != p->runqtail || runtime·sched.runqsize) {
startm(p, false);
return;
}
if(runtime·atomicload(&runtime·sched.nmspinning) + runtime·atomicload(&runtime·sched.npidle) == 0 &&
runtime·cas(&runtime·sched.nmspinning, 0, 1)) {
startm(p, true);
return;
}
runtime·lock(&runtime·sched);
if(runtime·sched.gcwaiting) {
p->status = Pgcstop;
if(--runtime·sched.stopwait == 0)
runtime·notewakeup(&runtime·sched.stopnote);
runtime·unlock(&runtime·sched);
return;
}
if(runtime·sched.runqsize) {
runtime·unlock(&runtime·sched);
startm(p, false);
return;
}
if(runtime·sched.npidle == runtime·gomaxprocs-1 && runtime·atomicload64(&runtime·sched.lastpoll) != 0) {
runtime·unlock(&runtime·sched);
startm(p, false);
return;
}
pidleput(p);
runtime·unlock(&runtime·sched);
}
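// Tries to add one more P to execute G's.
// Called when a G is made runnable (newproc, ready).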
static void
wakep(void)
{
if(!runtime·cas(&runtime·sched.nmspinning, 0, 1))
return;
startm(nil, true);
}
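// Stops execution of the current m that is locked to a g until the g is runnable again.
// Returns with acquired P.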
static void
stoplockedm(void)
{
P *p;
if(m->lockedg == nil || m->lockedg->lockedm != m)
runtime·throw("stoplockedm: inconsistent locking");
if(m->p) {
p = releasep();
handoffp(p);
}
incidlelocked(1);
runtime·notesleep(&m->park);
runtime·noteclear(&m->park);
if(m->lockedg->status != Grunnable)
runtime·throw("stoplockedm: not runnable");
acquirep(m->nextp);
m->nextp = nil;
}
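// Schedules the locked m to run the locked gp.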
static void
startlockedm(G *gp)
{
M *mp;
P *p;
mp = gp->lockedm;
if(mp == m)
runtime·throw("startlockedm: locked to me");
if(mp->nextp)
runtime·throw("startlockedm: m has p");
incidlelocked(-1);
p = releasep();
mp->nextp = p;
runtime·notewakeup(&mp->park);
stopm();
}
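// Stops the current m for stoptheworld.
// Returns when the world is restarted.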
static void
gcstopm(void)
{
P *p;
if(!runtime·sched.gcwaiting)
runtime·throw("gcstopm: not waiting for gc");
if(m->spinning) {
m->spinning = false;
runtime·xadd(&runtime·sched.nmspinning, -1);
}
p = releasep();
runtime·lock(&runtime·sched);
p->status = Pgcstop;
if(--runtime·sched.stopwait == 0)
runtime·notewakeup(&runtime·sched.stopnote);
runtime·unlock(&runtime·sched);
stopm();
}
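// Schedules gp to run on the current M.
// Never returns.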
static void
execute(G *gp)
{
int32 hz;
if(gp->status != Grunnable) {
runtime·printf("execute: bad g status %d\n", gp->status);
runtime·throw("execute: bad g status");
}
gp->status = Grunning;
gp->waitsince = 0;
gp->preempt = false;
gp->stackguard0 = gp->stackguard;
m->p->schedtick++;
m->curg = gp;
gp->m = m;
hz = runtime·sched.profilehz;
if(m->profilehz != hz)
runtime·resetcpuprofiler(hz);
runtime·gogo(&gp->sched);
}
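// Finds a runnable goroutine to execute.
// Tries to steal from other P's, get a g from the global queue, poll the network.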
static G*
findrunnable(void)
{
G *gp;
P *p;
int32 i;
top:
if(runtime·sched.gcwaiting) {
gcstopm();
goto top;
}
if(runtime·fingwait && runtime·fingwake && (gp = runtime·wakefing()) != nil)
runtime·ready(gp);
// local runq
gp = runqget(m->p);
if(gp)
return gp;
// global runq
if(runtime·sched.runqsize) {
runtime·lock(&runtime·sched);
gp = globrunqget(m->p, 0);
runtime·unlock(&runtime·sched);
if(gp)
return gp;
}
// poll network
gp = runtime·netpoll(false);
if(gp) {
injectglist(gp->schedlink);
gp->status = Grunnable;
return gp;
}
// If the number of spinning M's >= number of busy P's, block.
// This is necessary to prevent excessive CPU consumption
// when GOMAXPROCS>>1 but the program parallelism is low.
if(!m->spinning && 2 * runtime·atomicload(&runtime·sched.nmspinning) >= runtime·gomaxprocs - runtime·atomicload(&runtime·sched.npidle))
goto stop;
if(!m->spinning) {
m->spinning = true;
runtime·xadd(&runtime·sched.nmspinning, 1);
}
// random steal from other P's
for(i = 0; i < 2*runtime·gomaxprocs; i++) {
if(runtime·sched.gcwaiting)
goto top;
p = runtime·allp[runtime·fastrand1()%runtime·gomaxprocs];
if(p == m->p)
gp = runqget(p);
else
gp = runqsteal(m->p, p);
if(gp)
return gp;
}
stop:
runtime·lock(&runtime·sched);
if(runtime·sched.gcwaiting) {
runtime·unlock(&runtime·sched);
goto top;
}
if(runtime·sched.runqsize) {
gp = globrunqget(m->p, 0);
runtime·unlock(&runtime·sched);
return gp;
}
p = releasep();
pidleput(p);
runtime·unlock(&runtime·sched);
if(m->spinning) {
m->spinning = false;
runtime·xadd(&runtime·sched.nmspinning, -1);
}
// check all runnable queues once again
for(i = 0; i < runtime·gomaxprocs; i++) {
p = runtime·allp[i];
if(p && p->runqhead != p->runqtail) {
runtime·lock(&runtime·sched);
p = pidleget();
runtime·unlock(&runtime·sched);
if(p) {
acquirep(p);
goto top;
}
break;
}
}
if(runtime·xchg64(&runtime·sched.lastpoll, 0) != 0) {
if(m->p)
runtime·throw("findrunnable: netpoll with p");
if(m->spinning)
runtime·throw("findrunnable: netpoll with spinning");
gp = runtime·netpoll(true);
runtime·atomicstore64(&runtime·sched.lastpoll, runtime·nanotime());
if(gp) {
runtime·lock(&runtime·sched);
p = pidleget();
runtime·unlock(&runtime·sched);
if(p) {
acquirep(p);
injectglist(gp->schedlink);
gp->status = Grunnable;
return gp;
}
injectglist(gp);
}
}
stopm();
goto top;
}
static void
resetspinning(void)
{
int32 nmspinning;
if(m->spinning) {
m->spinning = false;
nmspinning = runtime·xadd(&runtime·sched.nmspinning, -1);
if(nmspinning < 0)
runtime·throw("findrunnable: negative nmspinning");
} else
nmspinning = runtime·atomicload(&runtime·sched.nmspinning);
if (nmspinning == 0 && runtime·atomicload(&runtime·sched.npidle) > 0)
wakep();
}
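// Injects the list of runnable G's into the scheduler.
// Can run concurrently with GC.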
static void
injectglist(G *glist)
{
int32 n;
G *gp;
if(glist == nil)
return;
runtime·lock(&runtime·sched);
for(n = 0; glist; n++) {
gp = glist;
glist = gp->schedlink;
gp->status = Grunnable;
globrunqput(gp);
}
runtime·unlock(&runtime·sched);
for(; n && runtime·sched.npidle; n--)
startm(nil, false);
}
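// One round of scheduler: find a runnable goroutine and execute it.
// Never returns.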
static void
schedule(void)
{
G *gp;
uint32 tick;
if(m->locks)
runtime·throw("schedule: holding locks");
top:
if(runtime·sched.gcwaiting) {
gcstopm();
goto top;
}
gp = nil;
// Check the global runnable queue once in a while to ensure fairness.
// Otherwise two goroutines can completely occupy the local runqueue
// by constantly respawning each other.
tick = m->p->schedtick;
// This is a fancy way to say tick%61==0,
// it uses 2 MUL instructions instead of a single DIV and so is faster on modern processors.
if(tick - (((uint64)tick*0x4325c53fu)>>36)*61 == 0 && runtime·sched.runqsize > 0) {
runtime·lock(&runtime·sched);
gp = globrunqget(m->p, 1);
runtime·unlock(&runtime·sched);
if(gp)
resetspinning();
}
if(gp == nil) {
gp = runqget(m->p);
if(gp && m->spinning)
runtime·throw("schedule: spinning with local work");
}
if(gp == nil) {
gp = findrunnable();
resetspinning();
}
if(gp->lockedm) {
startlockedm(gp);
goto top;
}
execute(gp);
}
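// Puts the current goroutine into a waiting state and calls unlockf.
// If unlockf returns false, the goroutine is resumed.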
void
runtime·park(bool(*unlockf)(G*, void*), void *lock, int8 *reason)
{
if(g->status != Grunning)
runtime·throw("bad g status");
m->waitlock = lock;
m->waitunlockf = unlockf;
g->waitreason = reason;
runtime·mcall(park0);
}
static bool
parkunlock(G *gp, void *lock)
{
USED(gp);
runtime·unlock(lock);
return true;
}
void
runtime·parkunlock(Lock *lock, int8 *reason)
{
runtime·park(parkunlock, lock, reason);
}
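// Hypothetical caller sketch (illustrative only; names are made up):
// a waiter enqueues itself under a lock l and blocks until woken.
//
//	runtime·lock(&l);
//	// ... put g on some wait list guarded by l ...
//	runtime·parkunlock(&l, "example wait");  // releases l and blocks
//
// The goroutine is made runnable again when another party dequeues it
// and calls runtime·ready(gp).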
static void
park0(G *gp)
{
bool ok;
gp->status = Gwaiting;
gp->m = nil;
m->curg = nil;
if(m->waitunlockf) {
ok = m->waitunlockf(gp, m->waitlock);
m->waitunlockf = nil;
m->waitlock = nil;
if(!ok) {
gp->status = Grunnable;
execute(gp);
}
}
if(m->lockedg) {
stoplockedm();
execute(gp);
}
schedule();
}
void
runtime·gosched(void)
{
if(g->status != Grunning)
runtime·throw("bad g status");
runtime·mcall(runtime·gosched0);
}
void
runtime·gosched0(G *gp)
{
gp->status = Grunnable;
gp->m = nil;
m->curg = nil;
runtime·lock(&runtime·sched);
globrunqput(gp);
runtime·unlock(&runtime·sched);
if(m->lockedg) {
stoplockedm();
execute(gp);
}
schedule();
}
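// Finishes execution of the current goroutine.
// Needs to be marked nosplit, because it runs with sp > stackbase (as runtime·lessstack).
// Since it does not return it does not matter, but if it is preempted
// at the split stack check, GC will complain about inconsistent sp.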
#pragma textflag NOSPLIT
void
runtime·goexit(void)
{
if(g->status != Grunning)
runtime·throw("bad g status");
if(raceenabled)
runtime·racegoend();
runtime·mcall(goexit0);
}
static void
goexit0(G *gp)
{
gp->status = Gdead;
gp->m = nil;
gp->lockedm = nil;
gp->paniconfault = 0;
gp->defer = nil;
gp->panic = nil;
gp->writenbuf = 0;
gp->writebuf = nil;
gp->waitreason = nil;
gp->param = nil;
m->curg = nil;
m->lockedg = nil;
if(m->locked & ~LockExternal) {
runtime·printf("invalid m->locked = %d\n", m->locked);
runtime·throw("internal lockOSThread error");
}
m->locked = 0;
runtime·unwindstack(gp, nil);
gfput(m->p, gp);
schedule();
}
#pragma textflag NOSPLIT
static void
save(void *pc, uintptr sp)
{
g->sched.pc = (uintptr)pc;
g->sched.sp = sp;
g->sched.lr = 0;
g->sched.ret = 0;
g->sched.ctxt = 0;
g->sched.g = g;
}
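// The goroutine g is about to enter a system call.
// Record that it's not using the cpu anymore.
// This is called only from the go syscall library and cgocall,
// not from the low-level system calls used by the runtime.
//
// Entersyscall cannot split the stack: the save must
// make g->sched refer to the caller's stack segment, because
// entersyscall is going to return immediately after.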
#pragma textflag NOSPLIT
void
·entersyscall(int32 dummy)
{
m->locks++;
save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
g->syscallsp = g->sched.sp;
g->syscallpc = g->sched.pc;
g->syscallstack = g->stackbase;
g->syscallguard = g->stackguard;
g->status = Gsyscall;
if(g->syscallsp < g->syscallguard-StackGuard || g->syscallstack < g->syscallsp) {
runtime·throw("entersyscall");
}
if(runtime·atomicload(&runtime·sched.sysmonwait)) {
runtime·lock(&runtime·sched);
if(runtime·atomicload(&runtime·sched.sysmonwait)) {
runtime·atomicstore(&runtime·sched.sysmonwait, 0);
runtime·notewakeup(&runtime·sched.sysmonnote);
}
runtime·unlock(&runtime·sched);
save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
}
m->mcache = nil;
m->p->m = nil;
runtime·atomicstore(&m->p->status, Psyscall);
if(runtime·sched.gcwaiting) {
runtime·lock(&runtime·sched);
if (runtime·sched.stopwait > 0 && runtime·cas(&m->p->status, Psyscall, Pgcstop)) {
if(--runtime·sched.stopwait == 0)
runtime·notewakeup(&runtime·sched.stopnote);
}
runtime·unlock(&runtime·sched);
save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
}
// Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched).
// We set stackguard to StackPreempt so that the first split-stack check calls morestack.
g->stackguard0 = StackPreempt;
m->locks--;
}
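// The same as runtime·entersyscall(), but with a hint that the syscall is blocking.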
#pragma textflag NOSPLIT
void
·entersyscallblock(int32 dummy)
{
P *p;
m->locks++;
save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
g->syscallsp = g->sched.sp;
g->syscallpc = g->sched.pc;
g->syscallstack = g->stackbase;
g->syscallguard = g->stackguard;
g->status = Gsyscall;
if(g->syscallsp < g->syscallguard-StackGuard || g->syscallstack < g->syscallsp) {
runtime·throw("entersyscallblock");
}
p = releasep();
handoffp(p);
if(g->isbackground)
incidlelocked(1);
save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
g->stackguard0 = StackPreempt;
m->locks--;
}
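// The goroutine g exited its system call.
// Arrange for it to run on a cpu again.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.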
#pragma textflag NOSPLIT
void
runtime·exitsyscall(void)
{
m->locks++;
if(g->isbackground)
incidlelocked(-1);
g->waitsince = 0;
if(exitsyscallfast()) {
m->p->syscalltick++;
g->status = Grunning;
g->syscallstack = (uintptr)nil;
g->syscallsp = (uintptr)nil;
m->locks--;
if(g->preempt) {
g->stackguard0 = StackPreempt;
} else {
g->stackguard0 = g->stackguard;
}
return;
}
m->locks--;
runtime·mcall(exitsyscall0);
g->syscallstack = (uintptr)nil;
g->syscallsp = (uintptr)nil;
m->p->syscalltick++;
}
#pragma textflag NOSPLIT
static bool
exitsyscallfast(void)
{
P *p;
// Freezetheworld sets stopwait but does not retake P's.
if(runtime·sched.stopwait) {
m->p = nil;
return false;
}
// Try to re-acquire the last P.
if(m->p && m->p->status == Psyscall && runtime·cas(&m->p->status, Psyscall, Prunning)) {
m->mcache = m->p->mcache;
m->p->m = m;
return true;
}
// Try to get any other idle P.
m->p = nil;
if(runtime·sched.pidle) {
runtime·lock(&runtime·sched);
p = pidleget();
if(p && runtime·atomicload(&runtime·sched.sysmonwait)) {
runtime·atomicstore(&runtime·sched.sysmonwait, 0);
runtime·notewakeup(&runtime·sched.sysmonnote);
}
runtime·unlock(&runtime·sched);
if(p) {
acquirep(p);
return true;
}
}
return false;
}
static void
exitsyscall0(G *gp)
{
P *p;
gp->status = Grunnable;
gp->m = nil;
m->curg = nil;
runtime·lock(&runtime·sched);
p = pidleget();
if(p == nil)
globrunqput(gp);
else if(runtime·atomicload(&runtime·sched.sysmonwait)) {
runtime·atomicstore(&runtime·sched.sysmonwait, 0);
runtime·notewakeup(&runtime·sched.sysmonnote);
}
runtime·unlock(&runtime·sched);
if(p) {
acquirep(p);
execute(gp);
}
if(m->lockedg) {
stoplockedm();
execute(gp);
}
stopm();
schedule();
}
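// Called from syscall package before fork.
// Code between fork and exec must not allocate memory nor even try to grow the stack.
// Here we spoil g->stackguard to reliably detect any attempts to grow the stack.
// runtime_AfterFork will undo this in the parent process, but not in the child.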
#pragma textflag NOSPLIT
void
syscall·runtime_BeforeFork(void)
{
m->locks++;
if(m->profilehz != 0)
runtime·resetcpuprofiler(0);
m->forkstackguard = g->stackguard;
g->stackguard0 = StackPreempt-1;
g->stackguard = StackPreempt-1;
}
#pragma textflag NOSPLIT
void
syscall·runtime_AfterFork(void)
{
int32 hz;
g->stackguard0 = m->forkstackguard;
g->stackguard = m->forkstackguard;
m->forkstackguard = 0;
hz = runtime·sched.profilehz;
if(hz != 0)
runtime·resetcpuprofiler(hz);
m->locks--;
}
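// Hook used by runtime·malg to call runtime·stackalloc on the
// scheduler stack. This exists because runtime·stackalloc insists
// on being called on the scheduler stack, to avoid trying to grow
// the stack while allocating a new stack segment.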
static void
mstackalloc(G *gp)
{
G *newg;
uintptr size;
newg = (G*)gp->param;
size = newg->stacksize;
newg->stacksize = 0;
gp->param = runtime·stackalloc(newg, size);
runtime·gogo(&gp->sched);
}
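// Allocate a new g, with a stack big enough for stacksize bytes.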
G*
runtime·malg(int32 stacksize)
{
G *newg;
byte *stk;
if(StackTop < sizeof(Stktop)) {
runtime·printf("runtime: SizeofStktop=%d, should be >=%d\n", (int32)StackTop, (int32)sizeof(Stktop));
runtime·throw("runtime: bad stack.h");
}
newg = allocg();
if(stacksize >= 0) {
stacksize = runtime·round2(StackSystem + stacksize);
if(g == m->g0) {
stk = runtime·stackalloc(newg, stacksize);
} else {
newg->stacksize = stacksize;
g->param = newg;
runtime·mcall(mstackalloc);
stk = g->param;
g->param = nil;
}
newg->stack0 = (uintptr)stk;
newg->stackguard = (uintptr)stk + StackGuard;
newg->stackguard0 = newg->stackguard;
newg->stackbase = (uintptr)stk + stacksize - sizeof(Stktop);
}
return newg;
}
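// Create a new g running fn with siz bytes of arguments.
// Put it on the queue of g's waiting to run.
// The compiler turns a go statement into a call to this.
// Cannot split the stack because it assumes that the arguments
// are available sequentially after &fn; they would not be
// copied if a stack split occurred. It's OK for this to call
// functions that split the stack.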
#pragma textflag NOSPLIT
void
runtime·newproc(int32 siz, FuncVal* fn, ...)
{
byte *argp;
if(thechar == '5')
argp = (byte*)(&fn+2);
else
argp = (byte*)(&fn+1);
runtime·newproc1(fn, argp, siz, 0, runtime·getcallerpc(&siz));
}
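// Create a new g running fn with narg bytes of arguments starting
// at argp and returning nret bytes of results. callerpc is the
// address of the go statement that created this. The new g is put
// on the queue of g's waiting to run.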
G*
runtime·newproc1(FuncVal *fn, byte *argp, int32 narg, int32 nret, void *callerpc)
{
byte *sp;
G *newg;
P *p;
int32 siz;
if(fn == nil) {
m->throwing = -1;
runtime·throw("go of nil func value");
}
m->locks++;
siz = narg + nret;
siz = (siz+7) & ~7;
if(siz > StackMin - 1024)
runtime·throw("runtime.newproc: function arguments too large for new goroutine");
p = m->p;
if((newg = gfget(p)) != nil) {
if(newg->stackguard - StackGuard != newg->stack0)
runtime·throw("invalid stack in newg");
} else {
newg = runtime·malg(StackMin);
allgadd(newg);
}
sp = (byte*)newg->stackbase;
sp -= siz;
runtime·memmove(sp, argp, narg);
if(thechar == '5') {
sp -= sizeof(void*);
*(void**)sp = nil;
}
runtime·memclr((byte*)&newg->sched, sizeof newg->sched);
newg->sched.sp = (uintptr)sp;
newg->sched.pc = (uintptr)runtime·goexit;
newg->sched.g = newg;
runtime·gostartcallfn(&newg->sched, fn);
newg->gopc = (uintptr)callerpc;
newg->status = Grunnable;
if(p->goidcache == p->goidcacheend) {
p->goidcache = runtime·xadd64(&runtime·sched.goidgen, GoidCacheBatch);
p->goidcacheend = p->goidcache + GoidCacheBatch;
}
newg->goid = p->goidcache++;
newg->panicwrap = 0;
if(raceenabled)
newg->racectx = runtime·racegostart((void*)callerpc);
runqput(p, newg);
if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload(&runtime·sched.nmspinning) == 0 && fn->fn != runtime·main)
wakep();
m->locks--;
if(m->locks == 0 && g->preempt)
g->stackguard0 = StackPreempt;
return newg;
}
static void
allgadd(G *gp)
{
G **new;
uintptr cap;
runtime·lock(&allglock);
if(runtime·allglen >= allgcap) {
cap = 4096/sizeof(new[0]);
if(cap < 2*allgcap)
cap = 2*allgcap;
new = runtime·malloc(cap*sizeof(new[0]));
if(new == nil)
runtime·throw("runtime: cannot allocate memory");
if(runtime·allg != nil) {
runtime·memmove(new, runtime·allg, runtime·allglen*sizeof(new[0]));
runtime·free(runtime·allg);
}
runtime·allg = new;
allgcap = cap;
}
runtime·allg[runtime·allglen++] = gp;
runtime·unlock(&allglock);
}
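// Put on gfree list.
// If the local list is too long, transfer a batch to the global list.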
static void
gfput(P *p, G *gp)
{
uintptr stksize;
Stktop *top;
if(gp->stackguard - StackGuard != gp->stack0)
runtime·throw("invalid stack in gfput");
stksize = gp->stackbase + sizeof(Stktop) - gp->stack0;
if(stksize != gp->stacksize) {
runtime·printf("runtime: bad stacksize, goroutine %D, remain=%d, last=%d\n",
gp->goid, (int32)gp->stacksize, (int32)stksize);
runtime·throw("gfput: bad stacksize");
}
top = (Stktop*)gp->stackbase;
if(top->malloced) {
runtime·stackfree(gp, (void*)gp->stack0, top);
gp->stack0 = 0;
gp->stackguard = 0;
gp->stackguard0 = 0;
gp->stackbase = 0;
}
gp->schedlink = p->gfree;
p->gfree = gp;
p->gfreecnt++;
if(p->gfreecnt >= 64) {
runtime·lock(&runtime·sched.gflock);
while(p->gfreecnt >= 32) {
p->gfreecnt--;
gp = p->gfree;
p->gfree = gp->schedlink;
gp->schedlink = runtime·sched.gfree;
runtime·sched.gfree = gp;
}
runtime·unlock(&runtime·sched.gflock);
}
}
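// Get from gfree list.
// If the local list is empty, grab a batch from the global list.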
static G*
gfget(P *p)
{
G *gp;
byte *stk;
retry:
gp = p->gfree;
if(gp == nil && runtime·sched.gfree) {
runtime·lock(&runtime·sched.gflock);
while(p->gfreecnt < 32 && runtime·sched.gfree) {
p->gfreecnt++;
gp = runtime·sched.gfree;
runtime·sched.gfree = gp->schedlink;
gp->schedlink = p->gfree;
p->gfree = gp;
}
runtime·unlock(&runtime·sched.gflock);
goto retry;
}
if(gp) {
p->gfree = gp->schedlink;
p->gfreecnt--;
if(gp->stack0 == 0) {
if(g == m->g0) {
stk = runtime·stackalloc(gp, FixedStack);
} else {
gp->stacksize = FixedStack;
g->param = gp;
runtime·mcall(mstackalloc);
stk = g->param;
g->param = nil;
}
gp->stack0 = (uintptr)stk;
gp->stackbase = (uintptr)stk + FixedStack - sizeof(Stktop);
gp->stackguard = (uintptr)stk + StackGuard;
gp->stackguard0 = gp->stackguard;
}
}
return gp;
}
static void
gfpurge(P *p)
{
G *gp;
runtime·lock(&runtime·sched.gflock);
while(p->gfreecnt) {
p->gfreecnt--;
gp = p->gfree;
p->gfree = gp->schedlink;
gp->schedlink = runtime·sched.gfree;
runtime·sched.gfree = gp;
}
runtime·unlock(&runtime·sched.gflock);
}
void
runtime·Breakpoint(void)
{
runtime·breakpoint();
}
void
runtime·Gosched(void)
{
runtime·gosched();
}
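// Backs the Go-level runtime.GOMAXPROCS call.
// Changing the number of procs requires stopping the world;
// the actual resize happens in starttheworld via procresize.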
int32
runtime·gomaxprocsfunc(int32 n)
{
int32 ret;
if(n > MaxGomaxprocs)
n = MaxGomaxprocs;
runtime·lock(&runtime·sched);
ret = runtime·gomaxprocs;
if(n <= 0 || n == ret) {
runtime·unlock(&runtime·sched);
return ret;
}
runtime·unlock(&runtime·sched);
runtime·semacquire(&runtime·worldsema, false);
m->gcing = 1;
runtime·stoptheworld();
newprocs = n;
m->gcing = 0;
runtime·semrelease(&runtime·worldsema);
runtime·starttheworld();
return ret;
}
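// lockOSThread is called by runtime.LockOSThread and runtime.lockOSThread below
// after they modify m->locked. Do not allow preemption during this call,
// or else the m might be different in this function than in the caller.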
#pragma textflag NOSPLIT
static void
lockOSThread(void)
{
m->lockedg = g;
g->lockedm = m;
}
void
runtime·LockOSThread(void)
{
m->locked |= LockExternal;
lockOSThread();
}
void
runtime·lockOSThread(void)
{
m->locked += LockInternal;
lockOSThread();
}
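// unlockOSThread is called by runtime.UnlockOSThread and runtime.unlockOSThread below
// after they update m->locked. Do not allow preemption during this call,
// or else the m might be different in this function than in the caller.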
#pragma textflag NOSPLIT
static void
unlockOSThread(void)
{
if(m->locked != 0)
return;
m->lockedg = nil;
g->lockedm = nil;
}
void
runtime·UnlockOSThread(void)
{
m->locked &= ~LockExternal;
unlockOSThread();
}
void
runtime·unlockOSThread(void)
{
if(m->locked < LockInternal)
runtime·throw("runtime: internal error: misuse of lockOSThread/unlockOSThread");
m->locked -= LockInternal;
unlockOSThread();
}
bool
runtime·lockedOSThread(void)
{
return g->lockedm != nil && m->lockedg != nil;
}
int32
runtime·gcount(void)
{
G *gp;
int32 n, s;
uintptr i;
n = 0;
runtime·lock(&allglock);
for(i = 0; i < runtime·allglen; i++) {
gp = runtime·allg[i];
s = gp->status;
if(s == Grunnable || s == Grunning || s == Gsyscall || s == Gwaiting)
n++;
}
runtime·unlock(&allglock);
return n;
}
int32
runtime·mcount(void)
{
return runtime·sched.mcount;
}
void
runtime·badmcall(void (*fn)(G*))
{
USED(fn);
runtime·throw("runtime: mcall called on m->g0 stack");
}
void
runtime·badmcall2(void (*fn)(G*))
{
USED(fn);
runtime·throw("runtime: mcall function returned");
}
void
runtime·badreflectcall(void)
{
runtime·panicstring("runtime: arg size to reflect.call more than 1GB");
}
static struct {
Lock;
void (*fn)(uintptr*, int32);
int32 hz;
uintptr pcbuf[100];
} prof;
// Dummy PCs used to attribute profiling samples when no Go stack is available:
// time spent in the runtime (System), in non-Go code (ExternalCode), or in the
// garbage collector (GC).
static void System(void) {}
static void ExternalCode(void) {}
static void GC(void) {}
extern byte etext[];
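// Called if we receive a SIGPROF signal.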
void
runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp, M *mp)
{
int32 n;
bool traceback;
// Do not use global m in this function, use mp instead.
// On windows one m is sending reports about all the g's, so m means the wrong thing.
byte m;
m = 0;
USED(m);
if(prof.fn == nil || prof.hz == 0)
return;
mp->mallocing++;
traceback = true;
if(gp == nil || gp != mp->curg ||
(uintptr)sp < gp->stackguard - StackGuard || gp->stackbase < (uintptr)sp ||
((uint8*)runtime·gogo <= pc && pc < (uint8*)runtime·gogo + RuntimeGogoBytes))
traceback = false;
runtime·lock(&prof);
if(prof.fn == nil) {
runtime·unlock(&prof);
mp->mallocing--;
return;
}
n = 0;
if(traceback)
n = runtime·gentraceback((uintptr)pc, (uintptr)sp, (uintptr)lr, gp, 0, prof.pcbuf, nelem(prof.pcbuf), nil, nil, false);
if(!traceback || n <= 0) {
n = 0;
if(mp->ncgo > 0 && mp->curg != nil &&
mp->curg->syscallpc != 0 && mp->curg->syscallsp != 0) {
n = runtime·gentraceback(mp->curg->syscallpc, mp->curg->syscallsp, 0, mp->curg, 0, prof.pcbuf, nelem(prof.pcbuf), nil, nil, false);
}
#ifdef GOOS_windows
if(n == 0 && mp->libcallg != nil && mp->libcallpc != 0 && mp->libcallsp != 0) {
n = runtime·gentraceback(mp->libcallpc, mp->libcallsp, 0, mp->libcallg, 0, prof.pcbuf, nelem(prof.pcbuf), nil, nil, false);
}
#endif
if(n == 0) {
n = 2;
if((uintptr)pc > (uintptr)etext)
pc = (byte*)ExternalCode + PCQuantum;
prof.pcbuf[0] = (uintptr)pc;
if(mp->gcing || mp->helpgc)
prof.pcbuf[1] = (uintptr)GC + PCQuantum;
else
prof.pcbuf[1] = (uintptr)System + PCQuantum;
}
}
prof.fn(prof.pcbuf, n);
runtime·unlock(&prof);
mp->mallocing--;
}
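// Arrange to call fn with a traceback of the current goroutine
// hz times a second.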
void
runtime·setcpuprofilerate(void (*fn)(uintptr*, int32), int32 hz)
{
if(hz < 0)
hz = 0;
if(hz == 0)
fn = nil;
if(fn == nil)
hz = 0;
m->locks++;
runtime·resetcpuprofiler(0);
runtime·lock(&prof);
prof.fn = fn;
prof.hz = hz;
runtime·unlock(&prof);
runtime·lock(&runtime·sched);
runtime·sched.profilehz = hz;
runtime·unlock(&runtime·sched);
if(hz != 0)
runtime·resetcpuprofiler(hz);
m->locks--;
}
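// Change number of processors. The world is stopped, sched is locked.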
static void
procresize(int32 new)
{
int32 i, old;
bool empty;
G *gp;
P *p;
old = runtime·gomaxprocs;
if(old < 0 || old > MaxGomaxprocs || new <= 0 || new > MaxGomaxprocs)
runtime·throw("procresize: invalid arg");
for(i = 0; i < new; i++) {
p = runtime·allp[i];
if(p == nil) {
p = (P*)runtime·mallocgc(sizeof(*p), 0, FlagNoInvokeGC);
p->id = i;
p->status = Pgcstop;
runtime·atomicstorep(&runtime·allp[i], p);
}
if(p->mcache == nil) {
if(old==0 && i==0)
p->mcache = m->mcache;
else
p->mcache = runtime·allocmcache();
}
}
empty = false;
while(!empty) {
empty = true;
for(i = 0; i < old; i++) {
p = runtime·allp[i];
if(p->runqhead == p->runqtail)
continue;
empty = false;
p->runqtail--;
gp = p->runq[p->runqtail%nelem(p->runq)];
gp->schedlink = runtime·sched.runqhead;
runtime·sched.runqhead = gp;
if(runtime·sched.runqtail == nil)
runtime·sched.runqtail = gp;
runtime·sched.runqsize++;
}
}
for(i = 1; i < new * nelem(p->runq)/2 && runtime·sched.runqsize > 0; i++) {
gp = runtime·sched.runqhead;
runtime·sched.runqhead = gp->schedlink;
if(runtime·sched.runqhead == nil)
runtime·sched.runqtail = nil;
runtime·sched.runqsize--;
runqput(runtime·allp[i%new], gp);
}
for(i = new; i < old; i++) {
p = runtime·allp[i];
runtime·freemcache(p->mcache);
p->mcache = nil;
gfpurge(p);
p->status = Pdead;
}
if(m->p)
m->p->m = nil;
m->p = nil;
m->mcache = nil;
p = runtime·allp[0];
p->m = nil;
p->status = Pidle;
acquirep(p);
for(i = new-1; i > 0; i--) {
p = runtime·allp[i];
p->status = Pidle;
pidleput(p);
}
runtime·atomicstore((uint32*)&runtime·gomaxprocs, new);
}
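// Associate p and the current m.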
static void
acquirep(P *p)
{
if(m->p || m->mcache)
runtime·throw("acquirep: already in go");
if(p->m || p->status != Pidle) {
runtime·printf("acquirep: p->m=%p(%d) p->status=%d\n", p->m, p->m ? p->m->id : 0, p->status);
runtime·throw("acquirep: invalid p state");
}
m->mcache = p->mcache;
m->p = p;
p->m = m;
p->status = Prunning;
}
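// Disassociate p and the current m.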
static P*
releasep(void)
{
P *p;
if(m->p == nil || m->mcache == nil)
runtime·throw("releasep: invalid arg");
p = m->p;
if(p->m != m || p->mcache != m->mcache || p->status != Prunning) {
runtime·printf("releasep: m=%p m->p=%p p->m=%p m->mcache=%p p->mcache=%p p->status=%d\n",
m, m->p, p->m, m->mcache, p->mcache, p->status);
runtime·throw("releasep: invalid p state");
}
m->p = nil;
m->mcache = nil;
p->m = nil;
p->status = Pidle;
return p;
}
static void
incidlelocked(int32 v)
{
runtime·lock(&runtime·sched);
runtime·sched.nmidlelocked += v;
if(v > 0)
checkdead();
runtime·unlock(&runtime·sched);
}
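// Check for deadlock situation.
// The check is based on the number of running M's; if 0 -> deadlock.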
static void
checkdead(void)
{
G *gp;
int32 run, grunning, s;
uintptr i;
run = runtime·sched.mcount - runtime·sched.nmidle - runtime·sched.nmidlelocked - 1;
if(run > 0)
return;
if(runtime·panicking > 0)
return;
if(run < 0) {
runtime·printf("runtime: checkdead: nmidle=%d nmidlelocked=%d mcount=%d\n",
runtime·sched.nmidle, runtime·sched.nmidlelocked, runtime·sched.mcount);
runtime·throw("checkdead: inconsistent counts");
}
grunning = 0;
runtime·lock(&allglock);
for(i = 0; i < runtime·allglen; i++) {
gp = runtime·allg[i];
if(gp->isbackground)
continue;
s = gp->status;
if(s == Gwaiting)
grunning++;
else if(s == Grunnable || s == Grunning || s == Gsyscall) {
runtime·unlock(&allglock);
runtime·printf("runtime: checkdead: find g %D in status %d\n", gp->goid, s);
runtime·throw("checkdead: runnable g");
}
}
runtime·unlock(&allglock);
if(grunning == 0)
runtime·throw("no goroutines (main called runtime.Goexit) - deadlock!");
m->throwing = -1;
runtime·throw("all goroutines are asleep - deadlock!");
}
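// sysmon runs on a dedicated thread without a P.
// It polls the network if no one else has for a while, retakes P's
// blocked in syscalls, and requests preemption of long-running G's.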
static void
sysmon(void)
{
uint32 idle, delay;
int64 now, lastpoll, lasttrace;
G *gp;
lasttrace = 0;
idle = 0;
delay = 0;
for(;;) {
if(idle == 0) // start with 20us sleep...
delay = 20;
else if(idle > 50) // start doubling the sleep after 1ms...
delay *= 2;
if(delay > 10*1000) // up to 10ms
delay = 10*1000;
runtime·usleep(delay);
if(runtime·debug.schedtrace <= 0 &&
(runtime·sched.gcwaiting || runtime·atomicload(&runtime·sched.npidle) == runtime·gomaxprocs)) {
runtime·lock(&runtime·sched);
if(runtime·atomicload(&runtime·sched.gcwaiting) || runtime·atomicload(&runtime·sched.npidle) == runtime·gomaxprocs) {
runtime·atomicstore(&runtime·sched.sysmonwait, 1);
runtime·unlock(&runtime·sched);
runtime·notesleep(&runtime·sched.sysmonnote);
runtime·noteclear(&runtime·sched.sysmonnote);
idle = 0;
delay = 20;
} else
runtime·unlock(&runtime·sched);
}
lastpoll = runtime·atomicload64(&runtime·sched.lastpoll);
now = runtime·nanotime();
if(lastpoll != 0 && lastpoll + 10*1000*1000 < now) {
runtime·cas64(&runtime·sched.lastpoll, lastpoll, now);
gp = runtime·netpoll(false);
if(gp) {
incidlelocked(-1);
injectglist(gp);
incidlelocked(1);
}
}
if(retake(now))
idle = 0;
else
idle++;
if(runtime·debug.schedtrace > 0 && lasttrace + runtime·debug.schedtrace*1000000ll <= now) {
lasttrace = now;
runtime·schedtrace(runtime·debug.scheddetail);
}
}
}
typedef struct Pdesc Pdesc;
struct Pdesc
{
uint32 schedtick;
int64 schedwhen;
uint32 syscalltick;
int64 syscallwhen;
};
#pragma dataflag NOPTR
static Pdesc pdesc[MaxGomaxprocs];
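// retake retakes P's that have been blocked in syscalls for at least one full
// sysmon tick and requests preemption of G's that have run for more than 10ms.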
static uint32
retake(int64 now)
{
uint32 i, s, n;
int64 t;
P *p;
Pdesc *pd;
n = 0;
for(i = 0; i < runtime·gomaxprocs; i++) {
p = runtime·allp[i];
if(p==nil)
continue;
pd = &pdesc[i];
s = p->status;
if(s == Psyscall) {
t = p->syscalltick;
if(pd->syscalltick != t) {
pd->syscalltick = t;
pd->syscallwhen = now;
continue;
}
if(p->runqhead == p->runqtail &&
runtime·atomicload(&runtime·sched.nmspinning) + runtime·atomicload(&runtime·sched.npidle) > 0 &&
pd->syscallwhen + 10*1000*1000 > now)
continue;
incidlelocked(-1);
if(runtime·cas(&p->status, s, Pidle)) {
n++;
handoffp(p);
}
incidlelocked(1);
} else if(s == Prunning) {
t = p->schedtick;
if(pd->schedtick != t) {
pd->schedtick = t;
pd->schedwhen = now;
continue;
}
if(pd->schedwhen + 10*1000*1000 > now)
continue;
preemptone(p);
}
}
return n;
}
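// Tell all goroutines that they have been preempted and they should stop.
// This function is purely best-effort. It can fail to inform a goroutine if a
// processor just started running it.
// No locks need to be held.
// Returns true if a preemption request was issued to at least one goroutine.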
static bool
preemptall(void)
{
P *p;
int32 i;
bool res;
res = false;
for(i = 0; i < runtime·gomaxprocs; i++) {
p = runtime·allp[i];
if(p == nil || p->status != Prunning)
continue;
res |= preemptone(p);
}
return res;
}
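// Tell the goroutine running on processor P to stop.
// This function is purely best-effort. It can fail to inform the goroutine, or
// inform the wrong goroutine. Even if it informs the correct goroutine, that
// goroutine might ignore the request if it is simultaneously executing runtime·newstack.
// No lock needs to be held.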
static bool
preemptone(P *p)
{
M *mp;
G *gp;
mp = p->m;
if(mp == nil || mp == m)
return false;
gp = mp->curg;
if(gp == nil || gp == mp->g0)
return false;
gp->preempt = true;
gp->stackguard0 = StackPreempt;
return true;
}
void
runtime·schedtrace(bool detailed)
{
static int64 starttime;
int64 now;
int64 id1, id2, id3;
int32 i, t, h;
uintptr gi;
int8 *fmt;
M *mp, *lockedm;
G *gp, *lockedg;
P *p;
now = runtime·nanotime();
if(starttime == 0)
starttime = now;
runtime·lock(&runtime·sched);
runtime·printf("SCHED %Dms: gomaxprocs=%d idleprocs=%d threads=%d idlethreads=%d runqueue=%d",
(now-starttime)/1000000, runtime·gomaxprocs, runtime·sched.npidle, runtime·sched.mcount,
runtime·sched.nmidle, runtime·sched.runqsize);
if(detailed) {
runtime·printf(" gcwaiting=%d nmidlelocked=%d nmspinning=%d stopwait=%d sysmonwait=%d\n",
runtime·sched.gcwaiting, runtime·sched.nmidlelocked, runtime·sched.nmspinning,
runtime·sched.stopwait, runtime·sched.sysmonwait);
}
for(i = 0; i < runtime·gomaxprocs; i++) {
p = runtime·allp[i];
if(p == nil)
continue;
mp = p->m;
h = runtime·atomicload(&p->runqhead);
t = runtime·atomicload(&p->runqtail);
if(detailed)
runtime·printf(" P%d: status=%d schedtick=%d syscalltick=%d m=%d runqsize=%d gfreecnt=%d\n",
i, p->status, p->schedtick, p->syscalltick, mp ? mp->id : -1, t-h, p->gfreecnt);
else {
fmt = " %d";
if(runtime·gomaxprocs == 1)
fmt = " [%d]\n";
else if(i == 0)
fmt = " [%d";
else if(i == runtime·gomaxprocs-1)
fmt = " %d]\n";
runtime·printf(fmt, t-h);
}
}
if(!detailed) {
runtime·unlock(&runtime·sched);
return;
}
for(mp = runtime·allm; mp; mp = mp->alllink) {
p = mp->p;
gp = mp->curg;
lockedg = mp->lockedg;
id1 = -1;
if(p)
id1 = p->id;
id2 = -1;
if(gp)
id2 = gp->goid;
id3 = -1;
if(lockedg)
id3 = lockedg->goid;
runtime·printf(" M%d: p=%D curg=%D mallocing=%d throwing=%d gcing=%d"
" locks=%d dying=%d helpgc=%d spinning=%d blocked=%d lockedg=%D\n",
mp->id, id1, id2,
mp->mallocing, mp->throwing, mp->gcing, mp->locks, mp->dying, mp->helpgc,
mp->spinning, m->blocked, id3);
}
runtime·lock(&allglock);
for(gi = 0; gi < runtime·allglen; gi++) {
gp = runtime·allg[gi];
mp = gp->m;
lockedm = gp->lockedm;
runtime·printf(" G%D: status=%d(%s) m=%d lockedm=%d\n",
gp->goid, gp->status, gp->waitreason, mp ? mp->id : -1,
lockedm ? lockedm->id : -1);
}
runtime·unlock(&allglock);
runtime·unlock(&runtime·sched);
}
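// Put mp on the midle list.
// Sched must be locked.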
static void
mput(M *mp)
{
mp->schedlink = runtime·sched.midle;
runtime·sched.midle = mp;
runtime·sched.nmidle++;
checkdead();
}
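// Try to get an m from the midle list.
// Sched must be locked.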
static M*
mget(void)
{
M *mp;
if((mp = runtime·sched.midle) != nil){
runtime·sched.midle = mp->schedlink;
runtime·sched.nmidle--;
}
return mp;
}
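// Put gp on the global runnable queue.
// Sched must be locked.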
static void
globrunqput(G *gp)
{
gp->schedlink = nil;
if(runtime·sched.runqtail)
runtime·sched.runqtail->schedlink = gp;
else
runtime·sched.runqhead = gp;
runtime·sched.runqtail = gp;
runtime·sched.runqsize++;
}
static void
globrunqputbatch(G *ghead, G *gtail, int32 n)
{
gtail->schedlink = nil;
if(runtime·sched.runqtail)
runtime·sched.runqtail->schedlink = ghead;
else
runtime·sched.runqhead = ghead;
runtime·sched.runqtail = gtail;
runtime·sched.runqsize += n;
}
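// Try to get a batch of G's from the global runnable queue.
// Sched must be locked.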
static G*
globrunqget(P *p, int32 max)
{
G *gp, *gp1;
int32 n;
if(runtime·sched.runqsize == 0)
return nil;
n = runtime·sched.runqsize/runtime·gomaxprocs+1;
if(n > runtime·sched.runqsize)
n = runtime·sched.runqsize;
if(max > 0 && n > max)
n = max;
if(n > nelem(p->runq)/2)
n = nelem(p->runq)/2;
runtime·sched.runqsize -= n;
if(runtime·sched.runqsize == 0)
runtime·sched.runqtail = nil;
gp = runtime·sched.runqhead;
runtime·sched.runqhead = gp->schedlink;
n--;
while(n--) {
gp1 = runtime·sched.runqhead;
runtime·sched.runqhead = gp1->schedlink;
runqput(p, gp1);
}
return gp;
}
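// Put p on the pidle list.
// Sched must be locked.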
static void
pidleput(P *p)
{
p->link = runtime·sched.pidle;
runtime·sched.pidle = p;
runtime·xadd(&runtime·sched.npidle, 1);
}
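// Try to get a p from the pidle list.
// Sched must be locked.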
static P*
pidleget(void)
{
P *p;
p = runtime·sched.pidle;
if(p) {
runtime·sched.pidle = p->link;
runtime·xadd(&runtime·sched.npidle, -1);
}
return p;
}
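// Try to put g on the local runnable queue.
// If the run queue is full, runqputslow moves g and half the local work to the global queue.
// Executed only by the owner P.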
static void
runqput(P *p, G *gp)
{
uint32 h, t;
retry:
h = runtime·atomicload(&p->runqhead);
t = p->runqtail;
if(t - h < nelem(p->runq)) {
p->runq[t%nelem(p->runq)] = gp;
runtime·atomicstore(&p->runqtail, t+1);
return;
}
if(runqputslow(p, gp, h, t))
return;
goto retry;
}
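// Put g and a batch of work from the local runnable queue on the global queue.
// Executed only by the owner P.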
static bool
runqputslow(P *p, G *gp, uint32 h, uint32 t)
{
G *batch[nelem(p->runq)/2+1];
uint32 n, i;
n = t-h;
n = n/2;
if(n != nelem(p->runq)/2)
runtime·throw("runqputslow: queue is not full");
for(i=0; i<n; i++)
batch[i] = p->runq[(h+i)%nelem(p->runq)];
if(!runtime·cas(&p->runqhead, h, h+n))
return false;
batch[n] = gp;
for(i=0; i<n; i++)
batch[i]->schedlink = batch[i+1];
runtime·lock(&runtime·sched);
globrunqputbatch(batch[0], batch[n], n+1);
runtime·unlock(&runtime·sched);
return true;
}
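// Get a g from the local runnable queue.
// Executed only by the owner P.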
static G*
runqget(P *p)
{
G *gp;
uint32 t, h;
for(;;) {
h = runtime·atomicload(&p->runqhead);
t = p->runqtail;
if(t == h)
return nil;
gp = p->runq[h%nelem(p->runq)];
if(runtime·cas(&p->runqhead, h, h+1))
return gp;
}
}
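// Grabs a batch of goroutines from p's runnable queue into batch.
// The batch array must be of size nelem(p->runq)/2. Returns the number of grabbed goroutines.
// Can be executed by any P.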
static uint32
runqgrab(P *p, G **batch)
{
uint32 t, h, n, i;
for(;;) {
h = runtime·atomicload(&p->runqhead);
t = runtime·atomicload(&p->runqtail);
n = t-h;
n = n - n/2;
if(n == 0)
break;
if(n > nelem(p->runq)/2) // read inconsistent h and t
continue;
for(i=0; i<n; i++)
batch[i] = p->runq[(h+i)%nelem(p->runq)];
if(runtime·cas(&p->runqhead, h, h+n))
break;
}
return n;
}
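// Steal half of the elements from the local runnable queue of p2
// and put them onto the local runnable queue of p.
// Returns one of the stolen elements (or nil if it failed).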
static G*
runqsteal(P *p, P *p2)
{
G *gp;
G *batch[nelem(p->runq)/2];
uint32 t, h, n, i;
n = runqgrab(p2, batch);
if(n == 0)
return nil;
n--;
gp = batch[n];
if(n == 0)
return gp;
h = runtime·atomicload(&p->runqhead);
t = p->runqtail;
if(t - h + n >= nelem(p->runq))
runtime·throw("runqsteal: runq overflow");
for(i=0; i<n; i++, t++)
p->runq[t%nelem(p->runq)] = batch[i];
runtime·atomicstore(&p->runqtail, t);
return gp;
}
void
runtime·testSchedLocalQueue(void)
{
P p;
G gs[nelem(p.runq)];
int32 i, j;
runtime·memclr((byte*)&p, sizeof(p));
for(i = 0; i < nelem(gs); i++) {
if(runqget(&p) != nil)
runtime·throw("runq is not empty initially");
for(j = 0; j < i; j++)
runqput(&p, &gs[i]);
for(j = 0; j < i; j++) {
if(runqget(&p) != &gs[i]) {
runtime·printf("bad element at iter %d/%d\n", i, j);
runtime·throw("bad element");
}
}
if(runqget(&p) != nil)
runtime·throw("runq is not empty afterwards");
}
}
void
runtime·testSchedLocalQueueSteal(void)
{
P p1, p2;
G gs[nelem(p1.runq)], *gp;
int32 i, j, s;
runtime·memclr((byte*)&p1, sizeof(p1));
runtime·memclr((byte*)&p2, sizeof(p2));
for(i = 0; i < nelem(gs); i++) {
for(j = 0; j < i; j++) {
gs[j].sig = 0;
runqput(&p1, &gs[j]);
}
gp = runqsteal(&p2, &p1);
s = 0;
if(gp) {
s++;
gp->sig++;
}
while(gp = runqget(&p2)) {
s++;
gp->sig++;
}
while(gp = runqget(&p1))
gp->sig++;
for(j = 0; j < i; j++) {
if(gs[j].sig != 1) {
runtime·printf("bad element %d(%d) at iter %d\n", j, gs[j].sig, i);
runtime·throw("bad element");
}
}
if(s != i/2 && s != i/2+1) {
runtime·printf("bad steal %d, want %d or %d, iter %d\n",
s, i/2, i/2+1, i);
runtime·throw("bad steal");
}
}
}
extern void runtime·morestack(void);
uintptr runtime·externalthreadhandlerp;
bool
runtime·topofstack(Func *f)
{
return f->entry == (uintptr)runtime·goexit ||
f->entry == (uintptr)runtime·mstart ||
f->entry == (uintptr)runtime·mcall ||
f->entry == (uintptr)runtime·morestack ||
f->entry == (uintptr)runtime·lessstack ||
f->entry == (uintptr)_rt0_go ||
(runtime·externalthreadhandlerp != 0 && f->entry == runtime·externalthreadhandlerp);
}
int32
runtime·setmaxthreads(int32 in)
{
int32 out;
runtime·lock(&runtime·sched);
out = runtime·sched.maxmcount;
runtime·sched.maxmcount = in;
checkmcount();
runtime·unlock(&runtime·sched);
return out;
}
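// Exposed to Go programs via runtime/debug.SetMaxThreads; returns the previous limit.
// Illustrative Go-level use (the wrapper lives in the runtime/debug package):
//
//	prev := debug.SetMaxThreads(20000)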
static int8 experiment[] = GOEXPERIMENT;
static bool
haveexperiment(int8 *name)
{
int32 i, j;
for(i=0; i<sizeof(experiment); i++) {
if((i == 0 || experiment[i-1] == ',') && experiment[i] == name[0]) {
for(j=0; name[j]; j++)
if(experiment[i+j] != name[j])
goto nomatch;
if(experiment[i+j] != '\0' && experiment[i+j] != ',')
goto nomatch;
return 1;
}
nomatch:;
}
return 0;
}