@@ -1,4 +1,4 @@
-43cf9b39b647
+96bd78e7d35e
The first line of this file holds the Mercurial revision number of the
last merge done from the master library sources.
@@ -504,7 +504,7 @@
mv -f $@.tmp $@
sema.c: $(srcdir)/runtime/sema.goc goc2c
- ./goc2c --gcc --go-prefix libgo_runtime $< > $@.tmp
+ ./goc2c --gcc --go-prefix libgo_sync $< > $@.tmp
mv -f $@.tmp $@
sigqueue.c: $(srcdir)/runtime/sigqueue.goc goc2c
@@ -847,6 +847,7 @@
go/sync/cond.go \
go/sync/mutex.go \
go/sync/once.go \
+ go/sync/runtime.go \
go/sync/rwmutex.go \
go/sync/waitgroup.go
@@ -878,6 +879,7 @@
go/time/tick.go \
go/time/time.go \
go/time/zoneinfo.go \
+ go/time/zoneinfo_read.go \
go/time/zoneinfo_unix.go
go_unicode_files = \
@@ -1091,6 +1093,7 @@
go/exp/norm/composition.go \
go/exp/norm/forminfo.go \
go/exp/norm/input.go \
+ go/exp/norm/iter.go \
go/exp/norm/normalize.go \
go/exp/norm/readwriter.go \
go/exp/norm/tables.go \
@@ -1132,7 +1135,8 @@
go/go/doc/example.go \
go/go/doc/exports.go \
go/go/doc/filter.go \
- go/go/doc/reader.go
+ go/go/doc/reader.go \
+ go/go/doc/synopsis.go
go_go_parser_files = \
go/go/parser/interface.go \
go/go/parser/parser.go
@@ -1159,7 +1163,6 @@
go_html_template_files = \
go/html/template/attr.go \
- go/html/template/clone.go \
go/html/template/content.go \
go/html/template/context.go \
go/html/template/css.go \
@@ -19,6 +19,7 @@
#include "go-type.h"
MHeap runtime_mheap;
+
extern MStats mstats; // defined in extern.go
extern volatile int32 runtime_MemProfileRate
@@ -429,18 +430,6 @@
ret = runtime_mallocgc(typ->__size, flag, 1, 1);
}
-func Alloc(n uintptr) (p *byte) {
- p = runtime_malloc(n);
-}
-
-func Free(p *byte) {
- runtime_free(p);
-}
-
-func Lookup(p *byte) (base *byte, size uintptr) {
- runtime_mlookup(p, &base, &size, nil);
-}
-
func GC() {
runtime_gc(1);
}
@@ -205,6 +205,7 @@
uint64 heap_sys; // bytes obtained from system
uint64 heap_idle; // bytes in idle spans
uint64 heap_inuse; // bytes in non-idle spans
+ uint64 heap_released; // bytes released to the OS
uint64 heap_objects; // total number of allocated objects
// Statistics about allocation of low-level fixed-size structures.
@@ -220,6 +221,7 @@
// Statistics about garbage collector.
// Protected by stopping the world during GC.
uint64 next_gc; // next GC (in heap_alloc time)
+ uint64 last_gc; // last GC (in absolute time)
uint64 pause_total_ns;
uint64 pause_ns[256];
uint32 numgc;
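
These two additions surface in the exported Go API as MemStats.HeapReleased and MemStats.LastGC. A minimal sketch of reading them from Go, using the eventual Go 1 accessor (runtime.ReadMemStats; the exact Go-side surface at this revision may differ):

    package main

    import (
    	"fmt"
    	"runtime"
    )

    func main() {
    	var m runtime.MemStats
    	runtime.ReadMemStats(&m)
    	// HeapReleased mirrors heap_released; LastGC mirrors last_gc.
    	fmt.Printf("released to OS: %d bytes, last GC: %d ns\n",
    		m.HeapReleased, m.LastGC)
    }
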
@@ -304,14 +306,16 @@
{
MSpan *next; // in a span linked list
MSpan *prev; // in a span linked list
- MSpan *allnext; // in the list of all spans
+ MSpan *allnext; // in the list of all spans
PageID start; // starting page number
uintptr npages; // number of pages in span
MLink *freelist; // list of free objects
uint32 ref; // number of allocated objects in this span
uint32 sizeclass; // size class
uint32 state; // MSpanInUse etc
- byte *limit; // end of data in span
+ int64 unusedsince; // First time spotted by GC in MSpanFree state
+ uintptr npreleased; // number of pages released to the OS
+ byte *limit; // end of data in span
};
void runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages);
@@ -381,6 +385,7 @@
void runtime_MGetSizeClassInfo(int32 sizeclass, uintptr *size, int32 *npages, int32 *nobj);
void* runtime_MHeap_SysAlloc(MHeap *h, uintptr n);
void runtime_MHeap_MapBits(MHeap *h);
+void runtime_MHeap_Scavenger(void*);
void* runtime_mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed);
int32 runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **s);
@@ -406,19 +411,11 @@
void runtime_MProf_Malloc(void*, uintptr);
void runtime_MProf_Free(void*, uintptr);
+void runtime_MProf_GC(void);
void runtime_MProf_Mark(void (*scan)(byte *, int64));
int32 runtime_helpgc(bool*);
void runtime_gchelper(void);
-// Malloc profiling settings.
-// Must match definition in extern.go.
-enum {
- MProf_None = 0,
- MProf_Sample = 1,
- MProf_All = 2,
-};
-extern int32 runtime_malloc_profile;
-
struct __go_func_type;
bool runtime_getfinalizer(void *p, bool del, void (**fn)(void*), const struct __go_func_type **ft);
void runtime_walkfintab(void (*fn)(void*), void (*scan)(byte *, int64));
@@ -61,6 +61,21 @@
#define bitMask (bitBlockBoundary | bitAllocated | bitMarked | bitSpecial)
+// Holding worldsema grants an M the right to try to stop the world.
+// The procedure is:
+//
+// runtime_semacquire(&runtime_worldsema);
+// m->gcing = 1;
+// runtime_stoptheworld();
+//
+// ... do stuff ...
+//
+// m->gcing = 0;
+// runtime_semrelease(&runtime_worldsema);
+// runtime_starttheworld();
+//
+uint32 runtime_worldsema = 1;
+
// TODO: Make these per-M.
static uint64 nhandoff;
@@ -92,7 +107,6 @@
Finalizer fin[1];
};
-
static G *fing;
static FinBlock *finq; // list of finalizers that are to be executed
static FinBlock *finc; // cache of free blocks
@@ -778,9 +792,11 @@
byte *p;
MCache *c;
byte *arena_start;
+ int64 now;
m = runtime_m();
arena_start = runtime_mheap.arena_start;
+ now = runtime_nanotime();
for(;;) {
s = work.spans;
@@ -789,6 +805,11 @@
if(!runtime_casp(&work.spans, s, s->allnext))
continue;
+ // Stamp newly unused spans. The scavenger will use that
+ // info to potentially give back some pages to the OS.
+ if(s->state == MSpanFree && s->unusedsince == 0)
+ s->unusedsince = now;
+
if(s->state != MSpanInUse)
continue;
@@ -875,11 +896,6 @@
runtime_notewakeup(&work.alldone);
}
-// Semaphore, not Lock, so that the goroutine
-// reschedules when there is contention rather
-// than spinning.
-static uint32 gcsema = 1;
-
// Initialized from $GOGC. GOGC=off means no gc.
//
// Next gc is after we've allocated an extra amount of
@@ -968,9 +984,9 @@
if(gcpercent < 0)
return;
- runtime_semacquire(&gcsema);
+ runtime_semacquire(&runtime_worldsema);
if(!force && mstats.heap_alloc < mstats.next_gc) {
- runtime_semrelease(&gcsema);
+ runtime_semrelease(&runtime_worldsema);
return;
}
@@ -1032,6 +1048,7 @@
obj1 = mstats.nmalloc - mstats.nfree;
t3 = runtime_nanotime();
+ mstats.last_gc = t3;
mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t3 - t0;
mstats.pause_total_ns += t3 - t0;
mstats.numgc++;
@@ -1045,8 +1062,9 @@
(unsigned long long) mstats.nmalloc, (unsigned long long)mstats.nfree,
(unsigned long long) nhandoff);
}
-
- runtime_semrelease(&gcsema);
+
+ runtime_MProf_GC();
+ runtime_semrelease(&runtime_worldsema);
// If we could have used another helper proc, start one now,
// in the hope that it will be available next time.
@@ -1073,18 +1091,18 @@
{
M *m;
- // Have to acquire gcsema to stop the world,
+ // Have to acquire worldsema to stop the world,
// because stoptheworld can only be used by
// one goroutine at a time, and there might be
// a pending garbage collection already calling it.
- runtime_semacquire(&gcsema);
+ runtime_semacquire(&runtime_worldsema);
m = runtime_m();
m->gcing = 1;
runtime_stoptheworld();
cachestats();
*stats = mstats;
m->gcing = 0;
- runtime_semrelease(&gcsema);
+ runtime_semrelease(&runtime_worldsema);
runtime_starttheworld(false);
}
@@ -103,6 +103,8 @@
runtime_MSpanList_Remove(s);
s->state = MSpanInUse;
mstats.heap_idle -= s->npages<<PageShift;
+ mstats.heap_released -= s->npreleased<<PageShift;
+ s->npreleased = 0;
if(s->npages > npage) {
// Trim extra and put it back in the heap.
@@ -280,6 +282,8 @@
}
mstats.heap_idle += s->npages<<PageShift;
s->state = MSpanFree;
+ s->unusedsince = 0;
+ s->npreleased = 0;
runtime_MSpanList_Remove(s);
sp = (uintptr*)(s->start<<PageShift);
@@ -292,6 +296,7 @@
*tp |= *sp; // propagate "needs zeroing" mark
s->start = t->start;
s->npages += t->npages;
+ s->npreleased = t->npreleased; // absorb released pages
p -= t->npages;
h->map[p] = s;
runtime_MSpanList_Remove(t);
@@ -304,6 +309,7 @@
tp = (uintptr*)(t->start<<PageShift);
*sp |= *tp; // propagate "needs zeroing" mark
s->npages += t->npages;
+ s->npreleased += t->npreleased;
h->map[p + s->npages - 1] = s;
runtime_MSpanList_Remove(t);
t->state = MSpanDead;
@@ -317,8 +323,86 @@
runtime_MSpanList_Insert(&h->free[s->npages], s);
else
runtime_MSpanList_Insert(&h->large, s);
+}
- // TODO(rsc): IncrementalScavenge() to return memory to OS.
+// Release (part of) unused memory to OS.
+// Goroutine created in runtime_schedinit.
+// Loop forever.
+void
+runtime_MHeap_Scavenger(void* dummy)
+{
+ MHeap *h;
+ MSpan *s, *list;
+ uint64 tick, now, forcegc, limit;
+ uint32 k, i;
+ uintptr released, sumreleased;
+ const byte *env;
+ bool trace;
+ Note note;
+
+ USED(dummy);
+
+ // If we go two minutes without a garbage collection, force one to run.
+ forcegc = 2*60*1e9;
+ // If a span goes unused for 5 minutes after a garbage collection,
+ // we hand it back to the operating system.
+ limit = 5*60*1e9;
+ // Make the wake-up period small enough for the sampling to be correct.
+ if(forcegc < limit)
+ tick = forcegc/2;
+ else
+ tick = limit/2;
+
+ trace = false;
+ env = runtime_getenv("GOGCTRACE");
+ if(env != nil)
+ trace = runtime_atoi(env) > 0;
+
+ h = &runtime_mheap;
+ for(k=0;; k++) {
+ runtime_noteclear(&note);
+ runtime_entersyscall();
+ runtime_notetsleep(&note, tick);
+ runtime_exitsyscall();
+
+ runtime_lock(h);
+ now = runtime_nanotime();
+ if(now - mstats.last_gc > forcegc) {
+ runtime_unlock(h);
+ runtime_gc(1);
+ runtime_lock(h);
+ now = runtime_nanotime();
+ if (trace)
+ runtime_printf("scvg%d: GC forced\n", k);
+ }
+ sumreleased = 0;
+ for(i=0; i < nelem(h->free)+1; i++) {
+ if(i < nelem(h->free))
+ list = &h->free[i];
+ else
+ list = &h->large;
+ if(runtime_MSpanList_IsEmpty(list))
+ continue;
+ for(s=list->next; s != list; s=s->next) {
+ if(s->unusedsince != 0 && (now - s->unusedsince) > limit) {
+ released = (s->npages - s->npreleased) << PageShift;
+ mstats.heap_released += released;
+ sumreleased += released;
+ s->npreleased = s->npages;
+ runtime_SysUnused((void*)(s->start << PageShift), s->npages << PageShift);
+ }
+ }
+ }
+ runtime_unlock(h);
+
+ if(trace) {
+ if(sumreleased > 0)
+ runtime_printf("scvg%d: %p MB released\n", k, (void*)(sumreleased>>20));
+ runtime_printf("scvg%d: inuse: %lld, idle: %lld, sys: %lld, released: %lld, consumed: %lld (MB)\n",
+ k, (long long)(mstats.heap_inuse>>20), (long long)(mstats.heap_idle>>20), (long long)(mstats.heap_sys>>20),
+ (long long)(mstats.heap_released>>20), (long long)((mstats.heap_sys - mstats.heap_released)>>20));
+ }
+ }
}
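
To make the sampling comment above concrete: the sleep period is half the smaller of the two deadlines, so neither the forced-GC deadline nor the release deadline can be overshot by more than one wake-up. A standalone restatement of that arithmetic (illustration only, not merged code):

    package main

    import "fmt"

    func main() {
    	const (
    		forcegc = 2 * 60 * 1e9 // force a GC after 2 idle minutes (ns)
    		limit   = 5 * 60 * 1e9 // release spans unused for 5 minutes (ns)
    	)
    	tick := int64(limit / 2)
    	if forcegc < limit {
    		tick = int64(forcegc / 2)
    	}
    	fmt.Printf("wake up every %d seconds\n", tick/1e9) // every 60 seconds
    }
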
// Initialize a new span with the given start and npages.
@@ -333,6 +417,8 @@
span->ref = 0;
span->sizeclass = 0;
span->state = 0;
+ span->unusedsince = 0;
+ span->npreleased = 0;
}
// Initialize an empty doubly-linked list.
@@ -26,6 +26,10 @@
uintptr frees;
uintptr alloc_bytes;
uintptr free_bytes;
+ uintptr recent_allocs; // since last gc
+ uintptr recent_frees;
+ uintptr recent_alloc_bytes;
+ uintptr recent_free_bytes;
uintptr hash;
uintptr nstk;
uintptr stk[1];
@@ -39,7 +43,7 @@
// Return the bucket for stk[0:nstk], allocating new bucket if needed.
static Bucket*
-stkbucket(uintptr *stk, int32 nstk)
+stkbucket(uintptr *stk, int32 nstk, bool alloc)
{
int32 i;
uintptr h;
@@ -66,6 +70,9 @@
runtime_mcmp((byte*)b->stk, (byte*)stk, nstk*sizeof stk[0]) == 0)
return b;
+ if(!alloc)
+ return nil;
+
b = runtime_mallocgc(sizeof *b + nstk*sizeof stk[0], FlagNoProfiling, 0, 1);
bucketmem += sizeof *b + nstk*sizeof stk[0];
runtime_memmove(b->stk, stk, nstk*sizeof stk[0]);
@@ -78,6 +85,26 @@
return b;
}
+// Record that a gc just happened: all the 'recent' statistics are now real.
+void
+runtime_MProf_GC(void)
+{
+ Bucket *b;
+
+ runtime_lock(&proflock);
+ for(b=buckets; b; b=b->allnext) {
+ b->allocs += b->recent_allocs;
+ b->frees += b->recent_frees;
+ b->alloc_bytes += b->recent_alloc_bytes;
+ b->free_bytes += b->recent_free_bytes;
+ b->recent_allocs = 0;
+ b->recent_frees = 0;
+ b->recent_alloc_bytes = 0;
+ b->recent_free_bytes = 0;
+ }
+ runtime_unlock(&proflock);
+}
+
// Map from pointer to Bucket* that allocated it.
// Three levels:
// Linked-list hash table for top N-20 bits.
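
The effect of the recent_* staging is visible through the Go heap profile: allocation counts only become official at a collection, so a profile taken right after allocating shows nothing new until a GC runs. A sketch using the Go 1 profiling API (runtime.MemProfile; the Go-side surface at this revision may differ):

    package main

    import (
    	"fmt"
    	"runtime"
    )

    var sink []byte // keep the allocation live so it is heap-allocated

    func main() {
    	sink = make([]byte, 1<<20) // allocate something profilable
    	// Folding recent_* into the official totals happens at GC
    	// (runtime_MProf_GC above), so force one before reading.
    	runtime.GC()
    	records := make([]runtime.MemProfileRecord, 256)
    	if n, ok := runtime.MemProfile(records, true); ok {
    		fmt.Println("buckets:", n)
    	}
    }
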
@@ -204,9 +231,9 @@
nstk = 0;
#endif
runtime_lock(&proflock);
- b = stkbucket(stk, nstk);
- b->allocs++;
- b->alloc_bytes += size;
+ b = stkbucket(stk, nstk, true);
+ b->recent_allocs++;
+ b->recent_alloc_bytes += size;
setaddrbucket((uintptr)p, b);
runtime_unlock(&proflock);
m = runtime_m();
@@ -228,8 +255,8 @@
runtime_lock(&proflock);
b = getaddrbucket((uintptr)p);
if(b != nil) {
- b->frees++;
- b->free_bytes += size;
+ b->recent_frees++;
+ b->recent_free_bytes += size;
}
runtime_unlock(&proflock);
m = runtime_m();
@@ -293,13 +320,13 @@
scan((byte*)&addrfree, sizeof addrfree);
}
-// Must match ThreadProfileRecord in debug.go.
+// Must match StackRecord in debug.go.
typedef struct TRecord TRecord;
struct TRecord {
uintptr stk[32];
};
-func ThreadProfile(p Slice) (n int32, ok bool) {
+func ThreadCreateProfile(p Slice) (n int32, ok bool) {
TRecord *r;
M *first, *m;
@@ -317,3 +344,89 @@
}
}
}
+
+func Stack(b Slice, all bool) (n int32) {
+ byte *pc, *sp;
+
+ sp = runtime_getcallersp(&b);
+ pc = runtime_getcallerpc(&b);
+
+ if(all) {
+ runtime_semacquire(&runtime_worldsema);
+ runtime_m()->gcing = 1;
+ runtime_stoptheworld();
+ }
+
+ if(b.__count == 0)
+ n = 0;
+ else {
+ G* g = runtime_g();
+ g->writebuf = (byte*)b.__values;
+ g->writenbuf = b.__count;
+ USED(pc);
+ USED(sp);
+ // runtime_goroutineheader(g);
+ // runtime_traceback(pc, sp, 0, g);
+ // if(all)
+ // runtime_tracebackothers(g);
+ n = b.__count - g->writenbuf;
+ g->writebuf = nil;
+ g->writenbuf = 0;
+ }
+
+ if(all) {
+ runtime_m()->gcing = 0;
+ runtime_semrelease(&runtime_worldsema);
+ runtime_starttheworld(false);
+ }
+}
+
+static void
+saveg(byte *pc, byte *sp, G *g, TRecord *r)
+{
+ int32 n;
+
+ USED(pc);
+ USED(sp);
+ USED(g);
+ // n = runtime_gentraceback(pc, sp, 0, g, 0, r->stk, nelem(r->stk));
+ n = 0;
+ if((size_t)n < nelem(r->stk))
+ r->stk[n] = 0;
+}
+
+func GoroutineProfile(b Slice) (n int32, ok bool) {
+ byte *pc, *sp;
+ TRecord *r;
+ G *gp;
+
+ sp = runtime_getcallersp(&b);
+ pc = runtime_getcallerpc(&b);
+
+ ok = false;
+ n = runtime_gcount();
+ if(n <= b.__count) {
+ runtime_semacquire(&runtime_worldsema);
+ runtime_m()->gcing = 1;
+ runtime_stoptheworld();
+
+ n = runtime_gcount();
+ if(n <= b.__count) {
+ G* g = runtime_g();
+ ok = true;
+ r = (TRecord*)b.__values;
+ saveg(pc, sp, g, r++);
+ for(gp = runtime_allg; gp != nil; gp = gp->alllink) {
+ if(gp == g || gp->status == Gdead)
+ continue;
+ //saveg(gp->sched.pc, gp->sched.sp, gp, r++);
+ r++;
+ }
+ }
+
+ runtime_m()->gcing = 0;
+ runtime_semrelease(&runtime_worldsema);
+ runtime_starttheworld(false);
+ }
+}
+
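
Both new entry points back the exported Go 1 API (runtime.Stack and runtime.GoroutineProfile); note that in this port the traceback calls are commented out, so the buffers come back essentially empty for now. Expected usage from Go, for reference (buffer sizes are arbitrary):

    package main

    import (
    	"fmt"
    	"runtime"
    )

    func main() {
    	// all=true takes worldsema, stops the world, and dumps every
    	// goroutine; all=false traces only the caller.
    	buf := make([]byte, 4096)
    	n := runtime.Stack(buf, true)
    	fmt.Printf("%s", buf[:n])

    	// GoroutineProfile only succeeds (ok=true) if the slice can hold
    	// every goroutine, mirroring the n <= b.__count checks above.
    	records := make([]runtime.StackRecord, runtime.NumGoroutine()+8)
    	if n, ok := runtime.GoroutineProfile(records); ok {
    		fmt.Println("captured", n, "goroutines")
    	}
    }
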
@@ -362,6 +362,9 @@
}
}
+// Keep track of the scavenger's goroutine for deadlock detection.
+static G *scvg;
+
// The bootstrap sequence is:
//
// call osinit
@@ -413,6 +416,8 @@
// Can not enable GC until all roots are registered.
// mstats.enablegc = 1;
m->nomemprof--;
+
+ scvg = __go_go(runtime_MHeap_Scavenger, nil);
}
extern void main_init(void) __asm__ ("__go_init_main");
@@ -547,7 +552,7 @@
// Add to runtime_allm so garbage collector doesn't free m
// when it is just in a register or thread-local storage.
m->alllink = runtime_allm;
- // runtime_Cgocalls() iterates over allm w/o schedlock,
+ // runtime_NumCgoCall() iterates over allm w/o schedlock,
// so we need to publish it safely.
runtime_atomicstorep(&runtime_allm, m);
}
@@ -786,9 +791,12 @@
mput(m);
}
- v = runtime_atomicload(&runtime_sched.atomic);
- if(runtime_sched.grunning == 0)
- runtime_throw("all goroutines are asleep - deadlock!");
+ // Look for a deadlock situation: a single active g which happens to be scvg.
+ if(runtime_sched.grunning == 1 && runtime_sched.gwait == 0) {
+ if(scvg->status == Grunning || scvg->status == Gsyscall)
+ runtime_throw("all goroutines are asleep - deadlock!");
+ }
+
m->nextg = nil;
m->waitnextg = 1;
runtime_noteclear(&m->havenextg);
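
The special case matters because the scavenger goroutine now exists in every program: the old grunning == 0 test could never fire again, since scvg always counts as one live g. The classic case the new check must still catch:

    package main

    func main() {
    	// Only the scavenger (scvg) remains runnable here, so the runtime
    	// must still report: all goroutines are asleep - deadlock!
    	ch := make(chan int)
    	<-ch // no sender ever appears
    }
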
@@ -797,6 +805,7 @@
// Entersyscall might have decremented mcpu too, but if so
// it will see the waitstop and take the slow path.
// Exitsyscall never increments mcpu beyond mcpumax.
+ v = runtime_atomicload(&runtime_sched.atomic);
if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
// set waitstop = 0 (known to be 1)
runtime_xadd(&runtime_sched.atomic, -1<<waitstopShift);
@@ -1472,11 +1481,17 @@
return m->id;
}
-int32 runtime_Goroutines (void)
- __asm__ ("libgo_runtime.runtime.Goroutines");
+int32 runtime_NumGoroutine (void)
+ __asm__ ("libgo_runtime.runtime.NumGoroutine");
int32
-runtime_Goroutines()
+runtime_NumGoroutine()
+{
+ return runtime_sched.gcount;
+}
+
+int32
+runtime_gcount(void)
{
return runtime_sched.gcount;
}
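
This tracks the Go 1 API rename (runtime.Goroutines becomes runtime.NumGoroutine); runtime_gcount is the internal twin used by GoroutineProfile above. From Go:

    package main

    import (
    	"fmt"
    	"runtime"
    )

    func main() {
    	fmt.Println("goroutines:", runtime.NumGoroutine()) // was Goroutines()
    }
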
@@ -143,6 +143,8 @@
M* lockedm;
M* idlem;
// int32 sig;
+ int32 writenbuf;
+ byte* writebuf;
// uintptr sigcode0;
// uintptr sigcode1;
// uintptr sigpc;
@@ -189,9 +191,9 @@
enum
{
SigNotify = 1<<0, // let signal.Notify have signal, even if from kernel
- SigKill = 1<<1, // if signal.Notify doesn't take it, exit quietly
- SigThrow = 1<<2, // if signal.Notify doesn't take it, exit loudly
- SigPanic = 1<<3, // if the signal is from the kernel, panic
+ SigKill = 1<<1, // if signal.Notify doesn't take it, exit quietly
+ SigThrow = 1<<2, // if signal.Notify doesn't take it, exit loudly
+ SigPanic = 1<<3, // if the signal is from the kernel, panic
SigDefault = 1<<4, // if the signal isn't explicitly requested, don't monitor it
};
@@ -277,6 +279,7 @@
void* runtime_mal(uintptr);
void runtime_schedinit(void);
void runtime_initsig(void);
+void runtime_sigenable(uint32 sig);
String runtime_gostringnocopy(const byte*);
void* runtime_mstart(void*);
G* runtime_malg(int32, byte**, size_t*);
@@ -296,6 +299,7 @@
void runtime_stoptheworld(void);
void runtime_starttheworld(bool);
+extern uint32 runtime_worldsema;
G* __go_go(void (*pfn)(void*), void*);
/*
@@ -348,6 +352,7 @@
#define runtime_munmap munmap
#define runtime_madvise madvise
#define runtime_memclr(buf, size) __builtin_memset((buf), 0, (size))
+#define runtime_getcallerpc(p) __builtin_return_address(0)
#ifdef __rtems__
void __wrap_rtems_task_variable_add(void **);
@@ -373,8 +378,6 @@
#define runtime_exit(s) exit(s)
MCache* runtime_allocmcache(void);
void free(void *v);
-struct __go_func_type;
-bool runtime_addfinalizer(void*, void(*fn)(void*), const struct __go_func_type *);
#define runtime_cas(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
#define runtime_casp(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
#define runtime_xadd(p, v) __sync_add_and_fetch (p, v)
@@ -384,6 +387,11 @@
#define runtime_atomicloadp(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
#define runtime_atomicstorep(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
+struct __go_func_type;
+bool runtime_addfinalizer(void*, void(*fn)(void*), const struct __go_func_type *);
+#define runtime_getcallersp(p) __builtin_frame_address(1)
+int32 runtime_mcount(void);
+int32 runtime_gcount(void);
void runtime_dopanic(int32) __attribute__ ((noreturn));
void runtime_startpanic(void);
void runtime_ready(G*);
@@ -17,7 +17,7 @@
// See Mullender and Cox, ``Semaphores in Plan 9,''
// http://swtch.com/semaphore.pdf
-package runtime
+package sync
#include "runtime.h"
#include "arch.h"
@@ -172,10 +172,10 @@
runtime_ready(s->g);
}
-func Semacquire(addr *uint32) {
+func runtime_Semacquire(addr *uint32) {
runtime_semacquire(addr);
}
-func Semrelease(addr *uint32) {
+func runtime_Semrelease(addr *uint32) {
runtime_semrelease(addr);
}
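
Moving sema.goc into package sync (together with the libgo_sync --go-prefix change in the Makefile above) pairs with the new go/sync/runtime.go in the file list, which declares these entry points on the Go side. A sketch of what that file presumably contains, matching the upstream Go library of this era:

    package sync

    // Semacquire waits until *s > 0 and atomically decrements it.
    // It is a simple sleep primitive for use by the synchronization
    // library; the body lives in the runtime (sema.goc above).
    func runtime_Semacquire(s *uint32)

    // Semrelease atomically increments *s and notifies a waiting
    // goroutine if one is blocked in Semacquire.
    func runtime_Semrelease(s *uint32)
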
@@ -142,10 +142,12 @@
// Special case: want everything.
for(i=0; (size_t)i<nelem(sig.wanted); i++)
sig.wanted[i] = ~(uint32)0;
+ runtime_sigenable(s);
return;
}
if(s >= nelem(sig.wanted)*32)
return;
sig.wanted[s/32] |= 1U<<(s&31);
+ runtime_sigenable(s);
}