@@ -1,4 +1,4 @@
-6fdc1974457c
+921e53d4863c
The first line of this file holds the Mercurial revision number of the
last merge done from the master library sources.
@@ -227,8 +227,8 @@
$(exp_inotify_gox) \
exp/norm.gox \
exp/proxy.gox \
+ exp/ssa.gox \
exp/terminal.gox \
- exp/types.gox \
exp/utf8string.gox
toolexeclibgoexphtmldir = $(toolexeclibgoexpdir)/html
@@ -256,7 +256,8 @@
go/parser.gox \
go/printer.gox \
go/scanner.gox \
- go/token.gox
+ go/token.gox \
+ go/types.gox
toolexeclibgohashdir = $(toolexeclibgodir)/hash
@@ -682,7 +683,7 @@
go_net_newpollserver_file = go/net/newpollserver_unix.go
else # !LIBGO_IS_LINUX && !LIBGO_IS_RTEMS
if LIBGO_IS_NETBSD
-go_net_fd_os_file = go/net/fd_netbsd.go
+go_net_fd_os_file = go/net/fd_bsd.go
go_net_newpollserver_file = go/net/newpollserver_unix.go
else # !LIBGO_IS_NETBSD && !LIBGO_IS_LINUX && !LIBGO_IS_RTEMS
# By default use select with pipes. Most systems should have
@@ -753,9 +754,16 @@
endif
endif
+if LIBGO_IS_LINUX
+go_net_cloexec_file = go/net/sock_cloexec.go
+else
+go_net_cloexec_file = go/net/sys_cloexec.go
+endif
+
go_net_files = \
go/net/cgo_unix.go \
$(go_net_cgo_file) \
+ $(go_net_cloexec_file) \
go/net/dial.go \
go/net/dnsclient.go \
go/net/dnsclient_unix.go \
@@ -856,6 +864,12 @@
endif
endif
+if LIBGO_IS_LINUX
+go_os_pipe_file = go/os/pipe_linux.go
+else
+go_os_pipe_file = go/os/pipe_bsd.go
+endif
+
go_os_files = \
$(go_os_dir_file) \
go/os/dir.go \
@@ -872,6 +886,7 @@
go/os/getwd.go \
go/os/path.go \
go/os/path_unix.go \
+ $(go_os_pipe_file) \
go/os/proc.go \
$(go_os_stat_file) \
go/os/str.go \
@@ -1026,6 +1041,7 @@
go_compress_flate_files = \
go/compress/flate/copy.go \
go/compress/flate/deflate.go \
+ go/compress/flate/fixedhuff.go \
go/compress/flate/huffman_bit_writer.go \
go/compress/flate/huffman_code.go \
go/compress/flate/inflate.go \
@@ -1222,8 +1238,10 @@
go_exp_locale_collate_files = \
go/exp/locale/collate/colelem.go \
go/exp/locale/collate/collate.go \
+ go/exp/locale/collate/colltab.go \
go/exp/locale/collate/contract.go \
go/exp/locale/collate/export.go \
+ go/exp/locale/collate/sort.go \
go/exp/locale/collate/table.go \
go/exp/locale/collate/tables.go \
go/exp/locale/collate/trie.go
@@ -1248,23 +1266,18 @@
go/exp/proxy/per_host.go \
go/exp/proxy/proxy.go \
go/exp/proxy/socks5.go
+go_exp_ssa_files = \
+ go/exp/ssa/blockopt.go \
+ go/exp/ssa/doc.go \
+ go/exp/ssa/func.go \
+ go/exp/ssa/sanity.go \
+ go/exp/ssa/ssa.go \
+ go/exp/ssa/literal.go \
+ go/exp/ssa/print.go \
+ go/exp/ssa/util.go
go_exp_terminal_files = \
go/exp/terminal/terminal.go \
go/exp/terminal/util.go
-go_exp_types_files = \
- go/exp/types/builtins.go \
- go/exp/types/check.go \
- go/exp/types/const.go \
- go/exp/types/conversions.go \
- go/exp/types/errors.go \
- go/exp/types/exportdata.go \
- go/exp/types/expr.go \
- go/exp/types/gcimporter.go \
- go/exp/types/operand.go \
- go/exp/types/predicates.go \
- go/exp/types/stmt.go \
- go/exp/types/types.go \
- go/exp/types/universe.go
go_exp_utf8string_files = \
go/exp/utf8string/string.go
@@ -1305,6 +1318,24 @@
go/go/token/position.go \
go/go/token/serialize.go \
go/go/token/token.go
+go_go_types_files = \
+ go/go/types/api.go \
+ go/go/types/builtins.go \
+ go/go/types/check.go \
+ go/go/types/const.go \
+ go/go/types/conversions.go \
+ go/go/types/errors.go \
+ go/go/types/exportdata.go \
+ go/go/types/expr.go \
+ go/go/types/gcimporter.go \
+ go/go/types/objects.go \
+ go/go/types/operand.go \
+ go/go/types/predicates.go \
+ go/go/types/resolve.go \
+ go/go/types/scope.go \
+ go/go/types/stmt.go \
+ go/go/types/types.go \
+ go/go/types/universe.go
go_hash_adler32_files = \
go/hash/adler32/adler32.go
@@ -1848,8 +1879,8 @@
exp/locale/collate/build.lo \
exp/norm.lo \
exp/proxy.lo \
+ exp/ssa.lo \
exp/terminal.lo \
- exp/types.lo \
exp/utf8string.lo \
html/template.lo \
go/ast.lo \
@@ -1860,6 +1891,7 @@
go/printer.lo \
go/scanner.lo \
go/token.lo \
+ go/types.lo \
hash/adler32.lo \
hash/crc32.lo \
hash/crc64.lo \
@@ -2751,6 +2783,15 @@
@$(CHECK)
.PHONY: exp/proxy/check
+@go_include@ exp/ssa.lo.dep
+exp/ssa.lo.dep: $(go_exp_ssa_files)
+ $(BUILDDEPS)
+exp/ssa.lo: $(go_exp_ssa_files)
+ $(BUILDPACKAGE)
+exp/ssa/check: $(CHECK_DEPS)
+ @$(CHECK)
+.PHONY: exp/ssa/check
+
@go_include@ exp/terminal.lo.dep
exp/terminal.lo.dep: $(go_exp_terminal_files)
$(BUILDDEPS)
@@ -2760,15 +2801,6 @@
@$(CHECK)
.PHONY: exp/terminal/check
-@go_include@ exp/types.lo.dep
-exp/types.lo.dep: $(go_exp_types_files)
- $(BUILDDEPS)
-exp/types.lo: $(go_exp_types_files)
- $(BUILDPACKAGE)
-exp/types/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: exp/types/check
-
@go_include@ exp/utf8string.lo.dep
exp/utf8string.lo.dep: $(go_exp_utf8string_files)
$(BUILDDEPS)
@@ -2877,6 +2909,15 @@
@$(CHECK)
.PHONY: go/token/check
+@go_include@ go/types.lo.dep
+go/types.lo.dep: $(go_go_types_files)
+ $(BUILDDEPS)
+go/types.lo: $(go_go_types_files)
+ $(BUILDPACKAGE)
+go/types/check: $(CHECK_DEPS)
+ @$(CHECK)
+.PHONY: go/types/check
+
@go_include@ hash/adler32.lo.dep
hash/adler32.lo.dep: $(go_hash_adler32_files)
$(BUILDDEPS)
@@ -3507,10 +3548,10 @@
$(BUILDGOX)
exp/proxy.gox: exp/proxy.lo
$(BUILDGOX)
+exp/ssa.gox: exp/ssa.lo
+ $(BUILDGOX)
exp/terminal.gox: exp/terminal.lo
$(BUILDGOX)
-exp/types.gox: exp/types.lo
- $(BUILDGOX)
exp/utf8string.gox: exp/utf8string.lo
$(BUILDGOX)
@@ -3533,6 +3574,8 @@
$(BUILDGOX)
go/token.gox: go/token.lo
$(BUILDGOX)
+go/types.gox: go/types.lo
+ $(BUILDGOX)
hash/adler32.gox: hash/adler32.lo
$(BUILDGOX)
@@ -3734,7 +3777,6 @@
exp/norm/check \
exp/proxy/check \
exp/terminal/check \
- exp/types/check \
exp/utf8string/check \
html/template/check \
go/ast/check \
@@ -3745,7 +3787,7 @@
go/printer/check \
go/scanner/check \
go/token/check \
- $(go_types_check_omitted_since_it_calls_6g) \
+ go/types/check \
hash/adler32/check \
hash/crc32/check \
hash/crc64/check \
@@ -491,7 +491,7 @@
AM_CONDITIONAL(HAVE_STRERROR_R, test "$ac_cv_func_strerror_r" = yes)
AM_CONDITIONAL(HAVE_WAIT4, test "$ac_cv_func_wait4" = yes)
-AC_CHECK_FUNCS(epoll_create1 faccessat fallocate fchmodat fchownat futimesat inotify_add_watch inotify_init inotify_init1 inotify_rm_watch mkdirat mknodat openat renameat sync_file_range splice tee unlinkat unshare)
+AC_CHECK_FUNCS(accept4 epoll_create1 faccessat fallocate fchmodat fchownat futimesat inotify_add_watch inotify_init inotify_init1 inotify_rm_watch mkdirat mknodat openat pipe2 renameat sync_file_range splice tee unlinkat unshare utimensat)
AC_TYPE_OFF_T
AC_CHECK_TYPES([loff_t])
@@ -223,3 +223,17 @@
// and this shuts up the compiler.
panic("unreached")
}
+
+// Try to open a pipe with O_CLOEXEC set on both file descriptors.
+func forkExecPipe(p []int) error {
+ err := Pipe(p)
+ if err != nil {
+ return err
+ }
+ _, err = fcntl(p[0], F_SETFD, FD_CLOEXEC)
+ if err != nil {
+ return err
+ }
+ _, err = fcntl(p[1], F_SETFD, FD_CLOEXEC)
+ return err
+}
@@ -250,3 +250,20 @@
// and this shuts up the compiler.
panic("unreached")
}
+
+// Try to open a pipe with O_CLOEXEC set on both file descriptors.
+func forkExecPipe(p []int) (err error) {
+ err = Pipe2(p, O_CLOEXEC)
+ // pipe2 was added in 2.6.27 and our minimum requirement is 2.6.23, so it
+ // might not be implemented.
+ if err == ENOSYS {
+ if err = Pipe(p); err != nil {
+ return
+ }
+ if _, err = fcntl(p[0], F_SETFD, FD_CLOEXEC); err != nil {
+ return
+ }
+ _, err = fcntl(p[1], F_SETFD, FD_CLOEXEC)
+ }
+ return
+}
@@ -219,13 +219,7 @@
ForkLock.Lock()
// Allocate child status pipe close on exec.
- if err = Pipe(p[0:]); err != nil {
- goto error
- }
- if _, err = fcntl(p[0], F_SETFD, FD_CLOEXEC); err != nil {
- goto error
- }
- if _, err = fcntl(p[1], F_SETFD, FD_CLOEXEC); err != nil {
+ if err = forkExecPipe(p[:]); err != nil {
goto error
}
@@ -166,6 +166,24 @@
return reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, "")
}
+//sys accept4(fd int, sa *RawSockaddrAny, len *Socklen_t, flags int) (nfd int, err error)
+//accept4(fd _C_int, sa *RawSockaddrAny, len *Socklen_t, flags _C_int) _C_int
+
+func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len Socklen_t = SizeofSockaddrAny
+ nfd, err = accept4(fd, &rsa, &len, flags)
+ if err != nil {
+ return -1, nil, err
+ }
+ sa, err = anyToSockaddr(&rsa)
+ if err != nil {
+ Close(nfd)
+ return -1, nil, err
+ }
+ return nfd, sa, nil
+}
+
//sys Acct(path string) (err error)
//acct(path *byte) int
@@ -271,6 +289,19 @@
//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error)
//mknodat(dirfd int, path *byte, mode Mode_t, dev _dev_t) int
+//sysnb pipe2(p *[2]_C_int, flags int) (err error)
+//pipe2(p *[2]_C_int, flags _C_int) _C_int
+func Pipe2(p []int, flags int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe2(&pp, flags)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
//sys PivotRoot(newroot string, putold string) (err error)
//pivot_root(newroot *byte, putold *byte) int
@@ -6,9 +6,7 @@
package syscall
-import (
- "unsafe"
-)
+import "unsafe"
// Round the length of a netlink message up to align it properly.
func nlmAlignOf(msglen int) int {
@@ -21,8 +19,8 @@
return (attrlen + RTA_ALIGNTO - 1) & ^(RTA_ALIGNTO - 1)
}
-// NetlinkRouteRequest represents the request message to receive
-// routing and link states from the kernel.
+// NetlinkRouteRequest represents a request message to receive routing
+// and link states from the kernel.
type NetlinkRouteRequest struct {
Header NlMsghdr
Data RtGenmsg
@@ -49,167 +47,131 @@
return rr.toWireFormat()
}
-// NetlinkRIB returns routing information base, as known as RIB,
-// which consists of network facility information, states and
-// parameters.
+// NetlinkRIB returns routing information base, also known as RIB,
+// which consists of network facility information, states and parameters.
func NetlinkRIB(proto, family int) ([]byte, error) {
- var (
- lsanl SockaddrNetlink
- tab []byte
- )
-
- s, e := Socket(AF_NETLINK, SOCK_RAW, 0)
- if e != nil {
- return nil, e
+ s, err := Socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE)
+ if err != nil {
+ return nil, err
}
defer Close(s)
-
- lsanl.Family = AF_NETLINK
- e = Bind(s, &lsanl)
- if e != nil {
- return nil, e
+ lsa := &SockaddrNetlink{Family: AF_NETLINK}
+ if err := Bind(s, lsa); err != nil {
+ return nil, err
}
-
- seq := 1
- wb := newNetlinkRouteRequest(proto, seq, family)
- e = Sendto(s, wb, 0, &lsanl)
- if e != nil {
- return nil, e
+ wb := newNetlinkRouteRequest(proto, 1, family)
+ if err := Sendto(s, wb, 0, lsa); err != nil {
+ return nil, err
}
-
+ var tab []byte
+done:
for {
- var (
- rb []byte
- nr int
- lsa Sockaddr
- )
-
- rb = make([]byte, Getpagesize())
- nr, _, e = Recvfrom(s, rb, 0)
- if e != nil {
- return nil, e
+ rb := make([]byte, Getpagesize())
+ nr, _, err := Recvfrom(s, rb, 0)
+ if err != nil {
+ return nil, err
}
if nr < NLMSG_HDRLEN {
return nil, EINVAL
}
rb = rb[:nr]
tab = append(tab, rb...)
-
- msgs, _ := ParseNetlinkMessage(rb)
+ msgs, err := ParseNetlinkMessage(rb)
+ if err != nil {
+ return nil, err
+ }
for _, m := range msgs {
- if lsa, e = Getsockname(s); e != nil {
- return nil, e
+ lsa, err := Getsockname(s)
+ if err != nil {
+ return nil, err
}
switch v := lsa.(type) {
case *SockaddrNetlink:
- if m.Header.Seq != uint32(seq) || m.Header.Pid != v.Pid {
+ if m.Header.Seq != 1 || m.Header.Pid != v.Pid {
return nil, EINVAL
}
default:
return nil, EINVAL
}
if m.Header.Type == NLMSG_DONE {
- goto done
+ break done
}
if m.Header.Type == NLMSG_ERROR {
return nil, EINVAL
}
}
}
-
-done:
return tab, nil
}
-// NetlinkMessage represents the netlink message.
+// NetlinkMessage represents a netlink message.
type NetlinkMessage struct {
Header NlMsghdr
Data []byte
}
-// ParseNetlinkMessage parses buf as netlink messages and returns
-// the slice containing the NetlinkMessage structs.
-func ParseNetlinkMessage(buf []byte) ([]NetlinkMessage, error) {
- var (
- h *NlMsghdr
- dbuf []byte
- dlen int
- e error
- msgs []NetlinkMessage
- )
-
- for len(buf) >= NLMSG_HDRLEN {
- h, dbuf, dlen, e = netlinkMessageHeaderAndData(buf)
- if e != nil {
- break
+// ParseNetlinkMessage parses b as an array of netlink messages and
+// returns the slice containing the NetlinkMessage structures.
+func ParseNetlinkMessage(b []byte) ([]NetlinkMessage, error) {
+ var msgs []NetlinkMessage
+ for len(b) >= NLMSG_HDRLEN {
+ h, dbuf, dlen, err := netlinkMessageHeaderAndData(b)
+ if err != nil {
+ return nil, err
}
- m := NetlinkMessage{}
- m.Header = *h
- m.Data = dbuf[:int(h.Len)-NLMSG_HDRLEN]
+ m := NetlinkMessage{Header: *h, Data: dbuf[:int(h.Len)-NLMSG_HDRLEN]}
msgs = append(msgs, m)
- buf = buf[dlen:]
+ b = b[dlen:]
}
-
- return msgs, e
+ return msgs, nil
}
-func netlinkMessageHeaderAndData(buf []byte) (*NlMsghdr, []byte, int, error) {
- h := (*NlMsghdr)(unsafe.Pointer(&buf[0]))
- if int(h.Len) < NLMSG_HDRLEN || int(h.Len) > len(buf) {
+func netlinkMessageHeaderAndData(b []byte) (*NlMsghdr, []byte, int, error) {
+ h := (*NlMsghdr)(unsafe.Pointer(&b[0]))
+ if int(h.Len) < NLMSG_HDRLEN || int(h.Len) > len(b) {
return nil, nil, 0, EINVAL
}
- return h, buf[NLMSG_HDRLEN:], nlmAlignOf(int(h.Len)), nil
+ return h, b[NLMSG_HDRLEN:], nlmAlignOf(int(h.Len)), nil
}
-// NetlinkRouteAttr represents the netlink route attribute.
+// NetlinkRouteAttr represents a netlink route attribute.
type NetlinkRouteAttr struct {
Attr RtAttr
Value []byte
}
-// ParseNetlinkRouteAttr parses msg's payload as netlink route
-// attributes and returns the slice containing the NetlinkRouteAttr
-// structs.
-func ParseNetlinkRouteAttr(msg *NetlinkMessage) ([]NetlinkRouteAttr, error) {
- var (
- buf []byte
- a *RtAttr
- alen int
- vbuf []byte
- e error
- attrs []NetlinkRouteAttr
- )
-
- switch msg.Header.Type {
+// ParseNetlinkRouteAttr parses m's payload as an array of netlink
+// route attributes and returns the slice containing the
+// NetlinkRouteAttr structures.
+func ParseNetlinkRouteAttr(m *NetlinkMessage) ([]NetlinkRouteAttr, error) {
+ var b []byte
+ switch m.Header.Type {
case RTM_NEWLINK, RTM_DELLINK:
- buf = msg.Data[SizeofIfInfomsg:]
+ b = m.Data[SizeofIfInfomsg:]
case RTM_NEWADDR, RTM_DELADDR:
- buf = msg.Data[SizeofIfAddrmsg:]
+ b = m.Data[SizeofIfAddrmsg:]
case RTM_NEWROUTE, RTM_DELROUTE:
- buf = msg.Data[SizeofRtMsg:]
+ b = m.Data[SizeofRtMsg:]
default:
return nil, EINVAL
}
-
- for len(buf) >= SizeofRtAttr {
- a, vbuf, alen, e = netlinkRouteAttrAndValue(buf)
- if e != nil {
- break
+ var attrs []NetlinkRouteAttr
+ for len(b) >= SizeofRtAttr {
+ a, vbuf, alen, err := netlinkRouteAttrAndValue(b)
+ if err != nil {
+ return nil, err
}
- ra := NetlinkRouteAttr{}
- ra.Attr = *a
- ra.Value = vbuf[:int(a.Len)-SizeofRtAttr]
+ ra := NetlinkRouteAttr{Attr: *a, Value: vbuf[:int(a.Len)-SizeofRtAttr]}
attrs = append(attrs, ra)
- buf = buf[alen:]
+ b = b[alen:]
}
-
return attrs, nil
}
-func netlinkRouteAttrAndValue(buf []byte) (*RtAttr, []byte, int, error) {
- h := (*RtAttr)(unsafe.Pointer(&buf[0]))
- if int(h.Len) < SizeofRtAttr || int(h.Len) > len(buf) {
+func netlinkRouteAttrAndValue(b []byte) (*RtAttr, []byte, int, error) {
+ a := (*RtAttr)(unsafe.Pointer(&b[0]))
+ if int(a.Len) < SizeofRtAttr || int(a.Len) > len(b) {
return nil, nil, 0, EINVAL
}
- return h, buf[SizeofRtAttr:], rtaAlignOf(int(h.Len)), nil
+ return a, b[SizeofRtAttr:], rtaAlignOf(int(a.Len)), nil
}
@@ -6,33 +6,31 @@
package syscall
-import (
- "unsafe"
-)
+import "unsafe"
// UnixCredentials encodes credentials into a socket control message
// for sending to another process. This can be used for
// authentication.
func UnixCredentials(ucred *Ucred) []byte {
- buf := make([]byte, CmsgSpace(SizeofUcred))
- cmsg := (*Cmsghdr)(unsafe.Pointer(&buf[0]))
- cmsg.Level = SOL_SOCKET
- cmsg.Type = SCM_CREDENTIALS
- cmsg.SetLen(CmsgLen(SizeofUcred))
- *((*Ucred)(cmsgData(cmsg))) = *ucred
- return buf
+ b := make([]byte, CmsgSpace(SizeofUcred))
+ h := (*Cmsghdr)(unsafe.Pointer(&b[0]))
+ h.Level = SOL_SOCKET
+ h.Type = SCM_CREDENTIALS
+ h.SetLen(CmsgLen(SizeofUcred))
+ *((*Ucred)(cmsgData(h))) = *ucred
+ return b
}
// ParseUnixCredentials decodes a socket control message that contains
// credentials in a Ucred structure. To receive such a message, the
// SO_PASSCRED option must be enabled on the socket.
-func ParseUnixCredentials(msg *SocketControlMessage) (*Ucred, error) {
- if msg.Header.Level != SOL_SOCKET {
+func ParseUnixCredentials(m *SocketControlMessage) (*Ucred, error) {
+ if m.Header.Level != SOL_SOCKET {
return nil, EINVAL
}
- if msg.Header.Type != SCM_CREDENTIALS {
+ if m.Header.Type != SCM_CREDENTIALS {
return nil, EINVAL
}
- ucred := *(*Ucred)(unsafe.Pointer(&msg.Data[0]))
+ ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0]))
return &ucred, nil
}
@@ -8,9 +8,7 @@
package syscall
-import (
- "unsafe"
-)
+import "unsafe"
// Round the length of a raw sockaddr up to align it propery.
func cmsgAlignOf(salen int) int {
@@ -38,77 +36,69 @@
return cmsgAlignOf(SizeofCmsghdr) + cmsgAlignOf(datalen)
}
-func cmsgData(cmsg *Cmsghdr) unsafe.Pointer {
- return unsafe.Pointer(uintptr(unsafe.Pointer(cmsg)) + SizeofCmsghdr)
+func cmsgData(h *Cmsghdr) unsafe.Pointer {
+ return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + SizeofCmsghdr)
}
+// SocketControlMessage represents a socket control message.
type SocketControlMessage struct {
Header Cmsghdr
Data []byte
}
-func ParseSocketControlMessage(buf []byte) ([]SocketControlMessage, error) {
- var (
- h *Cmsghdr
- dbuf []byte
- e error
- cmsgs []SocketControlMessage
- )
-
- for len(buf) >= CmsgLen(0) {
- h, dbuf, e = socketControlMessageHeaderAndData(buf)
- if e != nil {
- break
+// ParseSocketControlMessage parses b as an array of socket control
+// messages.
+func ParseSocketControlMessage(b []byte) ([]SocketControlMessage, error) {
+ var msgs []SocketControlMessage
+ for len(b) >= CmsgLen(0) {
+ h, dbuf, err := socketControlMessageHeaderAndData(b)
+ if err != nil {
+ return nil, err
}
- m := SocketControlMessage{}
- m.Header = *h
- m.Data = dbuf[:int(h.Len)-cmsgAlignOf(SizeofCmsghdr)]
- cmsgs = append(cmsgs, m)
- buf = buf[cmsgAlignOf(int(h.Len)):]
+ m := SocketControlMessage{Header: *h, Data: dbuf[:int(h.Len)-cmsgAlignOf(SizeofCmsghdr)]}
+ msgs = append(msgs, m)
+ b = b[cmsgAlignOf(int(h.Len)):]
}
-
- return cmsgs, e
+ return msgs, nil
}
-func socketControlMessageHeaderAndData(buf []byte) (*Cmsghdr, []byte, error) {
- h := (*Cmsghdr)(unsafe.Pointer(&buf[0]))
- if h.Len < SizeofCmsghdr || int(h.Len) > len(buf) {
+func socketControlMessageHeaderAndData(b []byte) (*Cmsghdr, []byte, error) {
+ h := (*Cmsghdr)(unsafe.Pointer(&b[0]))
+ if h.Len < SizeofCmsghdr || int(h.Len) > len(b) {
return nil, nil, EINVAL
}
- return h, buf[cmsgAlignOf(SizeofCmsghdr):], nil
+ return h, b[cmsgAlignOf(SizeofCmsghdr):], nil
}
// UnixRights encodes a set of open file descriptors into a socket
// control message for sending to another process.
func UnixRights(fds ...int) []byte {
datalen := len(fds) * 4
- buf := make([]byte, CmsgSpace(datalen))
- cmsg := (*Cmsghdr)(unsafe.Pointer(&buf[0]))
- cmsg.Level = SOL_SOCKET
- cmsg.Type = SCM_RIGHTS
- cmsg.SetLen(CmsgLen(datalen))
-
- data := uintptr(cmsgData(cmsg))
+ b := make([]byte, CmsgSpace(datalen))
+ h := (*Cmsghdr)(unsafe.Pointer(&b[0]))
+ h.Level = SOL_SOCKET
+ h.Type = SCM_RIGHTS
+ h.SetLen(CmsgLen(datalen))
+ data := uintptr(cmsgData(h))
for _, fd := range fds {
*(*int32)(unsafe.Pointer(data)) = int32(fd)
data += 4
}
-
- return buf
+ return b
}
// ParseUnixRights decodes a socket control message that contains an
// integer array of open file descriptors from another process.
-func ParseUnixRights(msg *SocketControlMessage) ([]int, error) {
- if msg.Header.Level != SOL_SOCKET {
+func ParseUnixRights(m *SocketControlMessage) ([]int, error) {
+ if m.Header.Level != SOL_SOCKET {
return nil, EINVAL
}
- if msg.Header.Type != SCM_RIGHTS {
+ if m.Header.Type != SCM_RIGHTS {
return nil, EINVAL
}
- fds := make([]int, len(msg.Data)>>2)
- for i, j := 0, 0; i < len(msg.Data); i += 4 {
- fds[j] = int(*(*int32)(unsafe.Pointer(&msg.Data[i])))
+ fds := make([]int, len(m.Data)>>2)
+ for i, j := 0, 0; i < len(m.Data); i += 4 {
+ fds[j] = int(*(*int32)(unsafe.Pointer(&m.Data[i])))
j++
}
return fds, nil
@@ -303,6 +303,11 @@
echo "const $m = 0" >> ${OUT}
fi
done
+for m in SOCK_CLOEXEC SOCK_NONBLOCK; do
+ if ! grep "^const $m " ${OUT} >/dev/null 2>&1; then
+ echo "const $m = -1" >> ${OUT}
+ fi
+done
# pathconf constants.
grep '^const __PC' gen-sysinfo.go |
@@ -16,8 +16,10 @@
#include <math.h>
#include <stdint.h>
#include <sys/types.h>
+#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
+#include <time.h>
#include <unistd.h>
#ifndef HAVE_OFF64_T
@@ -28,6 +30,19 @@
typedef off64_t loff_t;
#endif
+#ifndef HAVE_ACCEPT4
+struct sockaddr;
+int
+accept4 (int sockfd __attribute__ ((unused)),
+ struct sockaddr *addr __attribute__ ((unused)),
+ socklen_t *addrlen __attribute__ ((unused)),
+ int flags __attribute__ ((unused)))
+{
+ errno = ENOSYS;
+ return -1;
+}
+#endif
+
#ifndef HAVE_EPOLL_CREATE1
int
epoll_create1 (int flags __attribute__ ((unused)))
@@ -171,6 +186,16 @@
}
#endif
+#ifndef HAVE_PIPE2
+int
+pipe2 (int pipefd[2] __attribute__ ((unused)),
+ int flags __attribute__ ((unused)))
+{
+ errno = ENOSYS;
+ return -1;
+}
+#endif
+
#ifndef HAVE_RENAMEAT
int
renameat (int olddirfd __attribute__ ((unused)),
@@ -241,6 +266,19 @@
}
#endif
+#ifndef HAVE_UTIMENSAT
+struct timespec;
+int
+utimensat(int dirfd __attribute__ ((unused)),
+ const char *pathname __attribute__ ((unused)),
+ const struct timespec times[2] __attribute__ ((unused)),
+ int flags __attribute__ ((unused)))
+{
+ errno = ENOSYS;
+ return -1;
+}
+#endif
+
/* Long double math functions. These are needed on old i386 systems
that don't have them in libm. The compiler translates calls to
these functions on float64 to call an 80-bit floating point
@@ -25,20 +25,25 @@
unsigned char *vn;
ks = k.str;
+ if (ks == NULL)
+ ks = (const byte *) "";
kn = NULL;
+
vs = v.str;
+ if (vs == NULL)
+ vs = (const byte *) "";
vn = NULL;
#ifdef HAVE_SETENV
- if (ks[k.len] != 0)
+ if (ks != NULL && ks[k.len] != 0)
{
kn = __go_alloc (k.len + 1);
__builtin_memcpy (kn, ks, k.len);
ks = kn;
}
- if (vs[v.len] != 0)
+ if (vs != NULL && vs[v.len] != 0)
{
vn = __go_alloc (v.len + 1);
__builtin_memcpy (vn, vs, v.len);
@@ -111,7 +111,8 @@
void
runtime_notewakeup(Note *n)
{
- runtime_xchg(&n->key, 1);
+ if(runtime_xchg(&n->key, 1))
+ runtime_throw("notewakeup - double wakeup");
runtime_futexwakeup(&n->key, 1);
}
@@ -20,6 +20,8 @@
MHeap runtime_mheap;
+int32 runtime_checking;
+
extern MStats mstats; // defined in zruntime_def_$GOOS_$GOARCH.go
extern volatile intgo runtime_MemProfileRate
@@ -718,14 +720,22 @@
if(raceenabled)
runtime_m()->racepc = runtime_getcallerpc(&typ);
- flag = typ->__code&GO_NO_POINTERS ? FlagNoPointers : 0;
- ret = runtime_mallocgc(typ->__size, flag, 1, 1);
- if(UseSpanType && !flag) {
- if(false) {
- runtime_printf("new %S: %p\n", *typ->__reflection, ret);
+ if(typ->__size == 0) {
+ // All 0-length allocations use this pointer.
+ // The language does not require the allocations to
+ // have distinct values.
+ ret = (uint8*)&runtime_zerobase;
+ } else {
+ flag = typ->__code&GO_NO_POINTERS ? FlagNoPointers : 0;
+ ret = runtime_mallocgc(typ->__size, flag, 1, 1);
+
+ if(UseSpanType && !flag) {
+ if(false) {
+ runtime_printf("new %S: %p\n", *typ->__reflection, ret);
+ }
+ runtime_settype(ret, (uintptr)typ | TypeInfo_SingleObject);
}
- runtime_settype(ret, (uintptr)typ | TypeInfo_SingleObject);
}
return ret;
@@ -446,7 +446,7 @@
void runtime_checkallocated(void *v, uintptr n);
void runtime_markfreed(void *v, uintptr n);
void runtime_checkfreed(void *v, uintptr n);
-int32 runtime_checking;
+extern int32 runtime_checking;
void runtime_markspan(void *v, uintptr size, uintptr n, bool leftover);
void runtime_unmarkspan(void *v, uintptr size);
bool runtime_blockspecial(void*);
@@ -500,6 +500,7 @@
// defined in mgc0.go
void runtime_gc_m_ptr(Eface*);
+void runtime_gc_itab_ptr(Eface*);
void runtime_memorydump(void);
@@ -19,7 +19,6 @@
#include "malloc.h"
static bool MCentral_Grow(MCentral *c);
-static void* MCentral_Alloc(MCentral *c);
static void MCentral_Free(MCentral *c, void *v);
// Initialize a single central free list.
@@ -34,12 +33,13 @@
// Allocate up to n objects from the central free list.
// Return the number of objects allocated.
// The objects are linked together by their first words.
-// On return, *pstart points at the first object and *pend at the last.
+// On return, *pfirst points at the first object.
int32
runtime_MCentral_AllocList(MCentral *c, int32 n, MLink **pfirst)
{
- MLink *first, *last, *v;
- int32 i;
+ MSpan *s;
+ MLink *first, *last;
+ int32 cap, avail, i;
runtime_lock(c);
// Replenish central list if empty.
@@ -50,41 +50,34 @@
return 0;
}
}
+ s = c->nonempty.next;
+ cap = (s->npages << PageShift) / s->elemsize;
+ avail = cap - s->ref;
+ if(avail < n)
+ n = avail;
- // Copy from list, up to n.
// First one is guaranteed to work, because we just grew the list.
- first = MCentral_Alloc(c);
+ first = s->freelist;
last = first;
- for(i=1; i<n && (v = MCentral_Alloc(c)) != nil; i++) {
- last->next = v;
- last = v;
+ for(i=1; i<n; i++) {
+ last = last->next;
}
+ s->freelist = last->next;
last->next = nil;
- c->nfree -= i;
+ s->ref += n;
+ c->nfree -= n;
+
+ if(n == avail) {
+ if(s->freelist != nil || s->ref != (uint32)cap) {
+ runtime_throw("invalid freelist");
+ }
+ runtime_MSpanList_Remove(s);
+ runtime_MSpanList_Insert(&c->empty, s);
+ }
runtime_unlock(c);
*pfirst = first;
- return i;
-}
-
-// Helper: allocate one object from the central free list.
-static void*
-MCentral_Alloc(MCentral *c)
-{
- MSpan *s;
- MLink *v;
-
- if(runtime_MSpanList_IsEmpty(&c->nonempty))
- return nil;
- s = c->nonempty.next;
- s->ref++;
- v = s->freelist;
- s->freelist = v->next;
- if(s->freelist == nil) {
- runtime_MSpanList_Remove(s);
- runtime_MSpanList_Insert(&c->empty, s);
- }
- return v;
+ return n;
}
// Free n objects back into the central free list.
@@ -81,6 +81,10 @@
runtime_printf("if you're running SELinux, enable execmem for this process.\n");
exit(2);
}
+ if(errno == EAGAIN) {
+ runtime_printf("runtime: mmap: too much locked memory (check 'ulimit -l').\n");
+ runtime_exit(2);
+ }
return nil;
}
return p;
@@ -11,6 +11,22 @@
#include "malloc.h"
#include "mgc0.h"
#include "race.h"
+#include "go-type.h"
+
+// Map gccgo field names to gc field names.
+// Slice aka __go_open_array.
+#define array __values
+#define cap __capacity
+// Iface aka __go_interface
+#define tab __methods
+// Eface aka __go_empty_interface.
+#define type __type_descriptor
+// Type aka __go_type_descriptor
+#define kind __code
+#define KindPtr GO_PTR
+#define KindNoPointers GO_NO_POINTERS
+// PtrType aka __go_ptr_type
+#define elem __element_type
#ifdef USING_SPLIT_STACK
@@ -32,6 +48,11 @@
handoffThreshold = 4,
IntermediateBufferCapacity = 64,
+
+ // Bits in type information
+ PRECISE = 1,
+ LOOP = 2,
+ PC_BITS = PRECISE | LOOP,
};
// Bits in per-word bitmap.
@@ -158,12 +179,14 @@
// is moved/flushed to the work buffer (Workbuf).
// The size of an intermediate buffer is very small,
// such as 32 or 64 elements.
+typedef struct PtrTarget PtrTarget;
struct PtrTarget
{
void *p;
uintptr ti;
};
+typedef struct BitTarget BitTarget;
struct BitTarget
{
void *p;
@@ -171,15 +194,19 @@
uintptr *bitp, shift;
};
+typedef struct BufferList BufferList;
struct BufferList
{
- struct PtrTarget ptrtarget[IntermediateBufferCapacity];
- struct BitTarget bittarget[IntermediateBufferCapacity];
- struct BufferList *next;
+ PtrTarget ptrtarget[IntermediateBufferCapacity];
+ BitTarget bittarget[IntermediateBufferCapacity];
+ BufferList *next;
};
-static struct BufferList *bufferList;
+static BufferList *bufferList;
static Lock lock;
+static Type *itabtype;
+
+static void enqueue(Obj obj, Workbuf **_wbuf, Obj **_wp, uintptr *_nobj);
// flushptrbuf moves data from the PtrTarget buffer to the work buffer.
// The PtrTarget buffer contains blocks irrespective of whether the blocks have been marked or scanned,
@@ -202,16 +229,16 @@
// flushptrbuf
// (2nd part, mark and enqueue)
static void
-flushptrbuf(struct PtrTarget *ptrbuf, uintptr n, Obj **_wp, Workbuf **_wbuf, uintptr *_nobj, struct BitTarget *bitbuf)
+flushptrbuf(PtrTarget *ptrbuf, PtrTarget **ptrbufpos, Obj **_wp, Workbuf **_wbuf, uintptr *_nobj, BitTarget *bitbuf)
{
byte *p, *arena_start, *obj;
- uintptr size, *bitp, bits, shift, j, x, xbits, off, nobj, ti;
+ uintptr size, *bitp, bits, shift, j, x, xbits, off, nobj, ti, n;
MSpan *s;
PageID k;
Obj *wp;
Workbuf *wbuf;
- struct PtrTarget *ptrbuf_end;
- struct BitTarget *bitbufpos, *bt;
+ PtrTarget *ptrbuf_end;
+ BitTarget *bitbufpos, *bt;
arena_start = runtime_mheap.arena_start;
@@ -219,7 +246,9 @@
wbuf = *_wbuf;
nobj = *_nobj;
- ptrbuf_end = ptrbuf + n;
+ ptrbuf_end = *ptrbufpos;
+ n = ptrbuf_end - ptrbuf;
+ *ptrbufpos = ptrbuf;
// If buffer is nearly full, get a new one.
if(wbuf == nil || nobj+n >= nelem(wbuf->obj)) {
@@ -318,8 +347,7 @@
if((bits & (bitAllocated|bitMarked)) != bitAllocated)
continue;
- *bitbufpos = (struct BitTarget){obj, ti, bitp, shift};
- bitbufpos++;
+ *bitbufpos++ = (BitTarget){obj, ti, bitp, shift};
}
runtime_lock(&lock);
@@ -370,6 +398,13 @@
// Program that scans the whole block and treats every block element as a potential pointer
static uintptr defaultProg[2] = {PtrSize, GC_DEFAULT_PTR};
+// Local variables of a program fragment or loop
+typedef struct Frame Frame;
+struct Frame {
+ uintptr count, elemsize, b;
+ uintptr *loop_or_ret;
+};
+
// scanblock scans a block of n bytes starting at pointer b for references
// to other objects, scanning any it finds recursively until there are no
// unscanned objects left. Instead of using an explicit recursion, it keeps
@@ -384,22 +419,17 @@
scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
{
byte *b, *arena_start, *arena_used;
- uintptr n, i, end_b;
+ uintptr n, i, end_b, elemsize, ti, objti, count /* , type */;
+ uintptr *pc, precise_type, nominal_size;
void *obj;
-
- // TODO(atom): to be expanded in a next CL
- struct Frame {uintptr count, b; uintptr *loop_or_ret;};
- struct Frame stack_top;
-
- uintptr *pc;
-
- struct BufferList *scanbuffers;
- struct PtrTarget *ptrbuf, *ptrbuf_end;
- struct BitTarget *bitbuf;
-
- struct PtrTarget *ptrbufpos;
-
- // End of local variable declarations.
+ const Type *t;
+ Slice *sliceptr;
+ Frame *stack_ptr, stack_top, stack[GC_STACK_CAPACITY+4];
+ BufferList *scanbuffers;
+ PtrTarget *ptrbuf, *ptrbuf_end, *ptrbufpos;
+ BitTarget *bitbuf;
+ Eface *eface;
+ Iface *iface;
if(sizeof(Workbuf) % PageSize != 0)
runtime_throw("scanblock: size of Workbuf is suboptimal");
@@ -408,6 +438,11 @@
arena_start = runtime_mheap.arena_start;
arena_used = runtime_mheap.arena_used;
+ stack_ptr = stack+nelem(stack)-1;
+
+ precise_type = false;
+ nominal_size = 0;
+
// Allocate ptrbuf, bitbuf
{
runtime_lock(&lock);
@@ -437,50 +472,247 @@
runtime_printf("scanblock %p %D\n", b, (int64)n);
}
- // TODO(atom): to be replaced in a next CL
- pc = defaultProg;
+ if(ti != 0 && 0) {
+ pc = (uintptr*)(ti & ~(uintptr)PC_BITS);
+ precise_type = (ti & PRECISE);
+ stack_top.elemsize = pc[0];
+ if(!precise_type)
+ nominal_size = pc[0];
+ if(ti & LOOP) {
+ stack_top.count = 0; // 0 means an infinite number of iterations
+ stack_top.loop_or_ret = pc+1;
+ } else {
+ stack_top.count = 1;
+ }
+ } else if(UseSpanType && 0) {
+#if 0
+ type = runtime_gettype(b);
+ if(type != 0) {
+ t = (Type*)(type & ~(uintptr)(PtrSize-1));
+ switch(type & (PtrSize-1)) {
+ case TypeInfo_SingleObject:
+ pc = (uintptr*)t->gc;
+ precise_type = true; // type information about 'b' is precise
+ stack_top.count = 1;
+ stack_top.elemsize = pc[0];
+ break;
+ case TypeInfo_Array:
+ pc = (uintptr*)t->gc;
+ if(pc[0] == 0)
+ goto next_block;
+ precise_type = true; // type information about 'b' is precise
+ stack_top.count = 0; // 0 means an infinite number of iterations
+ stack_top.elemsize = pc[0];
+ stack_top.loop_or_ret = pc+1;
+ break;
+ case TypeInfo_Map:
+ // TODO(atom): to be expanded in a next CL
+ pc = defaultProg;
+ break;
+ default:
+ runtime_throw("scanblock: invalid type");
+ return;
+ }
+ } else {
+ pc = defaultProg;
+ }
+#endif
+ } else {
+ pc = defaultProg;
+ }
pc++;
stack_top.b = (uintptr)b;
end_b = (uintptr)b + n - PtrSize;
- next_instr:
- // TODO(atom): to be expanded in a next CL
+ for(;;) {
+ obj = nil;
+ objti = 0;
switch(pc[0]) {
+ case GC_PTR:
+ obj = *(void**)(stack_top.b + pc[1]);
+ objti = pc[2];
+ pc += 3;
+ break;
+
+ case GC_SLICE:
+ sliceptr = (Slice*)(stack_top.b + pc[1]);
+ if(sliceptr->cap != 0) {
+ obj = sliceptr->array;
+ objti = pc[2] | PRECISE | LOOP;
+ }
+ pc += 3;
+ break;
+
+ case GC_APTR:
+ obj = *(void**)(stack_top.b + pc[1]);
+ pc += 2;
+ break;
+
+ case GC_STRING:
+ obj = *(void**)(stack_top.b + pc[1]);
+ pc += 2;
+ break;
+
+ case GC_EFACE:
+ eface = (Eface*)(stack_top.b + pc[1]);
+ pc += 2;
+ if(eface->type != nil && ((byte*)eface->__object >= arena_start && (byte*)eface->__object < arena_used)) {
+ t = eface->type;
+ if(t->__size <= sizeof(void*)) {
+ if((t->kind & KindNoPointers))
+ break;
+
+ obj = eface->__object;
+ if((t->kind & ~KindNoPointers) == KindPtr)
+ // objti = (uintptr)((PtrType*)t)->elem->gc;
+ objti = 0;
+ } else {
+ obj = eface->__object;
+ // objti = (uintptr)t->gc;
+ objti = 0;
+ }
+ }
+ break;
+
+ case GC_IFACE:
+ iface = (Iface*)(stack_top.b + pc[1]);
+ pc += 2;
+ if(iface->tab == nil)
+ break;
+
+ // iface->tab
+ if((byte*)iface->tab >= arena_start && (byte*)iface->tab < arena_used) {
+ // *ptrbufpos++ = (struct PtrTarget){iface->tab, (uintptr)itabtype->gc};
+ *ptrbufpos++ = (struct PtrTarget){iface->tab, 0};
+ if(ptrbufpos == ptrbuf_end)
+ flushptrbuf(ptrbuf, &ptrbufpos, &wp, &wbuf, &nobj, bitbuf);
+ }
+
+ // iface->data
+ if((byte*)iface->__object >= arena_start && (byte*)iface->__object < arena_used) {
+ // t = iface->tab->type;
+ t = nil;
+ if(t->__size <= sizeof(void*)) {
+ if((t->kind & KindNoPointers))
+ break;
+
+ obj = iface->__object;
+ if((t->kind & ~KindNoPointers) == KindPtr)
+ // objti = (uintptr)((const PtrType*)t)->elem->gc;
+ objti = 0;
+ } else {
+ obj = iface->__object;
+ // objti = (uintptr)t->gc;
+ objti = 0;
+ }
+ }
+ break;
+
case GC_DEFAULT_PTR:
- while(true) {
- i = stack_top.b;
- if(i > end_b)
- goto next_block;
+ while((i = stack_top.b) <= end_b) {
stack_top.b += PtrSize;
-
obj = *(byte**)i;
if((byte*)obj >= arena_start && (byte*)obj < arena_used) {
- *ptrbufpos = (struct PtrTarget){obj, 0};
- ptrbufpos++;
+ *ptrbufpos++ = (struct PtrTarget){obj, 0};
if(ptrbufpos == ptrbuf_end)
- goto flush_buffers;
+ flushptrbuf(ptrbuf, &ptrbufpos, &wp, &wbuf, &nobj, bitbuf);
}
}
+ goto next_block;
+
+ case GC_END:
+ if(--stack_top.count != 0) {
+ // Next iteration of a loop if possible.
+ elemsize = stack_top.elemsize;
+ stack_top.b += elemsize;
+ if(stack_top.b + elemsize <= end_b+PtrSize) {
+ pc = stack_top.loop_or_ret;
+ continue;
+ }
+ i = stack_top.b;
+ } else {
+ // Stack pop if possible.
+ if(stack_ptr+1 < stack+nelem(stack)) {
+ pc = stack_top.loop_or_ret;
+ stack_top = *(++stack_ptr);
+ continue;
+ }
+ i = (uintptr)b + nominal_size;
+ }
+ if(!precise_type) {
+ // Quickly scan [b+i,b+n) for possible pointers.
+ for(; i<=end_b; i+=PtrSize) {
+ if(*(byte**)i != nil) {
+ // Found a value that may be a pointer.
+ // Do a rescan of the entire block.
+ enqueue((Obj){b, n, 0}, &wbuf, &wp, &nobj);
+ break;
+ }
+ }
+ }
+ goto next_block;
+
+ case GC_ARRAY_START:
+ i = stack_top.b + pc[1];
+ count = pc[2];
+ elemsize = pc[3];
+ pc += 4;
+
+ // Stack push.
+ *stack_ptr-- = stack_top;
+ stack_top = (Frame){count, elemsize, i, pc};
+ continue;
+
+ case GC_ARRAY_NEXT:
+ if(--stack_top.count != 0) {
+ stack_top.b += stack_top.elemsize;
+ pc = stack_top.loop_or_ret;
+ } else {
+ // Stack pop.
+ stack_top = *(++stack_ptr);
+ pc += 1;
+ }
+ continue;
+
+ case GC_CALL:
+ // Stack push.
+ *stack_ptr-- = stack_top;
+ stack_top = (Frame){1, 0, stack_top.b + pc[1], pc+3 /*return address*/};
+ pc = (uintptr*)pc[2]; // target of the CALL instruction
+ continue;
+
+ case GC_MAP_PTR:
+ // TODO(atom): to be expanded in a next CL. Same as GC_APTR for now.
+ obj = *(void**)(stack_top.b + pc[1]);
+ pc += 3;
+ break;
+
+ case GC_REGION:
+ // TODO(atom): to be expanded in a next CL. Same as GC_APTR for now.
+ obj = (void*)(stack_top.b + pc[1]);
+ pc += 4;
+ break;
default:
runtime_throw("scanblock: invalid GC instruction");
return;
}
- flush_buffers:
- flushptrbuf(ptrbuf, ptrbufpos-ptrbuf, &wp, &wbuf, &nobj, bitbuf);
- ptrbufpos = ptrbuf;
- goto next_instr;
+ if((byte*)obj >= arena_start && (byte*)obj < arena_used) {
+ *ptrbufpos++ = (PtrTarget){obj, objti};
+ if(ptrbufpos == ptrbuf_end)
+ flushptrbuf(ptrbuf, &ptrbufpos, &wp, &wbuf, &nobj, bitbuf);
+ }
+ }
next_block:
// Done scanning [b, b+n). Prepare for the next iteration of
- // the loop by setting b, n to the parameters for the next block.
+ // the loop by setting b, n, ti to the parameters for the next block.
if(nobj == 0) {
- flushptrbuf(ptrbuf, ptrbufpos-ptrbuf, &wp, &wbuf, &nobj, bitbuf);
- ptrbufpos = ptrbuf;
+ flushptrbuf(ptrbuf, &ptrbufpos, &wp, &wbuf, &nobj, bitbuf);
if(nobj == 0) {
if(!keepworking) {
@@ -501,6 +733,7 @@
--wp;
b = wp->p;
n = wp->n;
+ ti = wp->ti;
nobj--;
}
@@ -1004,10 +1237,6 @@
USED(&desc);
s = runtime_mheap.allspans[idx];
- // Stamp newly unused spans. The scavenger will use that
- // info to potentially give back some pages to the OS.
- if(s->state == MSpanFree && s->unusedsince == 0)
- s->unusedsince = runtime_nanotime();
if(s->state != MSpanInUse)
return;
arena_start = runtime_mheap.arena_start;
@@ -1229,18 +1458,15 @@
MCache *c;
uint32 i;
uint64 stacks_inuse;
- uint64 stacks_sys;
uint64 *src, *dst;
if(stats)
runtime_memclr((byte*)stats, sizeof(*stats));
stacks_inuse = 0;
- stacks_sys = runtime_stacks_sys;
for(mp=runtime_allm; mp; mp=mp->alllink) {
c = mp->mcache;
runtime_purgecachedstats(c);
- // stacks_inuse += mp->stackalloc->inuse;
- // stacks_sys += mp->stackalloc->sys;
+ // stacks_inuse += mp->stackinuse*FixedStack;
if(stats) {
src = (uint64*)&mp->gcstats;
dst = (uint64*)stats;
@@ -1256,7 +1482,6 @@
}
}
mstats.stacks_inuse = stacks_inuse;
- mstats.stacks_sys = stacks_sys;
}
// Structure of arguments passed to function gc().
@@ -1330,11 +1555,12 @@
gc(struct gc_args *args)
{
M *m;
- int64 t0, t1, t2, t3;
+ int64 t0, t1, t2, t3, t4;
uint64 heap0, heap1, obj0, obj1;
GCStats stats;
M *mp;
uint32 i;
+ // Eface eface;
runtime_semacquire(&runtime_worldsema);
if(!args->force && mstats.heap_alloc < mstats.next_gc) {
@@ -1367,6 +1593,12 @@
work.sweepfor = runtime_parforalloc(MaxGcproc);
m->locks--;
+ if(itabtype == nil) {
+ // get C pointer to the Go type "itab"
+ // runtime_gc_itab_ptr(&eface);
+ // itabtype = ((PtrType*)eface.type)->elem;
+ }
+
work.nwait = 0;
work.ndone = 0;
work.debugmarkdone = 0;
@@ -1379,6 +1611,8 @@
runtime_helpgc(work.nproc);
}
+ t1 = runtime_nanotime();
+
runtime_parfordo(work.markfor);
scanblock(nil, nil, 0, true);
@@ -1387,10 +1621,10 @@
debug_scanblock(work.roots[i].p, work.roots[i].n);
runtime_atomicstore(&work.debugmarkdone, 1);
}
- t1 = runtime_nanotime();
+ t2 = runtime_nanotime();
runtime_parfordo(work.sweepfor);
- t2 = runtime_nanotime();
+ t3 = runtime_nanotime();
stealcache();
cachestats(&stats);
@@ -1420,18 +1654,18 @@
heap1 = mstats.heap_alloc;
obj1 = mstats.nmalloc - mstats.nfree;
- t3 = runtime_nanotime();
- mstats.last_gc = t3;
- mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t3 - t0;
- mstats.pause_total_ns += t3 - t0;
+ t4 = runtime_nanotime();
+ mstats.last_gc = t4;
+ mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t4 - t0;
+ mstats.pause_total_ns += t4 - t0;
mstats.numgc++;
if(mstats.debuggc)
- runtime_printf("pause %D\n", t3-t0);
+ runtime_printf("pause %D\n", t4-t0);
if(gctrace) {
runtime_printf("gc%d(%d): %D+%D+%D ms, %D -> %D MB %D -> %D (%D-%D) objects,"
" %D(%D) handoff, %D(%D) steal, %D/%D/%D yields\n",
- mstats.numgc, work.nproc, (t1-t0)/1000000, (t2-t1)/1000000, (t3-t2)/1000000,
+ mstats.numgc, work.nproc, (t2-t1)/1000000, (t3-t2)/1000000, (t1-t0+t4-t3)/1000000,
heap0>>20, heap1>>20, obj0, obj1,
mstats.nmalloc, mstats.nfree,
stats.nhandoff, stats.nhandoffcnt,
@@ -138,7 +138,9 @@
*(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift); // copy "needs zeroing" mark
t->state = MSpanInUse;
MHeap_FreeLocked(h, t);
+ t->unusedsince = s->unusedsince; // preserve age
}
+ s->unusedsince = 0;
// Record span info, because gc needs to be
// able to map interior pointer to containing span.
@@ -300,10 +302,12 @@
}
mstats.heap_idle += s->npages<<PageShift;
s->state = MSpanFree;
- s->unusedsince = 0;
- s->npreleased = 0;
runtime_MSpanList_Remove(s);
sp = (uintptr*)(s->start<<PageShift);
+ // Stamp newly unused spans. The scavenger will use that
+ // info to potentially give back some pages to the OS.
+ s->unusedsince = runtime_nanotime();
+ s->npreleased = 0;
// Coalesce with earlier, later spans.
p = s->start;
@@ -405,10 +409,10 @@
runtime_entersyscall();
runtime_notesleep(¬e);
runtime_exitsyscall();
+ if(trace)
+ runtime_printf("scvg%d: GC forced\n", k);
runtime_lock(h);
now = runtime_nanotime();
- if (trace)
- runtime_printf("scvg%d: GC forced\n", k);
}
sumreleased = 0;
for(i=0; i < nelem(h->free)+1; i++) {
@@ -419,7 +423,7 @@
if(runtime_MSpanList_IsEmpty(list))
continue;
for(s=list->next; s != list; s=s->next) {
- if(s->unusedsince != 0 && (now - s->unusedsince) > limit) {
+ if((now - s->unusedsince) > limit) {
released = (s->npages - s->npreleased) << PageShift;
mstats.heap_released += released;
sumreleased += released;
@@ -87,7 +87,7 @@
runtime_throw(const char *s)
{
runtime_startpanic();
- runtime_printf("throw: %s\n", s);
+ runtime_printf("fatal error: %s\n", s);
runtime_dopanic(0);
*(int32*)0 = 0; // not reached
runtime_exit(1); // even more not reached
@@ -167,6 +167,13 @@
int32 runtime_gcwaiting;
+G* runtime_allg;
+G* runtime_lastg;
+M* runtime_allm;
+
+int8* runtime_goos;
+int32 runtime_ncpu;
+
// The static TLS size. See runtime_newm.
static int tlssize;
@@ -119,6 +119,13 @@
{
PtrSize = sizeof(void*),
};
+enum
+{
+ // Per-M stack segment cache size.
+ StackCacheSize = 32,
+ // Global <-> per-M stack segment cache transfer batch size.
+ StackCacheBatch = 16,
+};
/*
* structures
@@ -178,6 +185,8 @@
int32 sig;
int32 writenbuf;
byte* writebuf;
+ // DeferChunk *dchunk;
+ // DeferChunk *dchunknext;
uintptr sigcode0;
uintptr sigcode1;
// uintptr sigpc;
@@ -344,14 +353,14 @@
* external data
*/
extern uintptr runtime_zerobase;
-G* runtime_allg;
-G* runtime_lastg;
-M* runtime_allm;
+extern G* runtime_allg;
+extern G* runtime_lastg;
+extern M* runtime_allm;
extern int32 runtime_gomaxprocs;
extern bool runtime_singleproc;
extern uint32 runtime_panicking;
extern int32 runtime_gcwaiting; // gc is waiting to run
-int32 runtime_ncpu;
+extern int32 runtime_ncpu;
/*
* common functions and data
@@ -5,36 +5,24 @@
// This file implements runtime support for signal handling.
//
// Most synchronization primitives are not available from
-// the signal handler (it cannot block and cannot use locks)
+// the signal handler (it cannot block, allocate memory, or use locks)
// so the handler communicates with a processing goroutine
// via struct sig, below.
//
-// Ownership for sig.Note passes back and forth between
-// the signal handler and the signal goroutine in rounds.
-// The initial state is that sig.note is cleared (setup by signal_enable).
-// At the beginning of each round, mask == 0.
-// The round goes through three stages:
-//
-// (In parallel)
-// 1a) One or more signals arrive and are handled
-// by sigsend using cas to set bits in sig.mask.
-// The handler that changes sig.mask from zero to non-zero
-// calls notewakeup(&sig).
-// 1b) Sigrecv calls notesleep(&sig) to wait for the wakeup.
-//
-// 2) Having received the wakeup, sigrecv knows that sigsend
-// will not send another wakeup, so it can noteclear(&sig)
-// to prepare for the next round. (Sigsend may still be adding
-// signals to sig.mask at this point, which is fine.)
-//
-// 3) Sigrecv uses cas to grab the current sig.mask and zero it,
-// triggering the next round.
-//
-// The signal handler takes ownership of the note by atomically
-// changing mask from a zero to non-zero value. It gives up
-// ownership by calling notewakeup. The signal goroutine takes
-// ownership by returning from notesleep (caused by the notewakeup)
-// and gives up ownership by clearing mask.
+// sigsend() is called by the signal handler to queue a new signal.
+// signal_recv() is called by the Go program to receive a newly queued signal.
+// Synchronization between sigsend() and signal_recv() is based on the sig.state
+// variable. It can be in 3 states: 0, HASWAITER and HASSIGNAL.
+// HASWAITER means that signal_recv() is blocked on sig.Note and there are no
+// new pending signals.
+// HASSIGNAL means that sig.mask *may* contain new pending signals,
+// signal_recv() can't be blocked in this state.
+// 0 means that there are no new pending signals and signal_recv() is not blocked.
+// Transitions between states are done atomically with CAS.
+// When signal_recv() is unblocked, it resets sig.Note and rechecks sig.mask.
+// If several sigsend()'s and signal_recv() execute concurrently, it can lead to
+// unnecessary rechecks of sig.mask, but must not lead to missed signals
+// nor deadlocks.
package signal
#include "config.h"
@@ -47,15 +35,20 @@
Note;
uint32 mask[(NSIG+31)/32];
uint32 wanted[(NSIG+31)/32];
- uint32 kick;
+ uint32 state;
bool inuse;
} sig;
+enum {
+ HASWAITER = 1,
+ HASSIGNAL = 2,
+};
+
// Called from sighandler to send a signal back out of the signal handling thread.
bool
__go_sigsend(int32 s)
{
- uint32 bit, mask;
+ uint32 bit, mask, old, new;
if(!sig.inuse || s < 0 || (size_t)s >= 32*nelem(sig.wanted) || !(sig.wanted[s/32]&(1U<<(s&31))))
return false;
@@ -67,8 +60,20 @@
if(runtime_cas(&sig.mask[s/32], mask, mask|bit)) {
// Added to queue.
// Only send a wakeup if the receiver needs a kick.
- if(runtime_cas(&sig.kick, 1, 0))
- runtime_notewakeup(&sig);
+ for(;;) {
+ old = runtime_atomicload(&sig.state);
+ if(old == HASSIGNAL)
+ break;
+ if(old == HASWAITER)
+ new = 0;
+ else // if(old == 0)
+ new = HASSIGNAL;
+ if(runtime_cas(&sig.state, old, new)) {
+ if (old == HASWAITER)
+ runtime_notewakeup(&sig);
+ break;
+ }
+ }
break;
}
}
@@ -79,7 +84,7 @@
// Must only be called from a single goroutine at a time.
func signal_recv() (m uint32) {
static uint32 recv[nelem(sig.mask)];
- int32 i, more;
+ uint32 i, old, new;
for(;;) {
// Serve from local copy if there are bits left.
@@ -91,15 +96,27 @@
}
}
- // Get a new local copy.
- // Ask for a kick if more signals come in
- // during or after our check (before the sleep).
- if(sig.kick == 0) {
- runtime_noteclear(&sig);
- runtime_cas(&sig.kick, 0, 1);
+ // Check and update sig.state.
+ for(;;) {
+ old = runtime_atomicload(&sig.state);
+ if(old == HASWAITER)
+ runtime_throw("inconsistent state in signal_recv");
+ if(old == HASSIGNAL)
+ new = 0;
+ else // if(old == 0)
+ new = HASWAITER;
+ if(runtime_cas(&sig.state, old, new)) {
+ if (new == HASWAITER) {
+ runtime_entersyscall();
+ runtime_notesleep(&sig);
+ runtime_exitsyscall();
+ runtime_noteclear(&sig);
+ }
+ break;
+ }
}
- more = 0;
+ // Get a new local copy.
for(i=0; (size_t)i<nelem(sig.mask); i++) {
for(;;) {
m = sig.mask[i];
@@ -107,16 +124,7 @@
break;
}
recv[i] = m;
- if(m != 0)
- more = 1;
}
- if(more)
- continue;
-
- // Sleep waiting for more.
- runtime_entersyscall();
- runtime_notesleep(&sig);
- runtime_exitsyscall();
}
done:;