Patchwork [gccgo] Update to current version of Go library

login
register
mail settings
Submitter Ian Taylor
Date Nov. 11, 2010, 6:16 a.m.
Message ID <mcrsjz8s7uh.fsf@google.com>
Download mbox | patch
Permalink /patch/70775/
State New
Headers show

Comments

Ian Taylor - Nov. 11, 2010, 6:16 a.m.
I have updated libgo to the current version of the Go library.  The
diffs to the Go library itself are too large to include here.  They are
also not particularly interesting, as they simply copy the changes in
the master repository.  See the repository for details on the changes.

I have attached the changes to the files which are not part of the Go
library proper.

Committed to gccgo branch.

Ian

Patch

diff -r bb880434e617 libgo/MERGE
--- a/libgo/MERGE	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/MERGE	Wed Nov 10 21:37:28 2010 -0800
@@ -1,4 +1,4 @@ 
-aba03c93af6a
+b547c5b04a18
 
 The first line of this file holds the Mercurial revision number of the
 last merge done from the master library sources.
diff -r bb880434e617 libgo/Makefile.am
--- a/libgo/Makefile.am	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/Makefile.am	Wed Nov 10 21:37:28 2010 -0800
@@ -118,7 +118,6 @@ 
 	mime.gox \
 	net.gox \
 	netchan.gox \
-	nntp.gox \
 	os.gox \
 	patch.gox \
 	path.gox \
@@ -128,6 +127,7 @@ 
 	rpc.gox \
 	runtime.gox \
 	scanner.gox \
+	smtp.gox \
 	sort.gox \
 	strconv.gox \
 	strings.gox \
@@ -138,6 +138,7 @@ 
 	template.gox \
 	testing.gox \
 	time.gox \
+	try.gox \
 	unicode.gox \
 	utf16.gox \
 	utf8.gox \
@@ -147,7 +148,8 @@ 
 toolexeclibarchivedir = $(toolexeclibdir)/archive
 
 toolexeclibarchive_DATA = \
-	archive/tar.gox
+	archive/tar.gox \
+	archive/zip.gox
 
 toolexeclibcompressdir = $(toolexeclibdir)/compress
 
@@ -170,6 +172,7 @@ 
 	crypto/aes.gox \
 	crypto/block.gox \
 	crypto/blowfish.gox \
+	crypto/cast5.gox \
 	crypto/hmac.gox \
 	crypto/md4.gox \
 	crypto/md5.gox \
@@ -193,6 +196,7 @@ 
 	debug/elf.gox \
 	debug/gosym.gox \
 	debug/macho.gox \
+	debug/pe.gox \
 	debug/proc.gox
 
 toolexeclibencodingdir = $(toolexeclibdir)/encoding
@@ -210,8 +214,7 @@ 
 toolexeclibexp_DATA = \
 	exp/datafmt.gox \
 	exp/draw.gox \
-	exp/eval.gox \
-	exp/iterable.gox
+	exp/eval.gox
 
 toolexeclibgodir = $(toolexeclibdir)/go
 
@@ -221,7 +224,8 @@ 
 	go/parser.gox \
 	go/printer.gox \
 	go/scanner.gox \
-	go/token.gox
+	go/token.gox \
+	go/typechecker.gox
 
 toolexeclibhashdir = $(toolexeclibdir)/hash
 
@@ -241,6 +245,11 @@ 
 	image/jpeg.gox \
 	image/png.gox
 
+toolexeclibindexdir = $(toolexeclibdir)/index
+
+toolexeclibindex_DATA = \
+	index/suffixarray.gox
+
 toolexeclibiodir = $(toolexeclibdir)/io
 
 toolexeclibio_DATA = \
@@ -309,12 +318,14 @@ 
 	runtime/go-defer.c \
 	runtime/go-deferred-recover.c \
 	runtime/go-eface-compare.c \
+	runtime/go-eface-val-compare.c \
 	runtime/go-getgoroot.c \
 	runtime/go-go.c \
 	runtime/go-gomaxprocs.c \
 	runtime/go-int-array-to-string.c \
 	runtime/go-int-to-string.c \
 	runtime/go-interface-compare.c \
+	runtime/go-interface-val-compare.c \
 	runtime/go-lock-os-thread.c \
 	runtime/go-map-delete.c \
 	runtime/go-map-index.c \
@@ -359,6 +370,7 @@ 
 	runtime/go-type-interface.c \
 	runtime/go-type-string.c \
 	runtime/go-typedesc-equal.c \
+	runtime/go-typestring.c \
 	runtime/go-unreflect.c \
 	runtime/go-unsafe-new.c \
 	runtime/go-unsafe-newarray.c \
@@ -456,7 +468,8 @@ 
 	go/ebnf/parser.go
 
 go_exec_files = \
-	go/exec/exec.go
+	go/exec/exec.go \
+	go/exec/lp_unix.go
 
 go_expvar_files = \
 	go/expvar/expvar.go
@@ -473,8 +486,10 @@ 
 go_gob_files = \
 	go/gob/decode.go \
 	go/gob/decoder.go \
+	go/gob/doc.go \
 	go/gob/encode.go \
 	go/gob/encoder.go \
+	go/gob/error.go \
 	go/gob/type.go
 
 go_hash_files = \
@@ -608,11 +623,9 @@ 
 	go/netchan/export.go \
 	go/netchan/import.go
 
-go_nntp_files = \
-	go/nntp/nntp.go
-
 go_os_files = \
 	go/os/dir.go \
+	go/os/env.go \
 	go/os/env_unix.go \
 	go/os/error.go \
 	go/os/exec.go \
@@ -661,21 +674,60 @@ 
 	go/runtime/error.go \
 	go/runtime/extern.go \
 	go/runtime/sig.go \
+	go/runtime/softfloat64.go \
 	go/runtime/type.go \
 	version.go
 
+if LIBGO_IS_386
+GOARCH = 386
+else
+if LIBGO_IS_X86_64
+GOARCH = amd64
+else
+if LIBGO_IS_ARM
+GOARCH = arm
+else
+GOARCH = unknown
+endif
+endif
+endif
+
+if LIBGO_IS_LINUX
+GOOS = linux
+else
+if LIBGO_IS_DARWIN
+GOOS = darwin
+else
+if LIBGO_IS_FREEBSD
+GOOS = freebsd
+else
+if LIBGO_IS_RTEMS
+GOOS = rtems
+else
+GOOS = unknown
+endif
+endif
+endif
+endif
+
 version.go: s-version; @true
 s-version: Makefile
 	rm -f version.go.tmp
 	echo "package runtime" > version.go.tmp
 	echo 'const defaultGoroot = "$(prefix)"' >> version.go.tmp
-	echo 'const defaultVersion = "'`$(CC) --version | sed 1q`'"' >> version.go.tmp
+	echo 'const theVersion = "'`$(CC) --version | sed 1q`'"' >> version.go.tmp
+	echo 'const theGoarch = "'$(GOARCH)'"' >> version.go.tmp
+	echo 'const theGoos = "'$(GOOS)'"' >> version.go.tmp
 	$(SHELL) $(srcdir)/../move-if-change version.go.tmp version.go
 	$(STAMP) $@
 
 go_scanner_files = \
 	go/scanner/scanner.go
 
+go_smtp_files = \
+	go/smtp/auth.go \
+	go/smtp/smtp.go
+
 go_sort_files = \
 	go/sort/sort.go
 
@@ -711,7 +763,6 @@ 
 
 go_testing_files = \
 	go/testing/benchmark.go \
-	go/testing/regexp.go \
 	go/testing/testing.go
 
 go_time_files = \
@@ -721,6 +772,9 @@ 
 	go/time/time.go \
 	go/time/zoneinfo_unix.go
 
+go_try_files = \
+	go/try/try.go
+
 go_unicode_files = \
 	go/unicode/casetables.go \
 	go/unicode/digit.go \
@@ -731,6 +785,7 @@ 
 	go/utf16/utf16.go
 
 go_utf8_files = \
+	go/utf8/string.go \
 	go/utf8/utf8.go
 
 go_websocket_files = \
@@ -747,6 +802,10 @@ 
 	go/archive/tar/reader.go \
 	go/archive/tar/writer.go
 
+go_archive_zip_files = \
+	go/archive/zip/reader.go \
+	go/archive/zip/struct.go
+
 go_compress_flate_files = \
 	go/compress/flate/deflate.go \
 	go/compress/flate/huffman_bit_writer.go \
@@ -797,6 +856,8 @@ 
 	go/crypto/blowfish/block.go \
 	go/crypto/blowfish/const.go \
 	go/crypto/blowfish/cipher.go
+go_crypto_cast5_files = \
+	go/crypto/cast5/cast5.go
 go_crypto_hmac_files = \
 	go/crypto/hmac/hmac.go
 go_crypto_md4_files = \
@@ -861,6 +922,9 @@ 
 go_debug_macho_files = \
 	go/debug/macho/file.go \
 	go/debug/macho/macho.go
+go_debug_pe_files = \
+	go/debug/pe/file.go \
+	go/debug/pe/pe.go
 
 if LIBGO_IS_LINUX
 proc_file = go/debug/proc/proc_linux.go
@@ -941,9 +1005,6 @@ 
 	go/exp/eval/typec.go \
 	go/exp/eval/value.go \
 	go/exp/eval/world.go
-go_exp_iterable_files = \
-	go/exp/iterable/array.go \
-	go/exp/iterable/iterable.go
 
 go_go_ast_files = \
 	go/go/ast/ast.go \
@@ -965,6 +1026,10 @@ 
 	go/go/scanner/scanner.go
 go_go_token_files = \
 	go/go/token/token.go
+go_go_typechecker_files = \
+	go/go/typechecker/scope.go \
+	go/go/typechecker/typechecker.go \
+	go/go/typechecker/universe.go
 
 go_hash_adler32_files = \
 	go/hash/adler32/adler32.go
@@ -985,6 +1050,9 @@ 
 	go/image/png/reader.go \
 	go/image/png/writer.go
 
+go_index_suffixarray_files = \
+	go/index/suffixarray/suffixarray.go
+
 go_io_ioutil_files = \
 	go/io/ioutil/ioutil.go \
 	go/io/ioutil/tempfile.go
@@ -1094,7 +1162,6 @@ 
 	mime/libmime.la \
 	net/libnet.la \
 	netchan/libnetchan.la \
-	nntp/libnntp.la \
 	os/libos.la \
 	patch/libpatch.la \
 	path/libpath.la \
@@ -1104,6 +1171,7 @@ 
 	rpc/librpc.la \
 	runtime/libruntime.la \
 	scanner/libscanner.la \
+	smtp/libsmtp.la \
 	sort/libsort.la \
 	strconv/libstrconv.la \
 	strings/libstrings.la \
@@ -1112,12 +1180,14 @@ 
 	tabwriter/libtabwriter.la \
 	template/libtemplate.la \
 	time/libtime.la \
+	try/libtry.la \
 	unicode/libunicode.la \
 	utf16/libutf16.la \
 	utf8/libutf8.la \
 	websocket/libwebsocket.la \
 	xml/libxml.la \
 	archive/libtar.la \
+	archive/libzip.la \
 	compress/libflate.la \
 	compress/libgzip.la \
 	compress/libzlib.la \
@@ -1128,6 +1198,7 @@ 
 	crypto/libaes.la \
 	crypto/libblock.la \
 	crypto/libblowfish.la \
+	crypto/libcast5.la \
 	crypto/libhmac.la \
 	crypto/libmd4.la \
 	crypto/libmd5.la \
@@ -1147,6 +1218,7 @@ 
 	debug/libelf.la \
 	debug/libgosym.la \
 	debug/libmacho.la \
+	debug/libpe.la \
 	debug/libproc.la \
 	encoding/libascii85.la \
 	encoding/libbase64.la \
@@ -1157,19 +1229,20 @@ 
 	exp/libdatafmt.la \
 	exp/libdraw.la \
 	exp/libeval.la \
-	exp/libiterable.la \
 	go/libast.la \
 	go/libdoc.la \
 	go/libparser.la \
 	go/libprinter.la \
 	go/libscanner.la \
 	go/libtoken.la \
+	go/libtypechecker.la \
 	hash/libadler32.la \
 	hash/libcrc32.la \
 	hash/libcrc64.la \
 	http/libpprof.la \
 	image/libjpeg.la \
 	image/libpng.la \
+	index/libsuffixarray.la \
 	io/libioutil.la \
 	mime/libmultipart.la \
 	net/libdict.la \
@@ -1233,7 +1306,7 @@ 
 	export MAKE; \
 	rm -f $@-log; \
 	echo -n "$(@D) " >$@-log 2>&1; \
-	prefix=`dirname $(@D)`; \
+	prefix=`if test "$(@D)" = "regexp"; then echo regexp-test; else dirname $(@D); fi`; \
 	test "$${prefix}" != "." || prefix="$(@D)"; \
 	$(srcdir)/testsuite/gotest --dejagnu=$(use_dejagnu) --basedir=$(srcdir) --srcdir=$(srcdir)/go/$(@D) --prefix="libgo_$${prefix}" --pkgfiles="$(go_$(subst /,_,$(@D))_files)" >>$@-log 2>&1; \
 	x=$$?; \
@@ -1355,7 +1428,7 @@ 
 	$(CHECK)
 .PHONY: hash/check
 
-html/libhtml.a: $(go_html_files) bytes.gox io.gox log.gox os.gox strconv.gox \
+html/libhtml.a: $(go_html_files) bytes.gox io.gox os.gox strconv.gox \
 		strings.gox utf8.gox
 	$(BUILDARCHIVE)
 html/libhtml.la: html/libhtml.a
@@ -1397,7 +1470,8 @@ 
 	$(CHECK)
 .PHONY: json/check
 
-log/liblog.a: $(go_log_files) fmt.gox io.gox runtime.gox os.gox time.gox
+log/liblog.a: $(go_log_files) bytes.gox fmt.gox io.gox runtime.gox os.gox \
+		time.gox
 	$(BUILDARCHIVE)
 log/liblog.la: log/liblog.a
 log/check: $(CHECK_DEPS)
@@ -1428,22 +1502,13 @@ 
 .PHONY: net/check
 
 netchan/libnetchan.a: $(go_netchan_files) gob.gox log.gox net.gox os.gox \
-		reflect.gox sync.gox
+		reflect.gox sync.gox time.gox
 	$(BUILDARCHIVE)
 netchan/libnetchan.la: netchan/libnetchan.a
 netchan/check: $(CHECK_DEPS)
 	$(CHECK)
 .PHONY: netchan/check
 
-nntp/libnntp.a: $(go_nntp_files) bufio.gox bytes.gox container/vector.gox \
-		fmt.gox http.gox io.gox io/ioutil.gox os.gox net.gox sort.gox \
-		strconv.gox strings.gox time.gox
-	$(BUILDARCHIVE)
-nntp/libnntp.la: nntp/libnntp.a
-nntp/check: $(CHECK_DEPS)
-	$(CHECK)
-.PHONY: nntp/check
-
 os/libos.a: $(go_os_files) sync.gox syscall.gox
 	$(BUILDARCHIVE)
 os/libos.la: os/libos.a
@@ -1460,7 +1525,8 @@ 
 	$(CHECK)
 .PHONY: patch/check
 
-path/libpath.a: $(go_path_files) io/ioutil.gox os.gox strings.gox
+path/libpath.a: $(go_path_files) io/ioutil.gox os.gox sort.gox strings.gox \
+		utf8.gox
 	$(BUILDARCHIVE)
 path/libpath.la: path/libpath.a
 path/check: $(CHECK_DEPS)
@@ -1482,8 +1548,8 @@ 
 	$(CHECK)
 .PHONY: reflect/check
 
-regexp/libregexp.a: $(go_regexp_files) bytes.gox container/vector.gox io.gox \
-		os.gox strings.gox utf8.gox
+regexp/libregexp.a: $(go_regexp_files) bytes.gox io.gox os.gox strings.gox \
+		utf8.gox
 	$(BUILDARCHIVE)
 regexp/libregexp.la: regexp/libregexp.a
 regexp/check: $(CHECK_DEPS)
@@ -1514,6 +1580,14 @@ 
 	$(CHECK)
 .PHONY: scanner/check
 
+smtp/libsmtp.a: $(go_smtp_files) crypto/tls.gox encoding/base64.gox io.gox \
+		net.gox net/textproto.gox os.gox strings.gox
+	$(BUILDARCHIVE)
+smtp/libsmtp.la: smtp/libsmtp.a
+smtp/check: $(CHECK_DEPS)
+	$(CHECK)
+.PHONY: smtp/check
+
 sort/libsort.a: $(go_sort_files)
 	$(BUILDARCHIVE)
 sort/libsort.la: sort/libsort.a
@@ -1556,7 +1630,7 @@ 
 .PHONY: syslog/check
 
 tabwriter/libtabwriter.a: $(go_tabwriter_files) bytes.gox io.gox os.gox \
-		utf8.gox container/vector.gox
+		utf8.gox
 	$(BUILDARCHIVE)
 tabwriter/libtabwriter.la: tabwriter/libtabwriter.a
 tabwriter/check: $(CHECK_DEPS)
@@ -1587,6 +1661,13 @@ 
 	$(CHECK)
 .PHONY: time/check
 
+try/libtry.a: $(go_try_files) fmt.gox io.gox os.gox reflect.gox unicode.gox
+	$(BUILDARCHIVE)
+try/libtry.la: try/libtry.a
+try/check: $(CHECK_DEPS)
+	$(CHECK)
+.PHONY: try/check
+
 unicode/libunicode.a: $(go_unicode_files)
 	$(BUILDARCHIVE)
 unicode/libunicode.la: unicode/libunicode.a
@@ -1609,8 +1690,9 @@ 
 .PHONY: utf8/check
 
 websocket/libwebsocket.a: $(go_websocket_files) bufio.gox bytes.gox \
-		container/vector.gox crypto/md5.gox encoding/binary.gox \
-		fmt.gox http.gox io.gox net.gox os.gox rand.gox strings.gox
+		container/vector.gox crypto/md5.gox crypto/tls.gox \
+		encoding/binary.gox fmt.gox http.gox io.gox net.gox os.gox \
+		rand.gox strings.gox
 	$(BUILDARCHIVE)
 websocket/libwebsocket.la: websocket/libwebsocket.a
 websocket/check: $(CHECK_DEPS)
@@ -1634,6 +1716,16 @@ 
 	$(CHECK)
 .PHONY: archive/tar/check
 
+archive/libzip.a: $(go_archive_zip_files) bufio.gox bytes.gox \
+		compress/flate.gox hash.gox hash/crc32.gox \
+		encoding/binary.gox io.gox os.gox
+	$(BUILDARCHIVE)
+archive/libzip.la: archive/libzip.a
+archive/zip/check: $(CHECK_DEPS)
+	@mkdir -p archive/zip
+	$(CHECK)
+.PHONY: archive/zip/check
+
 compress/libflate.a: $(go_compress_flate_files) bufio.gox io.gox math.gox \
 		os.gox sort.gox strconv.gox
 	$(BUILDARCHIVE)
@@ -1717,6 +1809,14 @@ 
 	$(CHECK)
 .PHONY: crypto/blowfish/check
 
+crypto/libcast5.a: $(go_crypto_cast5_files) os.gox
+	$(BUILDARCHIVE)
+crypto/libcast5.la: crypto/libcast5.a
+crypto/cast5/check: $(CHECK_DEPS)
+	@mkdir -p crypto/cast5
+	$(CHECK)
+.PHONY: crypto/cast5/check
+
 crypto/libhmac.a: $(go_crypto_hmac_files) crypto/md5.gox crypto/sha1.gox \
 		hash.gox os.gox
 	$(BUILDARCHIVE)
@@ -1821,7 +1921,8 @@ 
 		crypto/hmac.gox crypto/md5.gox crypto/rc4.gox crypto/rand.gox \
 		crypto/rsa.gox crypto/sha1.gox crypto/subtle.gox \
 		crypto/rsa.gox crypto/x509.gox encoding/pem.gox fmt.gox \
-		hash.gox io.gox io/ioutil.gox net.gox os.gox sync.gox time.gox
+		hash.gox io.gox io/ioutil.gox net.gox os.gox strings.gox \
+		sync.gox time.gox
 	$(BUILDARCHIVE)
 crypto/libtls.la: crypto/libtls.a
 crypto/tls/check: $(CHECK_DEPS)
@@ -1883,6 +1984,15 @@ 
 	$(CHECK)
 .PHONY: debug/macho/check
 
+debug/libpe.a: $(go_debug_pe_files) debug/dwarf.gox encoding/binary.gox \
+		fmt.gox io.gox os.gox strconv.gox
+	$(BUILDARCHIVE)
+debug/libpe.la: debug/libpe.a
+debug/pe/check: $(CHECK_DEPS)
+	@mkdir -p debug/pe
+	$(CHECK)
+.PHONY: debug/pe/check
+
 debug/libproc.a: $(go_debug_proc_files) container/vector.gox fmt.gox \
 		io/ioutil.gox os.gox runtime.gox strconv.gox strings.gox \
 		sync.gox syscall.gox
@@ -1953,7 +2063,7 @@ 
 	$(CHECK)
 .PHONY: exp/datafmt/check
 
-exp/libdraw.a: $(go_exp_draw_files) image.gox
+exp/libdraw.a: $(go_exp_draw_files) image.gox os.gox
 	$(BUILDARCHIVE)
 exp/libdraw.la: exp/libdraw.a
 exp/draw/check: $(CHECK_DEPS)
@@ -1971,15 +2081,6 @@ 
 	$(CHECK)
 .PHONY: exp/eval/check
 
-exp/libiterable.a: $(go_exp_iterable_files) container/list.gox \
-		container/vector.gox
-	$(BUILDARCHIVE)
-exp/libiterable.la: exp/libiterable.a
-exp/iterable/check: $(CHECK_DEPS)
-	@mkdir -p exp/iterable
-	$(CHECK)
-.PHONY: exp/iterable/check
-
 go/libast.a: $(go_go_ast_files) fmt.gox go/token.gox io.gox os.gox \
 		reflect.gox unicode.gox utf8.gox
 	$(BUILDARCHIVE)
@@ -1989,8 +2090,8 @@ 
 	$(CHECK)
 .PHONY: go/ast/check
 
-go/libdoc.a: $(go_go_doc_files) container/vector.gox go/ast.gox go/token.gox \
-		http.gox io.gox regexp.gox sort.gox strings.gox template.gox
+go/libdoc.a: $(go_go_doc_files) go/ast.gox go/token.gox io.gox regexp.gox \
+		sort.gox strings.gox template.gox
 	$(BUILDARCHIVE)
 go/libdoc.la: go/libdoc.a
 go/doc/check: $(CHECK_DEPS)
@@ -1998,9 +2099,9 @@ 
 	$(CHECK)
 .PHONY: go/doc/check
 
-go/libparser.a: $(go_go_parser_files) bytes.gox container/vector.gox fmt.gox \
-		go/ast.gox go/scanner.gox go/token.gox io.gox io/ioutil.gox \
-		os.gox path.gox strings.gox
+go/libparser.a: $(go_go_parser_files) bytes.gox fmt.gox go/ast.gox \
+		go/scanner.gox go/token.gox io.gox io/ioutil.gox os.gox \
+		path.gox strings.gox
 	$(BUILDARCHIVE)
 go/libparser.la: go/libparser.a
 go/parser/check: $(CHECK_DEPS)
@@ -2008,8 +2109,8 @@ 
 	$(CHECK)
 .PHONY: go/parser/check
 
-go/libprinter.a: $(go_go_printer_files) bytes.gox container/vector.gox fmt.gox \
-		go/ast.gox go/token.gox io.gox os.gox reflect.gox runtime.gox \
+go/libprinter.a: $(go_go_printer_files) bytes.gox fmt.gox go/ast.gox \
+		go/token.gox io.gox os.gox reflect.gox runtime.gox \
 		strings.gox tabwriter.gox
 	$(BUILDARCHIVE)
 go/libprinter.la: go/libprinter.a
@@ -2036,6 +2137,15 @@ 
 	$(CHECK)
 .PHONY: go/token/check
 
+go/libtypechecker.a: $(go_go_typechecker_files) fmt.gox go/ast.gox \
+		go/token.gox go/scanner.gox os.gox
+	$(BUILDARCHIVE)
+go/libtypechecker.la: go/libtypechecker.a
+go/typechecker/check: $(CHECK_DEPS)
+	@mkdir -p go/typechecker
+	$(CHECK)
+.PHONY: go/typechecker/check
+
 hash/libadler32.a: $(go_hash_adler32_files) hash.gox os.gox
 	$(BUILDARCHIVE)
 hash/libadler32.la: hash/libadler32.a
@@ -2086,6 +2196,15 @@ 
 	$(CHECK)
 .PHONY: image/png/check
 
+index/libsuffixarray.a: $(go_index_suffixarray_files) bytes.gox \
+		container/vector.gox sort.gox
+	$(BUILDARCHIVE)
+index/libsuffixarray.la: index/libsuffixarray.a
+index/suffixarray/check: $(CHECK_DEPS)
+	@mkdir -p index/suffixarray
+	$(CHECK)
+.PHONY: index/suffixarray/check
+
 io/libioutil.a: $(go_io_ioutil_files) bytes.gox io.gox os.gox sort.gox \
 		strconv.gox
 	$(BUILDARCHIVE)
@@ -2240,8 +2359,6 @@ 
 	$(BUILDGOX)
 netchan.gox: netchan/libnetchan.a
 	$(BUILDGOX)
-nntp.gox: nntp/libnntp.a
-	$(BUILDGOX)
 os.gox: os/libos.a
 	$(BUILDGOX)
 patch.gox: patch/libpatch.a
@@ -2260,6 +2377,8 @@ 
 	$(BUILDGOX)
 scanner.gox: scanner/libscanner.a
 	$(BUILDGOX)
+smtp.gox: smtp/libsmtp.a
+	$(BUILDGOX)
 sort.gox: sort/libsort.a
 	$(BUILDGOX)
 strconv.gox: strconv/libstrconv.a
@@ -2280,6 +2399,8 @@ 
 	$(BUILDGOX)
 time.gox: time/libtime.a
 	$(BUILDGOX)
+try.gox: try/libtry.a
+	$(BUILDGOX)
 unicode.gox: unicode/libunicode.a
 	$(BUILDGOX)
 utf16.gox: utf16/libutf16.a
@@ -2293,6 +2414,8 @@ 
 
 archive/tar.gox: archive/libtar.a
 	$(BUILDGOX)
+archive/zip.gox: archive/libzip.a
+	$(BUILDGOX)
 
 compress/flate.gox: compress/libflate.a
 	$(BUILDGOX)
@@ -2316,6 +2439,8 @@ 
 	$(BUILDGOX)
 crypto/blowfish.gox: crypto/libblowfish.a
 	$(BUILDGOX)
+crypto/cast5.gox: crypto/libcast5.a
+	$(BUILDGOX)
 crypto/hmac.gox: crypto/libhmac.a
 	$(BUILDGOX)
 crypto/md4.gox: crypto/libmd4.a
@@ -2355,6 +2480,8 @@ 
 	$(BUILDGOX)
 debug/macho.gox: debug/libmacho.a
 	$(BUILDGOX)
+debug/pe.gox: debug/libpe.a
+	$(BUILDGOX)
 debug/proc.gox: debug/libproc.a
 	$(BUILDGOX)
 
@@ -2377,8 +2504,6 @@ 
 	$(BUILDGOX)
 exp/eval.gox: exp/libeval.a
 	$(BUILDGOX)
-exp/iterable.gox: exp/libiterable.a
-	$(BUILDGOX)
 
 go/ast.gox: go/libast.a
 	$(BUILDGOX)
@@ -2392,6 +2517,8 @@ 
 	$(BUILDGOX)
 go/token.gox: go/libtoken.a
 	$(BUILDGOX)
+go/typechecker.gox: go/libtypechecker.a
+	$(BUILDGOX)
 
 hash/adler32.gox: hash/libadler32.a
 	$(BUILDGOX)
@@ -2408,6 +2535,9 @@ 
 image/png.gox: image/libpng.a
 	$(BUILDGOX)
 
+index/suffixarray.gox: index/libsuffixarray.a
+	$(BUILDGOX)
+
 io/ioutil.gox: io/libioutil.a
 	$(BUILDGOX)
 
@@ -2456,7 +2586,6 @@ 
 	mime/check \
 	net/check \
 	netchan/check \
-	nntp/check \
 	os/check \
 	patch/check \
 	path/check \
@@ -2464,7 +2593,9 @@ 
 	reflect/check \
 	regexp/check \
 	rpc/check \
+	runtime/check \
 	scanner/check \
+	smtp/check \
 	sort/check \
 	strconv/check \
 	strings/check \
@@ -2472,14 +2603,15 @@ 
 	syslog/check \
 	tabwriter/check \
 	template/check \
-	testing/check \
 	time/check \
+	try/check \
 	unicode/check \
 	utf16/check \
 	utf8/check \
 	websocket/check \
 	xml/check \
 	archive/tar/check \
+	archive/zip/check \
 	compress/flate/check \
 	compress/gzip/check \
 	compress/zlib/check \
@@ -2490,6 +2622,7 @@ 
 	crypto/aes/check \
 	crypto/block/check \
 	crypto/blowfish/check \
+	crypto/cast5/check \
 	crypto/hmac/check \
 	crypto/md4/check \
 	crypto/md5/check \
@@ -2508,6 +2641,7 @@ 
 	debug/dwarf/check \
 	debug/elf/check \
 	debug/macho/check \
+	debug/pe/check \
 	encoding/ascii85/check \
 	encoding/base64/check \
 	encoding/binary/check \
@@ -2517,14 +2651,15 @@ 
 	exp/datafmt/check \
 	exp/draw/check \
 	exp/eval/check \
-	exp/iterable/check \
 	go/parser/check \
 	go/printer/check \
 	go/scanner/check \
+	go/typechecker/check \
 	hash/adler32/check \
 	hash/crc32/check \
 	hash/crc64/check \
 	image/png/check \
+	index/suffixarray/check \
 	io/ioutil/check \
 	mime/multipart/check \
 	net/textproto/check \
diff -r bb880434e617 libgo/mksysinfo.sh
--- a/libgo/mksysinfo.sh	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/mksysinfo.sh	Wed Nov 10 21:37:28 2010 -0800
@@ -114,8 +114,8 @@ 
 fi
 
 # Networking constants.
-grep '^const _\(AF\|SOCK\|SOL\|SO\|IPPROTO\|TCP\)_' gen-sysinfo.go |
-  sed -e 's/^\(const \)_\(\(AF\|SOCK\|SOL\|SO\|IPPROTO\|TCP\)_[^= ]*\)\(.*\)$/\1\2 = _\2/' \
+grep '^const _\(AF\|SOCK\|SOL\|SO\|IPPROTO\|TCP\|IP\|IPV6\)_' gen-sysinfo.go |
+  sed -e 's/^\(const \)_\(\(AF\|SOCK\|SOL\|SO\|IPPROTO\|TCP\|IP\|IPV6\)_[^= ]*\)\(.*\)$/\1\2 = _\2/' \
     >> ${OUT}
 grep '^const _SOMAXCONN' gen-sysinfo.go |
   sed -e 's/^\(const \)_\(SOMAXCONN[^= ]*\)\(.*\)$/\1\2 = _\2/' \
diff -r bb880434e617 libgo/runtime/go-byte-array-to-string.c
--- a/libgo/runtime/go-byte-array-to-string.c	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/go-byte-array-to-string.c	Wed Nov 10 21:37:28 2010 -0800
@@ -16,7 +16,7 @@ 
   struct __go_string ret;
 
   bytes = (const unsigned char *) p;
-  retdata = mallocgc (len, RefNoPointers, 1, 0);
+  retdata = runtime_mallocgc (len, RefNoPointers, 1, 0);
   __builtin_memcpy (retdata, bytes, len);
   ret.__data = retdata;
   ret.__length = len;
diff -r bb880434e617 libgo/runtime/go-go.c
--- a/libgo/runtime/go-go.c	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/go-go.c	Wed Nov 10 21:37:28 2010 -0800
@@ -107,14 +107,18 @@ 
   if (list_entry->next != NULL)
     list_entry->next->prev = list_entry->prev;
 
-  /* We use __go_thread_ids_lock as a lock for mheap.cachealloc.  */
-  MCache_ReleaseAll (mcache);
-  __builtin_memset (mcache, 0, sizeof (struct MCache));
-  FixAlloc_Free (&mheap.cachealloc, mcache);
-
   i = pthread_mutex_unlock (&__go_thread_ids_lock);
   __go_assert (i == 0);
 
+  runtime_MCache_ReleaseAll (mcache);
+
+  runtime_lock (&runtime_mheap);
+  mstats.heap_alloc += mcache->local_alloc;
+  mstats.heap_objects += mcache->local_objects;
+  __builtin_memset (mcache, 0, sizeof (struct MCache));
+  runtime_FixAlloc_Free (&runtime_mheap.cachealloc, mcache);
+  runtime_unlock (&runtime_mheap);
+
   free (list_entry);
 }
 
@@ -227,7 +231,7 @@ 
   __go_assert (i == 0);
 
   /* We use __go_thread_ids_lock as a lock for mheap.cachealloc.  */
-  newm->mcache = allocmcache ();
+  newm->mcache = runtime_allocmcache ();
 
   if (__go_all_thread_ids != NULL)
     __go_all_thread_ids->prev = list_entry;
@@ -353,7 +357,7 @@ 
 /* Stop all the other threads for garbage collection.  */
 
 void
-stoptheworld (void)
+runtime_stoptheworld (void)
 {
   int i;
   pthread_t me;
@@ -424,7 +428,7 @@ 
    with __go_thread_ids_lock held.  */
 
 void
-__go_scanstacks (void (*scan) (int32, unsigned char *, int64_t))
+__go_scanstacks (void (*scan) (unsigned char *, int64_t))
 {
   pthread_t me;
   struct __go_thread_id *p;
@@ -440,9 +444,9 @@ 
 	  /* The goroutine function and argument can be allocated on
 	     the heap, so we have to scan them for a thread that has
 	     not yet started.  */
-	  scan (0, (void *) &p->pfn, sizeof (void *));
-	  scan (0, (void *) &p->arg, sizeof (void *));
-	  scan (0, (void *) &p->m, sizeof (void *));
+	  scan ((void *) &p->pfn, sizeof (void *));
+	  scan ((void *) &p->arg, sizeof (void *));
+	  scan ((void *) &p->m, sizeof (void *));
 	  continue;
 	}
 
@@ -473,7 +477,7 @@ 
 
       while (sp != NULL)
 	{
-	  scan (0, sp, len);
+	  scan (sp, len);
 	  sp = __splitstack_find (next_segment, next_sp, &len,
 				  &next_segment, &next_sp, &initial_sp);
 	}
@@ -485,20 +489,20 @@ 
 	  uintptr_t top = (uintptr_t) m->gc_sp;
 	  uintptr_t bottom = (uintptr_t) &top;
 	  if (top < bottom)
-	    scan (0, m->gc_sp, bottom - top);
+	    scan (m->gc_sp, bottom - top);
 	  else
-	    scan (0, (void *) bottom, top - bottom);
+	    scan ((void *) bottom, top - bottom);
 	}
       else
 	{
-	  scan (0, p->m->gc_next_sp, p->m->gc_len);
+	  scan (p->m->gc_next_sp, p->m->gc_len);
 	}
 	
 #endif /* !defined(USING_SPLIT_STACK) */
 
       /* Also scan the M structure while we're at it.  */
 
-      scan (0, (void *) &p->m, sizeof (void *));
+      scan ((void *) &p->m, sizeof (void *));
     }
 }
 
@@ -506,18 +510,38 @@ 
    __go_thread_ids_lock held.  */
 
 void
-__go_stealcache(void)
+__go_stealcache (void)
 {
   struct __go_thread_id *p;
 
   for (p = __go_all_thread_ids; p != NULL; p = p->next)
-    MCache_ReleaseAll (p->m->mcache);
+    runtime_MCache_ReleaseAll (p->m->mcache);
+}
+
+/* Gather memory cache statistics.  This is called with
+   __go_thread_ids_lock held.  */
+
+void
+__go_cachestats (void)
+{
+  struct __go_thread_id *p;
+
+  for (p = __go_all_thread_ids; p != NULL; p = p->next)
+    {
+      MCache *c;
+
+      c = p->m->mcache;
+      mstats.heap_alloc += c->local_alloc;
+      c->local_alloc = 0;
+      mstats.heap_objects += c->local_objects;
+      c->local_objects = 0;
+    }
 }
 
 /* Start the other threads after garbage collection.  */
 
 void
-starttheworld (void)
+runtime_starttheworld (void)
 {
   int i;
   pthread_t me;
diff -r bb880434e617 libgo/runtime/go-int-array-to-string.c
--- a/libgo/runtime/go-int-array-to-string.c	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/go-int-array-to-string.c	Wed Nov 10 21:37:28 2010 -0800
@@ -41,7 +41,7 @@ 
 	slen += 4;
     }
 
-  retdata = mallocgc (slen, RefNoPointers, 1, 0);
+  retdata = runtime_mallocgc (slen, RefNoPointers, 1, 0);
   ret.__data = retdata;
   ret.__length = slen;
 
diff -r bb880434e617 libgo/runtime/go-int-to-string.c
--- a/libgo/runtime/go-int-to-string.c	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/go-int-to-string.c	Wed Nov 10 21:37:28 2010 -0800
@@ -51,7 +51,7 @@ 
 	}
     }
 
-  retdata = mallocgc (len, RefNoPointers, 1, 0);
+  retdata = runtime_mallocgc (len, RefNoPointers, 1, 0);
   __builtin_memcpy (retdata, buf, len);
   ret.__data = retdata;
   ret.__length = len;
diff -r bb880434e617 libgo/runtime/go-main.c
--- a/libgo/runtime/go-main.c	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/go-main.c	Wed Nov 10 21:37:28 2010 -0800
@@ -47,7 +47,7 @@ 
   int i;
   struct __go_string *values;
 
-  mallocinit ();
+  runtime_mallocinit ();
   __go_gc_goroutine_init (&argc);
 
   Args.__count = argc;
diff -r bb880434e617 libgo/runtime/go-nanotime.c
--- a/libgo/runtime/go-nanotime.c	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/go-nanotime.c	Wed Nov 10 21:37:28 2010 -0800
@@ -10,7 +10,7 @@ 
 #include "runtime.h"
 
 int64
-nanotime (void)
+runtime_nanotime (void)
 {
   int i;
   struct timeval tv;
diff -r bb880434e617 libgo/runtime/go-new.c
--- a/libgo/runtime/go-new.c	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/go-new.c	Wed Nov 10 21:37:28 2010 -0800
@@ -17,5 +17,5 @@ 
 void *
 __go_new_nopointers (size_t size)
 {
-  return mallocgc (size, RefNoPointers, 1, 1);
+  return runtime_mallocgc (size, RefNoPointers, 1, 1);
 }
diff -r bb880434e617 libgo/runtime/go-panic.c
--- a/libgo/runtime/go-panic.c	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/go-panic.c	Wed Nov 10 21:37:28 2010 -0800
@@ -112,7 +112,7 @@ 
   struct __go_empty_interface arg;
 
   len = __builtin_strlen (msg);
-  sdata = mallocgc (len, RefNoPointers, 0, 0);
+  sdata = runtime_mallocgc (len, RefNoPointers, 0, 0);
   __builtin_memcpy (sdata, msg, len);
   s.__data = sdata;
   s.__length = len;
diff -r bb880434e617 libgo/runtime/go-string-to-byte-array.c
--- a/libgo/runtime/go-string-to-byte-array.c	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/go-string-to-byte-array.c	Wed Nov 10 21:37:28 2010 -0800
@@ -15,7 +15,7 @@ 
   unsigned char *data;
   struct __go_open_array ret;
 
-  data = (unsigned char *) mallocgc (str.__length, RefNoPointers, 1, 0);
+  data = (unsigned char *) runtime_mallocgc (str.__length, RefNoPointers, 1, 0);
   __builtin_memcpy (data, str.__data, str.__length);
   ret.__values = (void *) data;
   ret.__count = str.__length;
diff -r bb880434e617 libgo/runtime/go-string-to-int-array.c
--- a/libgo/runtime/go-string-to-int-array.c	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/go-string-to-int-array.c	Wed Nov 10 21:37:28 2010 -0800
@@ -31,7 +31,8 @@ 
       p += __go_get_rune (p, pend - p, &rune);
     }
 
-  data = (uint32_t *) mallocgc (c * sizeof (uint32_t), RefNoPointers, 1, 0);
+  data = (uint32_t *) runtime_mallocgc (c * sizeof (uint32_t), RefNoPointers,
+					1, 0);
   p = str.__data;
   pd = data;
   while (p < pend)
diff -r bb880434e617 libgo/runtime/go-strplus.c
--- a/libgo/runtime/go-strplus.c	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/go-strplus.c	Wed Nov 10 21:37:28 2010 -0800
@@ -21,7 +21,7 @@ 
     return s1;
 
   len = s1.__length + s2.__length;
-  retdata = mallocgc (len, RefNoPointers, 1, 0);
+  retdata = runtime_mallocgc (len, RefNoPointers, 1, 0);
   __builtin_memcpy (retdata, s1.__data, s1.__length);
   __builtin_memcpy (retdata + s1.__length, s2.__data, s2.__length);
   ret.__data = retdata;
diff -r bb880434e617 libgo/runtime/go-typestring.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libgo/runtime/go-typestring.c	Wed Nov 10 21:37:28 2010 -0800
@@ -0,0 +1,18 @@ 
+/* go-typestring.c -- the runtime.typestring function.
+
+   Copyright 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "interface.h"
+#include "go-type.h"
+#include "go-string.h"
+
+struct __go_string typestring(struct __go_empty_interface)
+  asm ("libgo_runtime.runtime.typestring");
+
+struct __go_string
+typestring (struct __go_empty_interface e)
+{
+  return *e.__type_descriptor->__reflection;
+}
diff -r bb880434e617 libgo/runtime/malloc.goc
--- a/libgo/runtime/malloc.goc	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/malloc.goc	Wed Nov 10 21:37:28 2010 -0800
@@ -20,10 +20,10 @@ 
 typedef struct __go_type_descriptor Type;
 typedef struct __go_func_type FuncType;
 
-MHeap mheap;
-MStats mstats;
+MHeap runtime_mheap;
+extern MStats mstats;	// defined in extern.go
 
-extern volatile int32 MemProfileRate
+extern volatile int32 runtime_MemProfileRate
   __asm__ ("libgo_runtime.runtime.MemProfileRate");
 
 // Same algorithm from chan.c, but a different
@@ -45,7 +45,7 @@ 
 // Small objects are allocated from the per-thread cache's free lists.
 // Large objects (> 32 kB) are allocated straight from the heap.
 void*
-mallocgc(uintptr size, uint32 refflag, int32 dogc, int32 zeroed)
+runtime_mallocgc(uintptr size, uint32 refflag, int32 dogc, int32 zeroed)
 {
 	int32 sizeclass, rate;
 	MCache *c;
@@ -55,26 +55,26 @@ 
 	uint32 *ref;
 
 	if(!__sync_bool_compare_and_swap(&m->mallocing, 0, 1))
-		throw("malloc/free - deadlock");
+		runtime_throw("malloc/free - deadlock");
 	if(size == 0)
 		size = 1;
 
 	mstats.nmalloc++;
 	if(size <= MaxSmallSize) {
 		// Allocate from mcache free lists.
-		sizeclass = SizeToClass(size);
-		size = class_to_size[sizeclass];
+		sizeclass = runtime_SizeToClass(size);
+		size = runtime_class_to_size[sizeclass];
 		c = m->mcache;
-		v = MCache_Alloc(c, sizeclass, size, zeroed);
+		v = runtime_MCache_Alloc(c, sizeclass, size, zeroed);
 		if(v == nil)
-			throw("out of memory");
+			runtime_throw("out of memory");
 		mstats.alloc += size;
 		mstats.total_alloc += size;
 		mstats.by_size[sizeclass].nmalloc++;
 
-		if(!mlookup(v, nil, nil, nil, &ref)) {
-			//printf("malloc %lld; mlookup failed\n", (long long)size);
-			throw("malloc mlookup");
+		if(!runtime_mlookup(v, nil, nil, nil, &ref)) {
+			// runtime_printf("malloc %D; runtime_mlookup failed\n", (uint64)size);
+			runtime_throw("malloc runtime_mlookup");
 		}
 		*ref = RefNone | refflag;
 	} else {
@@ -84,9 +84,9 @@ 
 		npages = size >> PageShift;
 		if((size & PageMask) != 0)
 			npages++;
-		s = MHeap_Alloc(&mheap, npages, 0, 1);
+		s = runtime_MHeap_Alloc(&runtime_mheap, npages, 0, 1);
 		if(s == nil)
-			throw("out of memory");
+			runtime_throw("out of memory");
 		size = npages<<PageShift;
 		mstats.alloc += size;
 		mstats.total_alloc += size;
@@ -110,7 +110,7 @@ 
 		}
 	}
 
-	if(!(refflag & RefNoProfiling) && (rate = MemProfileRate) > 0) {
+	if(!(refflag & RefNoProfiling) && (rate = runtime_MemProfileRate) > 0) {
 		if(size >= (uint32) rate)
 			goto profile;
 		if((uint32) m->mcache->next_sample > size)
@@ -122,19 +122,19 @@ 
 			m->mcache->next_sample = fastrand1() % (2*rate);
 		profile:
 			*ref |= RefProfiled;
-			MProf_Malloc(v, size);
+			runtime_MProf_Malloc(v, size);
 		}
 	}
 
 	if(dogc && mstats.heap_alloc >= mstats.next_gc)
-		gc(0);
+		runtime_gc(0);
 	return v;
 }
 
 void*
 __go_alloc(uintptr size)
 {
-	return mallocgc(size, 0, 1, 1);
+	return runtime_mallocgc(size, 0, 0, 1);
 }
 
 // Free the object whose base pointer is v.
@@ -150,11 +150,11 @@ 
 		return;
 
 	if(!__sync_bool_compare_and_swap(&m->mallocing, 0, 1))
-		throw("malloc/free - deadlock");
+		runtime_throw("malloc/free - deadlock");
 
-	if(!mlookup(v, nil, nil, &s, &ref)) {
-		//printf("free %p: not an allocated block\n", v);
-		throw("free mlookup");
+	if(!runtime_mlookup(v, nil, nil, &s, &ref)) {
+		// runtime_printf("free %p: not an allocated block\n", v);
+		runtime_throw("free runtime_mlookup");
 	}
 	prof = *ref & RefProfiled;
 	*ref = RefFree;
@@ -164,21 +164,21 @@ 
 	if(sizeclass == 0) {
 		// Large object.
 		if(prof)
-			MProf_Free(v, s->npages<<PageShift);
+			runtime_MProf_Free(v, s->npages<<PageShift);
 		mstats.alloc -= s->npages<<PageShift;
 		runtime_memclr(v, s->npages<<PageShift);
-		MHeap_Free(&mheap, s, 1);
+		runtime_MHeap_Free(&runtime_mheap, s, 1);
 	} else {
 		// Small object.
 		c = m->mcache;
-		size = class_to_size[sizeclass];
+		size = runtime_class_to_size[sizeclass];
 		if(size > (int32)sizeof(uintptr))
 			((uintptr*)v)[1] = 1;	// mark as "needs to be zeroed"
 		if(prof)
-			MProf_Free(v, size);
+			runtime_MProf_Free(v, size);
 		mstats.alloc -= size;
 		mstats.by_size[sizeclass].nfree++;
-		MCache_Free(c, v, sizeclass, size);
+		runtime_MCache_Free(c, v, sizeclass, size);
 	}
 	__sync_bool_compare_and_swap(&m->mallocing, 1, 0);
 
@@ -187,14 +187,14 @@ 
 }
 
 int32
-mlookup(void *v, byte **base, uintptr *size, MSpan **sp, uint32 **ref)
+runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **sp, uint32 **ref)
 {
 	uintptr n, nobj, i;
 	byte *p;
 	MSpan *s;
 
 	mstats.nlookup++;
-	s = MHeap_LookupMaybe(&mheap, (uintptr)v>>PageShift);
+	s = runtime_MHeap_LookupMaybe(&runtime_mheap, (uintptr)v>>PageShift);
 	if(sp)
 		*sp = s;
 	if(s == nil) {
@@ -225,7 +225,7 @@ 
 		return 0;
 	}
 
-	n = class_to_size[s->sizeclass];
+	n = runtime_class_to_size[s->sizeclass];
 	i = ((byte*)v - p)/n;
 	if(base)
 		*base = p + i*n;
@@ -236,12 +236,12 @@ 
 	if(0) {
 		nobj = (s->npages << PageShift) / (n + RefcountOverhead);
 		if((byte*)s->gcref < p || (byte*)(s->gcref+nobj) > p+(s->npages<<PageShift)) {
-			//printf("odd span state=%d span=%p base=%p sizeclass=%d n=%llu size=%llu npages=%llu\n",
-			//	s->state, s, p, s->sizeclass, (unsigned long long)nobj, (unsigned long long)n, (unsigned long long)s->npages);
-			//printf("s->base sizeclass %d v=%p base=%p gcref=%p blocksize=%llu nobj=%llu size=%llu end=%p end=%p\n",
-			//	s->sizeclass, v, p, s->gcref, (unsigned long long)s->npages<<PageShift,
-			//	(unsigned long long)nobj, (unsigned long long)n, s->gcref + nobj, p+(s->npages<<PageShift));
-			throw("bad gcref");
+			// runtime_printf("odd span state=%d span=%p base=%p sizeclass=%d n=%D size=%D npages=%D\n",
+			//	s->state, s, p, s->sizeclass, (uint64)nobj, (uint64)n, (uint64)s->npages);
+			// runtime_printf("s->base sizeclass %d v=%p base=%p gcref=%p blocksize=%D nobj=%D size=%D end=%p end=%p\n",
+			//	s->sizeclass, v, p, s->gcref, (uint64)s->npages<<PageShift,
+			//	(uint64)nobj, (uint64)n, s->gcref + nobj, p+(s->npages<<PageShift));
+			runtime_throw("bad gcref");
 		}
 	}
 	if(ref)
@@ -251,53 +251,56 @@ 
 }
 
 MCache*
-allocmcache(void)
+runtime_allocmcache(void)
 {
 	MCache *c;
 
-	c = FixAlloc_Alloc(&mheap.cachealloc);
+	runtime_lock(&runtime_mheap);
+	c = runtime_FixAlloc_Alloc(&runtime_mheap.cachealloc);
 
 	// Clear the free list used by FixAlloc; assume the rest is zeroed.
 	c->list[0].list = nil;
 
-	mstats.mcache_inuse = mheap.cachealloc.inuse;
-	mstats.mcache_sys = mheap.cachealloc.sys;
+	mstats.mcache_inuse = runtime_mheap.cachealloc.inuse;
+	mstats.mcache_sys = runtime_mheap.cachealloc.sys;
+	runtime_unlock(&runtime_mheap);
 	return c;
 }
 
 void
-mallocinit(void)
+runtime_mallocinit(void)
 {
-	InitSizes();
-	MHeap_Init(&mheap, SysAlloc);
-	m->mcache = allocmcache();
+	runtime_SysMemInit();
+	runtime_InitSizes();
+	runtime_MHeap_Init(&runtime_mheap, runtime_SysAlloc);
+	m->mcache = runtime_allocmcache();
 
 	// See if it works.
-	__go_free(__go_alloc(1));
+	runtime_free(runtime_malloc(1));
 }
 
 // Runtime stubs.
 
 void*
-mal(uintptr n)
+runtime_mal(uintptr n)
 {
-	return mallocgc(n, 0, 1, 1);
+	return runtime_mallocgc(n, 0, 1, 1);
 }
 
 func Alloc(n uintptr) (p *byte) {
-	p = __go_alloc(n);
+	p = runtime_malloc(n);
 }
 
 func Free(p *byte) {
-	__go_free(p);
+	runtime_free(p);
 }
 
 func Lookup(p *byte) (base *byte, size uintptr) {
-	mlookup(p, &base, &size, nil, nil);
+	runtime_mlookup(p, &base, &size, nil, nil);
 }
 
 func GC() {
-	gc(1);
+	runtime_gc(1);
 }
 
 func SetFinalizer(obj Eface, finalizer Eface) {
@@ -306,33 +309,33 @@ 
 	const FuncType *ft;
 
 	if(obj.__type_descriptor == nil) {
-		//printf("runtime.SetFinalizer: first argument is nil interface\n");
+		// runtime_printf("runtime.SetFinalizer: first argument is nil interface\n");
 	throw:
-		throw("runtime.SetFinalizer");
+		runtime_throw("runtime.SetFinalizer");
 	}
 	if(obj.__type_descriptor->__code != GO_PTR) {
-		//printf("runtime.SetFinalizer: first argument is %.*s, not pointer\n", (int)obj.__type_descriptor->__reflection->__length, obj.__type_descriptor->__reflection->__data);
+		// runtime_printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string);
 		goto throw;
 	}
-	if(!mlookup(obj.__object, &base, &size, nil, nil) || obj.__object != base) {
-		//printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
+	if(!runtime_mlookup(obj.__object, &base, &size, nil, nil) || obj.__object != base) {
+		// runtime_printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
 		goto throw;
 	}
 	ft = nil;
 	if(finalizer.__type_descriptor != nil) {
 		if(finalizer.__type_descriptor->__code != GO_FUNC) {
 		badfunc:
-		  //printf("runtime.SetFinalizer: second argument is %.*s, not func(%.*s)\n", (int)finalizer.__type_descriptor->__reflection->__length, finalizer.__type_descriptor->__reflection->__data, (int)obj.__type_descriptor->__reflection->__length, obj.__type_descriptor->__reflection->__data);
+			// runtime_printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.type->string, *obj.type->string);
 			goto throw;
 		}
 		ft = (const FuncType*)finalizer.__type_descriptor;
 		if(ft->__dotdotdot || ft->__in.__count != 1 || !__go_type_descriptors_equal(*(Type**)ft->__in.__values, obj.__type_descriptor))
 			goto badfunc;
 
-		if(getfinalizer(obj.__object, 0)) {
-			//printf("runtime.SetFinalizer: finalizer already set");
+		if(runtime_getfinalizer(obj.__object, 0)) {
+			// runtime_printf("runtime.SetFinalizer: finalizer already set");
 			goto throw;
 		}
 	}
-	addfinalizer(obj.__object, finalizer.__type_descriptor != nil ? *(void**)finalizer.__object : nil, ft);
+	runtime_addfinalizer(obj.__object, finalizer.__type_descriptor != nil ? *(void**)finalizer.__object : nil, ft);
 }
diff -r bb880434e617 libgo/runtime/malloc.h
--- a/libgo/runtime/malloc.h	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/malloc.h	Wed Nov 10 21:37:28 2010 -0800
@@ -135,10 +135,10 @@ 
 // an out-of-memory error has been detected midway through
 // an allocation.  It is okay if SysFree is a no-op.
 
-void*	SysAlloc(uintptr nbytes);
-void	SysFree(void *v, uintptr nbytes);
-void	SysUnused(void *v, uintptr nbytes);
-
+void*	runtime_SysAlloc(uintptr nbytes);
+void	runtime_SysFree(void *v, uintptr nbytes);
+void	runtime_SysUnused(void *v, uintptr nbytes);
+void	runtime_SysMemInit(void);
 
 // FixAlloc is a simple free-list allocator for fixed size objects.
 // Malloc uses a FixAlloc wrapped around SysAlloc to manages its
@@ -161,9 +161,9 @@ 
 	uintptr sys;	// bytes obtained from system
 };
 
-void	FixAlloc_Init(FixAlloc *f, uintptr size, void *(*alloc)(uintptr), void (*first)(void*, byte*), void *arg);
-void*	FixAlloc_Alloc(FixAlloc *f);
-void	FixAlloc_Free(FixAlloc *f, void *p);
+void	runtime_FixAlloc_Init(FixAlloc *f, uintptr size, void *(*alloc)(uintptr), void (*first)(void*, byte*), void *arg);
+void*	runtime_FixAlloc_Alloc(FixAlloc *f);
+void	runtime_FixAlloc_Free(FixAlloc *f, void *p);
 
 
 // Statistics.
@@ -183,6 +183,7 @@ 
 	uint64	heap_sys;	// bytes obtained from system
 	uint64	heap_idle;	// bytes in idle spans
 	uint64	heap_inuse;	// bytes in non-idle spans
+	uint64	heap_objects;	// total number of allocated objects
 
 	// Statistics about allocation of low-level fixed-size structures.
 	// Protected by FixAlloc locks.
@@ -229,11 +230,11 @@ 
 //	taking a bunch of objects out of the central lists
 //	and putting them in the thread free list.
 
-int32	SizeToClass(int32);
-extern	int32	class_to_size[NumSizeClasses];
-extern	int32	class_to_allocnpages[NumSizeClasses];
-extern	int32	class_to_transfercount[NumSizeClasses];
-extern	void	InitSizes(void);
+int32	runtime_SizeToClass(int32);
+extern	int32	runtime_class_to_size[NumSizeClasses];
+extern	int32	runtime_class_to_allocnpages[NumSizeClasses];
+extern	int32	runtime_class_to_transfercount[NumSizeClasses];
+extern	void	runtime_InitSizes(void);
 
 
 // Per-thread (in Go, per-M) cache for small objects.
@@ -251,12 +252,13 @@ 
 	MCacheList list[NumSizeClasses];
 	uint64 size;
 	int64 local_alloc;	// bytes allocated (or freed) since last lock of heap
+	int64 local_objects;	// objects allocated (or freed) since last lock of heap
 	int32 next_sample;	// trigger heap sample after allocating this many bytes
 };
 
-void*	MCache_Alloc(MCache *c, int32 sizeclass, uintptr size, int32 zeroed);
-void	MCache_Free(MCache *c, void *p, int32 sizeclass, uintptr size);
-void	MCache_ReleaseAll(MCache *c);
+void*	runtime_MCache_Alloc(MCache *c, int32 sizeclass, uintptr size, int32 zeroed);
+void	runtime_MCache_Free(MCache *c, void *p, int32 sizeclass, uintptr size);
+void	runtime_MCache_ReleaseAll(MCache *c);
 
 // An MSpan is a run of pages.
 enum
@@ -283,15 +285,15 @@ 
 	};
 };
 
-void	MSpan_Init(MSpan *span, PageID start, uintptr npages);
+void	runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages);
 
 // Every MSpan is in one doubly-linked list,
 // either one of the MHeap's free lists or one of the
 // MCentral's span lists.  We use empty MSpan structures as list heads.
-void	MSpanList_Init(MSpan *list);
-bool	MSpanList_IsEmpty(MSpan *list);
-void	MSpanList_Insert(MSpan *list, MSpan *span);
-void	MSpanList_Remove(MSpan *span);	// from whatever list it is in
+void	runtime_MSpanList_Init(MSpan *list);
+bool	runtime_MSpanList_IsEmpty(MSpan *list);
+void	runtime_MSpanList_Insert(MSpan *list, MSpan *span);
+void	runtime_MSpanList_Remove(MSpan *span);	// from whatever list it is in
 
 
 // Central list of free objects of a given size.
@@ -304,9 +306,9 @@ 
 	int32 nfree;
 };
 
-void	MCentral_Init(MCentral *c, int32 sizeclass);
-int32	MCentral_AllocList(MCentral *c, int32 n, MLink **first);
-void	MCentral_FreeList(MCentral *c, int32 n, MLink *first);
+void	runtime_MCentral_Init(MCentral *c, int32 sizeclass);
+int32	runtime_MCentral_AllocList(MCentral *c, int32 n, MLink **first);
+void	runtime_MCentral_FreeList(MCentral *c, int32 n, MLink *first);
 
 // Main malloc heap.
 // The heap itself is the "free[]" and "large" arrays,
@@ -341,22 +343,22 @@ 
 	FixAlloc spanalloc;	// allocator for Span*
 	FixAlloc cachealloc;	// allocator for MCache*
 };
-extern MHeap mheap;
+extern MHeap runtime_mheap;
 
-void	MHeap_Init(MHeap *h, void *(*allocator)(uintptr));
-MSpan*	MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct);
-void	MHeap_Free(MHeap *h, MSpan *s, int32 acct);
-MSpan*	MHeap_Lookup(MHeap *h, PageID p);
-MSpan*	MHeap_LookupMaybe(MHeap *h, PageID p);
-void	MGetSizeClassInfo(int32 sizeclass, int32 *size, int32 *npages, int32 *nobj);
+void	runtime_MHeap_Init(MHeap *h, void *(*allocator)(uintptr));
+MSpan*	runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct);
+void	runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct);
+MSpan*	runtime_MHeap_Lookup(MHeap *h, PageID p);
+MSpan*	runtime_MHeap_LookupMaybe(MHeap *h, PageID p);
+void	runtime_MGetSizeClassInfo(int32 sizeclass, int32 *size, int32 *npages, int32 *nobj);
 
-void*	mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed);
-int32	mlookup(void *v, byte **base, uintptr *size, MSpan **s, uint32 **ref);
-void	gc(int32 force);
+void*	runtime_mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed);
+int32	runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **s, uint32 **ref);
+void	runtime_gc(int32 force);
 
-void*	SysAlloc(uintptr);
-void	SysUnused(void*, uintptr);
-void	SysFree(void*, uintptr);
+void*	runtime_SysAlloc(uintptr);
+void	runtime_SysUnused(void*, uintptr);
+void	runtime_SysFree(void*, uintptr);
 
 enum
 {
@@ -373,9 +375,9 @@ 
 	RefFlags = 0xFFFF0000U,
 };
 
-void	MProf_Malloc(void*, uintptr);
-void	MProf_Free(void*, uintptr);
-void	MProf_Mark(void (*scan)(int32, byte *, int64));
+void	runtime_MProf_Malloc(void*, uintptr);
+void	runtime_MProf_Free(void*, uintptr);
+void	runtime_MProf_Mark(void (*scan)(byte *, int64));
 
 // Malloc profiling settings.
 // Must match definition in extern.go.
@@ -384,7 +386,7 @@ 
 	MProf_Sample = 1,
 	MProf_All = 2,
 };
-extern int32 malloc_profile;
+extern int32 runtime_malloc_profile;
 
 typedef struct Finalizer Finalizer;
 struct Finalizer
@@ -395,4 +397,4 @@ 
 	const struct __go_func_type *ft;
 };
 
-Finalizer*	getfinalizer(void*, bool);
+Finalizer*	runtime_getfinalizer(void*, bool);
diff -r bb880434e617 libgo/runtime/mcache.c
--- a/libgo/runtime/mcache.c	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/mcache.c	Wed Nov 10 21:37:28 2010 -0800
@@ -10,7 +10,7 @@ 
 #include "malloc.h"
 
 void*
-MCache_Alloc(MCache *c, int32 sizeclass, uintptr size, int32 zeroed)
+runtime_MCache_Alloc(MCache *c, int32 sizeclass, uintptr size, int32 zeroed)
 {
 	MCacheList *l;
 	MLink *first, *v;
@@ -20,8 +20,8 @@ 
 	l = &c->list[sizeclass];
 	if(l->list == nil) {
 		// Replenish using central lists.
-		n = MCentral_AllocList(&mheap.central[sizeclass],
-			class_to_transfercount[sizeclass], &first);
+		n = runtime_MCentral_AllocList(&runtime_mheap.central[sizeclass],
+			runtime_class_to_transfercount[sizeclass], &first);
 		l->list = first;
 		l->nlist = n;
 		c->size += n*size;
@@ -47,6 +47,7 @@ 
 		}
 	}
 	c->local_alloc += size;
+	c->local_objects++;
 	return v;
 }
 
@@ -67,14 +68,14 @@ 
 	l->nlist -= n;
 	if(l->nlist < l->nlistmin)
 		l->nlistmin = l->nlist;
-	c->size -= n*class_to_size[sizeclass];
+	c->size -= n*runtime_class_to_size[sizeclass];
 
 	// Return them to central free list.
-	MCentral_FreeList(&mheap.central[sizeclass], n, first);
+	runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass], n, first);
 }
 
 void
-MCache_Free(MCache *c, void *v, int32 sizeclass, uintptr size)
+runtime_MCache_Free(MCache *c, void *v, int32 sizeclass, uintptr size)
 {
 	int32 i, n;
 	MCacheList *l;
@@ -88,10 +89,11 @@ 
 	l->nlist++;
 	c->size += size;
 	c->local_alloc -= size;
+	c->local_objects--;
 
 	if(l->nlist >= MaxMCacheListLen) {
 		// Release a chunk back.
-		ReleaseN(c, l, class_to_transfercount[sizeclass], sizeclass);
+		ReleaseN(c, l, runtime_class_to_transfercount[sizeclass], sizeclass);
 	}
 
 	if(c->size >= MaxMCacheSize) {
@@ -116,16 +118,11 @@ 
 }
 
 void
-MCache_ReleaseAll(MCache *c)
+runtime_MCache_ReleaseAll(MCache *c)
 {
 	int32 i;
 	MCacheList *l;
 
-	lock(&mheap);
-	mstats.heap_alloc += c->local_alloc;
-	c->local_alloc = 0;
-	unlock(&mheap);
-
 	for(i=0; i<NumSizeClasses; i++) {
 		l = &c->list[i];
 		ReleaseN(c, l, l->nlist, i);
diff -r bb880434e617 libgo/runtime/mcentral.c
--- a/libgo/runtime/mcentral.c	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/mcentral.c	Wed Nov 10 21:37:28 2010 -0800
@@ -23,12 +23,12 @@ 
 
 // Initialize a single central free list.
 void
-MCentral_Init(MCentral *c, int32 sizeclass)
+runtime_MCentral_Init(MCentral *c, int32 sizeclass)
 {
-	initlock(c);
+	runtime_initlock(c);
 	c->sizeclass = sizeclass;
-	MSpanList_Init(&c->nonempty);
-	MSpanList_Init(&c->empty);
+	runtime_MSpanList_Init(&c->nonempty);
+	runtime_MSpanList_Init(&c->empty);
 }
 
 // Allocate up to n objects from the central free list.
@@ -36,16 +36,16 @@ 
 // The objects are linked together by their first words.
 // On return, *pstart points at the first object and *pend at the last.
 int32
-MCentral_AllocList(MCentral *c, int32 n, MLink **pfirst)
+runtime_MCentral_AllocList(MCentral *c, int32 n, MLink **pfirst)
 {
 	MLink *first, *last, *v;
 	int32 i;
 
-	lock(c);
+	runtime_lock(c);
 	// Replenish central list if empty.
-	if(MSpanList_IsEmpty(&c->nonempty)) {
+	if(runtime_MSpanList_IsEmpty(&c->nonempty)) {
 		if(!MCentral_Grow(c)) {
-			unlock(c);
+			runtime_unlock(c);
 			*pfirst = nil;
 			return 0;
 		}
@@ -62,7 +62,7 @@ 
 	last->next = nil;
 	c->nfree -= i;
 
-	unlock(c);
+	runtime_unlock(c);
 	*pfirst = first;
 	return i;
 }
@@ -74,15 +74,15 @@ 
 	MSpan *s;
 	MLink *v;
 
-	if(MSpanList_IsEmpty(&c->nonempty))
+	if(runtime_MSpanList_IsEmpty(&c->nonempty))
 		return nil;
 	s = c->nonempty.next;
 	s->ref++;
 	v = s->freelist;
 	s->freelist = v->next;
 	if(s->freelist == nil) {
-		MSpanList_Remove(s);
-		MSpanList_Insert(&c->empty, s);
+		runtime_MSpanList_Remove(s);
+		runtime_MSpanList_Insert(&c->empty, s);
 	}
 	return v;
 }
@@ -92,7 +92,7 @@ 
 // The objects are linked together by their first words.
 // On return, *pstart points at the first object and *pend at the last.
 void
-MCentral_FreeList(MCentral *c, int32 n, MLink *start)
+runtime_MCentral_FreeList(MCentral *c, int32 n, MLink *start)
 {
 	MLink *v, *next;
 
@@ -101,12 +101,12 @@ 
 	// the transfer cache optimization in the TODO above.
 	USED(n);
 
-	lock(c);
+	runtime_lock(c);
 	for(v=start; v; v=next) {
 		next = v->next;
 		MCentral_Free(c, v);
 	}
-	unlock(c);
+	runtime_unlock(c);
 }
 
 // Helper: free one object back into the central free list.
@@ -120,14 +120,14 @@ 
 
 	// Find span for v.
 	page = (uintptr)v >> PageShift;
-	s = MHeap_Lookup(&mheap, page);
+	s = runtime_MHeap_Lookup(&runtime_mheap, page);
 	if(s == nil || s->ref == 0)
-		throw("invalid free");
+		runtime_throw("invalid free");
 
 	// Move to nonempty if necessary.
 	if(s->freelist == nil) {
-		MSpanList_Remove(s);
-		MSpanList_Insert(&c->nonempty, s);
+		runtime_MSpanList_Remove(s);
+		runtime_MSpanList_Insert(&c->nonempty, s);
 	}
 
 	// Add v back to s's free list.
@@ -138,8 +138,8 @@ 
 
 	// If s is completely freed, return it to the heap.
 	if(--s->ref == 0) {
-		size = class_to_size[c->sizeclass];
-		MSpanList_Remove(s);
+		size = runtime_class_to_size[c->sizeclass];
+		runtime_MSpanList_Remove(s);
 		// The second word of each freed block indicates
 		// whether it needs to be zeroed.  The first word
 		// is the link pointer and must always be cleared.
@@ -152,20 +152,20 @@ 
 		}
 		s->freelist = nil;
 		c->nfree -= (s->npages << PageShift) / size;
-		unlock(c);
-		MHeap_Free(&mheap, s, 0);
-		lock(c);
+		runtime_unlock(c);
+		runtime_MHeap_Free(&runtime_mheap, s, 0);
+		runtime_lock(c);
 	}
 }
 
 void
-MGetSizeClassInfo(int32 sizeclass, int32 *sizep, int32 *npagesp, int32 *nobj)
+runtime_MGetSizeClassInfo(int32 sizeclass, int32 *sizep, int32 *npagesp, int32 *nobj)
 {
 	int32 size;
 	int32 npages;
 
-	npages = class_to_allocnpages[sizeclass];
-	size = class_to_size[sizeclass];
+	npages = runtime_class_to_allocnpages[sizeclass];
+	size = runtime_class_to_size[sizeclass];
 	*npagesp = npages;
 	*sizep = size;
 	*nobj = (npages << PageShift) / (size + RefcountOverhead);
@@ -181,12 +181,12 @@ 
 	byte *p;
 	MSpan *s;
 
-	unlock(c);
-	MGetSizeClassInfo(c->sizeclass, &size, &npages, &n);
-	s = MHeap_Alloc(&mheap, npages, c->sizeclass, 0);
+	runtime_unlock(c);
+	runtime_MGetSizeClassInfo(c->sizeclass, &size, &npages, &n);
+	s = runtime_MHeap_Alloc(&runtime_mheap, npages, c->sizeclass, 0);
 	if(s == nil) {
 		// TODO(rsc): Log out of memory
-		lock(c);
+		runtime_lock(c);
 		return false;
 	}
 
@@ -202,8 +202,8 @@ 
 	}
 	*tailp = nil;
 
-	lock(c);
+	runtime_lock(c);
 	c->nfree += n;
-	MSpanList_Insert(&c->nonempty, s);
+	runtime_MSpanList_Insert(&c->nonempty, s);
 	return true;
 }
diff -r bb880434e617 libgo/runtime/mem.c
--- a/libgo/runtime/mem.c	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/mem.c	Wed Nov 10 21:37:28 2010 -0800
@@ -4,7 +4,7 @@ 
 #include "malloc.h"
 
 void*
-SysAlloc(uintptr n)
+runtime_SysAlloc(uintptr n)
 {
 	void *p;
 
@@ -23,7 +23,7 @@ 
 }
 
 void
-SysUnused(void *v, uintptr n)
+runtime_SysUnused(void *v, uintptr n)
 {
 	USED(v);
 	USED(n);
@@ -31,10 +31,20 @@ 
 }
 
 void
-SysFree(void *v, uintptr n)
+runtime_SysFree(void *v, uintptr n)
 {
-	USED(v);
-	USED(n);
-	// TODO(rsc): call munmap
+	mstats.sys -= n;
+	runtime_munmap(v, n);
 }
 
+void
+runtime_SysMemInit(void)
+{
+	// Code generators assume that references to addresses
+	// on the first page will fault.  Map the page explicitly with
+	// no permissions, to head off possible bugs like the system
+	// allocating that page as the virtual address space fills.
+	// Ignore any error, since other systems might be smart
+	// enough to never allow anything there.
+	runtime_mmap(nil, 4096, PROT_NONE, MAP_FIXED|MAP_ANON|MAP_PRIVATE, -1, 0);
+}
diff -r bb880434e617 libgo/runtime/mem_posix_memalign.c
--- a/libgo/runtime/mem_posix_memalign.c	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/mem_posix_memalign.c	Wed Nov 10 21:37:28 2010 -0800
@@ -4,7 +4,7 @@ 
 #include "malloc.h"
 
 void*
-SysAlloc(uintptr n)
+runtime_SysAlloc(uintptr n)
 {
 	void *p;
 
@@ -18,7 +18,7 @@ 
 }
 
 void
-SysUnused(void *v, uintptr n)
+runtime_SysUnused(void *v, uintptr n)
 {
 	USED(v);
 	USED(n);
@@ -26,10 +26,13 @@ 
 }
 
 void
-SysFree(void *v, uintptr n)
+runtime_SysFree(void *v, uintptr n)
 {
-	USED(v);
-	USED(n);
-	// TODO(rsc): call munmap
+	mstats.sys -= n;
+	free(v);
 }
 
+void
+runtime_SysMemInit(void)
+{
+}
diff -r bb880434e617 libgo/runtime/mfinal.c
--- a/libgo/runtime/mfinal.c	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/mfinal.c	Wed Nov 10 21:37:28 2010 -0800
@@ -5,7 +5,7 @@ 
 #include "runtime.h"
 #include "malloc.h"
 
-Lock finlock = LOCK_INITIALIZER;
+static Lock finlock = LOCK_INITIALIZER;
 
 // Finalizer hash table.  Direct hash, linear scan, at most 3/4 full.
 // Table size is power of 3 so that hash can be key % max.
@@ -44,7 +44,7 @@ 
 	}
 
 	// cannot happen - table is known to be non-full
-	throw("finalizer table inconsistent");
+	runtime_throw("finalizer table inconsistent");
 
 ret:
 	t->key[i] = k;
@@ -77,7 +77,7 @@ 
 	}
 
 	// cannot happen - table is known to be non-full
-	throw("finalizer table inconsistent");
+	runtime_throw("finalizer table inconsistent");
 	return nil;
 }
 
@@ -85,7 +85,7 @@ 
 
 // add finalizer; caller is responsible for making sure not already in table
 void
-addfinalizer(void *p, void (*f)(void*), const struct __go_func_type *ft)
+runtime_addfinalizer(void *p, void (*f)(void*), const struct __go_func_type *ft)
 {
 	Fintab newtab;
 	int32 i;
@@ -95,28 +95,28 @@ 
 	
 	e = nil;
 	if(f != nil) {
-		e = mal(sizeof *e);
+		e = runtime_mal(sizeof *e);
 		e->fn = f;
 		e->ft = ft;
 	}
 
-	lock(&finlock);
-	if(!mlookup(p, &base, nil, nil, &ref) || p != base) {
-		unlock(&finlock);
-		throw("addfinalizer on invalid pointer");
+	runtime_lock(&finlock);
+	if(!runtime_mlookup(p, &base, nil, nil, &ref) || p != base) {
+		runtime_unlock(&finlock);
+		runtime_throw("addfinalizer on invalid pointer");
 	}
 	if(f == nil) {
 		if(*ref & RefHasFinalizer) {
 			lookfintab(&fintab, p, 1);
 			*ref &= ~RefHasFinalizer;
 		}
-		unlock(&finlock);
+		runtime_unlock(&finlock);
 		return;
 	}
 
 	if(*ref & RefHasFinalizer) {
-		unlock(&finlock);
-		throw("double finalizer");
+		runtime_unlock(&finlock);
+		runtime_throw("double finalizer");
 	}
 	*ref |= RefHasFinalizer;
 
@@ -134,8 +134,8 @@ 
 			newtab.max *= 3;
 		}
 
-		newtab.key = mallocgc(newtab.max*sizeof newtab.key[0], RefNoPointers, 0, 1);
-		newtab.val = mallocgc(newtab.max*sizeof newtab.val[0], 0, 0, 1);
+		newtab.key = runtime_mallocgc(newtab.max*sizeof newtab.key[0], RefNoPointers, 0, 1);
+		newtab.val = runtime_mallocgc(newtab.max*sizeof newtab.val[0], 0, 0, 1);
 
 		for(i=0; i<fintab.max; i++) {
 			void *k;
@@ -144,40 +144,40 @@ 
 			if(k != nil && k != (void*)-1)
 				addfintab(&newtab, k, fintab.val[i]);
 		}
-		__go_free(fintab.key);
-		__go_free(fintab.val);
+		runtime_free(fintab.key);
+		runtime_free(fintab.val);
 		fintab = newtab;
 	}
 
 	addfintab(&fintab, p, e);
-	unlock(&finlock);
+	runtime_unlock(&finlock);
 }
 
 // get finalizer; if del, delete finalizer.
 // caller is responsible for updating RefHasFinalizer bit.
 Finalizer*
-getfinalizer(void *p, bool del)
+runtime_getfinalizer(void *p, bool del)
 {
 	Finalizer *f;
 	
-	lock(&finlock);
+	runtime_lock(&finlock);
 	f = lookfintab(&fintab, p, del);
-	unlock(&finlock);
+	runtime_unlock(&finlock);
 	return f;
 }
 
 void
-walkfintab(void (*fn)(void*), void (*scan)(int32, byte *, int64))
+runtime_walkfintab(void (*fn)(void*), void (*scan)(byte *, int64))
 {
 	void **key;
 	void **ekey;
 
-	scan(0, (byte*)&fintab, sizeof fintab);
-	lock(&finlock);
+	scan((byte*)&fintab, sizeof fintab);
+	runtime_lock(&finlock);
 	key = fintab.key;
 	ekey = key + fintab.max;
 	for(; key < ekey; key++)
 		if(*key != nil && *key != ((void*)-1))
 			fn(*key);
-	unlock(&finlock);
+	runtime_unlock(&finlock);
 }
diff -r bb880434e617 libgo/runtime/mfixalloc.c
--- a/libgo/runtime/mfixalloc.c	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/mfixalloc.c	Wed Nov 10 21:37:28 2010 -0800
@@ -12,7 +12,7 @@ 
 // Initialize f to allocate objects of the given size,
 // using the allocator to obtain chunks of memory.
 void
-FixAlloc_Init(FixAlloc *f, uintptr size, void *(*alloc)(uintptr), void (*first)(void*, byte*), void *arg)
+runtime_FixAlloc_Init(FixAlloc *f, uintptr size, void *(*alloc)(uintptr), void (*first)(void*, byte*), void *arg)
 {
 	f->size = size;
 	f->alloc = alloc;
@@ -26,7 +26,7 @@ 
 }
 
 void*
-FixAlloc_Alloc(FixAlloc *f)
+runtime_FixAlloc_Alloc(FixAlloc *f)
 {
 	void *v;
 
@@ -40,7 +40,7 @@ 
 		f->sys += FixAllocChunk;
 		f->chunk = f->alloc(FixAllocChunk);
 		if(f->chunk == nil)
-			throw("out of memory (FixAlloc)");
+			runtime_throw("out of memory (FixAlloc)");
 		f->nchunk = FixAllocChunk;
 	}
 	v = f->chunk;
@@ -53,7 +53,7 @@ 
 }
 
 void
-FixAlloc_Free(FixAlloc *f, void *p)
+runtime_FixAlloc_Free(FixAlloc *f, void *p)
 {
 	f->inuse -= f->size;
 	*(void**)p = f->list;
diff -r bb880434e617 libgo/runtime/mgc0.c
--- a/libgo/runtime/mgc0.c	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/mgc0.c	Wed Nov 10 21:37:28 2010 -0800
@@ -19,11 +19,19 @@ 
 	Debug = 0
 };
 
+typedef struct BlockList BlockList;
+struct BlockList
+{
+	byte *obj;
+	uintptr size;
+};
+
 static bool finstarted;
 static Lock finqlock = LOCK_INITIALIZER;
 static pthread_cond_t finqcond = PTHREAD_COND_INITIALIZER;
 static Finalizer *finq;
 static int32 fingwait;
+static BlockList *bl, *ebl;
 
 static void runfinq(void*);
 
@@ -32,7 +40,7 @@ 
 };
 
 static void
-scanblock(int32 depth, byte *b, int64 n)
+scanblock(byte *b, int64 n)
 {
 	int32 off;
 	void *obj;
@@ -40,48 +48,65 @@ 
 	uint32 *refp, ref;
 	void **vp;
 	int64 i;
+	BlockList *w;
 
-	if(Debug > 1)
-		printf("%d scanblock %p %lld\n", depth, b, (long long) n);
-	off = (uint32)(uintptr)b & (PtrSize-1);
-	if(off) {
-		b += PtrSize - off;
-		n -= PtrSize - off;
-	}
+	w = bl;
+	w->obj = b;
+	w->size = n;
+	w++;
 
-	vp = (void**)b;
-	n /= PtrSize;
-	for(i=0; i<n; i++) {
-		obj = vp[i];
-		if(obj == nil)
-			continue;
-		if(mheap.closure_min != nil && mheap.closure_min <= (byte*)obj && (byte*)obj < mheap.closure_max) {
-			if((((uintptr)obj) & 63) != 0)
+	while(w > bl) {
+		w--;
+		b = w->obj;
+		n = w->size;
+
+		if(Debug > 1)
+			runtime_printf("scanblock %p %lld\n", b, (long long) n);
+		off = (uint32)(uintptr)b & (PtrSize-1);
+		if(off) {
+			b += PtrSize - off;
+			n -= PtrSize - off;
+		}
+	
+		vp = (void**)b;
+		n /= PtrSize;
+		for(i=0; i<n; i++) {
+			obj = vp[i];
+			if(obj == nil)
 				continue;
-
-			// Looks like a Native Client closure.
-			// Actual pointer is pointed at by address in first instruction.
-			// Embedded pointer starts at byte 2.
-			// If it is f4f4f4f4 then that space hasn't been
-			// used for a closure yet (f4 is the HLT instruction).
-			// See nacl/386/closure.c for more.
-			void **pp;
-			pp = *(void***)((byte*)obj+2);
-			if(pp == (void**)0xf4f4f4f4)	// HLT... - not a closure after all
-				continue;
-			obj = *pp;
-		}
-		if(mheap.min <= (byte*)obj && (byte*)obj < mheap.max) {
-			if(mlookup(obj, (byte**)&obj, &size, nil, &refp)) {
-				ref = *refp;
-				switch(ref & ~RefFlags) {
-				case RefNone:
-					if(Debug > 1)
-						printf("%d found at %p: ", depth, &vp[i]);
-					*refp = RefSome | (ref & RefFlags);
-					if(!(ref & RefNoPointers))
-						scanblock(depth+1, obj, size);
-					break;
+			if(runtime_mheap.closure_min != nil && runtime_mheap.closure_min <= (byte*)obj && (byte*)obj < runtime_mheap.closure_max) {
+				if((((uintptr)obj) & 63) != 0)
+					continue;
+	
+				// Looks like a Native Client closure.
+				// Actual pointer is pointed at by address in first instruction.
+				// Embedded pointer starts at byte 2.
+				// If it is f4f4f4f4 then that space hasn't been
+				// used for a closure yet (f4 is the HLT instruction).
+				// See nacl/386/closure.c for more.
+				void **pp;
+				pp = *(void***)((byte*)obj+2);
+				if(pp == (void**)0xf4f4f4f4)	// HLT... - not a closure after all
+					continue;
+				obj = *pp;
+			}
+			if(runtime_mheap.min <= (byte*)obj && (byte*)obj < runtime_mheap.max) {
+				if(runtime_mlookup(obj, (byte**)&obj, &size, nil, &refp)) {
+					ref = *refp;
+					switch(ref & ~RefFlags) {
+					case RefNone:
+						if(Debug > 1)
+							runtime_printf("found at %p: ", &vp[i]);
+						*refp = RefSome | (ref & RefFlags);
+						if(!(ref & RefNoPointers)) {
+							if(w >= ebl)
+								runtime_throw("scanblock: garbage collection stack overflow");
+							w->obj = obj;
+							w->size = size;
+							w++;
+						}
+						break;
+					}
 				}
 			}
 		}
@@ -96,11 +121,11 @@ 
 
 	size = 0;
 	refp = nil;
-	if(!mlookup(v, (byte**)&v, &size, nil, &refp) || !(*refp & RefHasFinalizer))
-		throw("mark - finalizer inconsistency");
+	if(!runtime_mlookup(v, (byte**)&v, &size, nil, &refp) || !(*refp & RefHasFinalizer))
+		runtime_throw("mark - finalizer inconsistency");
 	
 	// do not mark the finalizer block itself.  just mark the things it points at.
-	scanblock(1, v, size);
+	scanblock(v, size);
 }
 
 struct root_list {
@@ -125,28 +150,49 @@ 
 static void
 mark(void)
 {
+	uintptr blsize, nobj;
 	struct root_list *pl;
 
+	// Figure out how big an object stack we need.
+	// Get a new one if we need more than we have
+	// or we need significantly less than we have.
+	nobj = mstats.heap_objects;
+	if(nobj > (uintptr)(ebl - bl) || nobj < (uintptr)(ebl-bl)/4) {
+		if(bl != nil)
+			runtime_SysFree(bl, (byte*)ebl - (byte*)bl);
+		
+		// While we're allocated a new object stack,
+		// add 20% headroom and also round up to
+		// the nearest page boundary, since mmap
+		// will anyway.
+		nobj = nobj * 12/10;
+		blsize = nobj * sizeof *bl;
+		blsize = (blsize + 4095) & ~4095;
+		nobj = blsize / sizeof *bl;
+		bl = runtime_SysAlloc(blsize);
+		ebl = bl + nobj;
+	}
+
 	for(pl = roots; pl != nil; pl = pl->next) {
 		struct root* pr = &pl->roots[0];
 		while(1) {
 			void *decl = pr->decl;
 			if(decl == nil)
 				break;
-			scanblock(0, decl, pr->size);
+			scanblock(decl, pr->size);
 			pr++;
 		}
 	}
 
-	scanblock(0, (byte*)&m0, sizeof m0);
-	scanblock(0, (byte*)&finq, sizeof finq);
-	MProf_Mark(scanblock);
+	scanblock((byte*)&m0, sizeof m0);
+	scanblock((byte*)&finq, sizeof finq);
+	runtime_MProf_Mark(scanblock);
 
 	// mark stacks
 	__go_scanstacks(scanblock);
 
 	// mark things pointed at by objects with finalizers
-	walkfintab(markfin, scanblock);
+	runtime_walkfintab(markfin, scanblock);
 }
 
 // free RefNone, free & queue finalizers for RefNone|RefHasFinalizer, reset RefSome
@@ -169,14 +215,14 @@ 
 			mstats.alloc -= s->npages<<PageShift;
 			runtime_memclr(p, s->npages<<PageShift);
 			if(ref & RefProfiled)
-				MProf_Free(p, s->npages<<PageShift);
+				runtime_MProf_Free(p, s->npages<<PageShift);
 			s->gcref0 = RefFree;
-			MHeap_Free(&mheap, s, 1);
+			runtime_MHeap_Free(&runtime_mheap, s, 1);
 			break;
 		case RefNone|RefHasFinalizer:
-			f = getfinalizer(p, 1);
+			f = runtime_getfinalizer(p, 1);
 			if(f == nil)
-				throw("finalizer inconsistency");
+				runtime_throw("finalizer inconsistency");
 			f->arg = p;
 			f->next = finq;
 			finq = f;
@@ -191,7 +237,7 @@ 
 	}
 
 	// Chunk full of small blocks.
-	MGetSizeClassInfo(s->sizeclass, &size, &npages, &n);
+	runtime_MGetSizeClassInfo(s->sizeclass, &size, &npages, &n);
 	gcrefp = s->gcref;
 	gcrefep = s->gcref + n;
 	for(; gcrefp < gcrefep; gcrefp++, p += size) {
@@ -202,19 +248,19 @@ 
 		case RefNone:
 			// Free small object.
 			if(ref & RefProfiled)
-				MProf_Free(p, size);
+				runtime_MProf_Free(p, size);
 			*gcrefp = RefFree;
 			c = m->mcache;
 			if(size > (int32)sizeof(uintptr))
 				((uintptr*)p)[1] = 1;	// mark as "needs to be zeroed"
 			mstats.alloc -= size;
 			mstats.by_size[s->sizeclass].nfree++;
-			MCache_Free(c, p, s->sizeclass, size);
+			runtime_MCache_Free(c, p, s->sizeclass, size);
 			break;
 		case RefNone|RefHasFinalizer:
-			f = getfinalizer(p, 1);
+			f = runtime_getfinalizer(p, 1);
 			if(f == nil)
-				throw("finalizer inconsistency");
+				runtime_throw("finalizer inconsistency");
 			f->arg = p;
 			f->next = finq;
 			finq = f;
@@ -233,7 +279,7 @@ 
 {
 	MSpan *s;
 
-	for(s = mheap.allspans; s != nil; s = s->allnext)
+	for(s = runtime_mheap.allspans; s != nil; s = s->allnext)
 		if(s->state == MSpanInUse)
 			sweepspan(s);
 }
@@ -252,7 +298,7 @@ 
 static int32 gcpercent = -2;
 
 void
-gc(int32 force __attribute__ ((unused)))
+runtime_gc(int32 force __attribute__ ((unused)))
 {
 	int64 t0, t1;
 	char *p;
@@ -266,40 +312,41 @@ 
 	// problems, don't bother trying to run gc
 	// while holding a lock.  The next mallocgc
 	// without a lock will do the gc instead.
-	if(!mstats.enablegc || m->locks > 0 /* || panicking */)
+	if(!mstats.enablegc || m->locks > 0 /* || runtime_panicking */)
 		return;
 
 	if(gcpercent == -2) {	// first time through
-		p = getenv("GOGC");
+		p = runtime_getenv("GOGC");
 		if(p == nil || p[0] == '\0')
 			gcpercent = 100;
-		else if(strcmp(p, "off") == 0)
+		else if(runtime_strcmp(p, "off") == 0)
 			gcpercent = -1;
 		else
-			gcpercent = atoi(p);
+			gcpercent = runtime_atoi(p);
 	}
 	if(gcpercent < 0)
 		return;
 
-	lock(&finqlock);
-	lock(&gcsema);
+	runtime_lock(&finqlock);
+	runtime_lock(&gcsema);
 	m->locks++;	// disable gc during the mallocs in newproc
-	t0 = nanotime();
-	stoptheworld();
+	t0 = runtime_nanotime();
+	runtime_stoptheworld();
 	if(force || mstats.heap_alloc >= mstats.next_gc) {
+		__go_cachestats();
 		mark();
 		sweep();
 		__go_stealcache();
 		mstats.next_gc = mstats.heap_alloc+mstats.heap_alloc*gcpercent/100;
 	}
 
-	t1 = nanotime();
+	t1 = runtime_nanotime();
 	mstats.numgc++;
 	mstats.pause_ns += t1 - t0;
 	if(mstats.debuggc)
-		printf("pause %llu\n", (unsigned long long)t1-t0);
-	unlock(&gcsema);
-	starttheworld();
+		runtime_printf("pause %llu\n", (unsigned long long)t1-t0);
+	runtime_unlock(&gcsema);
+	runtime_starttheworld();
 
 	// finqlock is still held.
 	fp = finq;
@@ -315,7 +362,7 @@ 
 		}
 	}
 	m->locks--;
-	unlock(&finqlock);
+	runtime_unlock(&finqlock);
 }
 
 static void
@@ -326,16 +373,16 @@ 
 	USED(dummy);
 
 	for(;;) {
-		lock(&finqlock);
+		runtime_lock(&finqlock);
 		f = finq;
 		finq = nil;
 		if(f == nil) {
 			fingwait = 1;
 			pthread_cond_wait(&finqcond, &finqlock.mutex);
-			unlock(&finqlock);
+			runtime_unlock(&finqlock);
 			continue;
 		}
-		unlock(&finqlock);
+		runtime_unlock(&finqlock);
 		for(; f; f=next) {
 			void *params[1];
 
@@ -345,9 +392,9 @@ 
 			f->fn = nil;
 			f->arg = nil;
 			f->next = nil;
-			__go_free(f);
+			runtime_free(f);
 		}
-		gc(1);	// trigger another gc to clean up the finalized objects, if possible
+		runtime_gc(1);	// trigger another gc to clean up the finalized objects, if possible
 	}
 }
 
diff -r bb880434e617 libgo/runtime/mheap.c
--- a/libgo/runtime/mheap.c	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/mheap.c	Wed Nov 10 21:37:28 2010 -0800
@@ -35,39 +35,43 @@ 
 
 // Initialize the heap; fetch memory using alloc.
 void
-MHeap_Init(MHeap *h, void *(*alloc)(uintptr))
+runtime_MHeap_Init(MHeap *h, void *(*alloc)(uintptr))
 {
 	uint32 i;
 
-	initlock(h);
-	FixAlloc_Init(&h->spanalloc, sizeof(MSpan), alloc, RecordSpan, h);
-	FixAlloc_Init(&h->cachealloc, sizeof(MCache), alloc, nil, nil);
-	MHeapMap_Init(&h->map, alloc);
+	runtime_initlock(h);
+	runtime_FixAlloc_Init(&h->spanalloc, sizeof(MSpan), alloc, RecordSpan, h);
+	runtime_FixAlloc_Init(&h->cachealloc, sizeof(MCache), alloc, nil, nil);
+	runtime_MHeapMap_Init(&h->map, alloc);
 	// h->mapcache needs no init
 	for(i=0; i<nelem(h->free); i++)
-		MSpanList_Init(&h->free[i]);
-	MSpanList_Init(&h->large);
+		runtime_MSpanList_Init(&h->free[i]);
+	runtime_MSpanList_Init(&h->large);
 	for(i=0; i<nelem(h->central); i++)
-		MCentral_Init(&h->central[i], i);
+		runtime_MCentral_Init(&h->central[i], i);
 }
 
 // Allocate a new span of npage pages from the heap
 // and record its size class in the HeapMap and HeapMapCache.
 MSpan*
-MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct)
+runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct)
 {
 	MSpan *s;
 
-	lock(h);
+	runtime_lock(h);
 	mstats.heap_alloc += m->mcache->local_alloc;
 	m->mcache->local_alloc = 0;
+	mstats.heap_objects += m->mcache->local_objects;
+	m->mcache->local_objects = 0;
 	s = MHeap_AllocLocked(h, npage, sizeclass);
 	if(s != nil) {
 		mstats.heap_inuse += npage<<PageShift;
-		if(acct)
+		if(acct) {
+			mstats.heap_objects++;
 			mstats.heap_alloc += npage<<PageShift;
+		}
 	}
-	unlock(h);
+	runtime_unlock(h);
 	return s;
 }
 
@@ -79,7 +83,7 @@ 
 
 	// Try in fixed-size lists up to max.
 	for(n=npage; n < nelem(h->free); n++) {
-		if(!MSpanList_IsEmpty(&h->free[n])) {
+		if(!runtime_MSpanList_IsEmpty(&h->free[n])) {
 			s = h->free[n].next;
 			goto HaveSpan;
 		}
@@ -96,22 +100,22 @@ 
 HaveSpan:
 	// Mark span in use.
 	if(s->state != MSpanFree)
-		throw("MHeap_AllocLocked - MSpan not free");
+		runtime_throw("MHeap_AllocLocked - MSpan not free");
 	if(s->npages < npage)
-		throw("MHeap_AllocLocked - bad npages");
-	MSpanList_Remove(s);
+		runtime_throw("MHeap_AllocLocked - bad npages");
+	runtime_MSpanList_Remove(s);
 	s->state = MSpanInUse;
 
 	if(s->npages > npage) {
 		// Trim extra and put it back in the heap.
-		t = FixAlloc_Alloc(&h->spanalloc);
+		t = runtime_FixAlloc_Alloc(&h->spanalloc);
 		mstats.mspan_inuse = h->spanalloc.inuse;
 		mstats.mspan_sys = h->spanalloc.sys;
-		MSpan_Init(t, s->start + npage, s->npages - npage);
+		runtime_MSpan_Init(t, s->start + npage, s->npages - npage);
 		s->npages = npage;
-		MHeapMap_Set(&h->map, t->start - 1, s);
-		MHeapMap_Set(&h->map, t->start, t);
-		MHeapMap_Set(&h->map, t->start + t->npages - 1, t);
+		runtime_MHeapMap_Set(&h->map, t->start - 1, s);
+		runtime_MHeapMap_Set(&h->map, t->start, t);
+		runtime_MHeapMap_Set(&h->map, t->start + t->npages - 1, t);
 		t->state = MSpanInUse;
 		MHeap_FreeLocked(h, t);
 	}
@@ -120,7 +124,7 @@ 
 	// able to map interior pointer to containing span.
 	s->sizeclass = sizeclass;
 	for(n=0; n<npage; n++)
-		MHeapMap_Set(&h->map, s->start+n, s);
+		runtime_MHeapMap_Set(&h->map, s->start+n, s);
 	return s;
 }
 
@@ -168,11 +172,11 @@ 
 	if(ask < HeapAllocChunk)
 		ask = HeapAllocChunk;
 
-	v = SysAlloc(ask);
+	v = runtime_SysAlloc(ask);
 	if(v == nil) {
 		if(ask > (npage<<PageShift)) {
 			ask = npage<<PageShift;
-			v = SysAlloc(ask);
+			v = runtime_SysAlloc(ask);
 		}
 		if(v == nil)
 			return false;
@@ -187,19 +191,19 @@ 
 	// NOTE(rsc): In tcmalloc, if we've accumulated enough
 	// system allocations, the heap map gets entirely allocated
 	// in 32-bit mode.  (In 64-bit mode that's not practical.)
-	if(!MHeapMap_Preallocate(&h->map, ((uintptr)v>>PageShift) - 1, (ask>>PageShift) + 2)) {
-		SysFree(v, ask);
+	if(!runtime_MHeapMap_Preallocate(&h->map, ((uintptr)v>>PageShift) - 1, (ask>>PageShift) + 2)) {
+		runtime_SysFree(v, ask);
 		return false;
 	}
 
 	// Create a fake "in use" span and free it, so that the
 	// right coalescing happens.
-	s = FixAlloc_Alloc(&h->spanalloc);
+	s = runtime_FixAlloc_Alloc(&h->spanalloc);
 	mstats.mspan_inuse = h->spanalloc.inuse;
 	mstats.mspan_sys = h->spanalloc.sys;
-	MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
-	MHeapMap_Set(&h->map, s->start, s);
-	MHeapMap_Set(&h->map, s->start + s->npages - 1, s);
+	runtime_MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
+	runtime_MHeapMap_Set(&h->map, s->start, s);
+	runtime_MHeapMap_Set(&h->map, s->start + s->npages - 1, s);
 	s->state = MSpanInUse;
 	MHeap_FreeLocked(h, s);
 	return true;
@@ -209,9 +213,9 @@ 
 // Page number is guaranteed to be in map
 // and is guaranteed to be start or end of span.
 MSpan*
-MHeap_Lookup(MHeap *h, PageID p)
+runtime_MHeap_Lookup(MHeap *h, PageID p)
 {
-	return MHeapMap_Get(&h->map, p);
+	return runtime_MHeapMap_Get(&h->map, p);
 }
 
 // Look up the span at the given page number.
@@ -222,11 +226,11 @@ 
 // other garbage in their middles, so we have to
 // check for that.
 MSpan*
-MHeap_LookupMaybe(MHeap *h, PageID p)
+runtime_MHeap_LookupMaybe(MHeap *h, PageID p)
 {
 	MSpan *s;
 
-	s = MHeapMap_GetMaybe(&h->map, p);
+	s = runtime_MHeapMap_GetMaybe(&h->map, p);
 	if(s == nil || p < s->start || p - s->start >= s->npages)
 		return nil;
 	if(s->state != MSpanInUse)
@@ -236,16 +240,20 @@ 
 
 // Free the span back into the heap.
 void
-MHeap_Free(MHeap *h, MSpan *s, int32 acct)
+runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct)
 {
-	lock(h);
+	runtime_lock(h);
 	mstats.heap_alloc += m->mcache->local_alloc;
 	m->mcache->local_alloc = 0;
+	mstats.heap_objects += m->mcache->local_objects;
+	m->mcache->local_objects = 0;
 	mstats.heap_inuse -= s->npages<<PageShift;
-	if(acct)
+	if(acct) {
 		mstats.heap_alloc -= s->npages<<PageShift;
+		mstats.heap_objects--;
+	}
 	MHeap_FreeLocked(h, s);
-	unlock(h);
+	runtime_unlock(h);
 }
 
 static void
@@ -254,45 +262,45 @@ 
 	MSpan *t;
 
 	if(s->state != MSpanInUse || s->ref != 0) {
-		// printf("MHeap_FreeLocked - span %p ptr %zu state %u ref %u\n", s, (size_t) (s->start<<PageShift), (unsigned int) s->state, (unsigned int) s->ref);
-		throw("MHeap_FreeLocked - invalid free");
+		// runtime_printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d\n", s, s->start<<PageShift, s->state, s->ref);
+		runtime_throw("MHeap_FreeLocked - invalid free");
 	}
 	s->state = MSpanFree;
-	MSpanList_Remove(s);
+	runtime_MSpanList_Remove(s);
 
 	// Coalesce with earlier, later spans.
-	if((t = MHeapMap_Get(&h->map, s->start - 1)) != nil && t->state != MSpanInUse) {
+	if((t = runtime_MHeapMap_Get(&h->map, s->start - 1)) != nil && t->state != MSpanInUse) {
 		s->start = t->start;
 		s->npages += t->npages;
-		MHeapMap_Set(&h->map, s->start, s);
-		MSpanList_Remove(t);
+		runtime_MHeapMap_Set(&h->map, s->start, s);
+		runtime_MSpanList_Remove(t);
 		t->state = MSpanDead;
-		FixAlloc_Free(&h->spanalloc, t);
+		runtime_FixAlloc_Free(&h->spanalloc, t);
 		mstats.mspan_inuse = h->spanalloc.inuse;
 		mstats.mspan_sys = h->spanalloc.sys;
 	}
-	if((t = MHeapMap_Get(&h->map, s->start + s->npages)) != nil && t->state != MSpanInUse) {
+	if((t = runtime_MHeapMap_Get(&h->map, s->start + s->npages)) != nil && t->state != MSpanInUse) {
 		s->npages += t->npages;
-		MHeapMap_Set(&h->map, s->start + s->npages - 1, s);
-		MSpanList_Remove(t);
+		runtime_MHeapMap_Set(&h->map, s->start + s->npages - 1, s);
+		runtime_MSpanList_Remove(t);
 		t->state = MSpanDead;
-		FixAlloc_Free(&h->spanalloc, t);
+		runtime_FixAlloc_Free(&h->spanalloc, t);
 		mstats.mspan_inuse = h->spanalloc.inuse;
 		mstats.mspan_sys = h->spanalloc.sys;
 	}
 
 	// Insert s into appropriate list.
 	if(s->npages < nelem(h->free))
-		MSpanList_Insert(&h->free[s->npages], s);
+		runtime_MSpanList_Insert(&h->free[s->npages], s);
 	else
-		MSpanList_Insert(&h->large, s);
+		runtime_MSpanList_Insert(&h->large, s);
 
 	// TODO(rsc): IncrementalScavenge() to return memory to OS.
 }
 
 // Initialize a new span with the given start and npages.
 void
-MSpan_Init(MSpan *span, PageID start, uintptr npages)
+runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages)
 {
 	span->next = nil;
 	span->prev = nil;
@@ -306,7 +314,7 @@ 
 
 // Initialize an empty doubly-linked list.
 void
-MSpanList_Init(MSpan *list)
+runtime_MSpanList_Init(MSpan *list)
 {
 	list->state = MSpanListHead;
 	list->next = list;
@@ -314,7 +322,7 @@ 
 }
 
 void
-MSpanList_Remove(MSpan *span)
+runtime_MSpanList_Remove(MSpan *span)
 {
 	if(span->prev == nil && span->next == nil)
 		return;
@@ -325,16 +333,16 @@ 
 }
 
 bool
-MSpanList_IsEmpty(MSpan *list)
+runtime_MSpanList_IsEmpty(MSpan *list)
 {
 	return list->next == list;
 }
 
 void
-MSpanList_Insert(MSpan *list, MSpan *span)
+runtime_MSpanList_Insert(MSpan *list, MSpan *span)
 {
 	if(span->next != nil || span->prev != nil)
-		throw("MSpanList_Insert");
+		runtime_throw("MSpanList_Insert");
 	span->next = list->next;
 	span->prev = list;
 	span->next->prev = span;
diff -r bb880434e617 libgo/runtime/mheapmap32.c
--- a/libgo/runtime/mheapmap32.c	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/mheapmap32.c	Wed Nov 10 21:37:28 2010 -0800
@@ -12,13 +12,13 @@ 
 
 // 3-level radix tree mapping page ids to Span*.
 void
-MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr))
+runtime_MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr))
 {
 	m->allocator = allocator;
 }
 
 MSpan*
-MHeapMap_Get(MHeapMap *m, PageID k)
+runtime_MHeapMap_Get(MHeapMap *m, PageID k)
 {
 	int32 i1, i2;
 
@@ -27,13 +27,13 @@ 
 	i1 = k & MHeapMap_Level1Mask;
 	k >>= MHeapMap_Level1Bits;
 	if(k != 0)
-		throw("MHeapMap_Get");
+		runtime_throw("MHeapMap_Get");
 
 	return m->p[i1]->s[i2];
 }
 
 MSpan*
-MHeapMap_GetMaybe(MHeapMap *m, PageID k)
+runtime_MHeapMap_GetMaybe(MHeapMap *m, PageID k)
 {
 	int32 i1, i2;
 	MHeapMapNode2 *p2;
@@ -43,7 +43,7 @@ 
 	i1 = k & MHeapMap_Level1Mask;
 	k >>= MHeapMap_Level1Bits;
 	if(k != 0)
-		throw("MHeapMap_Get");
+		runtime_throw("MHeapMap_Get");
 
 	p2 = m->p[i1];
 	if(p2 == nil)
@@ -52,7 +52,7 @@ 
 }
 
 void
-MHeapMap_Set(MHeapMap *m, PageID k, MSpan *s)
+runtime_MHeapMap_Set(MHeapMap *m, PageID k, MSpan *s)
 {
 	int32 i1, i2;
 
@@ -61,7 +61,7 @@ 
 	i1 = k & MHeapMap_Level1Mask;
 	k >>= MHeapMap_Level1Bits;
 	if(k != 0)
-		throw("MHeapMap_Set");
+		runtime_throw("MHeapMap_Set");
 
 	m->p[i1]->s[i2] = s;
 }
@@ -69,7 +69,7 @@ 
 // Allocate the storage required for entries [k, k+1, ..., k+len-1]
 // so that Get and Set calls need not check for nil pointers.
 bool
-MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr len)
+runtime_MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr len)
 {
 	uintptr end;
 	int32 i1;
diff -r bb880434e617 libgo/runtime/mheapmap32.h
--- a/libgo/runtime/mheapmap32.h	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/mheapmap32.h	Wed Nov 10 21:37:28 2010 -0800
@@ -32,10 +32,10 @@ 
 	MSpan *s[1<<MHeapMap_Level2Bits];
 };
 
-void	MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr));
-bool	MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr npages);
-MSpan*	MHeapMap_Get(MHeapMap *m, PageID k);
-MSpan*	MHeapMap_GetMaybe(MHeapMap *m, PageID k);
-void	MHeapMap_Set(MHeapMap *m, PageID k, MSpan *v);
+void	runtime_MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr));
+bool	runtime_MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr npages);
+MSpan*	runtime_MHeapMap_Get(MHeapMap *m, PageID k);
+MSpan*	runtime_MHeapMap_GetMaybe(MHeapMap *m, PageID k);
+void	runtime_MHeapMap_Set(MHeapMap *m, PageID k, MSpan *v);
 
 
diff -r bb880434e617 libgo/runtime/mheapmap64.c
--- a/libgo/runtime/mheapmap64.c	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/mheapmap64.c	Wed Nov 10 21:37:28 2010 -0800
@@ -12,13 +12,13 @@ 
 
 // 3-level radix tree mapping page ids to Span*.
 void
-MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr))
+runtime_MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr))
 {
 	m->allocator = allocator;
 }
 
 MSpan*
-MHeapMap_Get(MHeapMap *m, PageID k)
+runtime_MHeapMap_Get(MHeapMap *m, PageID k)
 {
 	int32 i1, i2, i3;
 
@@ -29,13 +29,13 @@ 
 	i1 = k & MHeapMap_Level1Mask;
 	k >>= MHeapMap_Level1Bits;
 	if(k != 0)
-		throw("MHeapMap_Get");
+		runtime_throw("MHeapMap_Get");
 
 	return m->p[i1]->p[i2]->s[i3];
 }
 
 MSpan*
-MHeapMap_GetMaybe(MHeapMap *m, PageID k)
+runtime_MHeapMap_GetMaybe(MHeapMap *m, PageID k)
 {
 	int32 i1, i2, i3;
 	MHeapMapNode2 *p2;
@@ -48,7 +48,7 @@ 
 	i1 = k & MHeapMap_Level1Mask;
 	k >>= MHeapMap_Level1Bits;
 	if(k != 0)
-		throw("MHeapMap_Get");
+		runtime_throw("MHeapMap_Get");
 
 	p2 = m->p[i1];
 	if(p2 == nil)
@@ -60,7 +60,7 @@ 
 }
 
 void
-MHeapMap_Set(MHeapMap *m, PageID k, MSpan *s)
+runtime_MHeapMap_Set(MHeapMap *m, PageID k, MSpan *s)
 {
 	int32 i1, i2, i3;
 
@@ -71,7 +71,7 @@ 
 	i1 = k & MHeapMap_Level1Mask;
 	k >>= MHeapMap_Level1Bits;
 	if(k != 0)
-		throw("MHeapMap_Set");
+		runtime_throw("MHeapMap_Set");
 
 	m->p[i1]->p[i2]->s[i3] = s;
 }
@@ -79,7 +79,7 @@ 
 // Allocate the storage required for entries [k, k+1, ..., k+len-1]
 // so that Get and Set calls need not check for nil pointers.
 bool
-MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr len)
+runtime_MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr len)
 {
 	uintptr end;
 	int32 i1, i2;
diff -r bb880434e617 libgo/runtime/mheapmap64.h
--- a/libgo/runtime/mheapmap64.h	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/mheapmap64.h	Wed Nov 10 21:37:28 2010 -0800
@@ -51,10 +51,10 @@ 
 	MSpan *s[1<<MHeapMap_Level3Bits];
 };
 
-void	MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr));
-bool	MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr npages);
-MSpan*	MHeapMap_Get(MHeapMap *m, PageID k);
-MSpan*	MHeapMap_GetMaybe(MHeapMap *m, PageID k);
-void	MHeapMap_Set(MHeapMap *m, PageID k, MSpan *v);
+void	runtime_MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr));
+bool	runtime_MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr npages);
+MSpan*	runtime_MHeapMap_Get(MHeapMap *m, PageID k);
+MSpan*	runtime_MHeapMap_GetMaybe(MHeapMap *m, PageID k);
+void	runtime_MHeapMap_Set(MHeapMap *m, PageID k, MSpan *v);
 
 
diff -r bb880434e617 libgo/runtime/mprof.goc
--- a/libgo/runtime/mprof.goc	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/mprof.goc	Wed Nov 10 21:37:28 2010 -0800
@@ -47,7 +47,7 @@ 
 	Bucket *b;
 
 	if(buckhash == nil) {
-		buckhash = SysAlloc(BuckHashSize*sizeof buckhash[0]);
+		buckhash = runtime_SysAlloc(BuckHashSize*sizeof buckhash[0]);
 		mstats.buckhash_sys += BuckHashSize*sizeof buckhash[0];
 	}
 
@@ -64,12 +64,12 @@ 
 	i = h%BuckHashSize;
 	for(b = buckhash[i]; b; b=b->next)
 		if(b->hash == h && b->nstk == (uintptr)nstk &&
-		   mcmp((byte*)b->stk, (byte*)stk, nstk*sizeof stk[0]) == 0)
+		   runtime_mcmp((byte*)b->stk, (byte*)stk, nstk*sizeof stk[0]) == 0)
 			return b;
 
-	b = mallocgc(sizeof *b + nstk*sizeof stk[0], RefNoProfiling, 0, 1);
+	b = runtime_mallocgc(sizeof *b + nstk*sizeof stk[0], RefNoProfiling, 0, 1);
 	bucketmem += sizeof *b + nstk*sizeof stk[0];
-	memmove(b->stk, stk, nstk*sizeof stk[0]);
+	runtime_memmove(b->stk, stk, nstk*sizeof stk[0]);
 	b->hash = h;
 	b->nstk = nstk;
 	b->next = buckhash[i];
@@ -134,7 +134,7 @@ 
 		if(ah->addr == (addr>>20))
 			goto found;
 
-	ah = mallocgc(sizeof *ah, RefNoProfiling, 0, 1);
+	ah = runtime_mallocgc(sizeof *ah, RefNoProfiling, 0, 1);
 	addrmem += sizeof *ah;
 	ah->next = addrhash[h];
 	ah->addr = addr>>20;
@@ -142,7 +142,7 @@ 
 
 found:
 	if((e = addrfree) == nil) {
-		e = mallocgc(64*sizeof *e, RefNoProfiling, 0, 0);
+		e = runtime_mallocgc(64*sizeof *e, RefNoProfiling, 0, 0);
 		addrmem += 64*sizeof *e;
 		for(i=0; i+1<64; i++)
 			e[i].next = &e[i+1];
@@ -187,7 +187,7 @@ 
 
 // Called by malloc to record a profiled block.
 void
-MProf_Malloc(void *p, uintptr size)
+runtime_MProf_Malloc(void *p, uintptr size)
 {
 	int32 nstk;
 	uintptr stk[32];
@@ -196,16 +196,16 @@ 
 	if(!__sync_bool_compare_and_swap(&m->nomemprof, 0, 1))
 		return;
 #if 0
-	nstk = callers(1, stk, 32);
+	nstk = runtime_callers(1, stk, 32);
 #else
 	nstk = 0;
 #endif
-	lock(&proflock);
+	runtime_lock(&proflock);
 	b = stkbucket(stk, nstk);
 	b->allocs++;
 	b->alloc_bytes += size;
 	setaddrbucket((uintptr)p, b);
-	unlock(&proflock);
+	runtime_unlock(&proflock);
 	__sync_bool_compare_and_swap(&m->nomemprof, 1, 0);
 
 	if(__sync_bool_compare_and_swap(&m->gcing_for_prof, 1, 0))
@@ -214,20 +214,20 @@ 
 
 // Called when freeing a profiled block.
 void
-MProf_Free(void *p, uintptr size)
+runtime_MProf_Free(void *p, uintptr size)
 {
 	Bucket *b;
 
 	if(!__sync_bool_compare_and_swap(&m->nomemprof, 0, 1))
 		return;
 
-	lock(&proflock);
+	runtime_lock(&proflock);
 	b = getaddrbucket((uintptr)p);
 	if(b != nil) {
 		b->frees++;
 		b->free_bytes += size;
 	}
-	unlock(&proflock);
+	runtime_unlock(&proflock);
 	__sync_bool_compare_and_swap(&m->nomemprof, 1, 0);
 
 	if(__sync_bool_compare_and_swap(&m->gcing_for_prof, 1, 0))
@@ -268,7 +268,7 @@ 
 
 	__sync_bool_compare_and_swap(&m->nomemprof, 0, 1);
 
-	lock(&proflock);
+	runtime_lock(&proflock);
 	n = 0;
 	for(b=buckets; b; b=b->allnext)
 		if(include_inuse_zero || b->alloc_bytes != b->free_bytes)
@@ -281,7 +281,7 @@ 
 			if(include_inuse_zero || b->alloc_bytes != b->free_bytes)
 				record(r++, b);
 	}
-	unlock(&proflock);
+	runtime_unlock(&proflock);
 
 	__sync_bool_compare_and_swap(&m->nomemprof, 1, 0);
 
@@ -290,10 +290,10 @@ 
 }
 
 void
-MProf_Mark(void (*scan)(int32, byte *, int64))
+runtime_MProf_Mark(void (*scan)(byte *, int64))
 {
 	// buckhash is not allocated via mallocgc.
-	scan(0, (byte*)&buckets, sizeof buckets);
-	scan(0, (byte*)&addrhash, sizeof addrhash);
-	scan(0, (byte*)&addrfree, sizeof addrfree);
+	scan((byte*)&buckets, sizeof buckets);
+	scan((byte*)&addrhash, sizeof addrhash);
+	scan((byte*)&addrfree, sizeof addrfree);
 }
diff -r bb880434e617 libgo/runtime/msize.c
--- a/libgo/runtime/msize.c	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/msize.c	Wed Nov 10 21:37:28 2010 -0800
@@ -28,9 +28,9 @@ 
 #include "runtime.h"
 #include "malloc.h"
 
-int32 class_to_size[NumSizeClasses];
-int32 class_to_allocnpages[NumSizeClasses];
-int32 class_to_transfercount[NumSizeClasses];
+int32 runtime_class_to_size[NumSizeClasses];
+int32 runtime_class_to_allocnpages[NumSizeClasses];
+int32 runtime_class_to_transfercount[NumSizeClasses];
 
 // The SizeToClass lookup is implemented using two arrays,
 // one mapping sizes <= 1024 to their class and one mapping
@@ -45,24 +45,24 @@ 
 static int32 size_to_class128[(MaxSmallSize-1024)/128 + 1];
 
 int32
-SizeToClass(int32 size)
+runtime_SizeToClass(int32 size)
 {
 	if(size > MaxSmallSize)
-		throw("SizeToClass - invalid size");
+		runtime_throw("SizeToClass - invalid size");
 	if(size > 1024-8)
 		return size_to_class128[(size-1024+127) >> 7];
 	return size_to_class8[(size+7)>>3];
 }
 
 void
-InitSizes(void)
+runtime_InitSizes(void)
 {
 	int32 align, sizeclass, size, osize, nextsize, n;
 	uint32 i;
 	uintptr allocsize, npages;
 
-	// Initialize the class_to_size table (and choose class sizes in the process).
-	class_to_size[0] = 0;
+	// Initialize the runtime_class_to_size table (and choose class sizes in the process).
+	runtime_class_to_size[0] = 0;
 	sizeclass = 1;	// 0 means no class
 	align = 8;
 	for(size = align; size <= MaxSmallSize; size += align) {
@@ -75,7 +75,7 @@ 
 				align = 16;	// required for x86 SSE instructions, if we want to use them
 		}
 		if((align&(align-1)) != 0)
-			throw("InitSizes - bug");
+			runtime_throw("InitSizes - bug");
 
 		// Make the allocnpages big enough that
 		// the leftover is less than 1/8 of the total,
@@ -92,78 +92,78 @@ 
 		// use just this size instead of having two
 		// different sizes.
 		if(sizeclass > 1
-		&& (int32)npages == class_to_allocnpages[sizeclass-1]
-		&& allocsize/osize == allocsize/(class_to_size[sizeclass-1]+RefcountOverhead)) {
-			class_to_size[sizeclass-1] = size;
+		&& (int32)npages == runtime_class_to_allocnpages[sizeclass-1]
+		&& allocsize/osize == allocsize/(runtime_class_to_size[sizeclass-1]+RefcountOverhead)) {
+			runtime_class_to_size[sizeclass-1] = size;
 			continue;
 		}
 
-		class_to_allocnpages[sizeclass] = npages;
-		class_to_size[sizeclass] = size;
+		runtime_class_to_allocnpages[sizeclass] = npages;
+		runtime_class_to_size[sizeclass] = size;
 		sizeclass++;
 	}
 	if(sizeclass != NumSizeClasses) {
-		// printf("sizeclass=%d NumSizeClasses=%d\n", sizeclass, NumSizeClasses);
-		throw("InitSizes - bad NumSizeClasses");
+		// runtime_printf("sizeclass=%d NumSizeClasses=%d\n", sizeclass, NumSizeClasses);
+		runtime_throw("InitSizes - bad NumSizeClasses");
 	}
 
 	// Initialize the size_to_class tables.
 	nextsize = 0;
 	for (sizeclass = 1; sizeclass < NumSizeClasses; sizeclass++) {
-		for(; nextsize < 1024 && nextsize <= class_to_size[sizeclass]; nextsize+=8)
+		for(; nextsize < 1024 && nextsize <= runtime_class_to_size[sizeclass]; nextsize+=8)
 			size_to_class8[nextsize/8] = sizeclass;
 		if(nextsize >= 1024)
-			for(; nextsize <= class_to_size[sizeclass]; nextsize += 128)
+			for(; nextsize <= runtime_class_to_size[sizeclass]; nextsize += 128)
 				size_to_class128[(nextsize-1024)/128] = sizeclass;
 	}
 
 	// Double-check SizeToClass.
 	if(0) {
 		for(n=0; n < MaxSmallSize; n++) {
-			sizeclass = SizeToClass(n);
-			if(sizeclass < 1 || sizeclass >= NumSizeClasses || class_to_size[sizeclass] < n) {
-				// printf("size=%d sizeclass=%d class_to_size=%d\n", n, sizeclass, class_to_size[sizeclass]);
-				// printf("incorrect SizeToClass");
+			sizeclass = runtime_SizeToClass(n);
+			if(sizeclass < 1 || sizeclass >= NumSizeClasses || runtime_class_to_size[sizeclass] < n) {
+				// runtime_printf("size=%d sizeclass=%d runtime_class_to_size=%d\n", n, sizeclass, runtime_class_to_size[sizeclass]);
+				// runtime_printf("incorrect SizeToClass");
 				goto dump;
 			}
-			if(sizeclass > 1 && class_to_size[sizeclass-1] >= n) {
-				// printf("size=%d sizeclass=%d class_to_size=%d\n", n, sizeclass, class_to_size[sizeclass]);
-				// printf("SizeToClass too big");
+			if(sizeclass > 1 && runtime_class_to_size[sizeclass-1] >= n) {
+				// runtime_printf("size=%d sizeclass=%d runtime_class_to_size=%d\n", n, sizeclass, runtime_class_to_size[sizeclass]);
+				// runtime_printf("SizeToClass too big");
 				goto dump;
 			}
 		}
 	}
 
 	// Copy out for statistics table.
-	for(i=0; i<nelem(class_to_size); i++)
-		mstats.by_size[i].size = class_to_size[i];
+	for(i=0; i<nelem(runtime_class_to_size); i++)
+		mstats.by_size[i].size = runtime_class_to_size[i];
 
-	// Initialize the class_to_transfercount table.
+	// Initialize the runtime_class_to_transfercount table.
 	for(sizeclass = 1; sizeclass < NumSizeClasses; sizeclass++) {
-		n = 64*1024 / class_to_size[sizeclass];
+		n = 64*1024 / runtime_class_to_size[sizeclass];
 		if(n < 2)
 			n = 2;
 		if(n > 32)
 			n = 32;
-		class_to_transfercount[sizeclass] = n;
+		runtime_class_to_transfercount[sizeclass] = n;
 	}
 	return;
 
 dump:
-	if(0){
-		printf("NumSizeClasses=%d\n", NumSizeClasses);
-		printf("class_to_size:");
+	if(1){
+		runtime_printf("NumSizeClasses=%d\n", NumSizeClasses);
+		runtime_printf("runtime_class_to_size:");
 		for(sizeclass=0; sizeclass<NumSizeClasses; sizeclass++)
-			printf(" %d", class_to_size[sizeclass]);
-		printf("\n\n");
-		printf("size_to_class8:");
+			runtime_printf(" %d", runtime_class_to_size[sizeclass]);
+		runtime_printf("\n\n");
+		runtime_printf("size_to_class8:");
 		for(i=0; i<nelem(size_to_class8); i++)
-			printf(" %d=>%d(%d)\n", i*8, size_to_class8[i], class_to_size[size_to_class8[i]]);
-		printf("\n");
-		printf("size_to_class128:");
+			runtime_printf(" %d=>%d(%d)\n", i*8, size_to_class8[i], runtime_class_to_size[size_to_class8[i]]);
+		runtime_printf("\n");
+		runtime_printf("size_to_class128:");
 		for(i=0; i<nelem(size_to_class128); i++)
-			printf(" %d=>%d(%d)\n", i*128, size_to_class128[i], class_to_size[size_to_class128[i]]);
-		printf("\n");
+			runtime_printf(" %d=>%d(%d)\n", i*128, size_to_class128[i], runtime_class_to_size[size_to_class128[i]]);
+		runtime_printf("\n");
 	}
-	throw("InitSizes failed");
+	runtime_throw("InitSizes failed");
 }
diff -r bb880434e617 libgo/runtime/runtime.h
--- a/libgo/runtime/runtime.h	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/runtime.h	Wed Nov 10 21:37:28 2010 -0800
@@ -115,22 +115,23 @@ 
 #define USED(v)		((void) v)
 
 /* We map throw to assert.  */
-#define throw(s) __go_assert(s == 0)
+#define runtime_throw(s) __go_assert(s == 0)
 
-void*	mal(uintptr);
-void	mallocinit(void);
+void*	runtime_mal(uintptr);
+void	runtime_mallocinit(void);
 void	siginit(void);
 bool	__go_sigsend(int32 sig);
-int64	nanotime(void);
+int64	runtime_nanotime(void);
 
-void	stoptheworld(void);
-void	starttheworld(void);
+void	runtime_stoptheworld(void);
+void	runtime_starttheworld(void);
 void	__go_go(void (*pfn)(void*), void*);
 void	__go_gc_goroutine_init(void*);
 void	__go_enable_gc(void);
 int	__go_run_goroutine_gc(int);
-void	__go_scanstacks(void (*scan)(int32, byte *, int64));
+void	__go_scanstacks(void (*scan)(byte *, int64));
 void	__go_stealcache(void);
+void	__go_cachestats(void);
 
 /*
  * mutual exclusion locks.  in the uncontended case,
@@ -138,11 +139,11 @@ 
  * but on the contention path they sleep in the kernel.
  */
 #define	LOCK_INITIALIZER	{ PTHREAD_MUTEX_INITIALIZER }
-void	initlock(Lock*);
-void	lock(Lock*);
-void	unlock(Lock*);
-void	destroylock(Lock*);
-bool	trylock(Lock*);
+void	runtime_initlock(Lock*);
+void	runtime_lock(Lock*);
+void	runtime_unlock(Lock*);
+void	runtime_destroylock(Lock*);
+bool	runtime_trylock(Lock*);
 
 void semacquire (uint32 *) asm ("libgo_runtime.runtime.Semacquire");
 void semrelease (uint32 *) asm ("libgo_runtime.runtime.Semrelease");
@@ -161,14 +162,22 @@ 
 void	notewakeup(Note*);
 
 /* Functions.  */
+#define runtime_printf printf
+#define runtime_malloc(s) __go_alloc(s)
+#define runtime_free(p) __go_free(p)
 #define runtime_memclr(buf, size) __builtin_memset((buf), 0, (size))
-#define mcmp(a, b, s) __builtin_memcmp((a), (b), (s))
-MCache*	allocmcache(void);
+#define runtime_strcmp(s1, s2) __builtin_strcmp((s1), (s2))
+#define runtime_getenv(s) getenv(s)
+#define runtime_atoi(s) atoi(s)
+#define runtime_mcmp(a, b, s) __builtin_memcmp((a), (b), (s))
+#define runtime_memmove(a, b, s) __builtin_memmove((a), (b), (s))
+MCache*	runtime_allocmcache(void);
 void	free(void *v);
 struct __go_func_type;
-void	addfinalizer(void*, void(*fn)(void*), const struct __go_func_type *);
-void	walkfintab(void (*fn)(void*), void (*scan)(int32, byte *, int64));
+void	runtime_addfinalizer(void*, void(*fn)(void*), const struct __go_func_type *);
+void	runtime_walkfintab(void (*fn)(void*), void (*scan)(byte *, int64));
 #define runtime_mmap mmap
+#define runtime_munmap(p, s) munmap((p), (s))
 #define cas(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
 
 struct __go_func_type;
diff -r bb880434e617 libgo/runtime/sigqueue.goc
--- a/libgo/runtime/sigqueue.goc	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/sigqueue.goc	Wed Nov 10 21:37:28 2010 -0800
@@ -102,7 +102,7 @@ 
 		s = buf;
 	}
 	int32 len = __builtin_strlen(s);
-	unsigned char *data = mallocgc(len, RefNoPointers, 0, 0);
+	unsigned char *data = runtime_mallocgc(len, RefNoPointers, 0, 0);
 	__builtin_memcpy(data, s, len);
 	name.__data = data;
 	name.__length = len;
diff -r bb880434e617 libgo/runtime/thread.c
--- a/libgo/runtime/thread.c	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/runtime/thread.c	Wed Nov 10 21:37:28 2010 -0800
@@ -5,34 +5,34 @@ 
 #include "runtime.h"
 
 void
-initlock(Lock *l)
+runtime_initlock(Lock *l)
 {
 	if(pthread_mutex_init(&l->mutex, NULL) != 0)
-		throw("pthread_mutex_init failed");
+		runtime_throw("pthread_mutex_init failed");
 }
 
 void
-lock(Lock *l)
+runtime_lock(Lock *l)
 {
 	if(pthread_mutex_lock(&l->mutex) != 0)
-		throw("lock failed");
+		runtime_throw("lock failed");
 }
 
 void
-unlock(Lock *l)
+runtime_unlock(Lock *l)
 {
 	if(pthread_mutex_unlock(&l->mutex) != 0)
-		throw("unlock failed");
+		runtime_throw("unlock failed");
 }
 
 void
-destroylock(Lock *l)
+runtime_destroylock(Lock *l)
 {
 	pthread_mutex_destroy(&l->mutex);
 }
 
 bool
-trylock(Lock *l)
+runtime_trylock(Lock *l)
 {
 	return pthread_mutex_trylock(&l->mutex) == 0;
 }
diff -r bb880434e617 libgo/syscalls/sysfile_posix.go
--- a/libgo/syscalls/sysfile_posix.go	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/syscalls/sysfile_posix.go	Wed Nov 10 21:37:28 2010 -0800
@@ -24,6 +24,7 @@ 
 func libc_rmdir(name *byte) int __asm__ ("rmdir");
 func libc_fcntl(fd int, cmd int, arg int) int __asm__ ("fcntl");
 func libc_mkdir(name *byte, perm Mode_t) int __asm__ ("mkdir");
+func libc_dup(int) int __asm__ ("dup")
 func libc_gettimeofday(tv *Timeval, tz *byte) int __asm__ ("gettimeofday");
 func libc_select(int, *byte, *byte, *byte, *Timeval) int __asm__ ("select");
 func libc_chdir(name *byte) int __asm__ ("chdir");
@@ -161,6 +162,14 @@ 
   return;
 }
 
+func Dup(oldfd int) (fd int, errno int) {
+	fd = libc_dup(oldfd)
+	if fd < 0 {
+		errno = GetErrno()
+	}
+	return
+}
+
 func Gettimeofday(tv *Timeval) (errno int) {
   r := libc_gettimeofday(tv, nil);
   if r < 0 { errno = GetErrno() }
diff -r bb880434e617 libgo/testsuite/gotest
--- a/libgo/testsuite/gotest	Wed Nov 10 21:22:25 2010 -0800
+++ b/libgo/testsuite/gotest	Wed Nov 10 21:37:28 2010 -0800
@@ -290,19 +290,20 @@ 
 	fi
 	if [ $package != "testing" ]; then
 		echo 'import "testing"'
+		echo 'import __regexp__ "regexp"' # rename in case tested package is called regexp
 	fi
 	# test array
 	echo
-	echo 'var tests = []testing.Test {'
+	echo 'var tests = []testing.InternalTest {'
 	for i in $tests
 	do
-		echo '	testing.Test{ "'$i'", '$i' },'
+		echo '	{ "'$i'", '$i' },'
 	done
 	echo '}'
 	# body
 	echo
 	echo 'func main() {'
-	echo '	testing.Main(tests)'
+	echo '	testing.Main(__regexp__.MatchString, tests)'
 	echo '}'
 }>_testmain.go