@@ -916,7 +916,9 @@ RESOURCE_H = resource.h hard-reg-set.h $(DF_H)
DDG_H = ddg.h sbitmap.h $(DF_H)
GCC_H = gcc.h version.h $(DIAGNOSTIC_CORE_H)
GGC_H = ggc.h gtype-desc.h statistics.h
-GGC_INTERNAL_H = ggc-internal.h $(GGC_H)
+GGC_INTERNAL_H = ggc-internal.h $(GGC_H) \
+ $(VEC_H) $(HASH_TABLE_H) \
+ $(FUNCTION_H) $(BASIC_BLOCK_H) $(CGRAPH_H) $(CFGLOOP_H)
TIMEVAR_H = timevar.h timevar.def
INSN_ATTR_H = insn-attr.h insn-attr-common.h $(INSN_ADDR_H)
INSN_ADDR_H = $(srcdir)/insn-addr.h
@@ -2727,7 +2729,7 @@ toplev.o : toplev.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(TREE_H) \
$(OPTS_H) params.def tree-mudflap.h $(TREE_PASS_H) $(GIMPLE_H) \
tree-ssa-alias.h $(PLUGIN_H) realmpfr.h tree-diagnostic.h \
$(TREE_PRETTY_PRINT_H) opts-diagnostic.h $(COMMON_TARGET_H) \
- tsan.h diagnostic-color.h
+ tsan.h diagnostic-color.h $(GGC_INTERNAL_H)
hwint.o : hwint.c $(CONFIG_H) $(SYSTEM_H) $(DIAGNOSTIC_CORE_H)
@@ -23,7 +23,6 @@ along with GCC; see the file COPYING3. If not see
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "hash-table.h"
#include "ggc.h"
#include "ggc-internal.h"
#include "diagnostic-core.h"
@@ -34,22 +33,12 @@ along with GCC; see the file COPYING3. If not see
#include "vec.h"
#include "timevar.h"
-/* When set, ggc_collect will do collection. */
-bool ggc_force_collect;
-
-/* When true, protect the contents of the identifier hash table. */
-bool ggc_protect_identifiers = true;
-
-/* Statistics about the allocation. */
-static ggc_statistics *ggc_stats;
+using namespace ggc_impl;
struct traversal_state;
static int ggc_htab_delete (void **, void *);
static int compare_ptr_data (const void *, const void *);
-static void relocate_ptrs (void *, void *);
-static void write_pch_globals (const struct ggc_root_tab * const *tab,
- struct traversal_state *state);
/* Maintain global roots that are preserved during GC. */
@@ -68,63 +57,46 @@ ggc_htab_delete (void **slot, void *info)
return 1;
}
-
-/* This extra vector of dynamically registered root_tab-s is used by
- ggc_mark_roots and gives the ability to dynamically add new GGC root
- tables, for instance from some plugins; this vector is on the heap
- since it is used by GGC internally. */
-typedef const struct ggc_root_tab *const_ggc_root_tab_t;
-static vec<const_ggc_root_tab_t> extra_root_vec;
-
/* Dynamically register a new GGC root table RT. This is useful for
plugins. */
void
ggc_register_root_tab (const struct ggc_root_tab* rt)
{
- if (rt)
- extra_root_vec.safe_push (rt);
+ the_heap.register_root_tab (rt);
}
-/* This extra vector of dynamically registered cache_tab-s is used by
- ggc_mark_roots and gives the ability to dynamically add new GGC cache
- tables, for instance from some plugins; this vector is on the heap
- since it is used by GGC internally. */
-typedef const struct ggc_cache_tab *const_ggc_cache_tab_t;
-static vec<const_ggc_cache_tab_t> extra_cache_vec;
-
/* Dynamically register a new GGC cache table CT. This is useful for
plugins. */
void
ggc_register_cache_tab (const struct ggc_cache_tab* ct)
{
- if (ct)
- extra_cache_vec.safe_push (ct);
+ the_heap.register_cache_tab (ct);
}
/* Scan a hash table that has objects which are to be deleted if they are not
already marked. */
-static void
-ggc_scan_cache_tab (const_ggc_cache_tab_t ctp)
+void
+gc_heap::scan_cache_tab (const_ggc_cache_tab_t ctp)
{
const struct ggc_cache_tab *cti;
for (cti = ctp; cti->base != NULL; cti++)
if (*cti->base)
{
- ggc_set_mark (*cti->base);
+ set_mark (*cti->base);
htab_traverse_noresize (*cti->base, ggc_htab_delete,
CONST_CAST (void *, (const void *)cti));
- ggc_set_mark ((*cti->base)->entries);
+ set_mark ((*cti->base)->entries);
}
}
/* Mark all the roots in the table RT. */
-static void
-ggc_mark_root_tab (const_ggc_root_tab_t rt)
+void
+gc_heap::mark_root_tab (const_ggc_root_tab_t rt)
{
size_t i;
@@ -136,7 +108,7 @@ ggc_mark_root_tab (const_ggc_root_tab_t rt)
/* Iterate through all registered roots and mark each element. */
void
-ggc_mark_roots (void)
+gc_heap::mark_roots (void)
{
const struct ggc_root_tab *const *rt;
const_ggc_root_tab_t rtp, rti;
@@ -149,27 +121,27 @@ ggc_mark_roots (void)
memset (rti->base, 0, rti->stride);
for (rt = gt_ggc_rtab; *rt; rt++)
- ggc_mark_root_tab (*rt);
+ mark_root_tab (*rt);
FOR_EACH_VEC_ELT (extra_root_vec, i, rtp)
- ggc_mark_root_tab (rtp);
+ mark_root_tab (rtp);
- if (ggc_protect_identifiers)
- ggc_mark_stringpool ();
+ if (protect_identifiers_)
+ mark_stringpool ();
/* Now scan all hash tables that have objects which are to be deleted if
they are not already marked. */
for (ct = gt_ggc_cache_rtab; *ct; ct++)
- ggc_scan_cache_tab (*ct);
+ scan_cache_tab (*ct);
FOR_EACH_VEC_ELT (extra_cache_vec, i, ctp)
- ggc_scan_cache_tab (ctp);
+ scan_cache_tab (ctp);
- if (! ggc_protect_identifiers)
- ggc_purge_stringpool ();
+ if (! protect_identifiers_)
+ purge_stringpool ();
/* Some plugins may call ggc_set_mark from here. */
- invoke_plugin_callbacks (PLUGIN_GGC_MARKING, NULL);
+ invoke_plugin_callbacks (PLUGIN_GGC_MARKING, this);
}
/* Allocate a block of memory, then clear it. */
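A behavioral note on the hunk above: invoke_plugin_callbacks used to pass NULL
as the event data for PLUGIN_GGC_MARKING and now passes `this`, so a marking
plugin can reach the heap object directly.  A minimal sketch of a plugin-side
hook under this patch; my_root and the registration site are hypothetical, and
the gcc_data cast is an assumption based on the new call above:

    /* Hypothetical plugin code, built against the new ggc-internal.h.  */
    static tree my_root;        /* an object the plugin keeps alive */

    static void
    my_ggc_marking (void *gcc_data, void *user_data ATTRIBUTE_UNUSED)
    {
      gc_heap *heap = static_cast <gc_heap *> (gcc_data);
      if (my_root)
        heap->set_mark (my_root);   /* previously: ggc_set_mark (my_root) */
    }

    /* From the plugin's plugin_init:  */
    register_callback ("my_plugin", PLUGIN_GGC_MARKING, my_ggc_marking, NULL);

Plugins that keep calling the unchanged ggc_set_mark wrapper are unaffected.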
@@ -267,66 +239,40 @@ ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
void
-ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
- ggc_statistics *stats)
+gc_heap::print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
+ ggc_statistics *stats)
{
/* Set the pointer so that during collection we will actually gather
the statistics. */
- ggc_stats = stats;
+ stats_ = stats;
/* Then do one collection to fill in the statistics. */
- ggc_collect ();
+ collect ();
/* At present, we don't really gather any interesting statistics. */
/* Don't gather statistics any more. */
- ggc_stats = NULL;
+ stats_ = NULL;
}
/* Functions for saving and restoring GCable memory to disk. */
-struct ptr_data
-{
- void *obj;
- void *note_ptr_cookie;
- gt_note_pointers note_ptr_fn;
- gt_handle_reorder reorder_fn;
- size_t size;
- void *new_addr;
-};
-
#define POINTER_HASH(x) (hashval_t)((intptr_t)x >> 3)
-/* Helper for hashing saving_htab. */
-
-struct saving_hasher : typed_free_remove <ptr_data>
-{
- typedef ptr_data value_type;
- typedef void compare_type;
- static inline hashval_t hash (const value_type *);
- static inline bool equal (const value_type *, const compare_type *);
-};
-
-inline hashval_t
-saving_hasher::hash (const value_type *p)
-{
- return POINTER_HASH (p->obj);
-}
-
-inline bool
-saving_hasher::equal (const value_type *p1, const compare_type *p2)
-{
- return p1->obj == p2;
-}
-
-static hash_table <saving_hasher> saving_htab;
-
/* Register an object in the hash table. */
int
gt_pch_note_object (void *obj, void *note_ptr_cookie,
gt_note_pointers note_ptr_fn)
{
+ return the_heap.pch_note_object (obj, note_ptr_cookie, note_ptr_fn);
+}
+
+int
+gc_heap_pages::
+pch_note_object (void *obj, void *note_ptr_cookie,
+ gt_note_pointers note_ptr_fn)
+{
struct ptr_data **slot;
if (obj == NULL || obj == (void *) 1)
@@ -348,7 +294,7 @@ gt_pch_note_object (void *obj, void *note_ptr_cookie,
if (note_ptr_fn == gt_pch_p_S)
(*slot)->size = strlen ((const char *)obj) + 1;
else
- (*slot)->size = ggc_get_size (obj);
+ (*slot)->size = get_size (obj);
return 1;
}
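An aside on the table these wrappers now reach through the_heap: saving_hasher
hashes the stored ptr_data by its obj field but compares it against a bare
void * key, which is what lets pch_note_object probe the table with the object
pointer itself.  POINTER_HASH drops the low three bits because GC objects are
at least 8-byte aligned on the usual hosts, so those bits carry no entropy.
A sketch of the idiom, using only hash_table calls this file already makes
(create, find_slot_with_hash); get_some_gc_object is hypothetical:

    hash_table <ggc_impl::saving_hasher> htab;
    htab.create (100);

    void *obj = get_some_gc_object ();
    struct ptr_data **slot = (struct ptr_data **)
      htab.find_slot_with_hash (obj, POINTER_HASH (obj), INSERT);
    if (*slot == NULL)
      {
        *slot = XCNEW (struct ptr_data);
        (*slot)->obj = obj;  /* saving_hasher::equal tests p->obj == key */
      }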
@@ -358,6 +304,14 @@ void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
gt_handle_reorder reorder_fn)
{
+ the_heap.pch_note_reorder (obj, note_ptr_cookie, reorder_fn);
+}
+
+void
+gc_heap_pages::
+pch_note_reorder (void *obj, void *note_ptr_cookie,
+ gt_handle_reorder reorder_fn)
+{
struct ptr_data *data;
if (obj == NULL || obj == (void *) 1)
@@ -418,7 +372,8 @@ compare_ptr_data (const void *p1_p, const void *p2_p)
/* Callbacks for note_ptr_fn. */
-static void
+void
+gc_heap_pages::
relocate_ptrs (void *ptr_p, void *state_p)
{
void **ptr = (void **)ptr_p;
@@ -430,15 +385,15 @@ relocate_ptrs (void *ptr_p, void *state_p)
return;
result = (struct ptr_data *)
- saving_htab.find_with_hash (*ptr, POINTER_HASH (*ptr));
+ the_heap.saving_htab.find_with_hash (*ptr, POINTER_HASH (*ptr));
gcc_assert (result);
*ptr = result->new_addr;
}
/* Write out, after relocation, the pointers in TAB. */
-static void
-write_pch_globals (const struct ggc_root_tab * const *tab,
- struct traversal_state *state)
+void
+gc_heap_pages::write_pch_globals (const struct ggc_root_tab * const *tab,
+ struct traversal_state *state)
{
const struct ggc_root_tab *const *rt;
const struct ggc_root_tab *rti;
@@ -481,6 +436,12 @@ struct mmap_info
void
gt_pch_save (FILE *f)
{
+ the_heap.pch_save (f);
+}
+
+void
+gc_heap_pages::pch_save (FILE *f)
+{
const struct ggc_root_tab *const *rt;
const struct ggc_root_tab *rti;
size_t i;
@@ -901,79 +862,9 @@ init_ggc_heuristics (void)
#endif
}
-/* Datastructure used to store per-call-site statistics. */
-struct loc_descriptor
-{
- const char *file;
- int line;
- const char *function;
- int times;
- size_t allocated;
- size_t overhead;
- size_t freed;
- size_t collected;
-};
-
-/* Hash table helper. */
-
-struct loc_desc_hasher : typed_noop_remove <loc_descriptor>
-{
- typedef loc_descriptor value_type;
- typedef loc_descriptor compare_type;
- static inline hashval_t hash (const value_type *);
- static inline bool equal (const value_type *, const compare_type *);
-};
-
-inline hashval_t
-loc_desc_hasher::hash (const value_type *d)
-{
- return htab_hash_pointer (d->function) | d->line;
-}
-
-inline bool
-loc_desc_hasher::equal (const value_type *d, const compare_type *d2)
-{
- return (d->file == d2->file && d->line == d2->line
- && d->function == d2->function);
-}
-
-/* Hashtable used for statistics. */
-static hash_table <loc_desc_hasher> loc_hash;
-
-struct ptr_hash_entry
-{
- void *ptr;
- struct loc_descriptor *loc;
- size_t size;
-};
-
-/* Helper for ptr_hash table. */
-
-struct ptr_hash_hasher : typed_noop_remove <ptr_hash_entry>
-{
- typedef ptr_hash_entry value_type;
- typedef void compare_type;
- static inline hashval_t hash (const value_type *);
- static inline bool equal (const value_type *, const compare_type *);
-};
-
-inline hashval_t
-ptr_hash_hasher::hash (const value_type *d)
-{
- return htab_hash_pointer (d->ptr);
-}
-
-inline bool
-ptr_hash_hasher::equal (const value_type *p, const compare_type *p2)
-{
- return (p->ptr == p2);
-}
-
-/* Hashtable converting address of allocated field to loc descriptor. */
-static hash_table <ptr_hash_hasher> ptr_hash;
-
/* Return descriptor for given call site, create new one if needed. */
-static struct loc_descriptor *
+struct loc_descriptor *
+gc_heap_common::
make_loc_descriptor (const char *name, int line, const char *function)
{
struct loc_descriptor loc;
@@ -997,8 +888,9 @@ make_loc_descriptor (const char *name, int line, const char *function)
/* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION). */
void
-ggc_record_overhead (size_t allocated, size_t overhead, void *ptr,
- const char *name, int line, const char *function)
+gc_heap_common::record_overhead (size_t allocated, size_t overhead,
+ void *ptr, const char *name, int line,
+ const char *function)
{
struct loc_descriptor *loc = make_loc_descriptor (name, line, function);
struct ptr_hash_entry *p = XNEW (struct ptr_hash_entry);
@@ -1021,13 +913,15 @@ ggc_record_overhead (size_t allocated, size_t overhead, void *ptr,
/* Helper function for prune_overhead_list. See if SLOT is still marked and
remove it from hashtable if it is not. */
int
-ggc_prune_ptr (ptr_hash_entry **slot, void *b ATTRIBUTE_UNUSED)
+gc_heap_pages::
+prune_ptr (ptr_hash_entry **slot, void *b)
{
+ gc_heap_pages *heap = static_cast<gc_heap_pages *> (b);
struct ptr_hash_entry *p = *slot;
if (!ggc_marked_p (p->ptr))
{
p->loc->collected += p->size;
- ptr_hash.clear_slot (slot);
+ heap->ptr_hash.clear_slot (slot);
free (p);
}
return 1;
@@ -1036,14 +930,15 @@ ggc_prune_ptr (ptr_hash_entry **slot, void *b ATTRIBUTE_UNUSED)
 /* After live values have been marked, walk all recorded pointers and see if
they are still live. */
void
-ggc_prune_overhead_list (void)
+gc_heap_pages::prune_overhead_list ()
{
- ptr_hash.traverse <void *, ggc_prune_ptr> (NULL);
+ ptr_hash.traverse <void *, prune_ptr> (this);
}
/* Notice that the pointer has been freed. */
void
-ggc_free_overhead (void *ptr)
+gc_heap_pages::
+free_overhead (void *ptr)
{
ptr_hash_entry **slot;
slot = ptr_hash.find_slot_with_hash (ptr, htab_hash_pointer (ptr), NO_INSERT);
@@ -1107,6 +1002,12 @@ ggc_add_statistics (loc_descriptor **slot, int *n)
void
dump_ggc_loc_statistics (bool final)
{
+ the_heap.dump_ggc_loc_statistics (final);
+}
+
+void
+gc_heap::dump_ggc_loc_statistics (bool final)
+{
int nentries = 0;
char s[4096];
size_t collected = 0, freed = 0, allocated = 0, overhead = 0, times = 0;
@@ -1115,8 +1016,8 @@ dump_ggc_loc_statistics (bool final)
if (! GATHER_STATISTICS)
return;
- ggc_force_collect = true;
- ggc_collect ();
+ force_collect_ = true;
+ collect ();
loc_array = XCNEWVEC (struct loc_descriptor *,
loc_hash.elements_with_deleted ());
@@ -1167,5 +1068,5 @@ dump_ggc_loc_statistics (bool final)
fprintf (stderr, "%-48s %10s %10s %10s %10s %10s\n",
"source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
fprintf (stderr, "-------------------------------------------------------\n");
- ggc_force_collect = false;
+ force_collect_ = false;
}
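For readers new to the GATHER_STATISTICS plumbing the class now owns: every
traced allocation stores a ptr_hash_entry pointing at its call site's
loc_descriptor, and the three counters are charged at different moments.  A
sketch of the lifecycle; my_obj is hypothetical and ggc_internal_alloc is the
usual macro entry point:

    void *my_obj = ggc_internal_alloc (24); /* record_overhead: loc->times++,
                                               loc->allocated, loc->overhead */
    ggc_free (my_obj);                      /* free_overhead: loc->freed */
    /* ...or, if my_obj is simply dropped before a collection: */
    ggc_collect ();                         /* prune_ptr: loc->collected */

In the dump above, Garbage is the collected total, Freed is freed, and Leak is
what remains of allocated after both.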
@@ -23,15 +23,18 @@ along with GCC; see the file COPYING3. If not see
#define GCC_GGC_INTERNAL_H
#include "ggc.h"
+#include "vec.h"
+#include "hash-table.h"
+#include "function.h"
+#include "basic-block.h"
+#include "cgraph.h"
+#include "cfgloop.h"
/* Call ggc_set_mark on all the roots. */
extern void ggc_mark_roots (void);
/* Stringpool. */
-/* Mark the entries in the string pool. */
-extern void ggc_mark_stringpool (void);
-
/* Purge the entries in the string pool. */
extern void ggc_purge_stringpool (void);
@@ -87,17 +90,6 @@ extern void ggc_pch_finish (struct ggc_pch_data *, FILE *);
extern void ggc_pch_read (FILE *, void *);
-/* Allocation and collection. */
-
-/* When set, ggc_collect will do collection. */
-extern bool ggc_force_collect;
-
-extern void ggc_record_overhead (size_t, size_t, void * FINAL_MEM_STAT_DECL);
-
-extern void ggc_free_overhead (void *);
-
-extern void ggc_prune_overhead_list (void);
-
/* Return the number of bytes allocated at the indicated address. */
extern size_t ggc_get_size (const void *);
@@ -116,4 +108,666 @@ typedef struct ggc_statistics
do not depend on the collector in use. */
extern void ggc_print_common_statistics (FILE *, ggc_statistics *);
+#ifndef HOST_BITS_PER_PTR
+#define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
+#endif
+
+
+
+/* Implementation details of ggc, shared between ggc-page.c and
+ ggc-common.c. We place these within the "ggc_impl" namespace. */
+
+namespace ggc_impl {
+
+/* Strategy:
+
+ This garbage-collecting allocator allocates objects on one of a set
+ of pages. Each page can allocate objects of a single size only;
+ available sizes are powers of two starting at four bytes. The size
+ of an allocation request is rounded up to the next power of two
+ (`order'), and satisfied from the appropriate page.
+
+ Each page is recorded in a page-entry, which also maintains an
+ in-use bitmap of object positions on the page. This allows the
+ allocation state of a particular object to be flipped without
+ touching the page itself.
+
+ Each page-entry also has a context depth, which is used to track
+ pushing and popping of allocation contexts. Only objects allocated
+ in the current (highest-numbered) context may be collected.
+
+ Page entries are arranged in an array of singly-linked lists. The
+ array is indexed by the allocation size, in bits, of the pages on
+ it; i.e. all pages on a list allocate objects of the same size.
+ Pages are ordered on the list such that all non-full pages precede
+ all full pages, with non-full pages arranged in order of decreasing
+ context depth.
+
+ Empty pages (of all orders) are kept on a single page cache list,
+ and are considered first when new pages are required; they are
+ deallocated at the start of the next collection if they haven't
+ been recycled by then. */
+
+/* A two-level tree is used to look up the page-entry for a given
+ pointer. Two chunks of the pointer's bits are extracted to index
+ the first and second levels of the tree, as follows:
+
+ HOST_PAGE_SIZE_BITS
+ 32 | |
+ msb +----------------+----+------+------+ lsb
+ | | |
+ PAGE_L1_BITS |
+ | |
+ PAGE_L2_BITS
+
+ The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
+ pages are aligned on system page boundaries. The next most
+ significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
+ index values in the lookup table, respectively.
+
+ For 32-bit architectures and the settings below, there are no
+ leftover bits. For architectures with wider pointers, the lookup
+ tree points to a list of pages, which must be scanned to find the
+ correct one. */
+
+#define PAGE_L1_BITS (8)
+#define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
+#define PAGE_L1_SIZE ((uintptr_t) 1 << PAGE_L1_BITS)
+#define PAGE_L2_SIZE ((uintptr_t) 1 << PAGE_L2_BITS)
+
+#define LOOKUP_L1(p) \
+ (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
+
+#define LOOKUP_L2(p) \
+ (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
+
+/* The number of objects per allocation page, for objects on a page of
+ the indicated ORDER. */
+#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
+
+/* The number of objects in P. */
+#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
+
+/* The size of an object on a page of the indicated ORDER. */
+#define OBJECT_SIZE(ORDER) object_size_table[ORDER]
+
+/* For speed, we avoid doing a general integer divide to locate the
+ offset in the allocation bitmap, by precalculating numbers M, S
+ such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
+ within the page which is evenly divisible by the object size Z. */
+#define DIV_MULT(ORDER) inverse_table[ORDER].mult
+#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
+#define OFFSET_TO_BIT(OFFSET, ORDER) \
+ (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
+
+/* We use this structure to determine the alignment required for
+ allocations. For power-of-two sized allocations, that's not a
+ problem, but it does matter for odd-sized allocations.
+ We do not care about alignment for floating-point types. */
+
+struct max_alignment {
+ char c;
+ union {
+ HOST_WIDEST_INT i;
+ void *p;
+ } u;
+};
+
+/* The biggest alignment required. */
+
+#define MAX_ALIGNMENT (offsetof (struct ggc_impl::max_alignment, u))
+
+
+/* The number of extra orders, not corresponding to power-of-two sized
+ objects. */
+
+#define NUM_EXTRA_ORDERS ARRAY_SIZE (ggc_impl::extra_order_size_table)
+
+#define RTL_SIZE(NSLOTS) \
+ (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
+
+#define TREE_EXP_SIZE(OPS) \
+ (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
+
+/* The Ith entry is the maximum size of an object to be stored in the
+ Ith extra order. Adding a new entry to this array is the *only*
+ thing you need to do to add a new special allocation size. */
+
+static const size_t extra_order_size_table[] = {
+ /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
+ There are a lot of structures with these sizes and explicitly
+ listing them risks orders being dropped because they changed size. */
+ MAX_ALIGNMENT * 3,
+ MAX_ALIGNMENT * 5,
+ MAX_ALIGNMENT * 6,
+ MAX_ALIGNMENT * 7,
+ MAX_ALIGNMENT * 9,
+ MAX_ALIGNMENT * 10,
+ MAX_ALIGNMENT * 11,
+ MAX_ALIGNMENT * 12,
+ MAX_ALIGNMENT * 13,
+ MAX_ALIGNMENT * 14,
+ MAX_ALIGNMENT * 15,
+ sizeof (struct tree_decl_non_common),
+ sizeof (struct tree_field_decl),
+ sizeof (struct tree_parm_decl),
+ sizeof (struct tree_var_decl),
+ sizeof (struct tree_type_non_common),
+ sizeof (struct function),
+ sizeof (struct basic_block_def),
+ sizeof (struct cgraph_node),
+ sizeof (struct loop),
+};
+
+/* The total number of orders. */
+
+#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
+
+/* Compute the smallest nonnegative number which when added to X gives
+ a multiple of F. */
+
+#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
+
+/* Compute the smallest multiple of F that is >= X. */
+
+#define ROUND_UP(x, f) (CEIL (x, f) * (f))
+
+/* Round X to the next multiple of the page size. */
+
+#define PAGE_ALIGN(x) (((x) + G.pagesize - 1) & ~(G.pagesize - 1))
+
+/* A page_entry records the status of an allocation page. This
+ structure is dynamically sized to fit the bitmap in_use_p. */
+typedef struct page_entry
+{
+ /* The next page-entry with objects of the same size, or NULL if
+ this is the last page-entry. */
+ struct page_entry *next;
+
+ /* The previous page-entry with objects of the same size, or NULL if
+ this is the first page-entry. The PREV pointer exists solely to
+ keep the cost of ggc_free manageable. */
+ struct page_entry *prev;
+
+ /* The number of bytes allocated. (This will always be a multiple
+ of the host system page size.) */
+ size_t bytes;
+
+ /* The address at which the memory is allocated. */
+ char *page;
+
+#ifdef USING_MALLOC_PAGE_GROUPS
+ /* Back pointer to the page group this page came from. */
+ struct page_group *group;
+#endif
+
+ /* This is the index in the by_depth varray where this page table
+ can be found. */
+ unsigned long index_by_depth;
+
+ /* Context depth of this page. */
+ unsigned short context_depth;
+
+ /* The number of free objects remaining on this page. */
+ unsigned short num_free_objects;
+
+ /* A likely candidate for the bit position of a free object for the
+ next allocation from this page. */
+ unsigned short next_bit_hint;
+
+ /* The lg of size of objects allocated from this page. */
+ unsigned char order;
+
+ /* Discarded page? */
+ bool discarded;
+
+ /* A bit vector indicating whether or not objects are in use. The
+ Nth bit is one if the Nth object on this page is allocated. This
+ array is dynamically sized. */
+ unsigned long in_use_p[1];
+} page_entry;
+
+#ifdef USING_MALLOC_PAGE_GROUPS
+/* A page_group describes a large allocation from malloc, from which
+ we parcel out aligned pages. */
+typedef struct page_group
+{
+ /* A linked list of all extant page groups. */
+ struct page_group *next;
+
+ /* The address we received from malloc. */
+ char *allocation;
+
+ /* The size of the block. */
+ size_t alloc_size;
+
+ /* A bitmask of pages in use. */
+ unsigned int in_use;
+} page_group;
+#endif
+
+#if HOST_BITS_PER_PTR <= 32
+
+/* On 32-bit hosts, we use a two level page table, as pictured above. */
+typedef page_entry **page_table[PAGE_L1_SIZE];
+
+#else
+
+/* On 64-bit hosts, we use the same two level page tables plus a linked
+ list that disambiguates the top 32-bits. There will almost always be
+ exactly one entry in the list. */
+typedef struct page_table_chain
+{
+ struct page_table_chain *next;
+ size_t high_bits;
+ page_entry **table[PAGE_L1_SIZE];
+} *page_table;
+
+#endif
+
+#ifdef ENABLE_GC_ALWAYS_COLLECT
+/* List of free objects to be verified as actually free on the
+ next collection. */
+struct free_object
+{
+ void *object;
+ struct free_object *next;
+};
+#endif
+
+/* The rest of the global variables. */
+struct globals
+{
+ /* The Nth element in this array is a page with objects of size 2^N.
+ If there are any pages with free objects, they will be at the
+ head of the list. NULL if there are no page-entries for this
+ object size. */
+ page_entry *pages[NUM_ORDERS];
+
+ /* The Nth element in this array is the last page with objects of
+ size 2^N. NULL if there are no page-entries for this object
+ size. */
+ page_entry *page_tails[NUM_ORDERS];
+
+ /* Lookup table for associating allocation pages with object addresses. */
+ page_table lookup;
+
+ /* The system's page size. */
+ size_t pagesize;
+ size_t lg_pagesize;
+
+ /* Bytes currently allocated. */
+ size_t allocated;
+
+ /* Bytes currently allocated at the end of the last collection. */
+ size_t allocated_last_gc;
+
+ /* Total amount of memory mapped. */
+ size_t bytes_mapped;
+
+ /* Bit N set if any allocations have been done at context depth N. */
+ unsigned long context_depth_allocations;
+
+ /* Bit N set if any collections have been done at context depth N. */
+ unsigned long context_depth_collections;
+
+ /* The current depth in the context stack. */
+ unsigned short context_depth;
+
+ /* A file descriptor open to /dev/zero for reading. */
+#if defined (HAVE_MMAP_DEV_ZERO)
+ int dev_zero_fd;
+#endif
+
+ /* A cache of free system pages. */
+ page_entry *free_pages;
+
+#ifdef USING_MALLOC_PAGE_GROUPS
+ page_group *page_groups;
+#endif
+
+ /* The file descriptor for debugging output. */
+ FILE *debug_file;
+
+ /* Current number of elements in use in depth below. */
+ unsigned int depth_in_use;
+
+ /* Maximum number of elements that can be used before resizing. */
+ unsigned int depth_max;
+
+ /* Each element of this array is an index in by_depth where the given
+ depth starts. This structure is indexed by that given depth we
+ are interested in. */
+ unsigned int *depth;
+
+ /* Current number of elements in use in by_depth below. */
+ unsigned int by_depth_in_use;
+
+ /* Maximum number of elements that can be used before resizing. */
+ unsigned int by_depth_max;
+
+ /* Each element of this array is a pointer to a page_entry, all
+ page_entries can be found in here by increasing depth.
+ index_by_depth in the page_entry is the index into this data
+ structure where that page_entry can be found. This is used to
+ speed up finding all page_entries at a particular depth. */
+ page_entry **by_depth;
+
+ /* Each element is a pointer to the saved in_use_p bits, if any,
+ zero otherwise. We allocate them all together, to enable a
+ better runtime data access pattern. */
+ unsigned long **save_in_use;
+
+#ifdef ENABLE_GC_ALWAYS_COLLECT
+ /* List of free objects to be verified as actually free on the
+ next collection. */
+ struct free_object *free_object_list;
+#endif
+
+ struct
+ {
+ /* Total GC-allocated memory. */
+ unsigned long long total_allocated;
+ /* Total overhead for GC-allocated memory. */
+ unsigned long long total_overhead;
+
+ /* Total allocations and overhead for sizes less than 32, 64 and 128.
+ These sizes are interesting because they are typical cache line
+ sizes. */
+
+ unsigned long long total_allocated_under32;
+ unsigned long long total_overhead_under32;
+
+ unsigned long long total_allocated_under64;
+ unsigned long long total_overhead_under64;
+
+ unsigned long long total_allocated_under128;
+ unsigned long long total_overhead_under128;
+
+ /* The allocations for each of the allocation orders. */
+ unsigned long long total_allocated_per_order[NUM_ORDERS];
+
+ /* The overhead for each of the allocation orders. */
+ unsigned long long total_overhead_per_order[NUM_ORDERS];
+ } stats;
+};
+
+/* Structures for saving and restoring GCable memory to disk. */
+
+struct ptr_data
+{
+ void *obj;
+ void *note_ptr_cookie;
+ gt_note_pointers note_ptr_fn;
+ gt_handle_reorder reorder_fn;
+ size_t size;
+ void *new_addr;
+};
+
+#define POINTER_HASH(x) (hashval_t)((intptr_t)x >> 3)
+
+/* Helper for hashing saving_htab. */
+
+struct saving_hasher : typed_free_remove <ptr_data>
+{
+ typedef ptr_data value_type;
+ typedef void compare_type;
+ static inline hashval_t hash (const value_type *);
+ static inline bool equal (const value_type *, const compare_type *);
+};
+
+inline hashval_t
+saving_hasher::hash (const value_type *p)
+{
+ return POINTER_HASH (p->obj);
+}
+
+inline bool
+saving_hasher::equal (const value_type *p1, const compare_type *p2)
+{
+ return p1->obj == p2;
+}
+
+/* Datastructure used to store per-call-site statistics. */
+struct loc_descriptor
+{
+ const char *file;
+ int line;
+ const char *function;
+ int times;
+ size_t allocated;
+ size_t overhead;
+ size_t freed;
+ size_t collected;
+};
+
+/* Hash table helper. */
+
+struct loc_desc_hasher : typed_noop_remove <loc_descriptor>
+{
+ typedef loc_descriptor value_type;
+ typedef loc_descriptor compare_type;
+ static inline hashval_t hash (const value_type *);
+ static inline bool equal (const value_type *, const compare_type *);
+};
+
+inline hashval_t
+loc_desc_hasher::hash (const value_type *d)
+{
+ return htab_hash_pointer (d->function) | d->line;
+}
+
+inline bool
+loc_desc_hasher::equal (const value_type *d, const compare_type *d2)
+{
+ return (d->file == d2->file && d->line == d2->line
+ && d->function == d2->function);
+}
+
+struct ptr_hash_entry
+{
+ void *ptr;
+ struct loc_descriptor *loc;
+ size_t size;
+};
+
+/* Helper for ptr_hash table. */
+
+struct ptr_hash_hasher : typed_noop_remove <ptr_hash_entry>
+{
+ typedef ptr_hash_entry value_type;
+ typedef void compare_type;
+ static inline hashval_t hash (const value_type *);
+ static inline bool equal (const value_type *, const compare_type *);
+};
+
+inline hashval_t
+ptr_hash_hasher::hash (const value_type *d)
+{
+ return htab_hash_pointer (d->ptr);
+}
+
+inline bool
+ptr_hash_hasher::equal (const value_type *p, const compare_type *p2)
+{
+ return (p->ptr == p2);
+}
+
+} // namespace ggc_impl
+
+class gc_heap_common
+{
+public:
+ /* When true, protect the contents of the identifier hash table. */
+ bool protect_identifiers_;
+
+protected:
+ void record_overhead (size_t, size_t, void * FINAL_MEM_STAT_DECL);
+
+private:
+ struct ggc_impl::loc_descriptor *
+ make_loc_descriptor (const char *name, int line, const char *function);
+
+protected:
+
+ /* Helper for hashing saving_htab. */
+ hash_table <ggc_impl::saving_hasher> saving_htab;
+
+ /* Hashtable used for statistics. */
+ hash_table <ggc_impl::loc_desc_hasher> loc_hash;
+
+ /* Hashtable converting address of allocated field to loc descriptor. */
+ hash_table <ggc_impl::ptr_hash_hasher> ptr_hash;
+
+}; // class gc_heap_common
+
+/* "Bag-of-pages" garbage collector. */
+
+class gc_heap_pages : public gc_heap_common
+{
+private:
+ typedef const struct ggc_cache_tab *const_ggc_cache_tab_t;
+ typedef const struct ggc_root_tab *const_ggc_root_tab_t;
+
+public:
+ void init ();
+
+ void collect ();
+
+ void register_root_tab (const struct ggc_root_tab* rt);
+ void register_cache_tab (const struct ggc_cache_tab* ct);
+
+ void print_statistics ();
+ void dump_ggc_loc_statistics (bool final);
+
+ DEBUG_FUNCTION void debug_print_page_list (int order) const;
+
+ size_t round_alloc_size (size_t requested_size) const;
+
+ void gt_ggc_m_S (const void *p);
+
+ int set_mark (const void *p);
+ int marked_p (const void *p) const;
+
+ /* Return the size of the gc-able object P. */
+ size_t get_size (const void *p) const;
+
+ /* Release the memory for object P. We prefix this with "gc_" to
+ make a clear distinction between this method and the ::free
+ function within method bodies. */
+ void gc_free (void *p);
+
+ void pch_count_object (struct ggc_pch_data *d, size_t size) const;
+ size_t pch_total_size (struct ggc_pch_data *d) const;
+ void pch_this_base (struct ggc_pch_data *d, void *base) const;
+ char * pch_alloc_object (struct ggc_pch_data *d, size_t size);
+ void pch_write_object (struct ggc_pch_data *d, FILE *f, void *x,
+ size_t size);
+ void pch_read (FILE *f, void *addr);
+
+ int
+ pch_note_object (void *obj, void *note_ptr_cookie,
+ gt_note_pointers note_ptr_fn);
+ void
+ pch_note_reorder (void *obj, void *note_ptr_cookie,
+ gt_handle_reorder reorder_fn);
+
+ void pch_save (FILE *f);
+
+private:
+ void scan_cache_tab (const_ggc_cache_tab_t ctp);
+ void mark_root_tab (const_ggc_root_tab_t rt);
+
+ void mark_roots ();
+
+ void print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
+ ggc_statistics *stats);
+
+ /* The following are implemented in ggc-page.c. */
+
+ void push_depth (unsigned int i);
+ void push_by_depth (ggc_impl::page_entry *p, unsigned long *s);
+ int allocated_p (const void *p) const;
+ ggc_impl::page_entry * lookup_page_table_entry (const void *p) const;
+ void set_page_table_entry (void *p, ggc_impl::page_entry *entry);
+ char * alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check);
+ struct ggc_impl::page_entry * alloc_page (unsigned order);
+ void adjust_depth ();
+ void free_page (ggc_impl::page_entry *entry);
+ void release_pages (void);
+ void round_alloc_size_1 (size_t requested_size,
+ size_t *size_order,
+ size_t *alloced_size) const;
+
+ void * internal_alloc_stat (size_t size MEM_STAT_DECL);
+ friend void * ggc_internal_alloc_stat (size_t size MEM_STAT_DECL);
+
+ void compute_inverse (unsigned order);
+ void recalculate_in_use_p (ggc_impl::page_entry *p);
+
+ void clear_marks ();
+ void sweep_pages ();
+ void poison_pages ();
+
+ void move_ptes_to_front (int count_old_page_tables,
+ int count_new_page_tables);
+
+ /* Allocation and collection. */
+ static int prune_ptr (ggc_impl::ptr_hash_entry **slot, void *b);
+ void prune_overhead_list ();
+ void free_overhead (void *ptr);
+
+ static void relocate_ptrs (void *ptr_p, void *state_p);
+ void write_pch_globals (const struct ggc_root_tab * const *tab,
+ struct traversal_state *state);
+
+ /* Methods implemented in stringpool.c. */
+ void mark_stringpool ();
+ void purge_stringpool ();
+
+private:
+ /* When set, ggc_collect will do collection. */
+ bool force_collect_;
+
+ /* Statistics about the allocation. */
+ ggc_statistics *stats_;
+
+ /* This extra vector of dynamically registered root_tab-s is used by
+ ggc_mark_roots and gives the ability to dynamically add new GGC root
+ tables, for instance from some plugins; this vector is on the heap
+ since it is used by GGC internally. */
+ vec<const_ggc_root_tab_t> extra_root_vec;
+
+ /* This extra vector of dynamically registered cache_tab-s is used by
+ ggc_mark_roots and gives the ability to dynamically add new GGC cache
+ tables, for instance from some plugins; this vector is on the heap
+ since it is used by GGC internally. */
+ vec<const_ggc_cache_tab_t> extra_cache_vec;
+
+ /* The Ith entry is the number of objects on a page of order I. */
+
+ unsigned objects_per_page_table[NUM_ORDERS];
+
+ /* The Ith entry is the size of an object on a page of order I. */
+
+ size_t object_size_table[NUM_ORDERS];
+
+ /* The Ith entry is a pair of numbers (mult, shift) such that
+ ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
+ for all k evenly divisible by OBJECT_SIZE(I). */
+
+ struct inverse_table_d
+ {
+ size_t mult;
+ unsigned int shift;
+ }
+ inverse_table[NUM_ORDERS];
+
+ struct ggc_impl::globals G;
+}; // class gc_heap_pages
+
+typedef gc_heap_pages gc_heap;
+
+extern gc_heap the_heap;
+
#endif
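A standalone model of the two-level lookup just declared, for an assumed
32-bit host with 4 KiB pages (so PAGE_L2_BITS = 32 - 8 - 12 = 12); the
constants are assumptions, the shifts mirror LOOKUP_L1/LOOKUP_L2 exactly:

    #include <cstdint>
    #include <cstdio>

    static const unsigned PAGE_L1_BITS = 8;   /* as in ggc-internal.h */
    static const unsigned LG_PAGESIZE = 12;   /* assumed 4 KiB pages */
    static const unsigned PAGE_L2_BITS = 32 - PAGE_L1_BITS - LG_PAGESIZE;

    static unsigned lookup_l1 (uint32_t p)
    { return (p >> (32 - PAGE_L1_BITS)) & ((1u << PAGE_L1_BITS) - 1); }

    static unsigned lookup_l2 (uint32_t p)
    { return (p >> LG_PAGESIZE) & ((1u << PAGE_L2_BITS) - 1); }

    int main ()
    {
      uint32_t p = 0xdeadb123;   /* any address on the 0xdeadb000 page */
      /* Prints L1=0xde L2=0xadb: the page_entry for p lives at
         G.lookup[0xde][0xadb]; the low LG_PAGESIZE bits are ignored.  */
      printf ("L1=%#x L2=%#x\n", lookup_l1 (p), lookup_l2 (p));
      return 0;
    }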
@@ -54,35 +54,6 @@ along with GCC; see the file COPYING3. If not see
# define USING_MADVISE
#endif
-/* Strategy:
-
- This garbage-collecting allocator allocates objects on one of a set
- of pages. Each page can allocate objects of a single size only;
- available sizes are powers of two starting at four bytes. The size
- of an allocation request is rounded up to the next power of two
- (`order'), and satisfied from the appropriate page.
-
- Each page is recorded in a page-entry, which also maintains an
- in-use bitmap of object positions on the page. This allows the
- allocation state of a particular object to be flipped without
- touching the page itself.
-
- Each page-entry also has a context depth, which is used to track
- pushing and popping of allocation contexts. Only objects allocated
- in the current (highest-numbered) context may be collected.
-
- Page entries are arranged in an array of singly-linked lists. The
- array is indexed by the allocation size, in bits, of the pages on
- it; i.e. all pages on a list allocate objects of the same size.
- Pages are ordered on the list such that all non-full pages precede
- all full pages, with non-full pages arranged in order of decreasing
- context depth.
-
- Empty pages (of all orders) are kept on a single page cache list,
- and are considered first when new pages are required; they are
- deallocated at the start of the next collection if they haven't
- been recycled by then. */
-
/* Define GGC_DEBUG_LEVEL to print debugging information.
0: No debugging output.
1: GC statistics only.
@@ -91,373 +62,24 @@ along with GCC; see the file COPYING3. If not see
4: Object marks as well. */
#define GGC_DEBUG_LEVEL (0)
-#ifndef HOST_BITS_PER_PTR
-#define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
-#endif
-
-
-/* A two-level tree is used to look up the page-entry for a given
- pointer. Two chunks of the pointer's bits are extracted to index
- the first and second levels of the tree, as follows:
-
- HOST_PAGE_SIZE_BITS
- 32 | |
- msb +----------------+----+------+------+ lsb
- | | |
- PAGE_L1_BITS |
- | |
- PAGE_L2_BITS
-
- The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
- pages are aligned on system page boundaries. The next most
- significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
- index values in the lookup table, respectively.
-
- For 32-bit architectures and the settings below, there are no
- leftover bits. For architectures with wider pointers, the lookup
- tree points to a list of pages, which must be scanned to find the
- correct one. */
-
-#define PAGE_L1_BITS (8)
-#define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
-#define PAGE_L1_SIZE ((uintptr_t) 1 << PAGE_L1_BITS)
-#define PAGE_L2_SIZE ((uintptr_t) 1 << PAGE_L2_BITS)
-
-#define LOOKUP_L1(p) \
- (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
-
-#define LOOKUP_L2(p) \
- (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
-
-/* The number of objects per allocation page, for objects on a page of
- the indicated ORDER. */
-#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
-
-/* The number of objects in P. */
-#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
-
-/* The size of an object on a page of the indicated ORDER. */
-#define OBJECT_SIZE(ORDER) object_size_table[ORDER]
-
-/* For speed, we avoid doing a general integer divide to locate the
- offset in the allocation bitmap, by precalculating numbers M, S
- such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
- within the page which is evenly divisible by the object size Z. */
-#define DIV_MULT(ORDER) inverse_table[ORDER].mult
-#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
-#define OFFSET_TO_BIT(OFFSET, ORDER) \
- (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
-
-/* We use this structure to determine the alignment required for
- allocations. For power-of-two sized allocations, that's not a
- problem, but it does matter for odd-sized allocations.
- We do not care about alignment for floating-point types. */
-
-struct max_alignment {
- char c;
- union {
- HOST_WIDEST_INT i;
- void *p;
- } u;
-};
-
-/* The biggest alignment required. */
-
-#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
-
-
-/* The number of extra orders, not corresponding to power-of-two sized
- objects. */
-
-#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
-
-#define RTL_SIZE(NSLOTS) \
- (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
-
-#define TREE_EXP_SIZE(OPS) \
- (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
-
-/* The Ith entry is the maximum size of an object to be stored in the
- Ith extra order. Adding a new entry to this array is the *only*
- thing you need to do to add a new special allocation size. */
-
-static const size_t extra_order_size_table[] = {
- /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
- There are a lot of structures with these sizes and explicitly
- listing them risks orders being dropped because they changed size. */
- MAX_ALIGNMENT * 3,
- MAX_ALIGNMENT * 5,
- MAX_ALIGNMENT * 6,
- MAX_ALIGNMENT * 7,
- MAX_ALIGNMENT * 9,
- MAX_ALIGNMENT * 10,
- MAX_ALIGNMENT * 11,
- MAX_ALIGNMENT * 12,
- MAX_ALIGNMENT * 13,
- MAX_ALIGNMENT * 14,
- MAX_ALIGNMENT * 15,
- sizeof (struct tree_decl_non_common),
- sizeof (struct tree_field_decl),
- sizeof (struct tree_parm_decl),
- sizeof (struct tree_var_decl),
- sizeof (struct tree_type_non_common),
- sizeof (struct function),
- sizeof (struct basic_block_def),
- sizeof (struct cgraph_node),
- sizeof (struct loop),
-};
-
-/* The total number of orders. */
-
-#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
-
-/* Compute the smallest nonnegative number which when added to X gives
- a multiple of F. */
-
-#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
-
-/* Compute the smallest multiple of F that is >= X. */
+gc_heap the_heap;
-#define ROUND_UP(x, f) (CEIL (x, f) * (f))
+using namespace ggc_impl;
-/* Round X to next multiple of the page size */
-
-#define PAGE_ALIGN(x) (((x) + G.pagesize - 1) & ~(G.pagesize - 1))
-
-/* The Ith entry is the number of objects on a page or order I. */
-
-static unsigned objects_per_page_table[NUM_ORDERS];
-
-/* The Ith entry is the size of an object on a page of order I. */
-
-static size_t object_size_table[NUM_ORDERS];
-
-/* The Ith entry is a pair of numbers (mult, shift) such that
- ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
- for all k evenly divisible by OBJECT_SIZE(I). */
-
-static struct
+void
+gc_heap_pages::register_root_tab (const struct ggc_root_tab* rt)
{
- size_t mult;
- unsigned int shift;
+ if (rt)
+ extra_root_vec.safe_push (rt);
}
-inverse_table[NUM_ORDERS];
-
-/* A page_entry records the status of an allocation page. This
- structure is dynamically sized to fit the bitmap in_use_p. */
-typedef struct page_entry
-{
- /* The next page-entry with objects of the same size, or NULL if
- this is the last page-entry. */
- struct page_entry *next;
-
- /* The previous page-entry with objects of the same size, or NULL if
- this is the first page-entry. The PREV pointer exists solely to
- keep the cost of ggc_free manageable. */
- struct page_entry *prev;
-
- /* The number of bytes allocated. (This will always be a multiple
- of the host system page size.) */
- size_t bytes;
- /* The address at which the memory is allocated. */
- char *page;
-
-#ifdef USING_MALLOC_PAGE_GROUPS
- /* Back pointer to the page group this page came from. */
- struct page_group *group;
-#endif
-
- /* This is the index in the by_depth varray where this page table
- can be found. */
- unsigned long index_by_depth;
-
- /* Context depth of this page. */
- unsigned short context_depth;
-
- /* The number of free objects remaining on this page. */
- unsigned short num_free_objects;
-
- /* A likely candidate for the bit position of a free object for the
- next allocation from this page. */
- unsigned short next_bit_hint;
-
- /* The lg of size of objects allocated from this page. */
- unsigned char order;
-
- /* Discarded page? */
- bool discarded;
-
- /* A bit vector indicating whether or not objects are in use. The
- Nth bit is one if the Nth object on this page is allocated. This
- array is dynamically sized. */
- unsigned long in_use_p[1];
-} page_entry;
-
-#ifdef USING_MALLOC_PAGE_GROUPS
-/* A page_group describes a large allocation from malloc, from which
- we parcel out aligned pages. */
-typedef struct page_group
-{
- /* A linked list of all extant page groups. */
- struct page_group *next;
-
- /* The address we received from malloc. */
- char *allocation;
-
- /* The size of the block. */
- size_t alloc_size;
-
- /* A bitmask of pages in use. */
- unsigned int in_use;
-} page_group;
-#endif
-
-#if HOST_BITS_PER_PTR <= 32
-
-/* On 32-bit hosts, we use a two level page table, as pictured above. */
-typedef page_entry **page_table[PAGE_L1_SIZE];
-
-#else
-
-/* On 64-bit hosts, we use the same two level page tables plus a linked
- list that disambiguates the top 32-bits. There will almost always be
- exactly one entry in the list. */
-typedef struct page_table_chain
-{
- struct page_table_chain *next;
- size_t high_bits;
- page_entry **table[PAGE_L1_SIZE];
-} *page_table;
-
-#endif
-
-#ifdef ENABLE_GC_ALWAYS_COLLECT
-/* List of free objects to be verified as actually free on the
- next collection. */
-struct free_object
-{
- void *object;
- struct free_object *next;
-};
-#endif
-
-/* The rest of the global variables. */
-static struct globals
+void
+gc_heap_pages::register_cache_tab (const struct ggc_cache_tab* ct)
{
- /* The Nth element in this array is a page with objects of size 2^N.
- If there are any pages with free objects, they will be at the
- head of the list. NULL if there are no page-entries for this
- object size. */
- page_entry *pages[NUM_ORDERS];
-
- /* The Nth element in this array is the last page with objects of
- size 2^N. NULL if there are no page-entries for this object
- size. */
- page_entry *page_tails[NUM_ORDERS];
-
- /* Lookup table for associating allocation pages with object addresses. */
- page_table lookup;
-
- /* The system's page size. */
- size_t pagesize;
- size_t lg_pagesize;
-
- /* Bytes currently allocated. */
- size_t allocated;
-
- /* Bytes currently allocated at the end of the last collection. */
- size_t allocated_last_gc;
-
- /* Total amount of memory mapped. */
- size_t bytes_mapped;
-
- /* Bit N set if any allocations have been done at context depth N. */
- unsigned long context_depth_allocations;
-
- /* Bit N set if any collections have been done at context depth N. */
- unsigned long context_depth_collections;
-
- /* The current depth in the context stack. */
- unsigned short context_depth;
-
- /* A file descriptor open to /dev/zero for reading. */
-#if defined (HAVE_MMAP_DEV_ZERO)
- int dev_zero_fd;
-#endif
-
- /* A cache of free system pages. */
- page_entry *free_pages;
-
-#ifdef USING_MALLOC_PAGE_GROUPS
- page_group *page_groups;
-#endif
-
- /* The file descriptor for debugging output. */
- FILE *debug_file;
-
- /* Current number of elements in use in depth below. */
- unsigned int depth_in_use;
-
- /* Maximum number of elements that can be used before resizing. */
- unsigned int depth_max;
-
- /* Each element of this array is an index in by_depth where the given
- depth starts. This structure is indexed by that given depth we
- are interested in. */
- unsigned int *depth;
-
- /* Current number of elements in use in by_depth below. */
- unsigned int by_depth_in_use;
-
- /* Maximum number of elements that can be used before resizing. */
- unsigned int by_depth_max;
-
- /* Each element of this array is a pointer to a page_entry, all
- page_entries can be found in here by increasing depth.
- index_by_depth in the page_entry is the index into this data
- structure where that page_entry can be found. This is used to
- speed up finding all page_entries at a particular depth. */
- page_entry **by_depth;
-
- /* Each element is a pointer to the saved in_use_p bits, if any,
- zero otherwise. We allocate them all together, to enable a
- better runtime data access pattern. */
- unsigned long **save_in_use;
-
-#ifdef ENABLE_GC_ALWAYS_COLLECT
- /* List of free objects to be verified as actually free on the
- next collection. */
- struct free_object *free_object_list;
-#endif
-
- struct
- {
- /* Total GC-allocated memory. */
- unsigned long long total_allocated;
- /* Total overhead for GC-allocated memory. */
- unsigned long long total_overhead;
-
- /* Total allocations and overhead for sizes less than 32, 64 and 128.
- These sizes are interesting because they are typical cache line
- sizes. */
-
- unsigned long long total_allocated_under32;
- unsigned long long total_overhead_under32;
-
- unsigned long long total_allocated_under64;
- unsigned long long total_overhead_under64;
-
- unsigned long long total_allocated_under128;
- unsigned long long total_overhead_under128;
-
- /* The allocations for each of the allocation orders. */
- unsigned long long total_allocated_per_order[NUM_ORDERS];
+ if (ct)
+ extra_cache_vec.safe_push (ct);
+}
- /* The overhead for each of the allocation orders. */
- unsigned long long total_overhead_per_order[NUM_ORDERS];
- } stats;
-} G;
/* The size in bytes required to maintain a bitmap for the objects
on a page-entry. */
@@ -480,35 +102,18 @@ static struct globals
/* Initial guess as to how many page table entries we might need. */
#define INITIAL_PTE_COUNT 128
-static int ggc_allocated_p (const void *);
-static page_entry *lookup_page_table_entry (const void *);
-static void set_page_table_entry (void *, page_entry *);
-#ifdef USING_MMAP
-static char *alloc_anon (char *, size_t, bool check);
-#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
-static struct page_entry * alloc_page (unsigned);
-static void free_page (struct page_entry *);
-static void release_pages (void);
-static void clear_marks (void);
-static void sweep_pages (void);
-static void ggc_recalculate_in_use_p (page_entry *);
-static void compute_inverse (unsigned);
-static inline void adjust_depth (void);
-static void move_ptes_to_front (int, int);
void debug_print_page_list (int);
-static void push_depth (unsigned int);
-static void push_by_depth (page_entry *, unsigned long *);
/* Push an entry onto G.depth. */
-inline static void
-push_depth (unsigned int i)
+inline void
+gc_heap_pages::push_depth (unsigned int i)
{
if (G.depth_in_use >= G.depth_max)
{
@@ -520,8 +125,8 @@ push_depth (unsigned int i)
/* Push an entry onto G.by_depth and G.save_in_use. */
-inline static void
-push_by_depth (page_entry *p, unsigned long *s)
+inline void
+gc_heap_pages::push_by_depth (ggc_impl::page_entry *p, unsigned long *s)
{
if (G.by_depth_in_use >= G.by_depth_max)
{
@@ -547,8 +152,8 @@ push_by_depth (page_entry *p, unsigned long *s)
/* Returns nonzero if P was allocated in GC'able memory. */
-static inline int
-ggc_allocated_p (const void *p)
+inline int
+gc_heap_pages::allocated_p (const void *p) const
{
page_entry ***base;
size_t L1, L2;
@@ -579,8 +184,8 @@ ggc_allocated_p (const void *p)
/* Traverse the page table and find the entry for a page.
Die (probably) if the object wasn't allocated via GC. */
-static inline page_entry *
-lookup_page_table_entry (const void *p)
+page_entry *
+gc_heap_pages::lookup_page_table_entry (const void *p) const
{
page_entry ***base;
size_t L1, L2;
@@ -604,8 +209,8 @@ lookup_page_table_entry (const void *p)
/* Set the page table entry for a page. */
-static void
-set_page_table_entry (void *p, page_entry *entry)
+void
+gc_heap_pages::set_page_table_entry (void *p, page_entry *entry)
{
page_entry ***base;
size_t L1, L2;
@@ -643,6 +248,12 @@ found:
DEBUG_FUNCTION void
debug_print_page_list (int order)
{
+ the_heap.debug_print_page_list (order);
+}
+
+DEBUG_FUNCTION void
+gc_heap_pages::debug_print_page_list (int order) const
+{
page_entry *p;
printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
(void *) G.page_tails[order]);
@@ -662,7 +273,8 @@ debug_print_page_list (int order)
(if non-null). The ifdef structure here is intended to cause a
compile error unless exactly one of the HAVE_* is defined. */
-static inline char *
+inline char *
+gc_heap_pages::
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
{
#ifdef HAVE_MMAP_ANON
@@ -721,8 +333,8 @@ clear_page_group_in_use (page_group *group, char *page)
and return an entry for it. The entry is not added to the
appropriate page_table list. */
-static inline struct page_entry *
-alloc_page (unsigned order)
+inline struct page_entry *
+gc_heap_pages::alloc_page (unsigned order)
{
struct page_entry *entry, *p, **pp;
char *page;
@@ -913,8 +525,8 @@ alloc_page (unsigned order)
/* Adjust the size of G.depth so that no index greater than the one
used by the top of the G.by_depth is used. */
-static inline void
-adjust_depth (void)
+inline void
+gc_heap_pages::adjust_depth ()
{
page_entry *top;
@@ -932,8 +544,8 @@ adjust_depth (void)
/* For a page that is no longer needed, put it on the free page list. */
-static void
-free_page (page_entry *entry)
+void
+gc_heap_pages::free_page (page_entry *entry)
{
if (GGC_DEBUG_LEVEL >= 2)
fprintf (G.debug_file,
@@ -974,8 +586,8 @@ free_page (page_entry *entry)
/* Release the free page cache to the system. */
-static void
-release_pages (void)
+void
+gc_heap_pages::release_pages (void)
{
#ifdef USING_MADVISE
page_entry *p, *start_p;
@@ -1162,10 +774,10 @@ static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
actual size that is going to be allocated, as well as the size
order. */
-static void
-ggc_round_alloc_size_1 (size_t requested_size,
- size_t *size_order,
- size_t *alloced_size)
+void
+gc_heap_pages::round_alloc_size_1 (size_t requested_size,
+ size_t *size_order,
+ size_t *alloced_size) const
{
size_t order, object_size;
@@ -1193,9 +805,15 @@ ggc_round_alloc_size_1 (size_t requested_size,
size_t
ggc_round_alloc_size (size_t requested_size)
{
+ return the_heap.round_alloc_size (requested_size);
+}
+
+size_t
+gc_heap_pages::round_alloc_size (size_t requested_size) const
+{
size_t size = 0;
-
- ggc_round_alloc_size_1 (requested_size, NULL, &size);
+
+ round_alloc_size_1 (requested_size, NULL, &size);
return size;
}
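Worked examples for the rounding this wrapper forwards.  Assuming a host where
MAX_ALIGNMENT is 8, requests snap up to the smallest order that fits, which is
either a power of two or one of the extra_order_size_table multiples; the
exact results depend on the host's size_lookup contents:

    ggc_round_alloc_size (13);   /* -> 16, power-of-two order */
    ggc_round_alloc_size (20);   /* -> 24, extra order MAX_ALIGNMENT * 3 */
    ggc_round_alloc_size (100);  /* -> 104, extra order MAX_ALIGNMENT * 13 */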
@@ -1204,11 +822,17 @@ ggc_round_alloc_size (size_t requested_size)
void *
ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
{
+ return the_heap.internal_alloc_stat (size PASS_MEM_STAT);
+}
+
+void *
+gc_heap_pages::internal_alloc_stat (size_t size MEM_STAT_DECL)
+{
size_t order, word, bit, object_offset, object_size;
struct page_entry *entry;
void *result;
- ggc_round_alloc_size_1 (size, &order, &object_size);
+ round_alloc_size_1 (size, &order, &object_size);
/* If there are non-full pages for this size allocation, they are at
the head of the list. */
@@ -1313,8 +937,8 @@ ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
/* Calculate the object's address. */
result = entry->page + object_offset;
if (GATHER_STATISTICS)
- ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
- result FINAL_PASS_MEM_STAT);
+ record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
+ result FINAL_PASS_MEM_STAT);
#ifdef ENABLE_GC_CHECKING
/* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
@@ -1385,12 +1009,18 @@ ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
void
gt_ggc_m_S (const void *p)
{
+ the_heap.gt_ggc_m_S (p);
+}
+
+void
+gc_heap_pages::gt_ggc_m_S (const void *p)
+{
page_entry *entry;
unsigned bit, word;
unsigned long mask;
unsigned long offset;
- if (!p || !ggc_allocated_p (p))
+ if (!p || !allocated_p (p))
return;
 /* Look up the page on which the object is alloced. */
@@ -1458,6 +1088,12 @@ gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED)
int
ggc_set_mark (const void *p)
{
+ return the_heap.set_mark (p);
+}
+
+int
+gc_heap_pages::set_mark (const void *p)
+{
page_entry *entry;
unsigned bit, word;
unsigned long mask;
@@ -1494,6 +1130,12 @@ ggc_set_mark (const void *p)
int
ggc_marked_p (const void *p)
{
+ return the_heap.marked_p (p);
+}
+
+int
+gc_heap_pages::marked_p (const void *p) const
+{
page_entry *entry;
unsigned bit, word;
unsigned long mask;
@@ -1517,6 +1159,12 @@ ggc_marked_p (const void *p)
size_t
ggc_get_size (const void *p)
{
+ return the_heap.get_size (p);
+}
+
+size_t
+gc_heap_pages::get_size (const void *p) const
+{
page_entry *pe = lookup_page_table_entry (p);
return OBJECT_SIZE (pe->order);
}
@@ -1526,12 +1174,18 @@ ggc_get_size (const void *p)
void
ggc_free (void *p)
{
+ the_heap.gc_free (p);
+}
+
+void
+gc_heap_pages::gc_free (void *p)
+{
page_entry *pe = lookup_page_table_entry (p);
size_t order = pe->order;
size_t size = OBJECT_SIZE (order);
if (GATHER_STATISTICS)
- ggc_free_overhead (p);
+ free_overhead (p);
if (GGC_DEBUG_LEVEL >= 3)
fprintf (G.debug_file,
@@ -1608,16 +1262,17 @@ ggc_free (void *p)
#endif
}
-/* Subroutine of init_ggc which computes the pair of numbers used to
- perform division by OBJECT_SIZE (order) and fills in inverse_table[].
+/* Subroutine of gc_heap_pages::init which computes the pair of numbers
+ used to perform division by OBJECT_SIZE (order) and fills in
+ inverse_table[].
This algorithm is taken from Granlund and Montgomery's paper
"Division by Invariant Integers using Multiplication"
(Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
constants). */
-static void
-compute_inverse (unsigned order)
+void
+gc_heap_pages::compute_inverse (unsigned order)
{
size_t size, inv;
unsigned int e;
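The Granlund-Montgomery trick cited above is small enough to verify
standalone.  This sketch repeats the Newton iteration from the body of
compute_inverse: factors of two go into the shift, then the odd remainder's
inverse mod 2^32 is found by quadratic convergence.  An assumed object size of
24 stands in for OBJECT_SIZE (order):

    #include <cstdint>
    #include <cstdio>

    static void
    compute_inverse_pair (uint32_t size, uint32_t *mult, unsigned *shift)
    {
      unsigned e = 0;
      while ((size & 1) == 0)   /* strip twos into the shift */
        {
          e++;
          size >>= 1;
        }
      uint32_t inv = size;      /* Newton: converges to size^-1 mod 2^32 */
      while (inv * size != 1)
        inv = inv * (2 - inv * size);
      *mult = inv;
      *shift = e;
    }

    int main ()
    {
      const uint32_t size = 24;   /* assumed object size */
      uint32_t mult;
      unsigned shift;
      compute_inverse_pair (size, &mult, &shift);
      /* Check (O * mult) >> shift == O / size for exact multiples,
         i.e. the OFFSET_TO_BIT identity.  */
      for (uint32_t o = 0; o <= 40 * size; o += size)
        if (((o * mult) >> shift) != o / size)
          return 1;
      printf ("size=%u mult=%#x shift=%u verified\n", size, mult, shift);
      return 0;
    }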
@@ -1639,8 +1294,9 @@ compute_inverse (unsigned order)
}
/* Initialize the ggc-mmap allocator. */
+
void
-init_ggc (void)
+gc_heap_pages::init ()
{
unsigned order;
@@ -1737,8 +1393,8 @@ init_ggc (void)
/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
-static void
-ggc_recalculate_in_use_p (page_entry *p)
+void
+gc_heap_pages::recalculate_in_use_p (page_entry *p)
{
unsigned int i;
size_t num_objects;
@@ -1772,8 +1428,8 @@ ggc_recalculate_in_use_p (page_entry *p)
/* Unmark all objects. */
-static void
-clear_marks (void)
+void
+gc_heap_pages::clear_marks ()
{
unsigned order;
@@ -1814,8 +1470,8 @@ clear_marks (void)
/* Free all empty pages. Partially empty pages need no attention
because the `mark' bit doubles as an `unused' bit. */
-static void
-sweep_pages (void)
+void
+gc_heap_pages::sweep_pages ()
{
unsigned order;
@@ -1940,16 +1596,16 @@ sweep_pages (void)
other than the current one. */
for (p = G.pages[order]; p; p = p->next)
if (p->context_depth != G.context_depth)
- ggc_recalculate_in_use_p (p);
+ recalculate_in_use_p (p);
}
}
-#ifdef ENABLE_GC_CHECKING
/* Clobber all free objects. */
-static void
-poison_pages (void)
+void
+gc_heap_pages::poison_pages ()
{
+#ifdef ENABLE_GC_CHECKING
unsigned order;
for (order = 2; order < NUM_ORDERS; order++)
@@ -1993,10 +1649,8 @@ poison_pages (void)
}
}
}
-}
-#else
-#define poison_pages()
#endif
+}
#ifdef ENABLE_GC_ALWAYS_COLLECT
/* Validate that the reportedly free objects actually are. */
@@ -2043,6 +1697,12 @@ validate_free_objects (void)
void
ggc_collect (void)
{
+ the_heap.collect ();
+}
+
+void
+gc_heap_pages::collect ()
+{
/* Avoid frequent unnecessary work by skipping collection if the
total allocations haven't expanded much since the last
collection. */
@@ -2051,7 +1711,7 @@ ggc_collect (void)
float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
- if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
+ if (G.allocated < allocated_last_gc + min_expand && !force_collect_)
return;
timevar_push (TV_GC);
@@ -2074,10 +1734,10 @@ ggc_collect (void)
invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
clear_marks ();
- ggc_mark_roots ();
+ mark_roots ();
if (GATHER_STATISTICS)
- ggc_prune_overhead_list ();
+ prune_overhead_list ();
poison_pages ();
validate_free_objects ();
@@ -2106,6 +1766,12 @@ ggc_collect (void)
void
ggc_print_statistics (void)
{
+ the_heap.print_statistics ();
+}
+
+void
+gc_heap_pages::print_statistics ()
+{
struct ggc_statistics stats;
unsigned int i;
size_t total_overhead = 0;
@@ -2117,7 +1783,7 @@ ggc_print_statistics (void)
G.allocated_last_gc = 0;
/* Collect and print the statistics common across collectors. */
- ggc_print_common_statistics (stderr, &stats);
+ print_common_statistics (stderr, &stats);
/* Release free pages so that we will not count the bytes allocated
there as part of the total allocated memory. */
@@ -2223,6 +1889,12 @@ void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
size_t size, bool is_string ATTRIBUTE_UNUSED)
{
+ the_heap.pch_count_object (d, size);
+}
+
+void
+gc_heap_pages::pch_count_object (struct ggc_pch_data *d, size_t size) const
+{
unsigned order;
if (size < NUM_SIZE_LOOKUP)
@@ -2240,6 +1912,12 @@ ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
+ return the_heap.pch_total_size (d);
+}
+
+size_t
+gc_heap_pages::pch_total_size (struct ggc_pch_data *d) const
+{
size_t a = 0;
unsigned i;
@@ -2251,6 +1929,12 @@ ggc_pch_total_size (struct ggc_pch_data *d)
void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
+ the_heap.pch_this_base (d, base);
+}
+
+void
+gc_heap_pages::pch_this_base (struct ggc_pch_data *d, void *base) const
+{
uintptr_t a = (uintptr_t) base;
unsigned i;
@@ -2266,6 +1950,12 @@ char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
size_t size, bool is_string ATTRIBUTE_UNUSED)
{
+ return the_heap.pch_alloc_object (d, size);
+}
+
+char *
+gc_heap_pages::pch_alloc_object (struct ggc_pch_data *d, size_t size)
+{
unsigned order;
char *result;
@@ -2291,10 +1981,17 @@ ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
}
void
-ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
+ggc_pch_write_object (struct ggc_pch_data *d,
FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
size_t size, bool is_string ATTRIBUTE_UNUSED)
{
+ the_heap.pch_write_object (d, f, x, size);
+}
+
+void
+gc_heap_pages::pch_write_object (struct ggc_pch_data *d, FILE *f, void *x,
+ size_t size)
+{
unsigned order;
static const char emptyBytes[256] = { 0 };
@@ -2353,7 +2050,8 @@ ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
/* Move the PCH PTE entries just added to the end of by_depth, to the
front. */
-static void
+void
+gc_heap_pages::
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
unsigned i;
@@ -2403,6 +2101,12 @@ move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
void
ggc_pch_read (FILE *f, void *addr)
{
+ the_heap.pch_read (f, addr);
+}
+
+void
+gc_heap_pages::pch_read (FILE *f, void *addr)
+{
struct ggc_pch_ondisk d;
unsigned i;
char *offs = (char *) addr;
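Stepping back: the PCH write path that moved into gc_heap_pages reads as one
pipeline, unchanged for callers because every entry point keeps its forwarding
wrapper.  A sketch of the write side; foo.gch is a hypothetical output file:

    /* 1. pch_note_object registers each live object in saving_htab;
       2. objects are sorted by address (compare_ptr_data) and each is
          assigned its relocated new_addr;
       3. note_ptr_fn callbacks apply relocate_ptrs to every embedded
          pointer, rewriting it via saving_htab.find_with_hash;
       4. write_pch_globals emits the relocated global roots, then the
          object image itself is written.  */
    FILE *f = fopen ("foo.gch", "wb");
    gt_pch_save (f);              /* forwards to the_heap.pch_save (f) */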
@@ -120,15 +120,6 @@ extern void gt_ggc_m_S (const void *);
/* Initialize the string pool. */
extern void init_stringpool (void);
-/* Initialize the garbage collector. */
-extern void init_ggc (void);
-
-/* When true, identifier nodes are considered as GC roots. When
- false, identifier nodes are treated like any other GC-allocated
- object, and the identifier hash table is treated as a weak
- hash. */
-extern bool ggc_protect_identifiers;
-
/* Write out all GCed objects to F. */
extern void gt_pch_save (FILE *f);
@@ -177,7 +177,7 @@ maybe_delete_ident (struct cpp_reader *pfile ATTRIBUTE_UNUSED, hashnode h,
roots during one part of compilation. */
void
-ggc_mark_stringpool (void)
+gc_heap_pages::mark_stringpool ()
{
ht_forall (ident_hash, mark_ident, NULL);
}
@@ -186,7 +186,7 @@ ggc_mark_stringpool (void)
referenced. */
void
-ggc_purge_stringpool (void)
+gc_heap_pages::purge_stringpool ()
{
ht_purge (ident_hash, maybe_delete_ident, NULL);
}
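What the pair above implements: when protect_identifiers_ is false,
purge_stringpool makes ident_hash behave as a weak table, dropping any
identifier the collector did not mark.  A minimal model of that behavior, not
the cpplib ht_* API:

    #include <vector>

    struct ident { const char *name; bool marked; };

    /* Drop unmarked entries, keep marked ones for the next cycle --
       the shape of maybe_delete_ident's decision, in miniature.  */
    static void
    purge (std::vector<ident *> &table)
    {
      for (size_t i = 0; i < table.size (); )
        if (!table[i]->marked)
          table.erase (table.begin () + i);   /* unreferenced: reclaim */
        else
          table[i]->marked = false, ++i;      /* survives this GC */
    }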
@@ -47,6 +47,7 @@ along with GCC; see the file COPYING3. If not see
#include "basic-block.h"
#include "intl.h"
#include "ggc.h"
+#include "ggc-internal.h" /* for the_heap */
#include "regs.h"
#include "timevar.h"
#include "diagnostic.h"
@@ -552,7 +553,7 @@ compile_file (void)
if (flag_syntax_only || flag_wpa)
return;
- ggc_protect_identifiers = false;
+ the_heap.protect_identifiers_ = false;
/* This must also call finalize_compilation_unit. */
lang_hooks.decls.final_write_globals ();
@@ -1139,7 +1140,7 @@ general_init (const char *argv0)
/* Initialize the garbage-collector, string pools and tree type hash
table. */
- init_ggc ();
+ the_heap.init ();
init_stringpool ();
line_table = ggc_alloc_line_maps ();
linemap_init (line_table);
@@ -1859,7 +1860,7 @@ do_compile (void)
{
/* Initialize yet another pass. */
- ggc_protect_identifiers = true;
+ the_heap.protect_identifiers_ = true;
init_cgraph ();
init_final (main_input_filename);