From 9702a6ea0b60985f08ff28b0977c1dc46c25f27b Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 24 Jun 2015 13:42:52 +0200
Subject: [PATCH] Add new object_allocator.
gcc/c-family/ChangeLog:
2015-06-24 Martin Liska <mliska@suse.cz>
* c-format.c (check_format_info_main): Use
object_allocator instead of pool_allocator.
(check_format_arg): Likewise.
gcc/ChangeLog:
2015-06-24 Martin Liska <mliska@suse.cz>
* alloc-pool.h (pool_allocator::initialize): Change to type-less
pool_allocator.
(pool_allocator::allocate): Likewise.
(pool_allocator::remove): Likewise.
* asan.c (struct asan_mem_ref): Likewise.
* cfg.c (initialize_original_copy_tables): Use object_allocator
instead of pool_allocator.
* cselib.c (struct elt_list): Change declaration of used
pool_allocator.
(new_cselib_val): Likewise.
* cselib.h (struct cselib_val): Change to type-less pool_allocator.
(struct elt_loc_list): Likewise.
* df-problems.c (df_chain_alloc): Use object_allocator instead of pool_allocator.
* df-scan.c (struct df_scan_problem_data): Likewise.
(df_scan_alloc): Likewise.
* df.h (struct dataflow): Likewise.
* dse.c (struct read_info_type): Change to type-less pool_allocator.
(struct insn_info_type): Likewise.
(struct dse_bb_info_type): Likewise.
(struct group_info): Likewise.
(struct deferred_change): Likewise.
* et-forest.c (struct et_occ): Likewise.
* et-forest.h (struct et_node): Likewise.
* ipa-cp.c: Use object_allocator instead of pool_allocator.
* ipa-inline-analysis.c: Likewise.
* ipa-profile.c: Likewise.
* ipa-prop.c: Likewise.
* ipa-prop.h: Likewise.
* ira-build.c (initiate_cost_vectors): Change to type-less pool_allocator.
(ira_allocate_cost_vector): Likewise.
* ira-color.c (struct update_cost_record): Use object_allocator instead of pool_allocator.
* lra-int.h (struct lra_live_range): Change to type-less pool_allocator.
(struct lra_copy): Likewise.
(struct lra_insn_reg): Likewise.
* lra-lives.c: Likewise.
* lra.c: Likewise.
* regcprop.c (struct queued_debug_insn_change): Use object_allocator instead of pool_allocator.
* sched-deps.c (sched_deps_init): Likewise.
* sel-sched-ir.c: Likewise.
* sel-sched-ir.h: Likewise.
* stmt.c (expand_case): Likewise.
(expand_sjlj_dispatch_table): Likewise.
* tree-sra.c (struct access): Change to type-less pool_allocator.
(struct assign_link): Likewise.
* tree-ssa-math-opts.c (pass_cse_reciprocals::execute): Use object_allocator instead of pool_allocator.
* tree-ssa-pre.c: Likewise.
* tree-ssa-reassoc.c: Likewise.
* tree-ssa-sccvn.c (allocate_vn_table): Likewise.
* tree-ssa-strlen.c: Likewise.
* tree-ssa-structalias.c: Likewise.
* var-tracking.c (onepart_pool_allocate): Change to type-less pool_allocator.
(unshare_variable): Likewise.
(variable_merge_over_cur): Likewise.
(variable_from_dropped): Likewise.
(variable_was_changed): Likewise.
(set_slot_part): Likewise.
(emit_notes_for_differences_1): Likewise.
---
gcc/alloc-pool.h | 176 +++++++++++++++++++++++++--------------------
gcc/asan.c | 8 +--
gcc/c-family/c-format.c | 7 +-
gcc/cfg.c | 5 +-
gcc/cselib.c | 17 +++--
gcc/cselib.h | 12 ++--
gcc/df-problems.c | 2 +-
gcc/df-scan.c | 24 +++----
gcc/df.h | 2 +-
gcc/dse.c | 50 +++++++------
gcc/et-forest.c | 11 ++-
gcc/et-forest.h | 6 +-
gcc/ipa-cp.c | 8 +--
gcc/ipa-inline-analysis.c | 2 +-
gcc/ipa-profile.c | 2 +-
gcc/ipa-prop.c | 2 +-
gcc/ipa-prop.h | 8 +--
gcc/ira-build.c | 18 ++---
gcc/ira-color.c | 8 +--
gcc/lra-int.h | 19 +++--
gcc/lra-lives.c | 3 +-
gcc/lra.c | 4 +-
gcc/regcprop.c | 4 +-
gcc/sched-deps.c | 8 +--
gcc/sel-sched-ir.c | 2 +-
gcc/sel-sched-ir.h | 2 +-
gcc/stmt.c | 7 +-
gcc/tree-sra.c | 16 ++---
gcc/tree-ssa-math-opts.c | 4 +-
gcc/tree-ssa-pre.c | 4 +-
gcc/tree-ssa-reassoc.c | 2 +-
gcc/tree-ssa-sccvn.c | 10 +--
gcc/tree-ssa-strlen.c | 3 +-
gcc/tree-ssa-structalias.c | 4 +-
gcc/var-tracking.c | 47 ++++++------
35 files changed, 270 insertions(+), 237 deletions(-)
@@ -25,6 +25,9 @@ extern void dump_alloc_pool_statistics (void);
typedef unsigned long ALLOC_POOL_ID_TYPE;
+/* Last used ID. */
+extern ALLOC_POOL_ID_TYPE last_id;
+
/* Pool allocator memory usage. */
struct pool_usage: public mem_usage
{
@@ -92,21 +95,18 @@ struct pool_usage: public mem_usage
extern mem_alloc_description<pool_usage> pool_allocator_usage;
-/* Type based memory pool allocator. */
-template <typename T>
+/* Generic pool allocator. */
class pool_allocator
{
public:
/* Default constructor for pool allocator called NAME. Each block
- has NUM elements. The allocator support EXTRA_SIZE and can
- potentially IGNORE_TYPE_SIZE. */
- pool_allocator (const char *name, size_t num, size_t extra_size = 0,
- bool ignore_type_size = false CXX_MEM_STAT_INFO);
+ has NUM elements. */
+ pool_allocator (const char *name, size_t num, size_t size CXX_MEM_STAT_INFO);
~pool_allocator ();
void release ();
void release_if_empty ();
- T *allocate () ATTRIBUTE_MALLOC;
- void remove (T *object);
+ void *allocate () ATTRIBUTE_MALLOC;
+ void remove (void *object);
private:
struct allocation_pool_list
@@ -117,7 +117,6 @@ private:
/* Initialize a pool allocator. */
void initialize ();
- template <typename U>
struct allocation_object
{
/* The ID of alloc pool which the object was allocated from. */
@@ -136,18 +135,18 @@ private:
int64_t align_i;
} u;
- static inline allocation_object<U> *
+ static inline allocation_object*
get_instance (void *data_ptr)
{
- return (allocation_object<U> *)(((char *)(data_ptr))
- - offsetof (allocation_object<U>,
+ return (allocation_object *)(((char *)(data_ptr))
+ - offsetof (allocation_object,
u.data));
}
- static inline U *
+ static inline void*
get_data (void *instance_ptr)
{
- return (U*)(((allocation_object<U> *) instance_ptr)->u.data);
+ return (void*)(((allocation_object *) instance_ptr)->u.data);
}
};
@@ -185,66 +184,33 @@ private:
size_t m_block_size;
/* Size of a pool elements in bytes. */
size_t m_elt_size;
- /* Flag if we shoul ignore size of a type. */
- bool m_ignore_type_size;
- /* Extra size in bytes that should be allocated for each element. */
- size_t m_extra_size;
+ /* Size in bytes that should be allocated for each element. */
+ size_t m_size;
/* Flag if a pool allocator is initialized. */
bool m_initialized;
/* Memory allocation location. */
mem_location m_location;
};
-/* Last used ID. */
-extern ALLOC_POOL_ID_TYPE last_id;
-
-/* Store information about each particular alloc_pool. Note that this
- will underestimate the amount the amount of storage used by a small amount:
- 1) The overhead in a pool is not accounted for.
- 2) The unallocated elements in a block are not accounted for. Note
- that this can at worst case be one element smaller that the block
- size for that pool. */
-struct alloc_pool_descriptor
-{
- /* Number of pools allocated. */
- unsigned long created;
- /* Gross allocated storage. */
- unsigned long allocated;
- /* Amount of currently active storage. */
- unsigned long current;
- /* Peak amount of storage used. */
- unsigned long peak;
- /* Size of element in the pool. */
- int elt_size;
-};
-
-
-/* Hashtable mapping alloc_pool names to descriptors. */
-extern hash_map<const char *, alloc_pool_descriptor> *alloc_pool_hash;
-
-template <typename T>
inline
-pool_allocator<T>::pool_allocator (const char *name, size_t num,
- size_t extra_size, bool ignore_type_size
- MEM_STAT_DECL):
+pool_allocator::pool_allocator (const char *name, size_t num,
+ size_t size MEM_STAT_DECL):
m_name (name), m_id (0), m_elts_per_block (num), m_returned_free_list (NULL),
m_virgin_free_list (NULL), m_virgin_elts_remaining (0), m_elts_allocated (0),
m_elts_free (0), m_blocks_allocated (0), m_block_list (NULL),
- m_block_size (0), m_ignore_type_size (ignore_type_size),
- m_extra_size (extra_size), m_initialized (false),
+ m_block_size (0), m_size (size), m_initialized (false),
m_location (ALLOC_POOL_ORIGIN, false PASS_MEM_STAT) {}
/* Initialize a pool allocator. */
-template <typename T>
-void
-pool_allocator<T>::initialize ()
+inline void
+pool_allocator::initialize ()
{
gcc_checking_assert (!m_initialized);
m_initialized = true;
size_t header_size;
- size_t size = (m_ignore_type_size ? 0 : sizeof (T)) + m_extra_size;
+ size_t size = m_size;
gcc_checking_assert (m_name);
@@ -256,7 +222,7 @@ pool_allocator<T>::initialize ()
size = align_eight (size);
/* Add the aligned size of ID. */
- size += offsetof (allocation_object<T>, u.data);
+ size += offsetof (allocation_object, u.data);
/* Um, we can't really allocate 0 elements per block. */
gcc_checking_assert (m_elts_per_block);
@@ -289,9 +255,8 @@ pool_allocator<T>::initialize ()
}
/* Free all memory allocated for the given memory pool. */
-template <typename T>
inline void
-pool_allocator<T>::release ()
+pool_allocator::release ()
{
if (!m_initialized)
return;
@@ -320,24 +285,21 @@ pool_allocator<T>::release ()
m_block_list = NULL;
}
-template <typename T>
void
-inline pool_allocator<T>::release_if_empty ()
+inline pool_allocator::release_if_empty ()
{
if (m_elts_free == m_elts_allocated)
release ();
}
-template <typename T>
-inline pool_allocator<T>::~pool_allocator ()
+inline pool_allocator::~pool_allocator ()
{
release ();
}
/* Allocates one element from the pool specified. */
-template <typename T>
-inline T *
-pool_allocator<T>::allocate ()
+inline void*
+pool_allocator::allocate ()
{
if (!m_initialized)
initialize ();
@@ -353,7 +315,7 @@ pool_allocator<T>::allocate ()
}
#ifdef ENABLE_VALGRIND_ANNOTATIONS
- size = m_elt_size - offsetof (allocation_object<T>, u.data);
+ size = m_elt_size - offsetof (allocation_object, u.data);
#endif
/* If there are no more free elements, make some more!. */
@@ -387,11 +349,11 @@ pool_allocator<T>::allocate ()
/* We now know that we can take the first elt off the virgin list and
put it on the returned list. */
block = m_virgin_free_list;
- header = (allocation_pool_list*) allocation_object<T>::get_data (block);
+ header = (allocation_pool_list*) allocation_object::get_data (block);
header->next = NULL;
#ifdef ENABLE_CHECKING
/* Mark the element to be free. */
- ((allocation_object<T> *) block)->id = 0;
+ ((allocation_object*) block)->id = 0;
#endif
VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (header,size));
m_returned_free_list = header;
@@ -408,36 +370,34 @@ pool_allocator<T>::allocate ()
#ifdef ENABLE_CHECKING
/* Set the ID for element. */
- allocation_object<T>::get_instance (header)->id = m_id;
+ allocation_object::get_instance (header)->id = m_id;
#endif
VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (header, size));
- /* Call default constructor. */
- return (T *)(header);
+ return (void *)(header);
}
/* Puts PTR back on POOL's free list. */
-template <typename T>
-void
-pool_allocator<T>::remove (T *object)
+inline void
+pool_allocator::remove (void *object)
{
gcc_checking_assert (m_initialized);
allocation_pool_list *header;
int size ATTRIBUTE_UNUSED;
- size = m_elt_size - offsetof (allocation_object<T>, u.data);
+ size = m_elt_size - offsetof (allocation_object, u.data);
#ifdef ENABLE_CHECKING
gcc_assert (object
/* Check if we free more than we allocated, which is Bad (TM). */
&& m_elts_free < m_elts_allocated
/* Check whether the PTR was allocated from POOL. */
- && m_id == allocation_object<T>::get_instance (object)->id);
+ && m_id == allocation_object::get_instance (object)->id);
memset (object, 0xaf, size);
/* Mark the element to be free. */
- allocation_object<T>::get_instance (object)->id = 0;
+ allocation_object::get_instance (object)->id = 0;
#endif
header = (allocation_pool_list*) object;
@@ -452,4 +412,66 @@ pool_allocator<T>::remove (T *object)
}
}
+/* Type based memory pool allocator. */
+template <typename T>
+class object_allocator
+{
+public:
+ /* Default constructor for pool allocator called NAME. Each block
+ has NUM elements. */
+ object_allocator (const char *name, size_t num CXX_MEM_STAT_INFO):
+ m_allocator (name, num, sizeof (T) PASS_MEM_STAT) {}
+
+ inline void
+ release ()
+ {
+ m_allocator.release ();
+ }
+
+ inline void release_if_empty ()
+ {
+ m_allocator.release_if_empty ();
+ }
+
+ inline T *
+ allocate () ATTRIBUTE_MALLOC
+ {
+ return ::new (m_allocator.allocate ()) T ();
+ }
+
+ inline void
+ remove (T *object)
+ {
+ m_allocator.remove (object);
+ }
+
+private:
+ pool_allocator m_allocator;
+};
+
+/* Store information about each particular alloc_pool. Note that this
+ will underestimate the amount of storage used by a small amount:
+ 1) The overhead in a pool is not accounted for.
+ 2) The unallocated elements in a block are not accounted for. Note
+ that this can at worst case be one element smaller than the block
+ size for that pool. */
+struct alloc_pool_descriptor
+{
+ /* Number of pools allocated. */
+ unsigned long created;
+ /* Gross allocated storage. */
+ unsigned long allocated;
+ /* Amount of currently active storage. */
+ unsigned long current;
+ /* Peak amount of storage used. */
+ unsigned long peak;
+ /* Size of element in the pool. */
+ int elt_size;
+};
+
+
+/* Hashtable mapping alloc_pool names to descriptors. */
+extern hash_map<const char *, alloc_pool_descriptor> *alloc_pool_hash;
+
+
#endif
@@ -362,20 +362,20 @@ struct asan_mem_ref
/* Pool allocation new operator. */
inline void *operator new (size_t)
{
- return pool.allocate ();
+ return ::new (pool.allocate ()) asan_mem_ref ();
}
/* Delete operator utilizing pool allocation. */
inline void operator delete (void *ptr)
{
- pool.remove ((asan_mem_ref *) ptr);
+ pool.remove (ptr);
}
/* Memory allocation pool. */
- static pool_allocator<asan_mem_ref> pool;
+ static pool_allocator pool;
};
-pool_allocator<asan_mem_ref> asan_mem_ref::pool ("asan_mem_ref", 10);
+pool_allocator asan_mem_ref::pool ("asan_mem_ref", 10, sizeof (asan_mem_ref));
/* Initializes an instance of asan_mem_ref. */
@@ -1026,7 +1026,7 @@ static void check_format_info_main (format_check_results *,
function_format_info *,
const char *, int, tree,
unsigned HOST_WIDE_INT,
- pool_allocator<format_wanted_type> &);
+ object_allocator<format_wanted_type> &);
static void init_dollar_format_checking (int, tree);
static int maybe_read_dollar_number (const char **, int,
@@ -1688,7 +1688,8 @@ check_format_arg (void *ctx, tree format_tree,
will decrement it if it finds there are extra arguments, but this way
need not adjust it for every return. */
res->number_other++;
- pool_allocator <format_wanted_type> fwt_pool ("format_wanted_type pool", 10);
+ object_allocator <format_wanted_type> fwt_pool ("format_wanted_type pool",
+ 10);
check_format_info_main (res, info, format_chars, format_length,
params, arg_num, fwt_pool);
}
@@ -1706,7 +1707,7 @@ check_format_info_main (format_check_results *res,
function_format_info *info, const char *format_chars,
int format_length, tree params,
unsigned HOST_WIDE_INT arg_num,
- pool_allocator<format_wanted_type> &fwt_pool)
+ object_allocator <format_wanted_type> &fwt_pool)
{
const char *orig_format_chars = format_chars;
tree first_fillin_param = params;
@@ -1052,15 +1052,14 @@ static hash_table<bb_copy_hasher> *bb_copy;
/* And between loops and copies. */
static hash_table<bb_copy_hasher> *loop_copy;
-static pool_allocator<htab_bb_copy_original_entry> *original_copy_bb_pool;
+static object_allocator<htab_bb_copy_original_entry> *original_copy_bb_pool;
/* Initialize the data structures to maintain mapping between blocks
and its copies. */
void
initialize_original_copy_tables (void)
{
-
- original_copy_bb_pool = new pool_allocator<htab_bb_copy_original_entry>
+ original_copy_bb_pool = new object_allocator<htab_bb_copy_original_entry>
("original_copy", 10);
bb_original = new hash_table<bb_copy_hasher> (10);
bb_copy = new hash_table<bb_copy_hasher> (10);
@@ -54,17 +54,17 @@ struct elt_list
/* Pool allocation new operator. */
inline void *operator new (size_t)
{
- return pool.allocate ();
+ return ::new (pool.allocate ()) elt_list ();
}
/* Delete operator utilizing pool allocation. */
inline void operator delete (void *ptr)
{
- pool.remove ((elt_list *) ptr);
+ pool.remove (ptr);
}
/* Memory allocation pool. */
- static pool_allocator<elt_list> pool;
+ static pool_allocator pool;
};
static bool cselib_record_memory;
@@ -267,12 +267,11 @@ static unsigned int cfa_base_preserved_regno = INVALID_REGNUM;
each time memory is invalidated. */
static cselib_val *first_containing_mem = &dummy_val;
-pool_allocator<elt_list> elt_list::pool ("elt_list", 10);
-pool_allocator<elt_loc_list> elt_loc_list::pool ("elt_loc_list", 10);
-pool_allocator<cselib_val> cselib_val::pool ("cselib_val_list", 10);
+pool_allocator elt_list::pool ("elt_list", 10, sizeof (elt_list));
+pool_allocator elt_loc_list::pool ("elt_loc_list", 10, sizeof (elt_loc_list));
+pool_allocator cselib_val::pool ("cselib_val_list", 10, sizeof (cselib_val));
-static pool_allocator<rtx_def> value_pool ("value", 100, RTX_CODE_SIZE (VALUE),
- true);
+static pool_allocator value_pool ("value", 100, RTX_CODE_SIZE (VALUE));
/* If nonnull, cselib will call this function before freeing useless
VALUEs. A VALUE is deemed useless if its "locs" field is null. */
@@ -1329,7 +1328,7 @@ new_cselib_val (unsigned int hash, machine_mode mode, rtx x)
precisely when we can have VALUE RTXen (when cselib is active)
so we don't need to put them in garbage collected memory.
??? Why should a VALUE be an RTX in the first place? */
- e->val_rtx = value_pool.allocate ();
+ e->val_rtx = (rtx_def*) value_pool.allocate ();
memset (e->val_rtx, 0, RTX_HDR_SIZE);
PUT_CODE (e->val_rtx, VALUE);
PUT_MODE (e->val_rtx, mode);
@@ -45,17 +45,17 @@ struct cselib_val
/* Pool allocation new operator. */
inline void *operator new (size_t)
{
- return pool.allocate ();
+ return ::new (pool.allocate ()) cselib_val ();
}
/* Delete operator utilizing pool allocation. */
inline void operator delete (void *ptr)
{
- pool.remove ((cselib_val *) ptr);
+ pool.remove (ptr);
}
/* Memory allocation pool. */
- static pool_allocator<cselib_val> pool;
+ static pool_allocator pool;
};
/* A list of rtl expressions that hold the same value. */
@@ -70,17 +70,17 @@ struct elt_loc_list {
/* Pool allocation new operator. */
inline void *operator new (size_t)
{
- return pool.allocate ();
+ return ::new (pool.allocate ()) elt_loc_list ();
}
/* Delete operator utilizing pool allocation. */
inline void operator delete (void *ptr)
{
- pool.remove ((elt_loc_list *) ptr);
+ pool.remove (ptr);
}
/* Memory allocation pool. */
- static pool_allocator<elt_loc_list> pool;
+ static pool_allocator pool;
};
/* Describe a single set that is part of an insn. */
@@ -2005,7 +2005,7 @@ static void
df_chain_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
{
df_chain_remove_problem ();
- df_chain->block_pool = new pool_allocator<df_link> ("df_chain_block pool",
+ df_chain->block_pool = new object_allocator<df_link> ("df_chain_block pool",
50);
df_chain->optional_p = true;
}
@@ -154,12 +154,12 @@ static const unsigned int copy_all = copy_defs | copy_uses | copy_eq_uses
/* Problem data for the scanning dataflow function. */
struct df_scan_problem_data
{
- pool_allocator<df_base_ref> *ref_base_pool;
- pool_allocator<df_artificial_ref> *ref_artificial_pool;
- pool_allocator<df_regular_ref> *ref_regular_pool;
- pool_allocator<df_insn_info> *insn_pool;
- pool_allocator<df_reg_info> *reg_pool;
- pool_allocator<df_mw_hardreg> *mw_reg_pool;
+ object_allocator<df_base_ref> *ref_base_pool;
+ object_allocator<df_artificial_ref> *ref_artificial_pool;
+ object_allocator<df_regular_ref> *ref_regular_pool;
+ object_allocator<df_insn_info> *insn_pool;
+ object_allocator<df_reg_info> *reg_pool;
+ object_allocator<df_mw_hardreg> *mw_reg_pool;
bitmap_obstack reg_bitmaps;
bitmap_obstack insn_bitmaps;
@@ -268,17 +268,17 @@ df_scan_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
df_scan->problem_data = problem_data;
df_scan->computed = true;
- problem_data->ref_base_pool = new pool_allocator<df_base_ref>
+ problem_data->ref_base_pool = new object_allocator<df_base_ref>
("df_scan ref base", SCAN_PROBLEM_DATA_BLOCK_SIZE);
- problem_data->ref_artificial_pool = new pool_allocator<df_artificial_ref>
+ problem_data->ref_artificial_pool = new object_allocator<df_artificial_ref>
("df_scan ref artificial", SCAN_PROBLEM_DATA_BLOCK_SIZE);
- problem_data->ref_regular_pool = new pool_allocator<df_regular_ref>
+ problem_data->ref_regular_pool = new object_allocator<df_regular_ref>
("df_scan ref regular", SCAN_PROBLEM_DATA_BLOCK_SIZE);
- problem_data->insn_pool = new pool_allocator<df_insn_info>
+ problem_data->insn_pool = new object_allocator<df_insn_info>
("df_scan insn", SCAN_PROBLEM_DATA_BLOCK_SIZE);
- problem_data->reg_pool = new pool_allocator<df_reg_info>
+ problem_data->reg_pool = new object_allocator<df_reg_info>
("df_scan reg", SCAN_PROBLEM_DATA_BLOCK_SIZE);
- problem_data->mw_reg_pool = new pool_allocator<df_mw_hardreg>
+ problem_data->mw_reg_pool = new object_allocator<df_mw_hardreg>
("df_scan mw_reg", SCAN_PROBLEM_DATA_BLOCK_SIZE / 16);
bitmap_obstack_initialize (&problem_data->reg_bitmaps);
@@ -300,7 +300,7 @@ struct dataflow
unsigned int block_info_size;
/* The pool to allocate the block_info from. */
- pool_allocator<df_link> *block_pool;
+ object_allocator<df_link> *block_pool;
/* The lr and live problems have their transfer functions recomputed
only if necessary. This is possible for them because, the
@@ -316,10 +316,10 @@ lowpart_bitmask (int n)
}
typedef struct store_info *store_info_t;
-static pool_allocator<store_info> cse_store_info_pool ("cse_store_info_pool",
+static object_allocator<store_info> cse_store_info_pool ("cse_store_info_pool",
100);
-static pool_allocator<store_info> rtx_store_info_pool ("rtx_store_info_pool",
+static object_allocator<store_info> rtx_store_info_pool ("rtx_store_info_pool",
100);
/* This structure holds information about a load. These are only
@@ -346,21 +346,22 @@ struct read_info_type
/* Pool allocation new operator. */
inline void *operator new (size_t)
{
- return pool.allocate ();
+ return ::new (pool.allocate ()) read_info_type ();
}
/* Delete operator utilizing pool allocation. */
inline void operator delete (void *ptr)
{
- pool.remove ((read_info_type *) ptr);
+ pool.remove (ptr);
}
/* Memory allocation pool. */
- static pool_allocator<read_info_type> pool;
+ static pool_allocator pool;
};
typedef struct read_info_type *read_info_t;
-pool_allocator<read_info_type> read_info_type::pool ("read_info_pool", 100);
+pool_allocator read_info_type::pool ("read_info_pool", 100,
+ sizeof (read_info_type));
/* One of these records is created for each insn. */
@@ -450,21 +451,22 @@ struct insn_info_type
/* Pool allocation new operator. */
inline void *operator new (size_t)
{
- return pool.allocate ();
+ return ::new (pool.allocate ()) insn_info_type ();
}
/* Delete operator utilizing pool allocation. */
inline void operator delete (void *ptr)
{
- pool.remove ((insn_info_type *) ptr);
+ pool.remove (ptr);
}
/* Memory allocation pool. */
- static pool_allocator<insn_info_type> pool;
+ static pool_allocator pool;
};
typedef struct insn_info_type *insn_info_t;
-pool_allocator<insn_info_type> insn_info_type::pool ("insn_info_pool", 100);
+pool_allocator insn_info_type::pool ("insn_info_pool", 100,
+ sizeof (insn_info_type));
/* The linked list of stores that are under consideration in this
basic block. */
@@ -530,21 +532,22 @@ struct dse_bb_info_type
/* Pool allocation new operator. */
inline void *operator new (size_t)
{
- return pool.allocate ();
+ return ::new (pool.allocate ()) dse_bb_info_type ();
}
/* Delete operator utilizing pool allocation. */
inline void operator delete (void *ptr)
{
- pool.remove ((dse_bb_info_type *) ptr);
+ pool.remove (ptr);
}
/* Memory allocation pool. */
- static pool_allocator<dse_bb_info_type> pool;
+ static pool_allocator pool;
};
typedef struct dse_bb_info_type *bb_info_t;
-pool_allocator<dse_bb_info_type> dse_bb_info_type::pool ("bb_info_pool", 100);
+pool_allocator dse_bb_info_type::pool ("bb_info_pool", 100,
+ sizeof (dse_bb_info_type));
/* Table to hold all bb_infos. */
static bb_info_t *bb_table;
@@ -616,22 +619,23 @@ struct group_info
/* Pool allocation new operator. */
inline void *operator new (size_t)
{
- return pool.allocate ();
+ return ::new (pool.allocate ()) group_info ();
}
/* Delete operator utilizing pool allocation. */
inline void operator delete (void *ptr)
{
- pool.remove ((group_info *) ptr);
+ pool.remove (ptr);
}
/* Memory allocation pool. */
- static pool_allocator<group_info> pool;
+ static pool_allocator pool;
};
typedef struct group_info *group_info_t;
typedef const struct group_info *const_group_info_t;
-pool_allocator<group_info> group_info::pool ("rtx_group_info_pool", 100);
+pool_allocator group_info::pool ("rtx_group_info_pool", 100,
+ sizeof (group_info));
/* Index into the rtx_group_vec. */
static int rtx_group_next_id;
@@ -656,23 +660,23 @@ struct deferred_change
/* Pool allocation new operator. */
inline void *operator new (size_t)
{
- return pool.allocate ();
+ return ::new (pool.allocate ()) deferred_change ();
}
/* Delete operator utilizing pool allocation. */
inline void operator delete (void *ptr)
{
- pool.remove ((deferred_change *) ptr);
+ pool.remove (ptr);
}
/* Memory allocation pool. */
- static pool_allocator<deferred_change> pool;
+ static pool_allocator pool;
};
typedef struct deferred_change *deferred_change_t;
-pool_allocator<deferred_change> deferred_change::pool
- ("deferred_change_pool", 10);
+pool_allocator deferred_change::pool
+ ("deferred_change_pool", 10, sizeof (deferred_change));
static deferred_change_t deferred_change_list = NULL;
@@ -58,22 +58,21 @@ struct et_occ
/* Pool allocation new operator. */
inline void *operator new (size_t)
{
- return pool.allocate ();
+ return ::new (pool.allocate ()) et_occ ();
}
/* Delete operator utilizing pool allocation. */
inline void operator delete (void *ptr)
{
- pool.remove ((et_occ *) ptr);
+ pool.remove (ptr);
}
/* Memory allocation pool. */
- static pool_allocator<et_occ> pool;
-
+ static pool_allocator pool;
};
-pool_allocator<et_node> et_node::pool ("et_nodes pool", 300);
-pool_allocator<et_occ> et_occ::pool ("et_occ pool", 300);
+pool_allocator et_node::pool ("et_nodes pool", 300, sizeof (et_node));
+pool_allocator et_occ::pool ("et_occ pool", 300, sizeof (et_occ));
/* Changes depth of OCC to D. */
@@ -70,17 +70,17 @@ struct et_node
/* Pool allocation new operator. */
inline void *operator new (size_t)
{
- return pool.allocate ();
+ return ::new (pool.allocate ()) et_node ();
}
/* Delete operator utilizing pool allocation. */
inline void operator delete (void *ptr)
{
- pool.remove ((et_node *) ptr);
+ pool.remove (ptr);
}
/* Memory allocation pool. */
- static pool_allocator<et_node> pool;
+ static pool_allocator pool;
};
struct et_node *et_new_tree (void *data);
@@ -281,16 +281,16 @@ public:
/* Allocation pools for values and their sources in ipa-cp. */
-pool_allocator<ipcp_value<tree> > ipcp_cst_values_pool
+object_allocator<ipcp_value<tree> > ipcp_cst_values_pool
("IPA-CP constant values", 32);
-pool_allocator<ipcp_value<ipa_polymorphic_call_context> >
+object_allocator<ipcp_value<ipa_polymorphic_call_context> >
ipcp_poly_ctx_values_pool ("IPA-CP polymorphic contexts", 32);
-pool_allocator<ipcp_value_source<tree> > ipcp_sources_pool
+object_allocator<ipcp_value_source<tree> > ipcp_sources_pool
("IPA-CP value sources", 64);
-pool_allocator<ipcp_agg_lattice> ipcp_agg_lattice_pool
+object_allocator<ipcp_agg_lattice> ipcp_agg_lattice_pool
("IPA_CP aggregate lattices", 32);
/* Maximal count found in program. */
@@ -159,7 +159,7 @@ vec<inline_edge_summary_t> inline_edge_summary_vec;
vec<edge_growth_cache_entry> edge_growth_cache;
/* Edge predicates goes here. */
-static pool_allocator<predicate> edge_predicate_pool ("edge predicates", 10);
+static object_allocator<predicate> edge_predicate_pool ("edge predicates", 10);
/* Return true predicate (tautology).
We represent it by empty list of clauses. */
@@ -97,7 +97,7 @@ struct histogram_entry
duplicate entries. */
vec<histogram_entry *> histogram;
-static pool_allocator<histogram_entry> histogram_pool
+static object_allocator<histogram_entry> histogram_pool
("IPA histogram", 10);
/* Hashtable support for storing SSA names hashed by their SSA_NAME_VAR. */
@@ -163,7 +163,7 @@ struct ipa_cst_ref_desc
/* Allocation pool for reference descriptions. */
-static pool_allocator<ipa_cst_ref_desc> ipa_refdesc_pool
+static object_allocator<ipa_cst_ref_desc> ipa_refdesc_pool
("IPA-PROP ref descriptions", 32);
/* Return true if DECL_FUNCTION_SPECIFIC_OPTIMIZATION of the decl associated
@@ -598,18 +598,18 @@ void ipcp_verify_propagated_values (void);
template <typename value>
class ipcp_value;
-extern pool_allocator<ipcp_value<tree> > ipcp_cst_values_pool;
-extern pool_allocator<ipcp_value<ipa_polymorphic_call_context> >
+extern object_allocator<ipcp_value<tree> > ipcp_cst_values_pool;
+extern object_allocator<ipcp_value<ipa_polymorphic_call_context> >
ipcp_poly_ctx_values_pool;
template <typename valtype>
class ipcp_value_source;
-extern pool_allocator<ipcp_value_source<tree> > ipcp_sources_pool;
+extern object_allocator<ipcp_value_source<tree> > ipcp_sources_pool;
class ipcp_agg_lattice;
-extern pool_allocator<ipcp_agg_lattice> ipcp_agg_lattice_pool;
+extern object_allocator<ipcp_agg_lattice> ipcp_agg_lattice_pool;
/* Operation to be performed for the parameter in ipa_parm_adjustment
below. */
@@ -423,9 +423,9 @@ rebuild_regno_allocno_maps (void)
/* Pools for allocnos, allocno live ranges and objects. */
-static pool_allocator<live_range> live_range_pool ("live ranges", 100);
-static pool_allocator<ira_allocno> allocno_pool ("allocnos", 100);
-static pool_allocator<ira_object> object_pool ("objects", 100);
+static object_allocator<live_range> live_range_pool ("live ranges", 100);
+static object_allocator<ira_allocno> allocno_pool ("allocnos", 100);
+static object_allocator<ira_object> object_pool ("objects", 100);
/* Vec containing references to all created allocnos. It is a
container of array allocnos. */
@@ -1173,7 +1173,7 @@ finish_allocnos (void)
/* Pools for allocno preferences. */
-static pool_allocator <ira_allocno_pref> pref_pool ("prefs", 100);
+static object_allocator <ira_allocno_pref> pref_pool ("prefs", 100);
/* Vec containing references to all created preferences. It is a
container of array ira_prefs. */
@@ -1360,7 +1360,7 @@ finish_prefs (void)
/* Pools for copies. */
-static pool_allocator<ira_allocno_copy> copy_pool ("copies", 100);
+static object_allocator<ira_allocno_copy> copy_pool ("copies", 100);
/* Vec containing references to all created copies. It is a
container of array ira_copies. */
@@ -1619,7 +1619,7 @@ finish_copies (void)
/* Pools for cost vectors. It is defined only for allocno classes. */
-static pool_allocator<int> * cost_vector_pool[N_REG_CLASSES];
+static pool_allocator *cost_vector_pool[N_REG_CLASSES];
/* The function initiates work with hard register cost vectors. It
creates allocation pool for each allocno class. */
@@ -1632,9 +1632,9 @@ initiate_cost_vectors (void)
for (i = 0; i < ira_allocno_classes_num; i++)
{
aclass = ira_allocno_classes[i];
- cost_vector_pool[aclass] = new pool_allocator<int>
+ cost_vector_pool[aclass] = new pool_allocator
("cost vectors", 100,
- sizeof (int) * (ira_class_hard_regs_num[aclass] - 1));
+ sizeof (int) * (ira_class_hard_regs_num[aclass]));
}
}
@@ -1642,7 +1642,7 @@ initiate_cost_vectors (void)
int *
ira_allocate_cost_vector (reg_class_t aclass)
{
- return cost_vector_pool[(int) aclass]->allocate ();
+ return (int*) cost_vector_pool[(int) aclass]->allocate ();
}
/* Free a cost vector VEC for ACLASS. */
@@ -115,17 +115,17 @@ struct update_cost_record
/* Pool allocation new operator. */
inline void *operator new (size_t)
{
- return pool.allocate ();
+ return ::new (pool.allocate ()) update_cost_record ();
}
/* Delete operator utilizing pool allocation. */
inline void operator delete (void *ptr)
{
- pool.remove ((update_cost_record *) ptr);
+ pool.remove (ptr);
}
/* Memory allocation pool. */
- static pool_allocator<update_cost_record> pool;
+ static pool_allocator pool;
};
/* To decrease footprint of ira_allocno structure we store all data
@@ -1169,7 +1169,7 @@ setup_profitable_hard_regs (void)
allocnos. */
/* Pool for update cost records. */
-static pool_allocator<update_cost_record> update_cost_record_pool
+static object_allocator<update_cost_record> update_cost_record_pool
("update cost records", 100);
/* Return new update cost record with given params. */
@@ -58,17 +58,17 @@ struct lra_live_range
/* Pool allocation new operator. */
inline void *operator new (size_t)
{
- return pool.allocate ();
+ return ::new (pool.allocate ()) lra_live_range ();
}
/* Delete operator utilizing pool allocation. */
inline void operator delete (void *ptr)
{
- pool.remove ((lra_live_range *) ptr);
+ pool.remove (ptr);
}
/* Memory allocation pool. */
- static pool_allocator<lra_live_range> pool;
+ static pool_allocator pool;
};
typedef struct lra_copy *lra_copy_t;
@@ -88,18 +88,17 @@ struct lra_copy
/* Pool allocation new operator. */
inline void *operator new (size_t)
{
- return pool.allocate ();
+ return ::new (pool.allocate ()) lra_copy ();
}
/* Delete operator utilizing pool allocation. */
inline void operator delete (void *ptr)
{
- pool.remove ((lra_copy *) ptr);
+ pool.remove (ptr);
}
/* Memory allocation pool. */
- static pool_allocator<lra_copy> pool;
-
+ static pool_allocator pool;
};
/* Common info about a register (pseudo or hard register). */
@@ -211,17 +210,17 @@ struct lra_insn_reg
/* Pool allocation new operator. */
inline void *operator new (size_t)
{
- return pool.allocate ();
+ return ::new (pool.allocate ()) lra_insn_reg ();
}
/* Delete operator utilizing pool allocation. */
inline void operator delete (void *ptr)
{
- pool.remove ((lra_insn_reg *) ptr);
+ pool.remove (ptr);
}
/* Memory allocation pool. */
- static pool_allocator<lra_insn_reg> pool;
+ static pool_allocator pool;
};
/* Static part (common info for insns with the same ICODE) of LRA
@@ -110,7 +110,8 @@ static sparseset unused_set, dead_set;
static bitmap_head temp_bitmap;
/* Pool for pseudo live ranges. */
-pool_allocator <lra_live_range> lra_live_range::pool ("live ranges", 100);
+pool_allocator lra_live_range::pool ("live ranges", 100,
+ sizeof (lra_live_range));
/* Free live range list LR. */
static void
@@ -537,7 +537,7 @@ lra_update_dups (lra_insn_recog_data_t id, signed char *nops)
insns. */
/* Pools for insn reg info. */
-pool_allocator<lra_insn_reg> lra_insn_reg::pool ("insn regs", 100);
+pool_allocator lra_insn_reg::pool ("insn regs", 100, sizeof (lra_insn_reg));
/* Create LRA insn related info about a reference to REGNO in INSN with
TYPE (in/out/inout), biggest reference mode MODE, flag that it is
@@ -1281,7 +1281,7 @@ get_new_reg_value (void)
}
/* Pools for copies. */
-pool_allocator<lra_copy> lra_copy::pool ("lra copies", 100);
+pool_allocator lra_copy::pool ("lra copies", 100, sizeof (lra_copy));
/* Vec referring to pseudo copies. */
static vec<lra_copy_t> copy_vec;
@@ -72,7 +72,7 @@ struct queued_debug_insn_change
}
/* Memory allocation pool. */
- static pool_allocator<queued_debug_insn_change> pool;
+ static object_allocator<queued_debug_insn_change> pool;
};
/* For each register, we have a list of registers that contain the same
@@ -96,7 +96,7 @@ struct value_data
unsigned int n_debug_insn_changes;
};
-pool_allocator<queued_debug_insn_change> queued_debug_insn_change::pool
+object_allocator<queued_debug_insn_change> queued_debug_insn_change::pool
("debug insn changes pool", 256);
static bool skip_debug_insn_p;
@@ -327,7 +327,7 @@ dep_link_is_detached_p (dep_link_t link)
}
/* Pool to hold all dependency nodes (dep_node_t). */
-static pool_allocator<_dep_node> *dn_pool;
+static object_allocator<_dep_node> *dn_pool;
/* Number of dep_nodes out there. */
static int dn_pool_diff = 0;
@@ -368,7 +368,7 @@ delete_dep_node (dep_node_t n)
}
/* Pool to hold dependencies lists (deps_list_t). */
-static pool_allocator<_deps_list> *dl_pool;
+static object_allocator<_deps_list> *dl_pool;
/* Number of deps_lists out there. */
static int dl_pool_diff = 0;
@@ -4065,10 +4065,10 @@ sched_deps_init (bool global_p)
if (global_p)
{
- dl_pool = new pool_allocator<_deps_list> ("deps_list",
+ dl_pool = new object_allocator<_deps_list> ("deps_list",
/* Allocate lists for one block at a time. */
insns_in_block);
- dn_pool = new pool_allocator<_dep_node> ("dep_node",
+ dn_pool = new object_allocator<_dep_node> ("dep_node",
/* Allocate nodes for one block at a time.
We assume that average insn has
5 producers. */
@@ -62,7 +62,7 @@ vec<sel_region_bb_info_def>
sel_region_bb_info = vNULL;
/* A pool for allocating all lists. */
-pool_allocator<_list_node> sched_lists_pool ("sel-sched-lists", 500);
+object_allocator<_list_node> sched_lists_pool ("sel-sched-lists", 500);
/* This contains information about successors for compute_av_set. */
struct succs_info current_succs;
@@ -363,7 +363,7 @@ struct _list_node
/* _list_t functions.
All of _*list_* functions are used through accessor macros, thus
we can't move them in sel-sched-ir.c. */
-extern pool_allocator<_list_node> sched_lists_pool;
+extern object_allocator<_list_node> sched_lists_pool;
static inline _list_t
_list_alloc (void)
@@ -736,7 +736,8 @@ do_jump_if_equal (machine_mode mode, rtx op0, rtx op1, rtx_code_label *label,
static struct case_node *
add_case_node (struct case_node *head, tree low, tree high,
- tree label, int prob, pool_allocator<case_node> &case_node_pool)
+ tree label, int prob,
+ object_allocator<case_node> &case_node_pool)
{
struct case_node *r;
@@ -1148,7 +1149,7 @@ expand_case (gswitch *stmt)
struct case_node *case_list = 0;
/* A pool for case nodes. */
- pool_allocator<case_node> case_node_pool ("struct case_node pool", 100);
+ object_allocator<case_node> case_node_pool ("struct case_node pool", 100);
/* An ERROR_MARK occurs for various reasons including invalid data type.
??? Can this still happen, with GIMPLE and all? */
@@ -1324,7 +1325,7 @@ expand_sjlj_dispatch_table (rtx dispatch_index,
{
/* Similar to expand_case, but much simpler. */
struct case_node *case_list = 0;
- pool_allocator<case_node> case_node_pool ("struct sjlj_case pool",
+ object_allocator<case_node> case_node_pool ("struct sjlj_case pool",
ncases);
tree index_expr = make_tree (index_type, dispatch_index);
tree minval = build_int_cst (index_type, 0);
@@ -290,24 +290,24 @@ struct access
/* Pool allocation new operator. */
inline void *operator new (size_t)
{
- return pool.allocate ();
+ return ::new (pool.allocate ()) access ();
}
/* Delete operator utilizing pool allocation. */
inline void operator delete (void *ptr)
{
- pool.remove ((access *) ptr);
+ pool.remove (ptr);
}
/* Memory allocation pool. */
- static pool_allocator<access> pool;
+ static pool_allocator pool;
};
typedef struct access *access_p;
/* Alloc pool for allocating access structures. */
-pool_allocator<struct access> access::pool ("SRA accesses", 16);
+pool_allocator access::pool ("SRA accesses", 16, sizeof (access));
/* A structure linking lhs and rhs accesses from an aggregate assignment. They
are used to propagate subaccesses from rhs to lhs as long as they don't
@@ -320,21 +320,21 @@ struct assign_link
/* Pool allocation new operator. */
inline void *operator new (size_t)
{
- return pool.allocate ();
+ return ::new (pool.allocate ()) assign_link ();
}
/* Delete operator utilizing pool allocation. */
inline void operator delete (void *ptr)
{
- pool.remove ((assign_link *) ptr);
+ pool.remove (ptr);
}
/* Memory allocation pool. */
- static pool_allocator<assign_link> pool;
+ static pool_allocator pool;
};
/* Alloc pool for allocating assign link structures. */
-pool_allocator<assign_link> assign_link::pool ("SRA links", 16);
+pool_allocator assign_link::pool ("SRA links", 16, sizeof (assign_link));
/* Base (tree) -> Vector (vec<access_p> *) map. */
static hash_map<tree, auto_vec<access_p> > *base_access_vec;
@@ -217,7 +217,7 @@ static struct
static struct occurrence *occ_head;
/* Allocation pool for getting instances of "struct occurrence". */
-static pool_allocator<occurrence> *occ_pool;
+static object_allocator<occurrence> *occ_pool;
@@ -560,7 +560,7 @@ pass_cse_reciprocals::execute (function *fun)
basic_block bb;
tree arg;
- occ_pool = new pool_allocator<occurrence>
+ occ_pool = new object_allocator<occurrence>
("dominators for recip", n_basic_blocks_for_fn (fun) / 3 + 1);
memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
@@ -366,7 +366,7 @@ clear_expression_ids (void)
expressions.release ();
}
-static pool_allocator<pre_expr_d> pre_expr_pool ("pre_expr nodes", 30);
+static object_allocator<pre_expr_d> pre_expr_pool ("pre_expr nodes", 30);
/* Given an SSA_NAME NAME, get or create a pre_expr to represent it. */
@@ -505,7 +505,7 @@ static unsigned int get_expr_value_id (pre_expr);
/* We can add and remove elements and entries to and from sets
and hash tables, so we use alloc pools for them. */
-static pool_allocator<bitmap_set> bitmap_set_pool ("Bitmap sets", 30);
+static object_allocator<bitmap_set> bitmap_set_pool ("Bitmap sets", 30);
static bitmap_obstack grand_bitmap_obstack;
/* Set of blocks with statements that have had their EH properties changed. */
@@ -221,7 +221,7 @@ typedef struct operand_entry
unsigned int count;
} *operand_entry_t;
-static pool_allocator<operand_entry> operand_entry_pool ("operand entry pool",
+static object_allocator<operand_entry> operand_entry_pool ("operand entry pool",
30);
/* This is used to assign a unique ID to each struct operand_entry
@@ -280,8 +280,8 @@ typedef struct vn_tables_s
vn_phi_table_type *phis;
vn_reference_table_type *references;
struct obstack nary_obstack;
- pool_allocator<vn_phi_s> *phis_pool;
- pool_allocator<vn_reference_s> *references_pool;
+ object_allocator<vn_phi_s> *phis_pool;
+ object_allocator<vn_reference_s> *references_pool;
} *vn_tables_t;
@@ -4147,9 +4147,9 @@ allocate_vn_table (vn_tables_t table)
table->references = new vn_reference_table_type (23);
gcc_obstack_init (&table->nary_obstack);
- table->phis_pool = new pool_allocator<vn_phi_s> ("VN phis", 30);
- table->references_pool = new pool_allocator<vn_reference_s> ("VN references",
- 30);
+ table->phis_pool = new object_allocator<vn_phi_s> ("VN phis", 30);
+ table->references_pool = new object_allocator<vn_reference_s>
+ ("VN references", 30);
}
/* Free a value number table. */
@@ -128,7 +128,8 @@ typedef struct strinfo_struct
} *strinfo;
/* Pool for allocating strinfo_struct entries. */
-static pool_allocator<strinfo_struct> strinfo_pool ("strinfo_struct pool", 64);
+static object_allocator<strinfo_struct> strinfo_pool ("strinfo_struct pool",
+ 64);
/* Vector mapping positive string indexes to strinfo, for the
current basic block. The first pointer in the vector is special,
@@ -340,7 +340,7 @@ static varinfo_t lookup_vi_for_tree (tree);
static inline bool type_can_have_subvars (const_tree);
/* Pool of variable info structures. */
-static pool_allocator<variable_info> variable_info_pool
+static object_allocator<variable_info> variable_info_pool
("Variable info pool", 30);
/* Map varinfo to final pt_solution. */
@@ -541,7 +541,7 @@ struct constraint
/* List of constraints that we use to build the constraint graph from. */
static vec<constraint_t> constraints;
-static pool_allocator<constraint> constraint_pool ("Constraint pool", 30);
+static object_allocator<constraint> constraint_pool ("Constraint pool", 30);
/* The constraint graph is represented as an array of bitmaps
containing successor nodes. */
@@ -283,7 +283,7 @@ typedef struct attrs_def
}
/* Memory allocation pool. */
- static pool_allocator<attrs_def> pool;
+ static object_allocator<attrs_def> pool;
} *attrs;
/* Structure for chaining the locations. */
@@ -314,7 +314,7 @@ typedef struct location_chain_def
}
/* Memory allocation pool. */
- static pool_allocator<location_chain_def> pool;
+ static object_allocator<location_chain_def> pool;
} *location_chain;
/* A vector of loc_exp_dep holds the active dependencies of a one-part
@@ -346,7 +346,7 @@ typedef struct loc_exp_dep_s
}
/* Memory allocation pool. */
- static pool_allocator<loc_exp_dep_s> pool;
+ static object_allocator<loc_exp_dep_s> pool;
} loc_exp_dep;
@@ -600,7 +600,7 @@ typedef struct shared_hash_def
}
/* Memory allocation pool. */
- static pool_allocator<shared_hash_def> pool;
+ static object_allocator<shared_hash_def> pool;
} *shared_hash;
/* Structure holding the IN or OUT set for a basic block. */
@@ -645,28 +645,28 @@ typedef struct variable_tracking_info_def
} *variable_tracking_info;
/* Alloc pool for struct attrs_def. */
-pool_allocator<attrs_def> attrs_def::pool ("attrs_def pool", 1024);
+object_allocator<attrs_def> attrs_def::pool ("attrs_def pool", 1024);
/* Alloc pool for struct variable_def with MAX_VAR_PARTS entries. */
-static pool_allocator<variable_def> var_pool
- ("variable_def pool", 64,
+static pool_allocator var_pool
+ ("variable_def pool", 64, sizeof (variable_def) +
(MAX_VAR_PARTS - 1) * sizeof (((variable)NULL)->var_part[0]));
/* Alloc pool for struct variable_def with a single var_part entry. */
-static pool_allocator<variable_def> valvar_pool
- ("small variable_def pool", 256);
+static pool_allocator valvar_pool
+ ("small variable_def pool", 256, sizeof (variable_def));
/* Alloc pool for struct location_chain_def. */
-pool_allocator<location_chain_def> location_chain_def::pool
+object_allocator<location_chain_def> location_chain_def::pool
("location_chain_def pool", 1024);
/* Alloc pool for struct shared_hash_def. */
-pool_allocator<shared_hash_def> shared_hash_def::pool
+object_allocator<shared_hash_def> shared_hash_def::pool
("shared_hash_def pool", 256);
/* Alloc pool for struct loc_exp_dep_s for NOT_ONEPART variables. */
-pool_allocator<loc_exp_dep> loc_exp_dep::pool ("loc_exp_dep pool", 64);
+object_allocator<loc_exp_dep> loc_exp_dep::pool ("loc_exp_dep pool", 64);
/* Changed variables, notes will be emitted for them. */
static variable_table_type *changed_variables;
@@ -1427,12 +1427,19 @@ dv_onepart_p (decl_or_value dv)
}
/* Return the variable pool to be used for a dv of type ONEPART. */
-static inline pool_allocator <variable_def> &
+static inline pool_allocator &
onepart_pool (onepart_enum_t onepart)
{
return onepart ? valvar_pool : var_pool;
}
+/* Allocate a variable_def from the corresponding variable pool. */
+static inline variable_def *
+onepart_pool_allocate (onepart_enum_t onepart)
+{
+ return (variable_def*) onepart_pool (onepart).allocate ();
+}
+
/* Build a decl_or_value out of a decl. */
static inline decl_or_value
dv_from_decl (tree decl)
@@ -1787,7 +1794,7 @@ unshare_variable (dataflow_set *set, variable_def **slot, variable var,
variable new_var;
int i;
- new_var = onepart_pool (var->onepart).allocate ();
+ new_var = onepart_pool_allocate (var->onepart);
new_var->dv = var->dv;
new_var->refcount = 1;
var->refcount--;
@@ -4065,7 +4072,7 @@ variable_merge_over_cur (variable s1var, struct dfset_merge *dsm)
{
if (node)
{
- dvar = onepart_pool (onepart).allocate ();
+ dvar = onepart_pool_allocate (onepart);
dvar->dv = dv;
dvar->refcount = 1;
dvar->n_var_parts = 1;
@@ -4201,7 +4208,7 @@ variable_merge_over_cur (variable s1var, struct dfset_merge *dsm)
INSERT);
if (!*slot)
{
- variable var = onepart_pool (ONEPART_VALUE).allocate ();
+ variable var = onepart_pool_allocate (ONEPART_VALUE);
var->dv = dv;
var->refcount = 1;
var->n_var_parts = 1;
@@ -7350,7 +7357,7 @@ variable_from_dropped (decl_or_value dv, enum insert_option insert)
gcc_checking_assert (onepart == ONEPART_VALUE || onepart == ONEPART_DEXPR);
- empty_var = onepart_pool (onepart).allocate ();
+ empty_var = onepart_pool_allocate (onepart);
empty_var->dv = dv;
empty_var->refcount = 1;
empty_var->n_var_parts = 0;
@@ -7454,7 +7461,7 @@ variable_was_changed (variable var, dataflow_set *set)
if (!empty_var)
{
- empty_var = onepart_pool (onepart).allocate ();
+ empty_var = onepart_pool_allocate (onepart);
empty_var->dv = var->dv;
empty_var->refcount = 1;
empty_var->n_var_parts = 0;
@@ -7578,7 +7585,7 @@ set_slot_part (dataflow_set *set, rtx loc, variable_def **slot,
if (!var)
{
/* Create new variable information. */
- var = onepart_pool (onepart).allocate ();
+ var = onepart_pool_allocate (onepart);
var->dv = dv;
var->refcount = 1;
var->n_var_parts = 1;
@@ -9058,7 +9065,7 @@ emit_notes_for_differences_1 (variable_def **slot, variable_table_type *new_vars
if (!empty_var)
{
- empty_var = onepart_pool (old_var->onepart).allocate ();
+ empty_var = onepart_pool_allocate (old_var->onepart);
empty_var->dv = old_var->dv;
empty_var->refcount = 0;
empty_var->n_var_parts = 0;
--
2.1.4